-rw-r--r--.get_maintainer.ignore1
-rw-r--r--.mailmap6
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio6
-rw-r--r--Documentation/DocBook/drm.tmpl2
-rw-r--r--Documentation/arm/SPEAr/overview.txt2
-rw-r--r--Documentation/device-mapper/cache.txt6
-rw-r--r--Documentation/device-mapper/thin-provisioning.txt9
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.txt1
-rw-r--r--Documentation/devicetree/bindings/dma/apm-xgene-dma.txt2
-rw-r--r--Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt26
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt3
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt2
-rw-r--r--Documentation/devicetree/bindings/phy/ti-phy.txt16
-rw-r--r--Documentation/devicetree/bindings/sound/mt8173-max98090.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt2
-rw-r--r--Documentation/devicetree/bindings/spi/spi-ath79.txt6
-rw-r--r--Documentation/hwmon/nct79044
-rw-r--r--Documentation/input/alps.txt6
-rw-r--r--Documentation/kbuild/makefiles.txt8
-rwxr-xr-xDocumentation/target/tcm_mod_builder.py21
-rw-r--r--MAINTAINERS141
-rw-r--r--Makefile20
-rw-r--r--arch/Kconfig4
-rw-r--r--arch/alpha/include/asm/Kbuild1
-rw-r--r--arch/alpha/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/arc/Kconfig16
-rw-r--r--arch/arc/Makefile13
-rw-r--r--arch/arc/boot/dts/axc003.dtsi2
-rw-r--r--arch/arc/boot/dts/axc003_idu.dtsi2
-rw-r--r--arch/arc/include/asm/Kbuild1
-rw-r--r--arch/arc/include/asm/arcregs.h7
-rw-r--r--arch/arc/include/asm/atomic.h78
-rw-r--r--arch/arc/include/asm/bitops.h35
-rw-r--r--arch/arc/include/asm/futex.h48
-rw-r--r--arch/arc/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/arc/include/asm/ptrace.h52
-rw-r--r--arch/arc/include/asm/spinlock.h538
-rw-r--r--arch/arc/include/asm/spinlock_types.h2
-rw-r--r--arch/arc/include/uapi/asm/ptrace.h20
-rw-r--r--arch/arc/kernel/intc-arcv2.c1
-rw-r--r--arch/arc/kernel/intc-compact.c1
-rw-r--r--arch/arc/kernel/mcip.c23
-rw-r--r--arch/arc/kernel/setup.c27
-rw-r--r--arch/arc/kernel/time.c40
-rw-r--r--arch/arc/kernel/troubleshoot.c1
-rw-r--r--arch/arc/lib/memcpy-archs.S2
-rw-r--r--arch/arc/lib/memset-archs.S43
-rw-r--r--arch/arc/mm/cache.c12
-rw-r--r--arch/arc/mm/dma.c4
-rw-r--r--arch/arc/plat-axs10x/axs10x.c15
-rw-r--r--arch/arm/Makefile3
-rw-r--r--arch/arm/boot/dts/am335x-pepper.dts16
-rw-r--r--arch/arm/boot/dts/cros-ec-keyboard.dtsi4
-rw-r--r--arch/arm/boot/dts/dra7-evm.dts5
-rw-r--r--arch/arm/boot/dts/dra7.dtsi3
-rw-r--r--arch/arm/boot/dts/dra72-evm.dts5
-rw-r--r--arch/arm/boot/dts/exynos3250.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos4210-origen.dts4
-rw-r--r--arch/arm/boot/dts/exynos4210-trats.dts4
-rw-r--r--arch/arm/boot/dts/exynos4210-universal_c210.dts4
-rw-r--r--arch/arm/boot/dts/exynos4210.dtsi12
-rw-r--r--arch/arm/boot/dts/imx23.dtsi1
-rw-r--r--arch/arm/boot/dts/imx25-pdk.dts5
-rw-r--r--arch/arm/boot/dts/imx27.dtsi12
-rw-r--r--arch/arm/boot/dts/imx35.dtsi8
-rw-r--r--arch/arm/boot/dts/imx51-apf51dev.dts2
-rw-r--r--arch/arm/boot/dts/imx53-ard.dts4
-rw-r--r--arch/arm/boot/dts/imx53-m53evk.dts4
-rw-r--r--arch/arm/boot/dts/imx53-qsb-common.dtsi9
-rw-r--r--arch/arm/boot/dts/imx53-smd.dts4
-rw-r--r--arch/arm/boot/dts/imx53-tqma53.dtsi4
-rw-r--r--arch/arm/boot/dts/imx53-tx53.dtsi4
-rw-r--r--arch/arm/boot/dts/imx53-voipac-bsb.dts4
-rw-r--r--arch/arm/boot/dts/imx6dl-riotboard.dts8
-rw-r--r--arch/arm/boot/dts/imx6q-arm2.dts5
-rw-r--r--arch/arm/boot/dts/imx6q-gk802.dts3
-rw-r--r--arch/arm/boot/dts/imx6q-tbs2910.dts4
-rw-r--r--arch/arm/boot/dts/imx6qdl-aristainetos.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-cubox-i.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw52xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw53xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw54xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-hummingboard.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi8
-rw-r--r--arch/arm/boot/dts/imx6qdl-rex.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabreauto.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabrelite.dtsi6
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabresd.dtsi8
-rw-r--r--arch/arm/boot/dts/imx6qdl-tx6.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-wandboard.dtsi6
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi8
-rw-r--r--arch/arm/boot/dts/imx6sl-evk.dts10
-rw-r--r--arch/arm/boot/dts/imx6sx-sabreauto.dts4
-rw-r--r--arch/arm/boot/dts/imx6sx-sdb.dtsi4
-rw-r--r--arch/arm/boot/dts/imx7d-sdb.dts4
-rw-r--r--arch/arm/boot/dts/k2e-clocks.dtsi5
-rw-r--r--arch/arm/boot/dts/k2e.dtsi18
-rw-r--r--arch/arm/boot/dts/k2hk-clocks.dtsi5
-rw-r--r--arch/arm/boot/dts/k2hk.dtsi11
-rw-r--r--arch/arm/boot/dts/k2l-clocks.dtsi5
-rw-r--r--arch/arm/boot/dts/k2l.dtsi16
-rw-r--r--arch/arm/boot/dts/keystone.dtsi14
-rw-r--r--arch/arm/boot/dts/omap2430.dtsi3
-rw-r--r--arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi2
-rw-r--r--arch/arm/boot/dts/omap4.dtsi5
-rw-r--r--arch/arm/boot/dts/omap5.dtsi5
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_sockit.dts26
-rw-r--r--arch/arm/boot/dts/spear1310-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear1310.dtsi2
-rw-r--r--arch/arm/boot/dts/spear1340-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear1340.dtsi2
-rw-r--r--arch/arm/boot/dts/spear13xx.dtsi2
-rw-r--r--arch/arm/boot/dts/spear300-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear300.dtsi2
-rw-r--r--arch/arm/boot/dts/spear310-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear310.dtsi2
-rw-r--r--arch/arm/boot/dts/spear320-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear320.dtsi2
-rw-r--r--arch/arm/boot/dts/spear3xx.dtsi2
-rw-r--r--arch/arm/boot/dts/ste-ccu8540.dts7
-rw-r--r--arch/arm/boot/dts/ste-ccu9540.dts7
-rw-r--r--arch/arm/boot/dts/ste-dbx5x0.dtsi59
-rw-r--r--arch/arm/boot/dts/ste-href.dtsi2
-rw-r--r--arch/arm/boot/dts/ste-hrefprev60-stuib.dts7
-rw-r--r--arch/arm/boot/dts/ste-hrefprev60-tvk.dts7
-rw-r--r--arch/arm/boot/dts/ste-hrefprev60.dtsi5
-rw-r--r--arch/arm/boot/dts/ste-hrefv60plus-stuib.dts7
-rw-r--r--arch/arm/boot/dts/ste-hrefv60plus-tvk.dts7
-rw-r--r--arch/arm/boot/dts/ste-hrefv60plus.dtsi25
-rw-r--r--arch/arm/boot/dts/ste-nomadik-nhk15.dts1
-rw-r--r--arch/arm/boot/dts/ste-nomadik-s8815.dts4
-rw-r--r--arch/arm/boot/dts/ste-nomadik-stn8815.dtsi1
-rw-r--r--arch/arm/boot/dts/ste-snowball.dts25
-rw-r--r--arch/arm/include/asm/Kbuild1
-rw-r--r--arch/arm/include/asm/memory.h2
-rw-r--r--arch/arm/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/arm/kernel/entry-common.S1
-rw-r--r--arch/arm/kernel/head.S3
-rw-r--r--arch/arm/kernel/perf_event.c3
-rw-r--r--arch/arm/kernel/reboot.c2
-rw-r--r--arch/arm/kernel/vdso.c7
-rw-r--r--arch/arm/lib/uaccess_with_memcpy.c2
-rw-r--r--arch/arm/mach-exynos/pm_domains.c3
-rw-r--r--arch/arm/mach-imx/gpc.c27
-rw-r--r--arch/arm/mach-omap2/Kconfig1
-rw-r--r--arch/arm/mach-omap2/omap-wakeupgen.c1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c24
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c5
-rw-r--r--arch/arm/mach-pxa/capc7117.c3
-rw-r--r--arch/arm/mach-pxa/cm-x2xx.c3
-rw-r--r--arch/arm/mach-pxa/cm-x300.c2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa270.c3
-rw-r--r--arch/arm/mach-pxa/em-x270.c2
-rw-r--r--arch/arm/mach-pxa/icontrol.c3
-rw-r--r--arch/arm/mach-pxa/trizeps4.c3
-rw-r--r--arch/arm/mach-pxa/vpac270.c3
-rw-r--r--arch/arm/mach-pxa/zeus.c2
-rw-r--r--arch/arm/mach-spear/generic.h2
-rw-r--r--arch/arm/mach-spear/include/mach/irqs.h2
-rw-r--r--arch/arm/mach-spear/include/mach/misc_regs.h2
-rw-r--r--arch/arm/mach-spear/include/mach/spear.h2
-rw-r--r--arch/arm/mach-spear/include/mach/uncompress.h2
-rw-r--r--arch/arm/mach-spear/pl080.c2
-rw-r--r--arch/arm/mach-spear/pl080.h2
-rw-r--r--arch/arm/mach-spear/restart.c2
-rw-r--r--arch/arm/mach-spear/spear1310.c2
-rw-r--r--arch/arm/mach-spear/spear1340.c2
-rw-r--r--arch/arm/mach-spear/spear13xx.c2
-rw-r--r--arch/arm/mach-spear/spear300.c2
-rw-r--r--arch/arm/mach-spear/spear310.c2
-rw-r--r--arch/arm/mach-spear/spear320.c2
-rw-r--r--arch/arm/mach-spear/spear3xx.c2
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm/mm/proc-v7.S14
-rw-r--r--arch/arm/net/bpf_jit_32.c57
-rw-r--r--arch/arm/vdso/Makefile2
-rw-r--r--arch/arm64/boot/dts/apm/apm-storm.dtsi2
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/arm64/kernel/efi.c4
-rw-r--r--arch/arm64/kernel/entry.S5
-rw-r--r--arch/arm64/kernel/irq.c4
-rw-r--r--arch/arm64/kernel/signal32.c5
-rw-r--r--arch/arm64/kernel/vdso.c7
-rw-r--r--arch/avr32/include/asm/Kbuild1
-rw-r--r--arch/avr32/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/avr32/kernel/time.c65
-rw-r--r--arch/avr32/mach-at32ap/clock.c20
-rw-r--r--arch/blackfin/include/asm/Kbuild1
-rw-r--r--arch/blackfin/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/c6x/include/asm/Kbuild1
-rw-r--r--arch/c6x/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/cris/include/asm/Kbuild1
-rw-r--r--arch/cris/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/frv/include/asm/Kbuild1
-rw-r--r--arch/frv/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/h8300/include/asm/Kbuild1
-rw-r--r--arch/hexagon/include/asm/Kbuild1
-rw-r--r--arch/hexagon/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/ia64/include/asm/Kbuild1
-rw-r--r--arch/ia64/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/m32r/include/asm/Kbuild1
-rw-r--r--arch/m32r/include/asm/io.h5
-rw-r--r--arch/m32r/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/m68k/Kconfig.cpu49
-rw-r--r--arch/m68k/configs/m5208evb_defconfig22
-rw-r--r--arch/m68k/configs/m5249evb_defconfig17
-rw-r--r--arch/m68k/configs/m5272c3_defconfig14
-rw-r--r--arch/m68k/configs/m5275evb_defconfig19
-rw-r--r--arch/m68k/configs/m5307c3_defconfig21
-rw-r--r--arch/m68k/configs/m5407c3_defconfig17
-rw-r--r--arch/m68k/configs/m5475evb_defconfig9
-rw-r--r--arch/m68k/include/asm/Kbuild1
-rw-r--r--arch/m68k/include/asm/coldfire.h2
-rw-r--r--arch/m68k/include/asm/io_mm.h3
-rw-r--r--arch/m68k/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/metag/include/asm/Kbuild1
-rw-r--r--arch/metag/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/microblaze/include/asm/Kbuild1
-rw-r--r--arch/microblaze/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/mips/Kconfig7
-rw-r--r--arch/mips/Makefile7
-rw-r--r--arch/mips/ath79/setup.c1
-rw-r--r--arch/mips/cavium-octeon/smp.c2
-rw-r--r--arch/mips/include/asm/Kbuild1
-rw-r--r--arch/mips/include/asm/fpu.h2
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/dma-coherence.h10
-rw-r--r--arch/mips/include/asm/mach-sibyte/war.h3
-rw-r--r--arch/mips/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/mips/include/asm/pgtable.h31
-rw-r--r--arch/mips/include/asm/smp.h2
-rw-r--r--arch/mips/include/asm/stackframe.h25
-rw-r--r--arch/mips/include/uapi/asm/sigcontext.h4
-rw-r--r--arch/mips/kernel/asm-offsets.c2
-rw-r--r--arch/mips/kernel/genex.S2
-rw-r--r--arch/mips/kernel/mips-mt-fpaff.c5
-rw-r--r--arch/mips/kernel/prom.c2
-rw-r--r--arch/mips/kernel/relocate_kernel.S8
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/signal32.c2
-rw-r--r--arch/mips/kernel/smp-bmips.c4
-rw-r--r--arch/mips/kernel/smp.c10
-rw-r--r--arch/mips/kernel/traps.c13
-rw-r--r--arch/mips/kernel/unaligned.c2
-rw-r--r--arch/mips/lantiq/irq.c3
-rw-r--r--arch/mips/loongson64/loongson-3/smp.c7
-rw-r--r--arch/mips/mm/cache.c8
-rw-r--r--arch/mips/mm/fault.c3
-rw-r--r--arch/mips/mti-malta/malta-int.c2
-rw-r--r--arch/mips/mti-malta/malta-time.c16
-rw-r--r--arch/mips/mti-sead3/sead3-time.c1
-rw-r--r--arch/mips/netlogic/common/smp.c2
-rw-r--r--arch/mips/paravirt/paravirt-smp.c2
-rw-r--r--arch/mips/pistachio/time.c1
-rw-r--r--arch/mips/pmcs-msp71xx/msp_smp.c2
-rw-r--r--arch/mips/ralink/irq.c1
-rw-r--r--arch/mips/sgi-ip27/ip27-irq.c8
-rw-r--r--arch/mips/sibyte/Kconfig5
-rw-r--r--arch/mips/sibyte/bcm1480/smp.c9
-rw-r--r--arch/mips/sibyte/common/bus_watcher.c5
-rw-r--r--arch/mips/sibyte/sb1250/setup.c2
-rw-r--r--arch/mips/sibyte/sb1250/smp.c7
-rw-r--r--arch/mn10300/include/asm/Kbuild1
-rw-r--r--arch/mn10300/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/nios2/include/asm/Kbuild1
-rw-r--r--arch/nios2/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/openrisc/Kconfig4
-rw-r--r--arch/openrisc/include/asm/Kbuild1
-rw-r--r--arch/openrisc/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/parisc/include/asm/Kbuild1
-rw-r--r--arch/parisc/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/parisc/include/asm/pgalloc.h3
-rw-r--r--arch/powerpc/kernel/signal_32.c2
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c11
-rw-r--r--arch/s390/include/asm/Kbuild1
-rw-r--r--arch/s390/include/asm/ctl_reg.h5
-rw-r--r--arch/s390/include/asm/hugetlb.h1
-rw-r--r--arch/s390/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/s390/include/asm/page.h8
-rw-r--r--arch/s390/include/asm/perf_event.h8
-rw-r--r--arch/s390/kernel/asm-offsets.c15
-rw-r--r--arch/s390/kernel/cache.c2
-rw-r--r--arch/s390/kernel/entry.S13
-rw-r--r--arch/s390/kernel/nmi.c51
-rw-r--r--arch/s390/kernel/process.c2
-rw-r--r--arch/s390/kernel/sclp.S4
-rw-r--r--arch/s390/kernel/setup.c2
-rw-r--r--arch/s390/kernel/traps.c4
-rw-r--r--arch/s390/kvm/kvm-s390.c4
-rw-r--r--arch/s390/mm/pgtable.c2
-rw-r--r--arch/s390/net/bpf_jit_comp.c14
-rw-r--r--arch/s390/oprofile/init.c1
-rw-r--r--arch/score/include/asm/Kbuild1
-rw-r--r--arch/score/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/sh/include/asm/Kbuild1
-rw-r--r--arch/sh/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/sparc/include/asm/visasm.h16
-rw-r--r--arch/sparc/lib/NG4memcpy.S5
-rw-r--r--arch/sparc/lib/VISsave.S67
-rw-r--r--arch/sparc/lib/ksyms.c4
-rw-r--r--arch/tile/include/asm/Kbuild1
-rw-r--r--arch/tile/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/tile/kernel/compat_signal.c2
-rw-r--r--arch/tile/kernel/setup.c2
-rw-r--r--arch/um/include/asm/Kbuild1
-rw-r--r--arch/um/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/unicore32/include/asm/Kbuild1
-rw-r--r--arch/unicore32/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/Kconfig.debug12
-rw-r--r--arch/x86/boot/compressed/eboot.c4
-rw-r--r--arch/x86/entry/entry_64.S299
-rw-r--r--arch/x86/entry/entry_64_compat.S17
-rw-r--r--arch/x86/include/asm/Kbuild1
-rw-r--r--arch/x86/include/asm/desc.h15
-rw-r--r--arch/x86/include/asm/fpu/types.h72
-rw-r--r--arch/x86/include/asm/intel_pmc_ipc.h27
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/x86/include/asm/mmu.h3
-rw-r--r--arch/x86/include/asm/mmu_context.h56
-rw-r--r--arch/x86/include/asm/processor.h10
-rw-r--r--arch/x86/include/asm/sigcontext.h6
-rw-r--r--arch/x86/include/asm/switch_to.h12
-rw-r--r--arch/x86/include/uapi/asm/hyperv.h2
-rw-r--r--arch/x86/include/uapi/asm/kvm.h4
-rw-r--r--arch/x86/include/uapi/asm/sigcontext.h21
-rw-r--r--arch/x86/kernel/apic/io_apic.c2
-rw-r--r--arch/x86/kernel/apic/vector.c2
-rw-r--r--arch/x86/kernel/cpu/common.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event.c12
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c23
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_cqm.c16
-rw-r--r--arch/x86/kernel/fpu/core.c2
-rw-r--r--arch/x86/kernel/fpu/init.c53
-rw-r--r--arch/x86/kernel/ldt.c262
-rw-r--r--arch/x86/kernel/nmi.c123
-rw-r--r--arch/x86/kernel/process.c4
-rw-r--r--arch/x86/kernel/process_64.c4
-rw-r--r--arch/x86/kernel/signal.c26
-rw-r--r--arch/x86/kernel/smpboot.c11
-rw-r--r--arch/x86/kernel/step.c8
-rw-r--r--arch/x86/kvm/cpuid.c2
-rw-r--r--arch/x86/kvm/iommu.c2
-rw-r--r--arch/x86/kvm/lapic.c2
-rw-r--r--arch/x86/kvm/mmu.c10
-rw-r--r--arch/x86/kvm/mtrr.c40
-rw-r--r--arch/x86/kvm/svm.c110
-rw-r--r--arch/x86/kvm/vmx.c16
-rw-r--r--arch/x86/kvm/x86.c33
-rw-r--r--arch/x86/kvm/x86.h5
-rw-r--r--arch/x86/math-emu/fpu_entry.c3
-rw-r--r--arch/x86/math-emu/fpu_system.h21
-rw-r--r--arch/x86/math-emu/get_address.c3
-rw-r--r--arch/x86/mm/ioremap.c23
-rw-r--r--arch/x86/mm/mmap.c7
-rw-r--r--arch/x86/mm/mpx.c24
-rw-r--r--arch/x86/mm/tlb.c2
-rw-r--r--arch/x86/net/bpf_jit_comp.c8
-rw-r--r--arch/x86/platform/efi/efi.c5
-rw-r--r--arch/x86/power/cpu.c3
-rw-r--r--arch/x86/xen/Kconfig4
-rw-r--r--arch/x86/xen/Makefile4
-rw-r--r--arch/x86/xen/enlighten.c40
-rw-r--r--arch/x86/xen/xen-ops.h6
-rw-r--r--arch/xtensa/include/asm/Kbuild1
-rw-r--r--arch/xtensa/include/asm/mm-arch-hooks.h15
-rw-r--r--block/bio-integrity.c4
-rw-r--r--block/bio.c17
-rw-r--r--block/blk-cgroup.c146
-rw-r--r--block/blk-core.c2
-rw-r--r--block/blk-mq.c2
-rw-r--r--block/blk-settings.c4
-rw-r--r--crypto/authencesn.c44
-rw-r--r--drivers/acpi/device_pm.c2
-rw-r--r--drivers/acpi/resource.c24
-rw-r--r--drivers/acpi/video_detect.c16
-rw-r--r--drivers/ata/ahci_brcmstb.c6
-rw-r--r--drivers/ata/libata-core.c45
-rw-r--r--drivers/ata/libata-eh.c105
-rw-r--r--drivers/ata/libata-pmp.c7
-rw-r--r--drivers/ata/libata-scsi.c24
-rw-r--r--drivers/ata/libata-transport.c2
-rw-r--r--drivers/ata/libata.h6
-rw-r--r--drivers/ata/pata_arasan_cf.c4
-rw-r--r--drivers/ata/sata_sx4.c16
-rw-r--r--drivers/base/regmap/regcache-rbtree.c19
-rw-r--r--drivers/block/null_blk.c18
-rw-r--r--drivers/block/nvme-core.c13
-rw-r--r--drivers/block/rbd.c22
-rw-r--r--drivers/block/xen-blkback/blkback.c4
-rw-r--r--drivers/block/xen-blkfront.c128
-rw-r--r--drivers/block/zram/zram_drv.c6
-rw-r--r--drivers/bluetooth/btbcm.c11
-rw-r--r--drivers/char/hw_random/core.c2
-rw-r--r--drivers/char/tpm/tpm-chip.c3
-rw-r--r--drivers/char/tpm/tpm_crb.c8
-rw-r--r--drivers/clk/pxa/clk-pxa3xx.c2
-rw-r--r--drivers/clk/spear/clk-aux-synth.c2
-rw-r--r--drivers/clk/spear/clk-frac-synth.c2
-rw-r--r--drivers/clk/spear/clk-gpt-synth.c2
-rw-r--r--drivers/clk/spear/clk-vco-pll.c2
-rw-r--r--drivers/clk/spear/clk.c2
-rw-r--r--drivers/clk/spear/clk.h2
-rw-r--r--drivers/clk/spear/spear1310_clock.c2
-rw-r--r--drivers/clk/spear/spear1340_clock.c2
-rw-r--r--drivers/clk/spear/spear3xx_clock.c2
-rw-r--r--drivers/clk/spear/spear6xx_clock.c2
-rw-r--r--drivers/clocksource/sh_cmt.c6
-rw-r--r--drivers/clocksource/timer-imx-gpt.c1
-rw-r--r--drivers/cpufreq/cpufreq.c118
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c6
-rw-r--r--drivers/cpufreq/freq_table.c9
-rw-r--r--drivers/cpufreq/intel_pstate.c1
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c2
-rw-r--r--drivers/cpuidle/cpuidle.c9
-rw-r--r--drivers/crypto/caam/caamhash.c7
-rw-r--r--drivers/crypto/ixp4xx_crypto.c1
-rw-r--r--drivers/crypto/nx/nx-aes-ccm.c6
-rw-r--r--drivers/crypto/nx/nx-aes-ctr.c7
-rw-r--r--drivers/crypto/nx/nx-aes-gcm.c17
-rw-r--r--drivers/crypto/nx/nx-aes-xcbc.c70
-rw-r--r--drivers/crypto/nx/nx-sha256.c70
-rw-r--r--drivers/crypto/nx/nx-sha512.c72
-rw-r--r--drivers/crypto/nx/nx.c3
-rw-r--r--drivers/crypto/nx/nx.h14
-rw-r--r--drivers/crypto/omap-des.c3
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c24
-rw-r--r--drivers/dma/at_hdmac.c132
-rw-r--r--drivers/dma/at_hdmac_regs.h3
-rw-r--r--drivers/dma/at_xdmac.c26
-rw-r--r--drivers/dma/dmaengine.c4
-rw-r--r--drivers/dma/dw/core.c2
-rw-r--r--drivers/dma/mv_xor.c9
-rw-r--r--drivers/dma/pl330.c3
-rw-r--r--drivers/dma/virt-dma.c19
-rw-r--r--drivers/dma/virt-dma.h13
-rw-r--r--drivers/dma/xgene-dma.c3
-rw-r--r--drivers/edac/ppc4xx_edac.c2
-rw-r--r--drivers/extcon/extcon-palmas.c13
-rw-r--r--drivers/extcon/extcon.c61
-rw-r--r--drivers/firmware/broadcom/bcm47xx_nvram.c2
-rw-r--r--drivers/firmware/efi/cper.c15
-rw-r--r--drivers/firmware/efi/efi.c5
-rw-r--r--drivers/gpio/gpio-brcmstb.c14
-rw-r--r--drivers/gpio/gpio-davinci.c6
-rw-r--r--drivers/gpio/gpio-max732x.c1
-rw-r--r--drivers/gpio/gpio-omap.c5
-rw-r--r--drivers/gpio/gpio-pca953x.c4
-rw-r--r--drivers/gpio/gpio-xilinx.c4
-rw-r--r--drivers/gpio/gpio-zynq.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c86
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c35
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c2
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c5
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c121
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c1
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c14
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c10
-rw-r--r--drivers/gpu/drm/drm_crtc.c12
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c20
-rw-r--r--drivers/gpu/drm/drm_ioc32.c60
-rw-r--r--drivers/gpu/drm/drm_irq.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c21
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h23
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c35
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c20
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c17
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c29
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c2
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c13
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h2
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c45
-rw-r--r--drivers/gpu/drm/i915/intel_display.c67
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c35
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c11
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h7
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c26
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c2
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c21
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c13
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c33
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c87
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h1
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c8
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c13
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h4
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c8
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c3
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c62
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c204
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c1
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c67
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c49
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c4
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c3
-rw-r--r--drivers/hid/hid-apple.c6
-rw-r--r--drivers/hid/hid-core.c6
-rw-r--r--drivers/hid/hid-cp2112.c2
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-input.c7
-rw-r--r--drivers/hid/hid-multitouch.c7
-rw-r--r--drivers/hid/hid-uclogic.c2
-rw-r--r--drivers/hid/usbhid/hid-quirks.c3
-rw-r--r--drivers/hid/wacom_sys.c76
-rw-r--r--drivers/hid/wacom_wac.c3
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c18
-rw-r--r--drivers/hwmon/g762.c1
-rw-r--r--drivers/hwmon/nct7802.c2
-rw-r--r--drivers/hwmon/nct7904.c58
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c4
-rw-r--r--drivers/i2c/busses/i2c-omap.c11
-rw-r--r--drivers/i2c/i2c-core.c24
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c6
-rw-r--r--drivers/iio/accel/bmc150-accel.c2
-rw-r--r--drivers/iio/accel/mma8452.c8
-rw-r--r--drivers/iio/adc/Kconfig3
-rw-r--r--drivers/iio/adc/at91_adc.c8
-rw-r--r--drivers/iio/adc/mcp320x.c2
-rw-r--r--drivers/iio/adc/rockchip_saradc.c4
-rw-r--r--drivers/iio/adc/twl4030-madc.c3
-rw-r--r--drivers/iio/adc/vf610_adc.c2
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c11
-rw-r--r--drivers/iio/dac/ad5624r_spi.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c18
-rw-r--r--drivers/iio/light/Kconfig2
-rw-r--r--drivers/iio/light/cm3323.c2
-rw-r--r--drivers/iio/light/ltr501.c2
-rw-r--r--drivers/iio/light/stk3310.c75
-rw-r--r--drivers/iio/light/tcs3414.c2
-rw-r--r--drivers/iio/magnetometer/Kconfig1
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c4
-rw-r--r--drivers/iio/magnetometer/mmc35240.c47
-rw-r--r--drivers/iio/proximity/sx9500.c28
-rw-r--r--drivers/iio/temperature/mlx90614.c2
-rw-r--r--drivers/iio/temperature/tmp006.c3
-rw-r--r--drivers/infiniband/core/agent.c4
-rw-r--r--drivers/infiniband/core/cm.c61
-rw-r--r--drivers/infiniband/core/iwpm_msg.c33
-rw-r--r--drivers/infiniband/core/iwpm_util.c12
-rw-r--r--drivers/infiniband/core/iwpm_util.h28
-rw-r--r--drivers/infiniband/core/mad.c47
-rw-r--r--drivers/infiniband/core/multicast.c8
-rw-r--r--drivers/infiniband/core/opa_smi.h4
-rw-r--r--drivers/infiniband/core/sa_query.c8
-rw-r--r--drivers/infiniband/core/smi.c37
-rw-r--r--drivers/infiniband/core/smi.h4
-rw-r--r--drivers/infiniband/core/sysfs.c2
-rw-r--r--drivers/infiniband/core/ucm.c4
-rw-r--r--drivers/infiniband/core/ucma.c5
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_sqp.c5
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c6
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mad.c5
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.c4
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c34
-rw-r--r--drivers/infiniband/hw/mlx4/main.c33
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c5
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c5
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_abi.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c58
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c56
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h53
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h29
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c33
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c49
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c21
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c3
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c16
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c23
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c71
-rw-r--r--drivers/input/input-leds.c16
-rw-r--r--drivers/input/joystick/turbografx.c2
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c2
-rw-r--r--drivers/input/misc/axp20x-pek.c1
-rw-r--r--drivers/input/misc/twl4030-vibra.c3
-rw-r--r--drivers/input/mouse/alps.c8
-rw-r--r--drivers/input/mouse/bcm5974.c165
-rw-r--r--drivers/input/mouse/elantech.c35
-rw-r--r--drivers/input/mouse/elantech.h1
-rw-r--r--drivers/input/mouse/synaptics.c4
-rw-r--r--drivers/input/touchscreen/goodix.c36
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c3
-rw-r--r--drivers/iommu/amd_iommu.c98
-rw-r--r--drivers/iommu/amd_iommu_init.c10
-rw-r--r--drivers/iommu/amd_iommu_v2.c24
-rw-r--r--drivers/iommu/arm-smmu-v3.c60
-rw-r--r--drivers/iommu/intel-iommu.c9
-rw-r--r--drivers/irqchip/irq-crossbar.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c111
-rw-r--r--drivers/irqchip/irq-mips-gic.c2
-rw-r--r--drivers/irqchip/spear-shirq.c2
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c35
-rw-r--r--drivers/macintosh/ans-lcd.c2
-rw-r--r--drivers/md/Kconfig2
-rw-r--r--drivers/md/bcache/closure.h3
-rw-r--r--drivers/md/bcache/io.c1
-rw-r--r--drivers/md/bcache/journal.c2
-rw-r--r--drivers/md/bcache/request.c14
-rw-r--r--drivers/md/bitmap.c28
-rw-r--r--drivers/md/dm-cache-policy-mq.c2
-rw-r--r--drivers/md/dm-cache-policy-smq.c4
-rw-r--r--drivers/md/dm-cache-target.c37
-rw-r--r--drivers/md/dm-thin-metadata.c4
-rw-r--r--drivers/md/dm-thin.c53
-rw-r--r--drivers/md/dm.c39
-rw-r--r--drivers/md/md-cluster.c12
-rw-r--r--drivers/md/md-cluster.h2
-rw-r--r--drivers/md/md.c6
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h6
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c23
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c37
-rw-r--r--drivers/md/persistent-data/dm-btree.c9
-rw-r--r--drivers/md/raid1.c19
-rw-r--r--drivers/md/raid10.c5
-rw-r--r--drivers/md/raid5.c38
-rw-r--r--drivers/md/raid5.h3
-rw-r--r--drivers/media/dvb-frontends/Kconfig2
-rw-r--r--drivers/media/pci/cobalt/Kconfig1
-rw-r--r--drivers/media/pci/cobalt/cobalt-irq.c2
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c15
-rw-r--r--drivers/media/pci/mantis/mantis_dma.c5
-rw-r--r--drivers/media/rc/ir-rc5-decoder.c116
-rw-r--r--drivers/media/rc/ir-rc6-decoder.c122
-rw-r--r--drivers/media/rc/nuvoton-cir.c127
-rw-r--r--drivers/media/rc/nuvoton-cir.h1
-rw-r--r--drivers/media/rc/rc-core-priv.h36
-rw-r--r--drivers/media/rc/rc-ir-raw.c139
-rw-r--r--drivers/media/rc/rc-loopback.c36
-rw-r--r--drivers/media/rc/rc-main.c7
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c40
-rw-r--r--drivers/memory/omap-gpmc.c6
-rw-r--r--drivers/mfd/Kconfig2
-rw-r--r--drivers/mfd/arizona-core.c16
-rw-r--r--drivers/mfd/stmpe-i2c.c2
-rw-r--r--drivers/mfd/stmpe-spi.c4
-rw-r--r--drivers/misc/eeprom/at24.c3
-rw-r--r--drivers/misc/mei/main.c2
-rw-r--r--drivers/misc/mic/scif/scif_nodeqp.c15
-rw-r--r--drivers/mmc/card/block.c2
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/omap_hsmmc.c11
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c210
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h2
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c1
-rw-r--r--drivers/mmc/host/sdhci-spear.c4
-rw-r--r--drivers/mmc/host/sdhci.c16
-rw-r--r--drivers/net/bonding/bond_main.c86
-rw-r--r--drivers/net/can/at91_can.c8
-rw-r--r--drivers/net/can/bfin_can.c6
-rw-r--r--drivers/net/can/c_can/c_can.c10
-rw-r--r--drivers/net/can/cc770/cc770.c4
-rw-r--r--drivers/net/can/dev.c7
-rw-r--r--drivers/net/can/flexcan.c7
-rw-r--r--drivers/net/can/grcan.c3
-rw-r--r--drivers/net/can/rcar_can.c16
-rw-r--r--drivers/net/can/sja1000/sja1000.c6
-rw-r--r--drivers/net/can/slcan.c4
-rw-r--r--drivers/net/can/spi/mcp251x.c17
-rw-r--r--drivers/net/can/ti_hecc.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c6
-rw-r--r--drivers/net/can/usb/esd_usb2.c6
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c7
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c4
-rw-r--r--drivers/net/can/usb/usb_8dev.c6
-rw-r--r--drivers/net/can/vcan.c3
-rw-r--r--drivers/net/dsa/bcm_sf2.c15
-rw-r--r--drivers/net/dsa/mv88e6xxx.c2
-rw-r--r--drivers/net/ethernet/3com/3c59x.c21
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c11
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c17
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h3
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c16
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c4
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c9
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c2
-rw-r--r--drivers/net/ethernet/cadence/macb.c125
-rw-r--r--drivers/net/ethernet/cadence/macb.h34
-rw-r--r--drivers/net/ethernet/cavium/Kconfig3
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h12
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c26
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c55
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c17
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h14
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c28
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h5
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c191
-rw-r--r--drivers/net/ethernet/freescale/fec.h1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c99
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c10
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c3
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c109
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h3
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c350
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c22
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c244
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c4
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c74
-rw-r--r--drivers/net/ethernet/rocker/rocker.c1
-rw-r--r--drivers/net/ethernet/sfc/ef10.c172
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c59
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.h6
-rw-r--r--drivers/net/ethernet/sfc/efx.c14
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/tx.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c4
-rw-r--r--drivers/net/ethernet/sun/niu.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw.c34
-rw-r--r--drivers/net/ethernet/ti/netcp.h2
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c51
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c67
-rw-r--r--drivers/net/ethernet/ti/netcp_sgmii.c30
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c8
-rw-r--r--drivers/net/hamradio/bpqether.c1
-rw-r--r--drivers/net/hamradio/mkiss.c7
-rw-r--r--drivers/net/ipvlan/ipvlan.h9
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c6
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c42
-rw-r--r--drivers/net/macvtap.c8
-rw-r--r--drivers/net/ntb_netdev.c9
-rw-r--r--drivers/net/phy/Kconfig2
-rw-r--r--drivers/net/phy/dp83867.c2
-rw-r--r--drivers/net/phy/mdio_bus.c19
-rw-r--r--drivers/net/phy/phy.c16
-rw-r--r--drivers/net/phy/smsc.c31
-rw-r--r--drivers/net/ppp/ppp_generic.c78
-rw-r--r--drivers/net/usb/cdc_ether.c8
-rw-r--r--drivers/net/usb/cdc_mbim.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c63
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c7
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c191
-rw-r--r--drivers/net/virtio_net.c7
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c8
-rw-r--r--drivers/net/wan/cosa.c3
-rw-r--r--drivers/net/wan/z85230.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c5
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h51
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c414
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c74
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c15
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c8
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb_ops.c4
-rw-r--r--drivers/net/wireless/rtlwifi/core.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/sw.c1
-rw-r--r--drivers/net/xen-netback/interface.c6
-rw-r--r--drivers/net/xen-netback/netback.c68
-rw-r--r--drivers/ntb/ntb.c2
-rw-r--r--drivers/ntb/ntb_transport.c201
-rw-r--r--drivers/nvdimm/region_devs.c5
-rw-r--r--drivers/of/Kconfig2
-rw-r--r--drivers/of/unittest.c3
-rw-r--r--drivers/parport/share.c11
-rw-r--r--drivers/pci/Kconfig2
-rw-r--r--drivers/pci/probe.c7
-rw-r--r--drivers/phy/Kconfig2
-rw-r--r--drivers/phy/phy-berlin-usb.c4
-rw-r--r--drivers/phy/phy-sun4i-usb.c1
-rw-r--r--drivers/phy/phy-ti-pipe3.c217
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c3
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c1
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c4
-rw-r--r--drivers/pinctrl/pinctrl-single.c3
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c5
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.h2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1310.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1340.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear300.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear310.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear320.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.h2
-rw-r--r--drivers/platform/chrome/Kconfig1
-rw-r--r--drivers/platform/x86/dell-laptop.c171
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c83
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c6
-rw-r--r--drivers/regulator/88pm800.c236
-rw-r--r--drivers/regulator/ad5398.c1
-rw-r--r--drivers/regulator/core.c41
-rw-r--r--drivers/regulator/da9062-regulator.c1
-rw-r--r--drivers/regulator/max8973-regulator.c2
-rw-r--r--drivers/regulator/pbias-regulator.c5
-rw-r--r--drivers/regulator/s2mps11.c14
-rw-r--r--drivers/rtc/rtc-armada38x.c2
-rw-r--r--drivers/rtc/rtc-mt6397.c4
-rw-r--r--drivers/s390/Makefile2
-rw-r--r--drivers/s390/block/dasd.c36
-rw-r--r--drivers/s390/block/dasd_alias.c3
-rw-r--r--drivers/s390/char/sclp_early.c1
-rw-r--r--drivers/s390/crypto/zcrypt_api.c7
-rw-r--r--drivers/s390/virtio/Makefile (renamed from drivers/s390/kvm/Makefile)0
-rw-r--r--drivers/s390/virtio/kvm_virtio.c (renamed from drivers/s390/kvm/kvm_virtio.c)0
-rw-r--r--drivers/s390/virtio/virtio_ccw.c (renamed from drivers/s390/kvm/virtio_ccw.c)0
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c4
-rw-r--r--drivers/scsi/ipr.c28
-rw-r--r--drivers/scsi/ipr.h1
-rw-r--r--drivers/scsi/libfc/fc_exch.c8
-rw-r--r--drivers/scsi/libfc/fc_fcp.c19
-rw-r--r--drivers/scsi/libiscsi.c25
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h20
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c190
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c763
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h72
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c28
-rw-r--r--drivers/scsi/scsi_error.c33
-rw-r--r--drivers/scsi/scsi_lib.c6
-rw-r--r--drivers/scsi/scsi_pm.c22
-rw-r--r--drivers/scsi/scsi_sysfs.c2
-rw-r--r--drivers/scsi/scsi_transport_srp.c3
-rw-r--r--drivers/scsi/sd.c6
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/virtio_scsi.c4
-rw-r--r--drivers/spi/Kconfig2
-rw-r--r--drivers/spi/spi-img-spfi.c2
-rw-r--r--drivers/spi/spi-imx.c5
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c1
-rw-r--r--drivers/spi/spidev.c1
-rw-r--r--drivers/staging/board/Kconfig2
-rw-r--r--drivers/staging/comedi/drivers/das1800.c1
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/debug.c2
-rw-r--r--drivers/staging/vt6655/device_main.c7
-rw-r--r--drivers/staging/vt6656/main_usb.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c52
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c45
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c34
-rw-r--r--drivers/target/target_core_configfs.c49
-rw-r--r--drivers/target/target_core_hba.c10
-rw-r--r--drivers/target/target_core_pr.c2
-rw-r--r--drivers/target/target_core_rd.c1
-rw-r--r--drivers/target/target_core_spc.c53
-rw-r--r--drivers/thermal/cpu_cooling.c73
-rw-r--r--drivers/thermal/hisi_thermal.c1
-rw-r--r--drivers/thermal/power_allocator.c34
-rw-r--r--drivers/thermal/samsung/Kconfig2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c5
-rw-r--r--drivers/thermal/thermal_core.c1
-rw-r--r--drivers/tty/n_tty.c16
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/serial/amba-pl011.c4
-rw-r--r--drivers/tty/serial/etraxfs-uart.c2
-rw-r--r--drivers/tty/serial/imx.c15
-rw-r--r--drivers/tty/serial/sc16is7xx.c30
-rw-r--r--drivers/tty/serial/serial_core.c3
-rw-r--r--drivers/tty/vt/selection.c1
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/usb/chipidea/core.c13
-rw-r--r--drivers/usb/chipidea/host.c7
-rw-r--r--drivers/usb/chipidea/host.h6
-rw-r--r--drivers/usb/class/cdc-acm.c1
-rw-r--r--drivers/usb/common/ulpi.c2
-rw-r--r--drivers/usb/core/hcd.c7
-rw-r--r--drivers/usb/core/hub.c2
-rw-r--r--drivers/usb/core/usb.h1
-rw-r--r--drivers/usb/dwc2/core.c55
-rw-r--r--drivers/usb/dwc2/core.h9
-rw-r--r--drivers/usb/dwc2/hcd.c55
-rw-r--r--drivers/usb/dwc2/hcd.h5
-rw-r--r--drivers/usb/dwc2/hcd_queue.c49
-rw-r--r--drivers/usb/dwc3/core.c6
-rw-r--r--drivers/usb/dwc3/ep0.c4
-rw-r--r--drivers/usb/gadget/composite.c11
-rw-r--r--drivers/usb/gadget/configfs.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c6
-rw-r--r--drivers/usb/gadget/function/f_hid.c4
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c16
-rw-r--r--drivers/usb/gadget/function/f_midi.c4
-rw-r--r--drivers/usb/gadget/function/f_printer.c10
-rw-r--r--drivers/usb/gadget/function/f_uac2.c4
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c2
-rw-r--r--drivers/usb/gadget/udc/fotg210-udc.c3
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c2
-rw-r--r--drivers/usb/gadget/udc/udc-core.c15
-rw-r--r--drivers/usb/host/ohci-q.c7
-rw-r--r--drivers/usb/host/ohci-tmio.c2
-rw-r--r--drivers/usb/host/xhci-hub.c22
-rw-r--r--drivers/usb/host/xhci-mem.c5
-rw-r--r--drivers/usb/host/xhci-pci.c57
-rw-r--r--drivers/usb/host/xhci-ring.c5
-rw-r--r--drivers/usb/host/xhci.c3
-rw-r--r--drivers/usb/host/xhci.h1
-rw-r--r--drivers/usb/musb/musb_virthub.c4
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c3
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/mos7720.c253
-rw-r--r--drivers/usb/serial/option.c3
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/serial/sierra.c1
-rw-r--r--drivers/usb/serial/usb-serial.c1
-rw-r--r--drivers/usb/storage/unusual_devs.h23
-rw-r--r--drivers/vfio/vfio.c91
-rw-r--r--drivers/vhost/vhost.c65
-rw-r--r--drivers/video/console/fbcon.c3
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/omap2/dss/dss-of.c4
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c4
-rw-r--r--drivers/video/of_videomode.c4
-rw-r--r--drivers/virtio/virtio_input.c4
-rw-r--r--drivers/watchdog/sp805_wdt.c4
-rw-r--r--drivers/xen/balloon.c15
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c4
-rw-r--r--fs/btrfs/dev-replace.c2
-rw-r--r--fs/btrfs/disk-io.c3
-rw-r--r--fs/btrfs/extent-tree.c31
-rw-r--r--fs/btrfs/inode.c5
-rw-r--r--fs/btrfs/ioctl.c18
-rw-r--r--fs/btrfs/qgroup.c5
-rw-r--r--fs/btrfs/transaction.c7
-rw-r--r--fs/ceph/caps.c22
-rw-r--r--fs/ceph/locks.c2
-rw-r--r--fs/ceph/super.h1
-rw-r--r--fs/configfs/item.c4
-rw-r--r--fs/dax.c14
-rw-r--r--fs/dcache.c13
-rw-r--r--fs/f2fs/data.c2
-rw-r--r--fs/f2fs/file.c7
-rw-r--r--fs/f2fs/gc.c30
-rw-r--r--fs/f2fs/inline.c2
-rw-r--r--fs/f2fs/segment.c1
-rw-r--r--fs/file_table.c24
-rw-r--r--fs/fs-writeback.c1
-rw-r--r--fs/fuse/dev.c10
-rw-r--r--fs/hugetlbfs/inode.c2
-rw-r--r--fs/jfs/file.c2
-rw-r--r--fs/jfs/inode.c4
-rw-r--r--fs/jfs/namei.c27
-rw-r--r--fs/locks.c38
-rw-r--r--fs/namei.c9
-rw-r--r--fs/namespace.c42
-rw-r--r--fs/nfs/client.c2
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c2
-rw-r--r--fs/nfs/inode.c15
-rw-r--r--fs/nfs/internal.h21
-rw-r--r--fs/nfs/nfs42proc.c19
-rw-r--r--fs/nfs/nfs4proc.c54
-rw-r--r--fs/nfs/nfs4state.c29
-rw-r--r--fs/nfs/pagelist.c7
-rw-r--r--fs/nfs/pnfs.c101
-rw-r--r--fs/nfs/write.c15
-rw-r--r--fs/nfsd/nfs4layouts.c1
-rw-r--r--fs/nfsd/nfs4state.c12
-rw-r--r--fs/nfsd/nfs4xdr.c11
-rw-r--r--fs/notify/mark.c30
-rw-r--r--fs/ocfs2/aops.c4
-rw-r--r--fs/ocfs2/dlmglue.c10
-rw-r--r--fs/pnode.h2
-rw-r--r--fs/proc/Kconfig6
-rw-r--r--fs/proc/base.c5
-rw-r--r--fs/proc/kcore.c4
-rw-r--r--fs/signalfd.c5
-rw-r--r--fs/udf/inode.c19
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c44
-rw-r--r--fs/xfs/xfs_file.c21
-rw-r--r--fs/xfs/xfs_log_recover.c11
-rw-r--r--include/asm-generic/mm-arch-hooks.h16
-rw-r--r--include/drm/drmP.h2
-rw-r--r--include/drm/drm_crtc.h2
-rw-r--r--include/drm/drm_crtc_helper.h3
-rw-r--r--include/drm/drm_edid.h19
-rw-r--r--include/drm/drm_pciids.h1
-rw-r--r--include/linux/amba/sp810.h2
-rw-r--r--include/linux/ata.h19
-rw-r--r--include/linux/blk-cgroup.h11
-rw-r--r--include/linux/can/skb.h2
-rw-r--r--include/linux/clkdev.h7
-rw-r--r--include/linux/compat.h2
-rw-r--r--include/linux/configfs.h3
-rw-r--r--include/linux/cper.h22
-rw-r--r--include/linux/cpu.h7
-rw-r--r--include/linux/cpufreq.h1
-rw-r--r--include/linux/dcache.h3
-rw-r--r--include/linux/device.h15
-rw-r--r--include/linux/fs.h35
-rw-r--r--include/linux/ftrace.h3
-rw-r--r--include/linux/gpio/driver.h2
-rw-r--r--include/linux/hid-sensor-hub.h1
-rw-r--r--include/linux/hugetlb.h17
-rw-r--r--include/linux/init.h78
-rw-r--r--include/linux/iommu.h2
-rw-r--r--include/linux/irq.h1
-rw-r--r--include/linux/kernel.h9
-rw-r--r--include/linux/kobject.h5
-rw-r--r--include/linux/kvm_host.h18
-rw-r--r--include/linux/libata.h2
-rw-r--r--include/linux/mm.h28
-rw-r--r--include/linux/mm_types.h9
-rw-r--r--include/linux/mmiotrace.h2
-rw-r--r--include/linux/module.h84
-rw-r--r--include/linux/mtd/nand.h10
-rw-r--r--include/linux/nfs_fs.h7
-rw-r--r--include/linux/nfs_fs_sb.h2
-rw-r--r--include/linux/of_device.h2
-rw-r--r--include/linux/page-flags.h10
-rw-r--r--include/linux/page_owner.h13
-rw-r--r--include/linux/pata_arasan_cf_data.h2
-rw-r--r--include/linux/platform_data/macb.h14
-rw-r--r--include/linux/platform_data/mmc-esdhc-imx.h1
-rw-r--r--include/linux/printk.h6
-rw-r--r--include/linux/sched.h16
-rw-r--r--include/linux/skbuff.h20
-rw-r--r--include/linux/usb/cdc_ncm.h7
-rw-r--r--include/media/rc-core.h7
-rw-r--r--include/media/videobuf2-core.h2
-rw-r--r--include/net/act_api.h8
-rw-r--r--include/net/cfg80211.h17
-rw-r--r--include/net/inet_frag.h17
-rw-r--r--include/net/ip.h1
-rw-r--r--include/net/ip_fib.h3
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netns/conntrack.h1
-rw-r--r--include/net/sock.h2
-rw-r--r--include/rdma/ib_verbs.h20
-rw-r--r--include/scsi/scsi_eh.h1
-rw-r--r--include/scsi/scsi_transport_srp.h1
-rw-r--r--include/sound/soc-topology.h12
-rw-r--r--include/target/iscsi/iscsi_target_core.h1
-rw-r--r--include/uapi/drm/amdgpu_drm.h4
-rw-r--r--include/uapi/drm/i915_drm.h8
-rw-r--r--include/uapi/drm/radeon_drm.h2
-rw-r--r--include/uapi/linux/netconf.h1
-rw-r--r--include/uapi/linux/pci_regs.h1
-rw-r--r--include/uapi/linux/virtio_net.h16
-rw-r--r--include/uapi/linux/virtio_pci.h6
-rw-r--r--include/uapi/linux/virtio_ring.h5
-rw-r--r--include/uapi/sound/asoc.h45
-rw-r--r--init/main.c2
-rw-r--r--ipc/mqueue.c5
-rw-r--r--ipc/sem.c47
-rw-r--r--ipc/shm.c2
-rw-r--r--kernel/cpu.c9
-rw-r--r--kernel/cpuset.c2
-rw-r--r--kernel/events/core.c91
-rw-r--r--kernel/events/ring_buffer.c10
-rw-r--r--kernel/fork.c7
-rw-r--r--kernel/irq/chip.c19
-rw-r--r--kernel/irq/resend.c18
-rw-r--r--kernel/kthread.c4
-rw-r--r--kernel/locking/qspinlock_paravirt.h11
-rw-r--r--kernel/module.c8
-rw-r--r--kernel/resource.c6
-rw-r--r--kernel/sched/fair.c2
-rw-r--r--kernel/signal.c13
-rw-r--r--kernel/time/tick-broadcast.c1
-rw-r--r--kernel/time/tick-common.c1
-rw-r--r--kernel/time/timer.c4
-rw-r--r--kernel/trace/ftrace.c52
-rw-r--r--kernel/trace/trace.h1
-rw-r--r--kernel/trace/trace_branch.c17
-rw-r--r--lib/decompress.c5
-rw-r--r--lib/dma-debug.c3
-rw-r--r--lib/hexdump.c7
-rw-r--r--lib/iommu-common.c2
-rw-r--r--lib/kobject.c5
-rw-r--r--lib/rhashtable.c4
-rw-r--r--mm/cma.h2
-rw-r--r--mm/cma_debug.c11
-rw-r--r--mm/huge_memory.c7
-rw-r--r--mm/kasan/kasan.c2
-rw-r--r--mm/kasan/report.c2
-rw-r--r--mm/memory-failure.c54
-rw-r--r--mm/memory_hotplug.c13
-rw-r--r--mm/migrate.c8
-rw-r--r--mm/page-writeback.c4
-rw-r--r--mm/page_alloc.c76
-rw-r--r--mm/page_owner.c7
-rw-r--r--mm/shmem.c4
-rw-r--r--mm/slab.c4
-rw-r--r--mm/slab_common.c3
-rw-r--r--mm/slub.c2
-rw-r--r--mm/vmscan.c16
-rw-r--r--net/9p/client.c2
-rw-r--r--net/9p/trans_virtio.c1
-rw-r--r--net/ax25/ax25_subr.c1
-rw-r--r--net/batman-adv/distributed-arp-table.c18
-rw-r--r--net/batman-adv/gateway_client.c2
-rw-r--r--net/batman-adv/soft-interface.c3
-rw-r--r--net/batman-adv/translation-table.c32
-rw-r--r--net/bluetooth/mgmt.c2
-rw-r--r--net/bluetooth/smp.c4
-rw-r--r--net/bridge/br_forward.c28
-rw-r--r--net/bridge/br_mdb.c18
-rw-r--r--net/bridge/br_multicast.c91
-rw-r--r--net/bridge/br_netfilter_hooks.c16
-rw-r--r--net/bridge/br_netfilter_ipv6.c2
-rw-r--r--net/bridge/br_netlink.c16
-rw-r--r--net/bridge/br_stp.c5
-rw-r--r--net/bridge/br_stp_if.c13
-rw-r--r--net/bridge/br_stp_timer.c4
-rw-r--r--net/caif/caif_socket.c19
-rw-r--r--net/can/af_can.c12
-rw-r--r--net/can/bcm.c2
-rw-r--r--net/can/raw.c7
-rw-r--r--net/core/datagram.c57
-rw-r--r--net/core/dev.c45
-rw-r--r--net/core/dst.c4
-rw-r--r--net/core/gen_estimator.c13
-rw-r--r--net/core/netclassid_cgroup.c3
-rw-r--r--net/core/pktgen.c12
-rw-r--r--net/core/request_sock.c8
-rw-r--r--net/core/rtnetlink.c198
-rw-r--r--net/core/skbuff.c39
-rw-r--r--net/core/sock.c8
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/dsa/dsa.c6
-rw-r--r--net/dsa/slave.c3
-rw-r--r--net/ieee802154/6lowpan/reassembly.c6
-rw-r--r--net/ipv4/arp.c16
-rw-r--r--net/ipv4/datagram.c16
-rw-r--r--net/ipv4/devinet.c14
-rw-r--r--net/ipv4/fib_lookup.h1
-rw-r--r--net/ipv4/fib_semantics.c41
-rw-r--r--net/ipv4/fib_trie.c9
-rw-r--r--net/ipv4/igmp.c33
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/inet_diag.c4
-rw-r--r--net/ipv4/inet_fragment.c40
-rw-r--r--net/ipv4/inet_hashtables.c11
-rw-r--r--net/ipv4/ip_fragment.c18
-rw-r--r--net/ipv4/ip_tunnel.c8
-rw-r--r--net/ipv4/netfilter/arp_tables.c25
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c3
-rw-r--r--net/ipv4/route.c2
-rw-r--r--net/ipv4/sysctl_net_ipv4.c10
-rw-r--r--net/ipv4/tcp.c11
-rw-r--r--net/ipv4/tcp_input.c3
-rw-r--r--net/ipv4/tcp_ipv4.c2
-rw-r--r--net/ipv4/udp.c13
-rw-r--r--net/ipv6/datagram.c20
-rw-r--r--net/ipv6/ip6_fib.c2
-rw-r--r--net/ipv6/ip6_input.c6
-rw-r--r--net/ipv6/ip6_offload.c2
-rw-r--r--net/ipv6/mcast_snoop.c33
-rw-r--r--net/ipv6/ndisc.c6
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c19
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c6
-rw-r--r--net/ipv6/reassembly.c8
-rw-r--r--net/ipv6/route.c90
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/llc/af_llc.c4
-rw-r--r--net/mac80211/debugfs_netdev.c1
-rw-r--r--net/mac80211/iface.c25
-rw-r--r--net/mac80211/mesh_plink.c5
-rw-r--r--net/mac80211/pm.c16
-rw-r--r--net/mac80211/rc80211_minstrel.c11
-rw-r--r--net/mac80211/tdls.c6
-rw-r--r--net/mac80211/tx.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c16
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c78
-rw-r--r--net/netfilter/ipvs/ip_vs_sched.c12
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c41
-rw-r--r--net/netfilter/nf_conntrack_core.c71
-rw-r--r--net/netfilter/nf_conntrack_expect.c3
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c  5
-rw-r--r--  net/netfilter/nf_queue.c  2
-rw-r--r--  net/netfilter/nf_synproxy_core.c  11
-rw-r--r--  net/netfilter/nfnetlink.c  38
-rw-r--r--  net/netfilter/xt_CT.c  13
-rw-r--r--  net/netfilter/xt_IDLETIMER.c  1
-rw-r--r--  net/netlink/af_netlink.c  86
-rw-r--r--  net/openvswitch/actions.c  16
-rw-r--r--  net/openvswitch/flow_table.c  2
-rw-r--r--  net/packet/af_packet.c  11
-rw-r--r--  net/rds/ib_rdma.c  4
-rw-r--r--  net/rds/info.c  2
-rw-r--r--  net/rds/transport.c  2
-rw-r--r--  net/sched/act_api.c  11
-rw-r--r--  net/sched/act_bpf.c  50
-rw-r--r--  net/sched/act_mirred.c  2
-rw-r--r--  net/sched/act_pedit.c  5
-rw-r--r--  net/sched/cls_bpf.c  2
-rw-r--r--  net/sched/cls_flow.c  5
-rw-r--r--  net/sched/cls_flower.c  2
-rw-r--r--  net/sched/sch_choke.c  13
-rw-r--r--  net/sched/sch_fq_codel.c  35
-rw-r--r--  net/sched/sch_plug.c  1
-rw-r--r--  net/sched/sch_sfq.c  2
-rw-r--r--  net/sctp/socket.c  6
-rw-r--r--  net/sunrpc/backchannel_rqst.c  6
-rw-r--r--  net/sunrpc/clnt.c  5
-rw-r--r--  net/sunrpc/xprtsock.c  25
-rw-r--r--  net/switchdev/switchdev.c  12
-rw-r--r--  net/tipc/socket.c  1
-rw-r--r--  net/wireless/chan.c  45
-rw-r--r--  net/wireless/nl80211.c  14
-rw-r--r--  net/wireless/reg.c  8
-rw-r--r--  net/wireless/trace.h  11
-rw-r--r--  samples/trace_events/trace-events-sample.h  7
-rwxr-xr-x  scripts/checkpatch.pl  2
-rwxr-xr-x  scripts/kconfig/streamline_config.pl  2
-rw-r--r--  security/keys/keyring.c  8
-rw-r--r--  security/yama/yama_lsm.c  1
-rw-r--r--  sound/core/pcm_native.c  2
-rw-r--r--  sound/firewire/amdtp.c  5
-rw-r--r--  sound/firewire/amdtp.h  2
-rw-r--r--  sound/firewire/fireworks/fireworks.c  8
-rw-r--r--  sound/firewire/fireworks/fireworks.h  1
-rw-r--r--  sound/firewire/fireworks/fireworks_stream.c  9
-rw-r--r--  sound/hda/ext/hdac_ext_controller.c  6
-rw-r--r--  sound/hda/ext/hdac_ext_stream.c  2
-rw-r--r--  sound/hda/hdac_i915.c  5
-rw-r--r--  sound/pci/hda/hda_generic.c  2
-rw-r--r--  sound/pci/hda/hda_intel.c  32
-rw-r--r--  sound/pci/hda/patch_cirrus.c  4
-rw-r--r--  sound/pci/hda/patch_hdmi.c  4
-rw-r--r--  sound/pci/hda/patch_realtek.c  137
-rw-r--r--  sound/pci/hda/patch_sigmatel.c  3
-rw-r--r--  sound/pci/oxygen/oxygen_mixer.c  2
-rw-r--r--  sound/soc/Kconfig  3
-rw-r--r--  sound/soc/Makefile  3
-rw-r--r--  sound/soc/codecs/cs4265.c  10
-rw-r--r--  sound/soc/codecs/pcm1681.c  2
-rw-r--r--  sound/soc/codecs/rt5645.c  5
-rw-r--r--  sound/soc/codecs/rt5645.h  4
-rw-r--r--  sound/soc/codecs/sgtl5000.h  2
-rw-r--r--  sound/soc/codecs/ssm4567.c  8
-rw-r--r--  sound/soc/fsl/fsl_ssi.c  2
-rw-r--r--  sound/soc/intel/Makefile  2
-rw-r--r--  sound/soc/intel/atom/sst/sst_drv_interface.c  14
-rw-r--r--  sound/soc/intel/baytrail/sst-baytrail-ipc.c  2
-rw-r--r--  sound/soc/intel/boards/cht_bsw_max98090_ti.c  4
-rw-r--r--  sound/soc/intel/haswell/sst-haswell-ipc.c  2
-rw-r--r--  sound/soc/mediatek/mt8173-max98090.c  17
-rw-r--r--  sound/soc/mediatek/mt8173-rt5650-rt5676.c  19
-rw-r--r--  sound/soc/mediatek/mtk-afe-pcm.c  2
-rw-r--r--  sound/soc/soc-core.c  1
-rw-r--r--  sound/soc/soc-dapm.c  35
-rw-r--r--  sound/soc/soc-topology.c  62
-rw-r--r--  sound/soc/zte/zx296702-i2s.c  4
-rw-r--r--  sound/soc/zte/zx296702-spdif.c  4
-rw-r--r--  sound/sparc/amd7930.c  1
-rw-r--r--  sound/usb/card.c  2
-rw-r--r--  sound/usb/line6/pcm.c  9
-rw-r--r--  sound/usb/mixer_maps.c  24
-rw-r--r--  sound/usb/quirks-table.h  68
-rw-r--r--  tools/lib/api/Makefile  2
-rw-r--r--  tools/lib/hweight.c  62
-rw-r--r--  tools/lib/traceevent/Makefile  2
-rw-r--r--  tools/perf/MANIFEST  2
-rw-r--r--  tools/perf/Makefile.perf  19
-rw-r--r--  tools/perf/builtin-record.c  11
-rw-r--r--  tools/perf/builtin-stat.c  4
-rw-r--r--  tools/perf/builtin-top.c  4
-rw-r--r--  tools/perf/config/Makefile  2
-rw-r--r--  tools/perf/ui/browsers/hists.c  2
-rw-r--r--  tools/perf/util/Build  2
-rw-r--r--  tools/perf/util/auxtrace.c  10
-rw-r--r--  tools/perf/util/machine.c  20
-rw-r--r--  tools/perf/util/python-ext-sources  4
-rw-r--r--  tools/perf/util/stat-shadow.c  8
-rw-r--r--  tools/perf/util/symbol.c  2
-rw-r--r--  tools/perf/util/symbol.h  3
-rw-r--r--  tools/perf/util/thread.c  6
-rw-r--r--  tools/perf/util/thread_map.c  3
-rw-r--r--  tools/perf/util/vdso.c  8
-rw-r--r--  tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c  2
-rw-r--r--  virt/kvm/vfio.c  5
1372 files changed, 14277 insertions, 9555 deletions
diff --git a/.get_maintainer.ignore b/.get_maintainer.ignore
new file mode 100644
index 000000000000..cca6d870f7a5
--- /dev/null
+++ b/.get_maintainer.ignore
@@ -0,0 +1 @@
+Christoph Hellwig <hch@lst.de>
diff --git a/.mailmap b/.mailmap
index 977f958eedbe..4b31af54ccd5 100644
--- a/.mailmap
+++ b/.mailmap
@@ -17,6 +17,7 @@ Aleksey Gorelov <aleksey_gorelov@phoenix.com>
Al Viro <viro@ftp.linux.org.uk>
Al Viro <viro@zenIV.linux.org.uk>
Andreas Herrmann <aherrman@de.ibm.com>
+Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
Andrew Morton <akpm@linux-foundation.org>
Andrew Vasquez <andrew.vasquez@qlogic.com>
Andy Adamson <andros@citi.umich.edu>
@@ -116,6 +117,7 @@ Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
Simon Kelley <simon@thekelleys.org.uk>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
Stephen Hemminger <shemminger@osdl.org>
+Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
Sumit Semwal <sumit.semwal@ti.com>
Tejun Heo <htejun@gmail.com>
Thomas Graf <tgraf@suug.ch>
@@ -125,7 +127,9 @@ Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
Uwe Kleine-König <ukl@pengutronix.de>
Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
-Viresh Kumar <viresh.linux@gmail.com> <viresh.kumar@st.com>
+Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
+Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
+Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
Takashi YOSHII <takashi.yoshii.zj@renesas.com>
Yusuke Goda <goda.yusuke@renesas.com>
Gustavo Padovan <gustavo@las.ic.unicamp.br>
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index bbed111c31b4..70c9b1ac66db 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -1234,10 +1234,8 @@ Description:
object is near the sensor, usually be observing
reflectivity of infrared or ultrasound emitted.
Often these sensors are unit less and as such conversion
- to SI units is not possible. Where it is, the units should
- be meters. If such a conversion is not possible, the reported
- values should behave in the same way as a distance, i.e. lower
- values indicate something is closer to the sensor.
+ to SI units is not possible. Higher proximity measurements
+ indicate closer objects, and vice versa.
What: /sys/.../iio:deviceX/in_illuminance_input
What: /sys/.../iio:deviceX/in_illuminance_raw
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index c0312cbd023d..2fb9a5457522 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -3383,7 +3383,7 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >TBD</td>
</tr>
<tr>
- <td rowspan="2" valign="top" >omap</td>
+ <td valign="top" >omap</td>
<td valign="top" >Generic</td>
<td valign="top" >“zorder”</td>
<td valign="top" >RANGE</td>
diff --git a/Documentation/arm/SPEAr/overview.txt b/Documentation/arm/SPEAr/overview.txt
index 65610bf52ebf..1b049be6c84f 100644
--- a/Documentation/arm/SPEAr/overview.txt
+++ b/Documentation/arm/SPEAr/overview.txt
@@ -60,4 +60,4 @@ Introduction
Document Author
---------------
- Viresh Kumar <viresh.linux@gmail.com>, (c) 2010-2012 ST Microelectronics
+ Viresh Kumar <vireshk@kernel.org>, (c) 2010-2012 ST Microelectronics
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
index 82960cffbad3..785eab87aa71 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -258,6 +258,12 @@ cache metadata mode : ro if read-only, rw if read-write
no further I/O will be permitted and the status will just
contain the string 'Fail'. The userspace recovery tools
should then be used.
+needs_check : 'needs_check' if set, '-' if not set
+ A metadata operation has failed, resulting in the needs_check
+ flag being set in the metadata's superblock. The metadata
+ device must be deactivated and checked/repaired before the
+ cache can be made fully operational again. '-' indicates
+ needs_check is not set.
Messages
--------
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 4f67578b2954..1699a55b7b70 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -296,7 +296,7 @@ ii) Status
underlying device. When this is enabled when loading the table,
it can get disabled if the underlying device doesn't support it.
- ro|rw
+ ro|rw|out_of_data_space
If the pool encounters certain types of device failures it will
drop into a read-only metadata mode in which no changes to
the pool metadata (like allocating new blocks) are permitted.
@@ -314,6 +314,13 @@ ii) Status
module parameter can be used to change this timeout -- it
defaults to 60 seconds but may be disabled using a value of 0.
+ needs_check
+ A metadata operation has failed, resulting in the needs_check
+ flag being set in the metadata's superblock. The metadata
+ device must be deactivated and checked/repaired before the
+ thin-pool can be made fully operational again. '-' indicates
+ needs_check is not set.
+
iii) Messages
create_thin <dev id>
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index d6b794cef0b8..91e6e5c478d0 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -199,6 +199,7 @@ nodes to be present and contain the properties described below.
"qcom,kpss-acc-v1"
"qcom,kpss-acc-v2"
"rockchip,rk3066-smp"
+ "ste,dbx500-smp"
- cpu-release-addr
Usage: required for systems that have an "enable-method"
diff --git a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
index d3058768b23d..c53e0b08032f 100644
--- a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
+++ b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
@@ -35,7 +35,7 @@ Example:
device_type = "dma";
reg = <0x0 0x1f270000 0x0 0x10000>,
<0x0 0x1f200000 0x0 0x10000>,
- <0x0 0x1b008000 0x0 0x2000>,
+ <0x0 0x1b000000 0x0 0x400000>,
<0x0 0x1054a000 0x0 0x100>;
interrupts = <0x0 0x82 0x4>,
<0x0 0xb8 0x4>,
diff --git a/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt b/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt
index e75f0e549fff..971c3eedb1c7 100644
--- a/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt
+++ b/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt
@@ -65,8 +65,10 @@ Optional properties:
- edid: verbatim EDID data block describing attached display.
- ddc: phandle describing the i2c bus handling the display data
channel
-- port: A port node with endpoint definitions as defined in
+- port@[0-1]: Port nodes with endpoint definitions as defined in
Documentation/devicetree/bindings/media/video-interfaces.txt.
+ Port 0 is the input port connected to the IPU display interface,
+ port 1 is the output port connected to a panel.
example:
@@ -75,9 +77,29 @@ display@di0 {
edid = [edid-data];
interface-pix-fmt = "rgb24";
- port {
+ port@0 {
+ reg = <0>;
+
display_in: endpoint {
remote-endpoint = <&ipu_di0_disp0>;
};
};
+
+ port@1 {
+ reg = <1>;
+
+ display_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+};
+
+panel {
+ ...
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&display_out>;
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
index c03eec116872..3443e0f838df 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
@@ -35,3 +35,6 @@ the PCIe specification.
NOTE: this only applies to the SMMU itself, not
masters connected upstream of the SMMU.
+
+- hisilicon,broken-prefetch-cmd
+ : Avoid sending CMD_PREFETCH_* commands to the SMMU.
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index 5d0376b8f202..211e7785f4d2 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -17,7 +17,6 @@ Required properties:
"fsl,imx6sx-usdhc"
Optional properties:
-- fsl,cd-controller : Indicate to use controller internal card detection
- fsl,wp-controller : Indicate to use controller internal write protection
- fsl,delay-line : Specify the number of delay cells for override mode.
This is used to set the clock delay for DLL(Delay Line) on override mode
@@ -35,7 +34,6 @@ esdhc@70004000 {
compatible = "fsl,imx51-esdhc";
reg = <0x70004000 0x4000>;
interrupts = <1>;
- fsl,cd-controller;
fsl,wp-controller;
};
diff --git a/Documentation/devicetree/bindings/phy/ti-phy.txt b/Documentation/devicetree/bindings/phy/ti-phy.txt
index 305e3df3d9b1..9cf9446eaf2e 100644
--- a/Documentation/devicetree/bindings/phy/ti-phy.txt
+++ b/Documentation/devicetree/bindings/phy/ti-phy.txt
@@ -82,6 +82,9 @@ Optional properties:
- id: If there are multiple instance of the same type, in order to
differentiate between each instance "id" can be used (e.g., multi-lane PCIe
PHY). If "id" is not provided, it is set to default value of '1'.
+ - syscon-pllreset: Handle to system control region that contains the
+ CTRL_CORE_SMA_SW_0 register and register offset to the CTRL_CORE_SMA_SW_0
+ register that contains the SATA_PLL_SOFT_RESET bit. Only valid for sata_phy.
This is usually a subnode of ocp2scp to which it is connected.
@@ -100,3 +103,16 @@ usb3phy@4a084400 {
"sysclk",
"refclk";
};
+
+sata_phy: phy@4A096000 {
+ compatible = "ti,phy-pipe3-sata";
+ reg = <0x4A096000 0x80>, /* phy_rx */
+ <0x4A096400 0x64>, /* phy_tx */
+ <0x4A096800 0x40>; /* pll_ctrl */
+ reg-names = "phy_rx", "phy_tx", "pll_ctrl";
+ ctrl-module = <&omap_control_sata>;
+ clocks = <&sys_clkin1>, <&sata_ref_clk>;
+ clock-names = "sysclk", "refclk";
+ syscon-pllreset = <&scm_conf 0x3fc>;
+ #phy-cells = <0>;
+};
diff --git a/Documentation/devicetree/bindings/sound/mt8173-max98090.txt b/Documentation/devicetree/bindings/sound/mt8173-max98090.txt
index 829bd26d17f8..519e97c8f1b8 100644
--- a/Documentation/devicetree/bindings/sound/mt8173-max98090.txt
+++ b/Documentation/devicetree/bindings/sound/mt8173-max98090.txt
@@ -3,11 +3,13 @@ MT8173 with MAX98090 CODEC
Required properties:
- compatible : "mediatek,mt8173-max98090"
- mediatek,audio-codec: the phandle of the MAX98090 audio codec
+- mediatek,platform: the phandle of MT8173 ASoC platform
Example:
sound {
compatible = "mediatek,mt8173-max98090";
mediatek,audio-codec = <&max98090>;
+ mediatek,platform = <&afe>;
};
diff --git a/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt b/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
index 61e98c976bd4..f205ce9e31dd 100644
--- a/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
+++ b/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
@@ -3,11 +3,13 @@ MT8173 with RT5650 RT5676 CODECS
Required properties:
- compatible : "mediatek,mt8173-rt5650-rt5676"
- mediatek,audio-codec: the phandles of rt5650 and rt5676 codecs
+- mediatek,platform: the phandle of MT8173 ASoC platform
Example:
sound {
compatible = "mediatek,mt8173-rt5650-rt5676";
mediatek,audio-codec = <&rt5650 &rt5676>;
+ mediatek,platform = <&afe>;
};
diff --git a/Documentation/devicetree/bindings/spi/spi-ath79.txt b/Documentation/devicetree/bindings/spi/spi-ath79.txt
index f1ad9c367532..9c696fa66f81 100644
--- a/Documentation/devicetree/bindings/spi/spi-ath79.txt
+++ b/Documentation/devicetree/bindings/spi/spi-ath79.txt
@@ -3,7 +3,7 @@ Binding for Qualcomm Atheros AR7xxx/AR9xxx SPI controller
Required properties:
- compatible: has to be "qca,<soc-type>-spi", "qca,ar7100-spi" as fallback.
- reg: Base address and size of the controllers memory area
-- clocks: phandle to the AHB clock.
+- clocks: phandle of the AHB clock.
- clock-names: has to be "ahb".
- #address-cells: <1>, as required by generic SPI binding.
- #size-cells: <0>, also as required by generic SPI binding.
@@ -12,9 +12,9 @@ Child nodes as per the generic SPI binding.
Example:
- spi@1F000000 {
+ spi@1f000000 {
compatible = "qca,ar9132-spi", "qca,ar7100-spi";
- reg = <0x1F000000 0x10>;
+ reg = <0x1f000000 0x10>;
clocks = <&pll 2>;
clock-names = "ahb";
diff --git a/Documentation/hwmon/nct7904 b/Documentation/hwmon/nct7904
index 014f112e2a14..57fffe33ebfc 100644
--- a/Documentation/hwmon/nct7904
+++ b/Documentation/hwmon/nct7904
@@ -35,11 +35,11 @@ temp1_input Local temperature (1/1000 degree,
temp[2-9]_input CPU temperatures (1/1000 degree,
0.125 degree resolution)
-fan[1-4]_mode R/W, 0/1 for manual or SmartFan mode
+pwm[1-4]_enable R/W, 1/2 for manual or SmartFan mode
Setting SmartFan mode is supported only if it has been
previously configured by BIOS (or configuration EEPROM)
-fan[1-4]_pwm R/O in SmartFan mode, R/W in manual control mode
+pwm[1-4] R/O in SmartFan mode, R/W in manual control mode
The driver checks sensor control registers and does not export the sensors
that are not enabled. Anyway, a sensor that is enabled may actually be not
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index c86f2f1ae4f6..1fec1135791d 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -119,8 +119,10 @@ ALPS Absolute Mode - Protocol Version 2
byte 5: 0 z6 z5 z4 z3 z2 z1 z0
Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
-the DualPoint Stick. For non interleaved dualpoint devices the pointingstick
-buttons get reported separately in the PSM, PSR and PSL bits.
+the DualPoint Stick. The M, R and L bits signal the combined status of both
+the pointingstick and touchpad buttons, except for Dell dualpoint devices
+where the pointingstick buttons get reported separately in the PSM, PSR
+and PSL bits.
Dualpoint device -- interleaved packet format
---------------------------------------------
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index e63b446d973c..13f888a02a3d 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -952,6 +952,14 @@ When kbuild executes, the following steps are followed (roughly):
$(KBUILD_ARFLAGS) set by the top level Makefile to "D" (deterministic
mode) if this option is supported by $(AR).
+ ARCH_CPPFLAGS, ARCH_AFLAGS, ARCH_CFLAGS Overrides the kbuild defaults
+
+ These variables are appended to the KBUILD_CPPFLAGS,
+ KBUILD_AFLAGS, and KBUILD_CFLAGS, respectively, after the
+ top-level Makefile has set any other flags. This provides a
+ means for an architecture to override the defaults.
+
+
--- 6.2 Add prerequisites to archheaders:
The archheaders: rule is used to generate header files that
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 949de191fcdc..cda56df9b8a7 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -199,7 +199,8 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
- buf += "#include <asm/unaligned.h>\n\n"
+ buf += "#include <asm/unaligned.h>\n"
+ buf += "#include <scsi/scsi_proto.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
@@ -230,8 +231,14 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
- buf += " ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n"
- buf += " &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
+
+ if proto_ident == "FC":
+ buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
+ elif proto_ident == "SAS":
+ buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
+ elif proto_ident == "iSCSI":
+ buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
+
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
@@ -292,7 +299,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .module = THIS_MODULE,\n"
- buf += " .name = " + fabric_mod_name + ",\n"
+ buf += " .name = \"" + fabric_mod_name + "\",\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
@@ -322,17 +329,17 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += "\n"
- buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
+ buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs,\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
- buf += " return target_register_template(" + fabric_mod_name + "_ops);\n"
+ buf += " return target_register_template(&" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
- buf += " target_unregister_template(" + fabric_mod_name + "_ops);\n"
+ buf += " target_unregister_template(&" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
diff --git a/MAINTAINERS b/MAINTAINERS
index fd6078443083..569568f6644f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -361,11 +361,11 @@ S: Supported
F: drivers/input/touchscreen/ad7879.c
ADDRESS SPACE LAYOUT RANDOMIZATION (ASLR)
-M: Jiri Kosina <jkosina@suse.cz>
+M: Jiri Kosina <jkosina@suse.com>
S: Maintained
ADM1025 HARDWARE MONITOR DRIVER
-M: Jean Delvare <jdelvare@suse.de>
+M: Jean Delvare <jdelvare@suse.com>
L: lm-sensors@lm-sensors.org
S: Maintained
F: Documentation/hwmon/adm1025
@@ -430,7 +430,7 @@ S: Maintained
F: drivers/macintosh/therm_adt746x.c
ADT7475 HARDWARE MONITOR DRIVER
-M: Jean Delvare <jdelvare@suse.de>
+M: Jean Delvare <jdelvare@suse.com>
L: lm-sensors@lm-sensors.org
S: Maintained
F: Documentation/hwmon/adt7475
@@ -445,7 +445,7 @@ F: drivers/input/misc/adxl34x.c
ADVANSYS SCSI DRIVER
M: Matthew Wilcox <matthew@wil.cx>
-M: Hannes Reinecke <hare@suse.de>
+M: Hannes Reinecke <hare@suse.com>
L: linux-scsi@vger.kernel.org
S: Maintained
F: Documentation/scsi/advansys.txt
@@ -506,7 +506,7 @@ F: drivers/scsi/aha152x*
F: drivers/scsi/pcmcia/aha152x*
AIC7XXX / AIC79XX SCSI DRIVER
-M: Hannes Reinecke <hare@suse.de>
+M: Hannes Reinecke <hare@suse.com>
L: linux-scsi@vger.kernel.org
S: Maintained
F: drivers/scsi/aic7xxx/
@@ -746,7 +746,7 @@ S: Maintained
F: sound/aoa/
APM DRIVER
-M: Jiri Kosina <jkosina@suse.cz>
+M: Jiri Kosina <jkosina@suse.com>
S: Odd fixes
F: arch/x86/kernel/apm_32.c
F: include/linux/apm_bios.h
@@ -1001,6 +1001,7 @@ ARM/CONEXANT DIGICOLOR MACHINE SUPPORT
M: Baruch Siach <baruch@tkos.co.il>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+F: arch/arm/boot/dts/cx92755*
N: digicolor
ARM/EBSA110 MACHINE SUPPORT
@@ -1324,7 +1325,7 @@ F: arch/arm/mach-pxa/include/mach/palmtc.h
F: arch/arm/mach-pxa/palmtc.c
ARM/PALM TREO SUPPORT
-M: Tomas Cech <sleep_walker@suse.cz>
+M: Tomas Cech <sleep_walker@suse.com>
L: linux-arm-kernel@lists.infradead.org
W: http://hackndev.com
S: Maintained
@@ -2405,7 +2406,7 @@ F: drivers/gpio/gpio-bt8xx.c
BTRFS FILE SYSTEM
M: Chris Mason <clm@fb.com>
M: Josef Bacik <jbacik@fb.com>
-M: David Sterba <dsterba@suse.cz>
+M: David Sterba <dsterba@suse.com>
L: linux-btrfs@vger.kernel.org
W: http://btrfs.wiki.kernel.org/
Q: http://patchwork.kernel.org/project/linux-btrfs/list/
@@ -2748,7 +2749,7 @@ COCCINELLE/Semantic Patches (SmPL)
M: Julia Lawall <Julia.Lawall@lip6.fr>
M: Gilles Muller <Gilles.Muller@lip6.fr>
M: Nicolas Palix <nicolas.palix@imag.fr>
-M: Michal Marek <mmarek@suse.cz>
+M: Michal Marek <mmarek@suse.com>
L: cocci@systeme.lip6.fr (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git misc
W: http://coccinelle.lip6.fr/
@@ -2864,7 +2865,7 @@ F: kernel/cpuset.c
CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
-M: Michal Hocko <mhocko@suse.cz>
+M: Michal Hocko <mhocko@kernel.org>
L: cgroups@vger.kernel.org
L: linux-mm@kvack.org
S: Maintained
@@ -2945,7 +2946,7 @@ F: arch/x86/kernel/cpuid.c
F: arch/x86/kernel/msr.c
CPU POWER MONITORING SUBSYSTEM
-M: Thomas Renninger <trenn@suse.de>
+M: Thomas Renninger <trenn@suse.com>
L: linux-pm@vger.kernel.org
S: Maintained
F: tools/power/cpupower/
@@ -3175,7 +3176,7 @@ F: Documentation/networking/dmfe.txt
F: drivers/net/ethernet/dec/tulip/dmfe.c
DC390/AM53C974 SCSI driver
-M: Hannes Reinecke <hare@suse.de>
+M: Hannes Reinecke <hare@suse.com>
L: linux-scsi@vger.kernel.org
S: Maintained
F: drivers/scsi/am53c974.c
@@ -3379,7 +3380,7 @@ W: http://www.win.tue.nl/~aeb/partitions/partition_types-1.html
S: Maintained
DISKQUOTA
-M: Jan Kara <jack@suse.cz>
+M: Jan Kara <jack@suse.com>
S: Maintained
F: Documentation/filesystems/quota.txt
F: fs/quota/
@@ -3435,7 +3436,7 @@ F: Documentation/hwmon/dme1737
F: drivers/hwmon/dme1737.c
DMI/SMBIOS SUPPORT
-M: Jean Delvare <jdelvare@suse.de>
+M: Jean Delvare <jdelvare@suse.com>
S: Maintained
T: quilt http://jdelvare.nerim.net/devel/linux/jdelvare-dmi/
F: Documentation/ABI/testing/sysfs-firmware-dmi-tables
@@ -3586,6 +3587,15 @@ S: Maintained
F: drivers/gpu/drm/rockchip/
F: Documentation/devicetree/bindings/video/rockchip*
+DRM DRIVERS FOR STI
+M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+M: Vincent Abriou <vincent.abriou@st.com>
+L: dri-devel@lists.freedesktop.org
+T: git http://git.linaro.org/people/benjamin.gaignard/kernel.git
+S: Maintained
+F: drivers/gpu/drm/sti
+F: Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+
DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
L: linux-media@vger.kernel.org
@@ -4051,7 +4061,7 @@ F: drivers/of/of_mdio.c
F: drivers/of/of_net.c
EXT2 FILE SYSTEM
-M: Jan Kara <jack@suse.cz>
+M: Jan Kara <jack@suse.com>
L: linux-ext4@vger.kernel.org
S: Maintained
F: Documentation/filesystems/ext2.txt
@@ -4059,7 +4069,7 @@ F: fs/ext2/
F: include/linux/ext2*
EXT3 FILE SYSTEM
-M: Jan Kara <jack@suse.cz>
+M: Jan Kara <jack@suse.com>
M: Andrew Morton <akpm@linux-foundation.org>
M: Andreas Dilger <adilger.kernel@dilger.ca>
L: linux-ext4@vger.kernel.org
@@ -4109,7 +4119,7 @@ F: drivers/video/fbdev/exynos/exynos_mipi*
F: include/video/exynos_mipi*
F71805F HARDWARE MONITORING DRIVER
-M: Jean Delvare <jdelvare@suse.de>
+M: Jean Delvare <jdelvare@suse.com>
L: lm-sensors@lm-sensors.org
S: Maintained
F: Documentation/hwmon/f71805f
@@ -4244,7 +4254,7 @@ S: Maintained
F: drivers/block/rsxx/
FLOPPY DRIVER
-M: Jiri Kosina <jkosina@suse.cz>
+M: Jiri Kosina <jkosina@suse.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git
S: Odd fixes
F: drivers/block/floppy.c
@@ -4665,7 +4675,7 @@ F: drivers/media/usb/stk1160/
H8/300 ARCHITECTURE
M: Yoshinori Sato <ysato@users.sourceforge.jp>
-L: uclinux-h8-devel@lists.sourceforge.jp
+L: uclinux-h8-devel@lists.sourceforge.jp (moderated for non-subscribers)
W: http://uclinux-h8.sourceforge.jp
T: git git://git.sourceforge.jp/gitroot/uclinux-h8/linux.git
S: Maintained
@@ -4712,7 +4722,7 @@ S: Maintained
F: drivers/media/usb/hackrf/
HARDWARE MONITORING
-M: Jean Delvare <jdelvare@suse.de>
+M: Jean Delvare <jdelvare@suse.com>
M: Guenter Roeck <linux@roeck-us.net>
L: lm-sensors@lm-sensors.org
W: http://www.lm-sensors.org/
@@ -4815,7 +4825,7 @@ F: include/linux/pm.h
F: arch/*/include/asm/suspend*.h
HID CORE LAYER
-M: Jiri Kosina <jkosina@suse.cz>
+M: Jiri Kosina <jkosina@suse.com>
L: linux-input@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
S: Maintained
@@ -4824,7 +4834,7 @@ F: include/linux/hid*
F: include/uapi/linux/hid*
HID SENSOR HUB DRIVERS
-M: Jiri Kosina <jkosina@suse.cz>
+M: Jiri Kosina <jkosina@suse.com>
M: Jonathan Cameron <jic23@kernel.org>
M: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
L: linux-input@vger.kernel.org
@@ -4958,7 +4968,7 @@ F: include/linux/hyperv.h
F: tools/hv/
I2C OVER PARALLEL PORT
-M: Jean Delvare <jdelvare@suse.de>
+M: Jean Delvare <jdelvare@suse.com>
L: linux-i2c@vger.kernel.org
S: Maintained
F: Documentation/i2c/busses/i2c-parport
@@ -4967,7 +4977,7 @@ F: drivers/i2c/busses/i2c-parport.c
F: drivers/i2c/busses/i2c-parport-light.c
I2C/SMBUS CONTROLLER DRIVERS FOR PC
-M: Jean Delvare <jdelvare@suse.de>
+M: Jean Delvare <jdelvare@suse.com>
L: linux-i2c@vger.kernel.org
S: Maintained
F: Documentation/i2c/busses/i2c-ali1535
@@ -5008,7 +5018,7 @@ F: drivers/i2c/busses/i2c-ismt.c
F: Documentation/i2c/busses/i2c-ismt
I2C/SMBUS STUB DRIVER
-M: Jean Delvare <jdelvare@suse.de>
+M: Jean Delvare <jdelvare@suse.com>
L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/i2c-stub.c
@@ -5035,7 +5045,7 @@ L: linux-acpi@vger.kernel.org
S: Maintained
I2C-TAOS-EVM DRIVER
-M: Jean Delvare <jdelvare@suse.de>
+M: Jean Delvare <jdelvare@suse.com>
L: linux-i2c@vger.kernel.org
S: Maintained
F: Documentation/i2c/busses/i2c-taos-evm
@@ -5564,8 +5574,8 @@ F: include/uapi/linux/ip_vs.h
F: net/netfilter/ipvs/
IPWIRELESS DRIVER
-M: Jiri Kosina <jkosina@suse.cz>
-M: David Sterba <dsterba@suse.cz>
+M: Jiri Kosina <jkosina@suse.com>
+M: David Sterba <dsterba@suse.com>
S: Odd Fixes
F: drivers/tty/ipwireless/
@@ -5599,6 +5609,7 @@ F: kernel/irq/
IRQCHIP DRIVERS
M: Thomas Gleixner <tglx@linutronix.de>
M: Jason Cooper <jason@lakedaemon.net>
+M: Marc Zyngier <marc.zyngier@arm.com>
L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
@@ -5607,11 +5618,14 @@ F: Documentation/devicetree/bindings/interrupt-controller/
F: drivers/irqchip/
IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
-M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+M: Jiang Liu <jiang.liu@linux.intel.com>
+M: Marc Zyngier <marc.zyngier@arm.com>
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: Documentation/IRQ-domain.txt
F: include/linux/irqdomain.h
F: kernel/irq/irqdomain.c
+F: kernel/irq/msi.c
ISAPNP
M: Jaroslav Kysela <perex@perex.cz>
@@ -5685,7 +5699,7 @@ S: Maintained
F: drivers/isdn/hardware/eicon/
IT87 HARDWARE MONITORING DRIVER
-M: Jean Delvare <jdelvare@suse.de>
+M: Jean Delvare <jdelvare@suse.com>
L: lm-sensors@lm-sensors.org
S: Maintained
F: Documentation/hwmon/it87
@@ -5752,7 +5766,7 @@ F: include/uapi/linux/jffs2.h
JOURNALLING LAYER FOR BLOCK DEVICES (JBD)
M: Andrew Morton <akpm@linux-foundation.org>
-M: Jan Kara <jack@suse.cz>
+M: Jan Kara <jack@suse.com>
L: linux-ext4@vger.kernel.org
S: Maintained
F: fs/jbd/
@@ -5816,7 +5830,7 @@ S: Maintained
F: fs/autofs4/
KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
-M: Michal Marek <mmarek@suse.cz>
+M: Michal Marek <mmarek@suse.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git for-next
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git rc-fixes
L: linux-kbuild@vger.kernel.org
@@ -5880,7 +5894,7 @@ F: arch/x86/include/asm/svm.h
F: arch/x86/kvm/svm.c
KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
-M: Alexander Graf <agraf@suse.de>
+M: Alexander Graf <agraf@suse.com>
L: kvm-ppc@vger.kernel.org
W: http://kvm.qumranet.com
T: git git://github.com/agraf/linux-2.6.git
@@ -5898,7 +5912,6 @@ S: Supported
F: Documentation/s390/kvm.txt
F: arch/s390/include/asm/kvm*
F: arch/s390/kvm/
-F: drivers/s390/kvm/
KERNEL VIRTUAL MACHINE (KVM) FOR ARM
M: Christoffer Dall <christoffer.dall@linaro.org>
@@ -6037,7 +6050,7 @@ F: drivers/leds/
F: include/linux/leds.h
LEGACY EEPROM DRIVER
-M: Jean Delvare <jdelvare@suse.de>
+M: Jean Delvare <jdelvare@suse.com>
S: Maintained
F: Documentation/misc-devices/eeprom
F: drivers/misc/eeprom/eeprom.c
@@ -6090,7 +6103,7 @@ F: include/linux/ata.h
F: include/linux/libata.h
LIBATA PATA ARASAN COMPACT FLASH CONTROLLER
-M: Viresh Kumar <viresh.linux@gmail.com>
+M: Viresh Kumar <vireshk@kernel.org>
L: linux-ide@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
S: Maintained
@@ -6251,8 +6264,8 @@ F: drivers/platform/x86/hp_accel.c
LIVE PATCHING
M: Josh Poimboeuf <jpoimboe@redhat.com>
M: Seth Jennings <sjenning@redhat.com>
-M: Jiri Kosina <jkosina@suse.cz>
-M: Vojtech Pavlik <vojtech@suse.cz>
+M: Jiri Kosina <jkosina@suse.com>
+M: Vojtech Pavlik <vojtech@suse.com>
S: Maintained
F: kernel/livepatch/
F: include/linux/livepatch.h
@@ -6278,21 +6291,21 @@ S: Maintained
F: drivers/hwmon/lm73.c
LM78 HARDWARE MONITOR DRIVER
-M: Jean Delvare <jdelvare@suse.de>
+M: Jean Delvare <jdelvare@suse.com>
L: lm-sensors@lm-sensors.org
S: Maintained
F: Documentation/hwmon/lm78
F: drivers/hwmon/lm78.c
LM83 HARDWARE MONITOR DRIVER
-M: Jean Delvare <jdelvare@suse.de>
+M: Jean Delvare <jdelvare@suse.com>
L: lm-sensors@lm-sensors.org
S: Maintained
F: Documentation/hwmon/lm83
F: drivers/hwmon/lm83.c
LM90 HARDWARE MONITOR DRIVER
-M: Jean Delvare <jdelvare@suse.de>
+M: Jean Delvare <jdelvare@suse.com>
L: lm-sensors@lm-sensors.org
S: Maintained
F: Documentation/hwmon/lm90
@@ -6838,6 +6851,12 @@ T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/usb/msi2500/
+MSYSTEMS DISKONCHIP G3 MTD DRIVER
+M: Robert Jarzmik <robert.jarzmik@free.fr>
+L: linux-mtd@lists.infradead.org
+S: Maintained
+F: drivers/mtd/devices/docg3*
+
MT9M032 APTINA SENSOR DRIVER
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
@@ -7019,6 +7038,7 @@ F: include/uapi/linux/netfilter/
F: net/*/netfilter.c
F: net/*/netfilter/
F: net/netfilter/
+F: net/bridge/br_netfilter*.c
NETLABEL
M: Paul Moore <paul@paul-moore.com>
@@ -7718,7 +7738,7 @@ S: Maintained
F: drivers/char/pc8736x_gpio.c
PC87427 HARDWARE MONITORING DRIVER
-M: Jean Delvare <jdelvare@suse.de>
+M: Jean Delvare <jdelvare@suse.com>
L: lm-sensors@lm-sensors.org
S: Maintained
F: Documentation/hwmon/pc87427
@@ -7995,7 +8015,7 @@ S: Maintained
F: drivers/pinctrl/samsung/
PIN CONTROLLER - ST SPEAR
-M: Viresh Kumar <viresh.linux@gmail.com>
+M: Viresh Kumar <vireshk@kernel.org>
L: spear-devel@list.st.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.st.com/spear
@@ -8003,7 +8023,7 @@ S: Maintained
F: drivers/pinctrl/spear/
PKTCDVD DRIVER
-M: Jiri Kosina <jkosina@suse.cz>
+M: Jiri Kosina <jkosina@suse.com>
S: Maintained
F: drivers/block/pktcdvd.c
F: include/linux/pktcdvd.h
@@ -8894,7 +8914,7 @@ S: Maintained
F: drivers/tty/serial/
SYNOPSYS DESIGNWARE DMAC DRIVER
-M: Viresh Kumar <viresh.linux@gmail.com>
+M: Viresh Kumar <vireshk@kernel.org>
M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
S: Maintained
F: include/linux/dma/dw.h
@@ -9061,7 +9081,7 @@ S: Maintained
F: drivers/mmc/host/sdhci-s3c*
SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER
-M: Viresh Kumar <viresh.linux@gmail.com>
+M: Viresh Kumar <vireshk@kernel.org>
L: spear-devel@list.st.com
L: linux-mmc@vger.kernel.org
S: Maintained
@@ -9423,7 +9443,7 @@ F: Documentation/hwmon/sch5627
F: drivers/hwmon/sch5627.c
SMSC47B397 HARDWARE MONITOR DRIVER
-M: Jean Delvare <jdelvare@suse.de>
+M: Jean Delvare <jdelvare@suse.com>
L: lm-sensors@lm-sensors.org
S: Maintained
F: Documentation/hwmon/smsc47b397
@@ -9472,7 +9492,7 @@ S: Supported
F: drivers/media/pci/solo6x10/
SOFTWARE RAID (Multiple Disks) SUPPORT
-M: Neil Brown <neilb@suse.de>
+M: Neil Brown <neilb@suse.com>
L: linux-raid@vger.kernel.org
S: Supported
F: drivers/md/
@@ -9515,7 +9535,7 @@ F: drivers/memstick/core/ms_block.*
SOUND
M: Jaroslav Kysela <perex@perex.cz>
-M: Takashi Iwai <tiwai@suse.de>
+M: Takashi Iwai <tiwai@suse.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
W: http://www.alsa-project.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
@@ -9599,7 +9619,7 @@ S: Maintained
F: include/linux/compiler.h
SPEAR PLATFORM SUPPORT
-M: Viresh Kumar <viresh.linux@gmail.com>
+M: Viresh Kumar <vireshk@kernel.org>
M: Shiraz Hashim <shiraz.linux.kernel@gmail.com>
L: spear-devel@list.st.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -9608,7 +9628,7 @@ S: Maintained
F: arch/arm/mach-spear/
SPEAR CLOCK FRAMEWORK SUPPORT
-M: Viresh Kumar <viresh.linux@gmail.com>
+M: Viresh Kumar <vireshk@kernel.org>
L: spear-devel@list.st.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.st.com/spear
@@ -10398,7 +10418,7 @@ K: ^Subject:.*(?i)trivial
TTY LAYER
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-M: Jiri Slaby <jslaby@suse.cz>
+M: Jiri Slaby <jslaby@suse.com>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty.git
F: Documentation/serial/
@@ -10472,7 +10492,7 @@ F: arch/m68k/*/*_no.*
F: arch/m68k/include/asm/*_no.*
UDF FILESYSTEM
-M: Jan Kara <jack@suse.cz>
+M: Jan Kara <jack@suse.com>
S: Maintained
F: Documentation/filesystems/udf.txt
F: fs/udf/
@@ -10615,7 +10635,7 @@ F: drivers/usb/gadget/
F: include/linux/usb/gadget*
USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...)
-M: Jiri Kosina <jkosina@suse.cz>
+M: Jiri Kosina <jkosina@suse.com>
L: linux-usb@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
S: Maintained
@@ -10740,7 +10760,7 @@ S: Maintained
F: drivers/usb/host/uhci*
USB "USBNET" DRIVER FRAMEWORK
-M: Oliver Neukum <oneukum@suse.de>
+M: Oliver Neukum <oneukum@suse.com>
L: netdev@vger.kernel.org
W: http://www.linux-usb.org/usbnet
S: Maintained
@@ -10894,6 +10914,15 @@ F: drivers/block/virtio_blk.c
F: include/linux/virtio_*.h
F: include/uapi/linux/virtio_*.h
+VIRTIO DRIVERS FOR S390
+M: Christian Borntraeger <borntraeger@de.ibm.com>
+M: Cornelia Huck <cornelia.huck@de.ibm.com>
+L: linux-s390@vger.kernel.org
+L: virtualization@lists.linux-foundation.org
+L: kvm@vger.kernel.org
+S: Supported
+F: drivers/s390/virtio/
+
VIRTIO GPU DRIVER
M: David Airlie <airlied@linux.ie>
M: Gerd Hoffmann <kraxel@redhat.com>
@@ -11067,7 +11096,7 @@ F: Documentation/hwmon/w83793
F: drivers/hwmon/w83793.c
W83795 HARDWARE MONITORING DRIVER
-M: Jean Delvare <jdelvare@suse.de>
+M: Jean Delvare <jdelvare@suse.com>
L: lm-sensors@lm-sensors.org
S: Maintained
F: drivers/hwmon/w83795.c
diff --git a/Makefile b/Makefile
index 257ef5892ab7..246053f04fb5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 2
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc8
NAME = Hurr durr I'ma sheep
# *DOCUMENTATION*
@@ -597,6 +597,11 @@ endif # $(dot-config)
# Defaults to vmlinux, but the arch makefile usually adds further targets
all: vmlinux
+# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
+# values of the respective KBUILD_* variables
+ARCH_CPPFLAGS :=
+ARCH_AFLAGS :=
+ARCH_CFLAGS :=
include arch/$(SRCARCH)/Makefile
KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
@@ -780,10 +785,11 @@ endif
include scripts/Makefile.kasan
include scripts/Makefile.extrawarn
-# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
-KBUILD_CPPFLAGS += $(KCPPFLAGS)
-KBUILD_AFLAGS += $(KAFLAGS)
-KBUILD_CFLAGS += $(KCFLAGS)
+# Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the
+# last assignments
+KBUILD_CPPFLAGS += $(ARCH_CPPFLAGS) $(KCPPFLAGS)
+KBUILD_AFLAGS += $(ARCH_AFLAGS) $(KAFLAGS)
+KBUILD_CFLAGS += $(ARCH_CFLAGS) $(KCFLAGS)
# Use --build-id when available.
LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\
@@ -847,10 +853,10 @@ export mod_strip_cmd
mod_compress_cmd = true
ifdef CONFIG_MODULE_COMPRESS
ifdef CONFIG_MODULE_COMPRESS_GZIP
- mod_compress_cmd = gzip -n
+ mod_compress_cmd = gzip -n -f
endif # CONFIG_MODULE_COMPRESS_GZIP
ifdef CONFIG_MODULE_COMPRESS_XZ
- mod_compress_cmd = xz
+ mod_compress_cmd = xz -f
endif # CONFIG_MODULE_COMPRESS_XZ
endif # CONFIG_MODULE_COMPRESS
export mod_compress_cmd
diff --git a/arch/Kconfig b/arch/Kconfig
index bec6666a3cc4..8a8ea7110de8 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -221,6 +221,10 @@ config ARCH_TASK_STRUCT_ALLOCATOR
config ARCH_THREAD_INFO_ALLOCATOR
bool
+# Select if arch wants to size task_struct dynamically via arch_task_struct_size:
+config ARCH_WANTS_DYNAMIC_TASK_STRUCT
+ bool
+
config HAVE_REGS_AND_STACK_ACCESS_API
bool
help
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index cde23cd03609..ffd9cf5ec8c4 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -5,6 +5,7 @@ generic-y += cputime.h
generic-y += exec.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += preempt.h
generic-y += sections.h
generic-y += trace_clock.h
diff --git a/arch/alpha/include/asm/mm-arch-hooks.h b/arch/alpha/include/asm/mm-arch-hooks.h
deleted file mode 100644
index b07fd862fec3..000000000000
--- a/arch/alpha/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_ALPHA_MM_ARCH_HOOKS_H
-#define _ASM_ALPHA_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_ALPHA_MM_ARCH_HOOKS_H */
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index e7cee0a5c56d..bd4670d1b89b 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -115,6 +115,7 @@ if ISA_ARCOMPACT
config ARC_CPU_750D
bool "ARC750D"
+ select ARC_CANT_LLSC
help
Support for ARC750 core
@@ -312,11 +313,11 @@ config ARC_PAGE_SIZE_8K
config ARC_PAGE_SIZE_16K
bool "16KB"
- depends on ARC_MMU_V3
+ depends on ARC_MMU_V3 || ARC_MMU_V4
config ARC_PAGE_SIZE_4K
bool "4KB"
- depends on ARC_MMU_V3
+ depends on ARC_MMU_V3 || ARC_MMU_V4
endchoice
@@ -362,7 +363,12 @@ config ARC_CANT_LLSC
config ARC_HAS_LLSC
bool "Insn: LLOCK/SCOND (efficient atomic ops)"
default y
- depends on !ARC_CPU_750D && !ARC_CANT_LLSC
+ depends on !ARC_CANT_LLSC
+
+config ARC_STAR_9000923308
+ bool "Workaround for llock/scond livelock"
+ default y
+ depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
config ARC_HAS_SWAPE
bool "Insn: SWAPE (endian-swap)"
@@ -378,6 +384,10 @@ config ARC_HAS_LL64
dest operands with 2 possible source operands.
default y
+config ARC_HAS_DIV_REM
+ bool "Insn: div, divu, rem, remu"
+ default y
+
config ARC_HAS_RTC
bool "Local 64-bit r/o cycle counter"
default n
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 6107062c0111..8a27a48304a4 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -36,8 +36,16 @@ cflags-$(atleast_gcc44) += -fsection-anchors
cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
+ifdef CONFIG_ISA_ARCV2
+
ifndef CONFIG_ARC_HAS_LL64
-cflags-$(CONFIG_ISA_ARCV2) += -mno-ll64
+cflags-y += -mno-ll64
+endif
+
+ifndef CONFIG_ARC_HAS_DIV_REM
+cflags-y += -mno-div-rem
+endif
+
endif
cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables
@@ -49,7 +57,8 @@ endif
ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
# Generic build system uses -O2, we want -O3
-cflags-y += -O3
+# Note: No need to add to cflags-y as that happens anyways
+ARCH_CFLAGS += -O3
endif
# small data is default for elf32 tool-chain. If not usable, disable it
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index 15c8d6226c9d..1cd5e82f5dc2 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -12,7 +12,7 @@
/ {
compatible = "snps,arc";
- clock-frequency = <75000000>;
+ clock-frequency = <90000000>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index 199d42820eca..2f0b33257db2 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -12,7 +12,7 @@
/ {
compatible = "snps,arc";
- clock-frequency = <75000000>;
+ clock-frequency = <90000000>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 1a80cc91a03b..7611b10a2d23 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -22,6 +22,7 @@ generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += param.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 070f58827a5c..c8f57b8449dc 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -89,11 +89,10 @@
#define ECR_C_BIT_DTLB_LD_MISS 8
#define ECR_C_BIT_DTLB_ST_MISS 9
-
/* Auxiliary registers */
#define AUX_IDENTITY 4
#define AUX_INTR_VEC_BASE 0x25
-
+#define AUX_NON_VOL 0x5e
/*
* Floating Pt Registers
@@ -240,9 +239,9 @@ struct bcr_extn_xymem {
struct bcr_perip {
#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int start:8, pad2:8, sz:8, pad:8;
+ unsigned int start:8, pad2:8, sz:8, ver:8;
#else
- unsigned int pad:8, sz:8, pad2:8, start:8;
+ unsigned int ver:8, sz:8, pad2:8, start:8;
#endif
};
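
The pad -> ver rename above records that the low byte of this peripheral build-config register carries a version field. As a loose userspace sketch (the 0x12345678 value is invented, and the field order relies on GCC allocating bit-fields from the low bits on little-endian targets, which is what the !CONFIG_CPU_BIG_ENDIAN branch assumes), a raw 32-bit word maps onto the fields like this:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the little-endian branch of struct bcr_perip above. */
struct bcr_perip_le {
	uint32_t ver:8, sz:8, pad2:8, start:8;
};

int main(void)
{
	union {
		uint32_t raw;
		struct bcr_perip_le f;
	} bcr = { .raw = 0x12345678 };	/* invented register value */

	/* low byte -> ver, then sz and pad2, top byte -> start */
	printf("ver=%#x sz=%#x start=%#x\n", bcr.f.ver, bcr.f.sz, bcr.f.start);
	return 0;	/* prints: ver=0x78 sz=0x56 start=0x12 */
}
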
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 03484cb4d16d..87d18ae53115 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -23,33 +23,60 @@
#define atomic_set(v, i) (((v)->counter) = (i))
-#ifdef CONFIG_ISA_ARCV2
-#define PREFETCHW " prefetchw [%1] \n"
-#else
-#define PREFETCHW
+#ifdef CONFIG_ARC_STAR_9000923308
+
+#define SCOND_FAIL_RETRY_VAR_DEF \
+ unsigned int delay = 1, tmp; \
+
+#define SCOND_FAIL_RETRY_ASM \
+ " bz 4f \n" \
+ " ; --- scond fail delay --- \n" \
+ " mov %[tmp], %[delay] \n" /* tmp = delay */ \
+ "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
+ " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
+ " rol %[delay], %[delay] \n" /* delay *= 2 */ \
+ " b 1b \n" /* start over */ \
+ "4: ; --- success --- \n" \
+
+#define SCOND_FAIL_RETRY_VARS \
+ ,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \
+
+#else /* !CONFIG_ARC_STAR_9000923308 */
+
+#define SCOND_FAIL_RETRY_VAR_DEF
+
+#define SCOND_FAIL_RETRY_ASM \
+ " bnz 1b \n" \
+
+#define SCOND_FAIL_RETRY_VARS
+
#endif
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
- unsigned int temp; \
+ unsigned int val; \
+ SCOND_FAIL_RETRY_VAR_DEF \
\
__asm__ __volatile__( \
- "1: \n" \
- PREFETCHW \
- " llock %0, [%1] \n" \
- " " #asm_op " %0, %0, %2 \n" \
- " scond %0, [%1] \n" \
- " bnz 1b \n" \
- : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \
- : "r"(&v->counter), "ir"(i) \
+ "1: llock %[val], [%[ctr]] \n" \
+ " " #asm_op " %[val], %[val], %[i] \n" \
+ " scond %[val], [%[ctr]] \n" \
+ " \n" \
+ SCOND_FAIL_RETRY_ASM \
+ \
+ : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
+ SCOND_FAIL_RETRY_VARS \
+ : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
+ [i] "ir" (i) \
: "cc"); \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
- unsigned int temp; \
+ unsigned int val; \
+ SCOND_FAIL_RETRY_VAR_DEF \
\
/* \
* Explicit full memory barrier needed before/after as \
@@ -58,19 +85,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
smp_mb(); \
\
__asm__ __volatile__( \
- "1: \n" \
- PREFETCHW \
- " llock %0, [%1] \n" \
- " " #asm_op " %0, %0, %2 \n" \
- " scond %0, [%1] \n" \
- " bnz 1b \n" \
- : "=&r"(temp) \
- : "r"(&v->counter), "ir"(i) \
+ "1: llock %[val], [%[ctr]] \n" \
+ " " #asm_op " %[val], %[val], %[i] \n" \
+ " scond %[val], [%[ctr]] \n" \
+ " \n" \
+ SCOND_FAIL_RETRY_ASM \
+ \
+ : [val] "=&r" (val) \
+ SCOND_FAIL_RETRY_VARS \
+ : [ctr] "r" (&v->counter), \
+ [i] "ir" (i) \
: "cc"); \
\
smp_mb(); \
\
- return temp; \
+ return val; \
}
#else /* !CONFIG_ARC_HAS_LLSC */
@@ -150,6 +179,9 @@ ATOMIC_OP(and, &=, and)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
+#undef SCOND_FAIL_RETRY_VAR_DEF
+#undef SCOND_FAIL_RETRY_ASM
+#undef SCOND_FAIL_RETRY_VARS
/**
* __atomic_add_unless - add unless the number is a given value
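
The SCOND_FAIL_RETRY_* macros above encode an exponential-backoff retry for a failed store-conditional: spin idle for 'delay' iterations, double 'delay', then restart the whole LLOCK/SCOND sequence. A rough userspace analogue of that policy, a sketch only that uses C11 atomics in place of LLOCK/SCOND and a compiler barrier to keep the idle loop from being optimised away:

#include <stdatomic.h>
#include <stdio.h>

/* Add 'i' to *v, backing off exponentially after each failed update. */
static void atomic_add_backoff(atomic_int *v, int i)
{
	unsigned int delay = 1;
	int old = atomic_load_explicit(v, memory_order_relaxed);

	/* compare_exchange stands in for the llock/scond pair */
	while (!atomic_compare_exchange_weak(v, &old, old + i)) {
		for (unsigned int tmp = delay; tmp != 0; tmp--)
			__asm__ __volatile__("" ::: "memory");	/* scond fail delay */
		delay *= 2;					/* grow the backoff */
	}
}

int main(void)
{
	atomic_int v = 40;

	atomic_add_backoff(&v, 2);
	printf("%d\n", atomic_load(&v));	/* prints: 42 */
	return 0;
}
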
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 99fe118d3730..57c1f33844d4 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -50,8 +50,7 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
* done for const @nr, but no code is generated due to gcc \
* const prop. \
*/ \
- if (__builtin_constant_p(nr)) \
- nr &= 0x1f; \
+ nr &= 0x1f; \
\
__asm__ __volatile__( \
"1: llock %0, [%1] \n" \
@@ -82,8 +81,7 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *
\
m += nr >> 5; \
\
- if (__builtin_constant_p(nr)) \
- nr &= 0x1f; \
+ nr &= 0x1f; \
\
/* \
* Explicit full memory barrier needed before/after as \
@@ -129,16 +127,13 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
unsigned long temp, flags; \
m += nr >> 5; \
\
- if (__builtin_constant_p(nr)) \
- nr &= 0x1f; \
- \
/* \
* spin lock/unlock provide the needed smp_mb() before/after \
*/ \
bitops_lock(flags); \
\
temp = *m; \
- *m = temp c_op (1UL << nr); \
+ *m = temp c_op (1UL << (nr & 0x1f)); \
\
bitops_unlock(flags); \
}
@@ -149,17 +144,14 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *
unsigned long old, flags; \
m += nr >> 5; \
\
- if (__builtin_constant_p(nr)) \
- nr &= 0x1f; \
- \
bitops_lock(flags); \
\
old = *m; \
- *m = old c_op (1 << nr); \
+ *m = old c_op (1UL << (nr & 0x1f)); \
\
bitops_unlock(flags); \
\
- return (old & (1 << nr)) != 0; \
+ return (old & (1UL << (nr & 0x1f))) != 0; \
}
#endif /* CONFIG_ARC_HAS_LLSC */
@@ -174,11 +166,8 @@ static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m) \
unsigned long temp; \
m += nr >> 5; \
\
- if (__builtin_constant_p(nr)) \
- nr &= 0x1f; \
- \
temp = *m; \
- *m = temp c_op (1UL << nr); \
+ *m = temp c_op (1UL << (nr & 0x1f)); \
}
#define __TEST_N_BIT_OP(op, c_op, asm_op) \
@@ -187,13 +176,10 @@ static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long
unsigned long old; \
m += nr >> 5; \
\
- if (__builtin_constant_p(nr)) \
- nr &= 0x1f; \
- \
old = *m; \
- *m = old c_op (1 << nr); \
+ *m = old c_op (1UL << (nr & 0x1f)); \
\
- return (old & (1 << nr)) != 0; \
+ return (old & (1UL << (nr & 0x1f))) != 0; \
}
#define BIT_OPS(op, c_op, asm_op) \
@@ -224,10 +210,7 @@ test_bit(unsigned int nr, const volatile unsigned long *addr)
addr += nr >> 5;
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- mask = 1 << nr;
+ mask = 1UL << (nr & 0x1f);
return ((mask & *addr) != 0);
}
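
The bitops rework above drops the __builtin_constant_p() guard and masks nr unconditionally, since a shift by 32 or more is undefined behaviour in C whether or not nr is a compile-time constant. A small standalone illustration (not kernel code) of the word/bit split the macros perform:

#include <stdio.h>

/* Set bit 'nr', splitting it into a word offset and a bit offset as above. */
static void set_bit32(unsigned int nr, unsigned long *bitmap)
{
	bitmap += nr >> 5;			/* pick the word */
	*bitmap |= 1UL << (nr & 0x1f);		/* mask keeps the shift below 32 */
}

int main(void)
{
	unsigned long map[2] = { 0, 0 };

	set_bit32(40, map);
	printf("%#lx %#lx\n", map[0], map[1]);	/* prints: 0 0x100 (bit 8 of word 1) */
	return 0;
}
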
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
index 05b5aaf5b0f9..70cfe16b742d 100644
--- a/arch/arc/include/asm/futex.h
+++ b/arch/arc/include/asm/futex.h
@@ -16,12 +16,40 @@
#include <linux/uaccess.h>
#include <asm/errno.h>
+#ifdef CONFIG_ARC_HAS_LLSC
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
+ \
+ __asm__ __volatile__( \
+ "1: llock %1, [%2] \n" \
+ insn "\n" \
+ "2: scond %0, [%2] \n" \
+ " bnz 1b \n" \
+ " mov %0, 0 \n" \
+ "3: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ "4: mov %0, %4 \n" \
+ " b 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .align 4 \n" \
+ " .word 1b, 4b \n" \
+ " .word 2b, 4b \n" \
+ " .previous \n" \
+ \
+ : "=&r" (ret), "=&r" (oldval) \
+ : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \
+ : "cc", "memory")
+
+#else /* !CONFIG_ARC_HAS_LLSC */
+
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
\
__asm__ __volatile__( \
- "1: ld %1, [%2] \n" \
+ "1: ld %1, [%2] \n" \
insn "\n" \
- "2: st %0, [%2] \n" \
+ "2: st %0, [%2] \n" \
" mov %0, 0 \n" \
"3: \n" \
" .section .fixup,\"ax\" \n" \
@@ -39,6 +67,8 @@
: "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \
: "cc", "memory")
+#endif
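
The LLOCK/SCOND flavour of __futex_atomic_op added above, like the llock/scond path added to futex_atomic_cmpxchg_inatomic() further down, follows the usual load-linked/store-conditional contract: load the word, compute or compare, and let a failed store-conditional branch back to the load. A plain-C sketch of the cmpxchg case only (no user-access fixups and no real atomicity, purely to show the intended semantics):

#include <stdio.h>
#include <stdint.h>

/* Returns the value found at *uaddr; the store happens only on a match. */
static uint32_t cmpxchg32(uint32_t *uaddr, uint32_t oldval, uint32_t newval)
{
	uint32_t cur = *uaddr;		/* llock */

	if (cur == oldval)		/* brne ..., 3f */
		*uaddr = newval;	/* scond, retried via bnz 1b on failure */
	return cur;
}

int main(void)
{
	uint32_t futex_word = 0;

	cmpxchg32(&futex_word, 0, 1);	/* matches: 0 -> 1 */
	cmpxchg32(&futex_word, 0, 2);	/* no match: stays 1 */
	printf("%u\n", futex_word);	/* prints: 1 */
	return 0;
}
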
+
static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
@@ -123,11 +153,17 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
pagefault_disable();
- /* TBD : can use llock/scond */
__asm__ __volatile__(
- "1: ld %0, [%3] \n"
- " brne %0, %1, 3f \n"
- "2: st %2, [%3] \n"
+#ifdef CONFIG_ARC_HAS_LLSC
+ "1: llock %0, [%3] \n"
+ " brne %0, %1, 3f \n"
+ "2: scond %2, [%3] \n"
+ " bnz 1b \n"
+#else
+ "1: ld %0, [%3] \n"
+ " brne %0, %1, 3f \n"
+ "2: st %2, [%3] \n"
+#endif
"3: \n"
" .section .fixup,\"ax\" \n"
"4: mov %0, %4 \n"
diff --git a/arch/arc/include/asm/mm-arch-hooks.h b/arch/arc/include/asm/mm-arch-hooks.h
deleted file mode 100644
index c37541c5f8ba..000000000000
--- a/arch/arc/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_ARC_MM_ARCH_HOOKS_H
-#define _ASM_ARC_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_ARC_MM_ARCH_HOOKS_H */
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 91755972b9a2..69095da1fcfd 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -20,20 +20,20 @@
struct pt_regs {
/* Real registers */
- long bta; /* bta_l1, bta_l2, erbta */
+ unsigned long bta; /* bta_l1, bta_l2, erbta */
- long lp_start, lp_end, lp_count;
+ unsigned long lp_start, lp_end, lp_count;
- long status32; /* status32_l1, status32_l2, erstatus */
- long ret; /* ilink1, ilink2 or eret */
- long blink;
- long fp;
- long r26; /* gp */
+ unsigned long status32; /* status32_l1, status32_l2, erstatus */
+ unsigned long ret; /* ilink1, ilink2 or eret */
+ unsigned long blink;
+ unsigned long fp;
+ unsigned long r26; /* gp */
- long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+ unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
- long sp; /* user/kernel sp depending on where we came from */
- long orig_r0;
+ unsigned long sp; /* User/Kernel depending on where we came from */
+ unsigned long orig_r0;
/*
* To distinguish bet excp, syscall, irq
@@ -55,13 +55,13 @@ struct pt_regs {
unsigned long event;
};
- long user_r25;
+ unsigned long user_r25;
};
#else
struct pt_regs {
- long orig_r0;
+ unsigned long orig_r0;
union {
struct {
@@ -76,26 +76,26 @@ struct pt_regs {
unsigned long event;
};
- long bta; /* bta_l1, bta_l2, erbta */
+ unsigned long bta; /* bta_l1, bta_l2, erbta */
- long user_r25;
+ unsigned long user_r25;
- long r26; /* gp */
- long fp;
- long sp; /* user/kernel sp depending on where we came from */
+ unsigned long r26; /* gp */
+ unsigned long fp;
+ unsigned long sp; /* user/kernel sp depending on where we came from */
- long r12;
+ unsigned long r12;
/*------- Below list auto saved by h/w -----------*/
- long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
+ unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
- long blink;
- long lp_end, lp_start, lp_count;
+ unsigned long blink;
+ unsigned long lp_end, lp_start, lp_count;
- long ei, ldi, jli;
+ unsigned long ei, ldi, jli;
- long ret;
- long status32;
+ unsigned long ret;
+ unsigned long status32;
};
#endif
@@ -103,7 +103,7 @@ struct pt_regs {
/* Callee saved registers - need to be saved only when you are scheduled out */
struct callee_regs {
- long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
+ unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
};
#define instruction_pointer(regs) ((regs)->ret)
@@ -142,7 +142,7 @@ struct callee_regs {
static inline long regs_return_value(struct pt_regs *regs)
{
- return regs->r0;
+ return (long)regs->r0;
}
#endif /* !__ASSEMBLY__ */
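
The ptrace.h change is purely a signedness switch: every pt_regs and callee_regs field becomes unsigned long, with regs_return_value() casting back to long for the syscall return path. The patch does not state its motivation here, but a plausible reading is that these fields hold addresses, and ordering address-sized values as signed integers goes wrong once the top bit is set. A hypothetical userspace illustration of that pitfall:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* the same two 32-bit register images, held as signed and unsigned */
	int32_t  sp_s = 0x5ffff000,  ka_s = -0x40000000;	/* bit pattern 0xc0000000 */
	uint32_t sp_u = 0x5ffff000u, ka_u = 0xc0000000u;

	printf("%d\n", ka_s > sp_s);	/* 0: the high address looks negative */
	printf("%d\n", ka_u > sp_u);	/* 1: ordered as addresses */
	return 0;
}
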
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index e1651df6a93d..db8c59d1eaeb 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -18,9 +18,518 @@
#define arch_spin_unlock_wait(x) \
do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
+#ifdef CONFIG_ARC_HAS_LLSC
+
+/*
+ * A normal LLOCK/SCOND based system, w/o need for livelock workaround
+ */
+#ifndef CONFIG_ARC_STAR_9000923308
+
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
- unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+ unsigned int val;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[slock]] \n"
+ " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */
+ " scond %[LOCKED], [%[slock]] \n" /* acquire */
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [slock] "r" (&(lock->slock)),
+ [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned int val, got_it = 0;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[slock]] \n"
+ " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
+ " scond %[LOCKED], [%[slock]] \n" /* acquire */
+ " bnz 1b \n"
+ " mov %[got_it], 1 \n"
+ "4: \n"
+ " \n"
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ : [slock] "r" (&(lock->slock)),
+ [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ smp_mb();
+
+ lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+ smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ smp_mb();
+
+ /*
+ * zero means writer holds the lock exclusively, deny Reader.
+ * Otherwise grant lock to first/subseq reader
+ *
+ * if (rw->counter > 0) {
+ * rw->counter--;
+ * ret = 1;
+ * }
+ */
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */
+ " sub %[val], %[val], 1 \n" /* reader lock */
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter)),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ unsigned int val, got_it = 0;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
+ " sub %[val], %[val], 1 \n" /* counter-- */
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n" /* retry if collided with someone */
+ " mov %[got_it], 1 \n"
+ " \n"
+ "4: ; --- done --- \n"
+
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ : [rwlock] "r" (&(rw->counter)),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ smp_mb();
+
+ /*
+ * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+ * deny writer. Otherwise if unlocked grant to writer
+ * Hence the claim that Linux rwlocks are unfair to writers.
+ * (can be starved for an indefinite time by readers).
+ *
+ * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+ * rw->counter = 0;
+ * ret = 1;
+ * }
+ */
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */
+ " mov %[val], %[WR_LOCKED] \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter)),
+ [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ unsigned int val, got_it = 0;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
+ " mov %[val], %[WR_LOCKED] \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n" /* retry if collided with someone */
+ " mov %[got_it], 1 \n"
+ " \n"
+ "4: ; --- done --- \n"
+
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ : [rwlock] "r" (&(rw->counter)),
+ [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ smp_mb();
+
+ /*
+ * rw->counter++;
+ */
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " add %[val], %[val], 1 \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter))
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ smp_mb();
+
+ rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+
+ smp_mb();
+}
+
+#else /* CONFIG_ARC_STAR_9000923308 */
+
+/*
+ * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
+ * coherency transactions in the SCU. The exclusive line state keeps rotating
+ * among contending cores, leading to a never-ending cycle. So break the cycle
+ * by deferring the retry of a failed exclusive access (SCOND). The actual
+ * delay needed is a function of the number of contending cores as well as the
+ * unrelated coherency traffic from other cores. To keep the code simple, start
+ * off with a small delay of 1, which suffices in most cases, and double the
+ * delay on contention. Eventually the delay is large enough that the coherency
+ * pipeline is drained and a subsequent exclusive access succeeds.
+ * (A user-space sketch of this backoff follows this file's diff.)
+ */
+
+#define SCOND_FAIL_RETRY_VAR_DEF \
+ unsigned int delay, tmp; \
+
+#define SCOND_FAIL_RETRY_ASM \
+ " ; --- scond fail delay --- \n" \
+ " mov %[tmp], %[delay] \n" /* tmp = delay */ \
+ "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
+ " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
+ " rol %[delay], %[delay] \n" /* delay *= 2 */ \
+ " b 1b \n" /* start over */ \
+ " \n" \
+ "4: ; --- done --- \n" \
+
+#define SCOND_FAIL_RETRY_VARS \
+ ,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned int val;
+ SCOND_FAIL_RETRY_VAR_DEF;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "0: mov %[delay], 1 \n"
+ "1: llock %[val], [%[slock]] \n"
+ " breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */
+ " scond %[LOCKED], [%[slock]] \n" /* acquire */
+ " bz 4f \n" /* done */
+ " \n"
+ SCOND_FAIL_RETRY_ASM
+
+ : [val] "=&r" (val)
+ SCOND_FAIL_RETRY_VARS
+ : [slock] "r" (&(lock->slock)),
+ [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned int val, got_it = 0;
+ SCOND_FAIL_RETRY_VAR_DEF;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "0: mov %[delay], 1 \n"
+ "1: llock %[val], [%[slock]] \n"
+ " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
+ " scond %[LOCKED], [%[slock]] \n" /* acquire */
+ " bz.d 4f \n"
+ " mov.z %[got_it], 1 \n" /* got it */
+ " \n"
+ SCOND_FAIL_RETRY_ASM
+
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ SCOND_FAIL_RETRY_VARS
+ : [slock] "r" (&(lock->slock)),
+ [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ smp_mb();
+
+ lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+ smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+ SCOND_FAIL_RETRY_VAR_DEF;
+
+ smp_mb();
+
+	/*
+	 * A counter of zero means a writer holds the lock exclusively: deny
+	 * the reader. Otherwise grant the lock to the first/subsequent reader.
+	 *
+	 * if (rw->counter > 0) {
+	 * rw->counter--;
+	 * ret = 1;
+	 * }
+	 */
+
+ __asm__ __volatile__(
+ "0: mov %[delay], 1 \n"
+ "1: llock %[val], [%[rwlock]] \n"
+ " brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */
+ " sub %[val], %[val], 1 \n" /* reader lock */
+ " scond %[val], [%[rwlock]] \n"
+ " bz 4f \n" /* done */
+ " \n"
+ SCOND_FAIL_RETRY_ASM
+
+ : [val] "=&r" (val)
+ SCOND_FAIL_RETRY_VARS
+ : [rwlock] "r" (&(rw->counter)),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ unsigned int val, got_it = 0;
+ SCOND_FAIL_RETRY_VAR_DEF;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "0: mov %[delay], 1 \n"
+ "1: llock %[val], [%[rwlock]] \n"
+ " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
+ " sub %[val], %[val], 1 \n" /* counter-- */
+ " scond %[val], [%[rwlock]] \n"
+ " bz.d 4f \n"
+ " mov.z %[got_it], 1 \n" /* got it */
+ " \n"
+ SCOND_FAIL_RETRY_ASM
+
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ SCOND_FAIL_RETRY_VARS
+ : [rwlock] "r" (&(rw->counter)),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+ SCOND_FAIL_RETRY_VAR_DEF;
+
+ smp_mb();
+
+ /*
+ * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+	 * deny the writer. Otherwise, if unlocked, grant it to the writer.
+	 * Hence the claim that Linux rwlocks are unfair to writers
+	 * (they can be starved for an indefinite time by readers).
+ *
+ * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+ * rw->counter = 0;
+ * ret = 1;
+ * }
+ */
+
+ __asm__ __volatile__(
+ "0: mov %[delay], 1 \n"
+ "1: llock %[val], [%[rwlock]] \n"
+ " brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */
+ " mov %[val], %[WR_LOCKED] \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bz 4f \n"
+ " \n"
+ SCOND_FAIL_RETRY_ASM
+
+ : [val] "=&r" (val)
+ SCOND_FAIL_RETRY_VARS
+ : [rwlock] "r" (&(rw->counter)),
+ [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ unsigned int val, got_it = 0;
+ SCOND_FAIL_RETRY_VAR_DEF;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "0: mov %[delay], 1 \n"
+ "1: llock %[val], [%[rwlock]] \n"
+ " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
+ " mov %[val], %[WR_LOCKED] \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bz.d 4f \n"
+ " mov.z %[got_it], 1 \n" /* got it */
+ " \n"
+ SCOND_FAIL_RETRY_ASM
+
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ SCOND_FAIL_RETRY_VARS
+ : [rwlock] "r" (&(rw->counter)),
+ [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ smp_mb();
+
+ /*
+ * rw->counter++;
+ */
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " add %[val], %[val], 1 \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter))
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ smp_mb();
+
+ /*
+ * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+ */
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " scond %[UNLOCKED], [%[rwlock]]\n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter)),
+ [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+#undef SCOND_FAIL_RETRY_VAR_DEF
+#undef SCOND_FAIL_RETRY_ASM
+#undef SCOND_FAIL_RETRY_VARS
+
+#endif /* CONFIG_ARC_STAR_9000923308 */
+
+#else /* !CONFIG_ARC_HAS_LLSC */
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
/*
* This smp_mb() is technically superfluous, we only need the one
@@ -33,7 +542,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
__asm__ __volatile__(
"1: ex %0, [%1] \n"
" breq %0, %2, 1b \n"
- : "+&r" (tmp)
+ : "+&r" (val)
: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
: "memory");
@@ -48,26 +557,27 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
smp_mb();
}
+/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
- unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+ unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
smp_mb();
__asm__ __volatile__(
"1: ex %0, [%1] \n"
- : "+r" (tmp)
+ : "+r" (val)
: "r"(&(lock->slock))
: "memory");
smp_mb();
- return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
+ return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+ unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;
/*
* RELEASE barrier: given the instructions avail on ARCv2, full barrier
@@ -77,7 +587,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
__asm__ __volatile__(
" ex %0, [%1] \n"
- : "+r" (tmp)
+ : "+r" (val)
: "r"(&(lock->slock))
: "memory");
@@ -90,19 +600,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
/*
* Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
*
* The spinlock itself is contained in @counter and access to it is
* serialized with @lock_mutex.
- *
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
*/
-/* Would read_trylock() succeed? */
-#define arch_read_can_lock(x) ((x)->counter > 0)
-
-/* Would write_trylock() succeed? */
-#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
-
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
@@ -173,6 +676,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
arch_spin_unlock(&(rw->lock_mutex));
}
+#endif
+
+#define arch_read_can_lock(x) ((x)->counter > 0)
+#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
+
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
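
The comment at the top of the CONFIG_ARC_STAR_9000923308 branch above describes the whole technique: after a failed SCOND, burn `delay` cycles and double the delay before retrying, so the SCU coherency pipeline can drain. A minimal user-space model of that backoff, with C11 atomics standing in for the llock/scond pair (all names below are illustrative, not the kernel's):

	#include <stdatomic.h>
	#include <stdio.h>

	#define SPIN_UNLOCKED 0u
	#define SPIN_LOCKED   1u

	/* Model of the lock word; the real code operates on lock->slock. */
	typedef struct { atomic_uint slock; } model_spinlock_t;

	/* Stand-in for the "mov/brne.d/sub" cycle-burning delay loop. */
	static void delay_cycles(unsigned int n)
	{
		while (n--)
			atomic_signal_fence(memory_order_seq_cst);
	}

	static void model_spin_lock(model_spinlock_t *lock)
	{
		unsigned int delay = 1;

		for (;;) {
			unsigned int expected = SPIN_UNLOCKED;

			/*
			 * compare_exchange models the llock/scond pair: it can
			 * fail because the lock is held or because another core
			 * raced us, just as scond can fail for either reason.
			 */
			if (atomic_compare_exchange_weak_explicit(&lock->slock,
					&expected, SPIN_LOCKED,
					memory_order_acquire, memory_order_relaxed))
				return;

			/* Back off, then double the delay (the "delay *= 2" rol). */
			delay_cycles(delay);
			delay <<= 1;
		}
	}

	static void model_spin_unlock(model_spinlock_t *lock)
	{
		atomic_store_explicit(&lock->slock, SPIN_UNLOCKED,
				      memory_order_release);
	}

	int main(void)
	{
		model_spinlock_t lock = { SPIN_UNLOCKED };

		model_spin_lock(&lock);
		puts("locked");
		model_spin_unlock(&lock);
		puts("unlocked");
		return 0;
	}

The weak compare-exchange is deliberate: like scond, it is allowed to fail spuriously, which is exactly the case the backoff exists to handle.
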
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
index 662627ced4f2..4e1ef5f650c6 100644
--- a/arch/arc/include/asm/spinlock_types.h
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -26,7 +26,9 @@ typedef struct {
*/
typedef struct {
volatile unsigned int counter;
+#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t lock_mutex;
+#endif
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
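
The rwlock encoding used by the LLSC paths above is worth spelling out: the counter starts at __ARCH_RW_LOCK_UNLOCKED__ (0x01000000); each reader decrements it, and a writer may only take the lock when the counter is exactly at the unlocked value, which it then overwrites with 0. A single-threaded C model of just those state transitions (no atomics; it only illustrates the counter arithmetic):

	#include <stdbool.h>
	#include <stdio.h>

	#define RW_UNLOCKED	0x01000000u	/* __ARCH_RW_LOCK_UNLOCKED__ */
	#define RW_WRITE_LOCKED	0u

	static unsigned int counter = RW_UNLOCKED;

	static bool read_trylock(void)
	{
		if (counter <= RW_WRITE_LOCKED)	/* a writer holds it */
			return false;
		counter--;			/* one more reader */
		return true;
	}

	static bool write_trylock(void)
	{
		if (counter != RW_UNLOCKED)	/* readers (or a writer) present */
			return false;
		counter = RW_WRITE_LOCKED;
		return true;
	}

	int main(void)
	{
		printf("reader: %d\n", read_trylock());	/* 1: counter now 0x00ffffff */
		printf("writer: %d\n", write_trylock());/* 0: a reader is in         */
		counter++;				/* read_unlock()             */
		printf("writer: %d\n", write_trylock());/* 1: counter now 0          */
		return 0;
	}

This also makes the unfairness visible: as long as any reader holds the counter below the unlocked value, write_trylock() keeps failing.
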
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 76a7739aab1c..0b3ef63d4a03 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -32,20 +32,20 @@
*/
struct user_regs_struct {
- long pad;
+ unsigned long pad;
struct {
- long bta, lp_start, lp_end, lp_count;
- long status32, ret, blink, fp, gp;
- long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
- long sp;
+ unsigned long bta, lp_start, lp_end, lp_count;
+ unsigned long status32, ret, blink, fp, gp;
+ unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+ unsigned long sp;
} scratch;
- long pad2;
+ unsigned long pad2;
struct {
- long r25, r24, r23, r22, r21, r20;
- long r19, r18, r17, r16, r15, r14, r13;
+ unsigned long r25, r24, r23, r22, r21, r20;
+ unsigned long r19, r18, r17, r16, r15, r14, r13;
} callee;
- long efa; /* break pt addr, for break points in delay slots */
- long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */
+ unsigned long efa; /* break pt addr, for break points in delay slots */
+ unsigned long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */
};
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 6208c630abed..26c156827479 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -12,7 +12,6 @@
#include <linux/of.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
-#include "../../drivers/irqchip/irqchip.h"
#include <asm/irq.h>
/*
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index fcdddb631766..039fac30b5c1 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -12,7 +12,6 @@
#include <linux/of.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
-#include "../../drivers/irqchip/irqchip.h"
#include <asm/irq.h>
/*
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 30284e8de6ff..2fb86589054d 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -175,7 +175,6 @@ void mcip_init_early_smp(void)
#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include "../../drivers/irqchip/irqchip.h"
/*
* Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
@@ -218,11 +217,28 @@ static void idu_irq_unmask(struct irq_data *data)
raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
+#ifdef CONFIG_SMP
static int
-idu_irq_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool f)
+idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
+ bool force)
{
+ unsigned long flags;
+ cpumask_t online;
+
+	/* error out if @cpumask contains no online CPU */
+ if (!cpumask_and(&online, cpumask, cpu_online_mask))
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+
+ idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
+ idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
+
+ raw_spin_unlock_irqrestore(&mcip_lock, flags);
+
return IRQ_SET_MASK_OK;
}
+#endif
static struct irq_chip idu_irq_chip = {
.name = "MCIP IDU Intc",
@@ -330,8 +346,7 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
if (!i)
idu_first_irq = irq;
- irq_set_handler_data(irq, domain);
- irq_set_chained_handler(irq, idu_cascade_isr);
+ irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
}
__mcip_cmd(CMD_IDU_ENABLE, 0);
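
The idu_irq_set_affinity() added above first intersects the requested mask with cpu_online_mask, bails out with -EINVAL if nothing is left, and then programs the surviving bits as the IDU destination. A stand-alone sketch of that validation step, with cpumask_t reduced to a plain unsigned long (the names and mask values below are made up for illustration):

	#include <errno.h>
	#include <stdio.h>

	/* One bit per core, mirroring cpumask_bits(&online)[0] in the patch. */
	static int set_affinity(unsigned long requested, unsigned long online_mask,
				unsigned long *dest_out)
	{
		unsigned long online = requested & online_mask; /* cpumask_and() */

		if (!online)		/* no online CPU in @requested */
			return -EINVAL;

		*dest_out = online;	/* what idu_set_dest() would program */
		return 0;
	}

	int main(void)
	{
		unsigned long dest;

		/* cores 2,3 requested, cores 0,1 online -> rejected */
		printf("%d\n", set_affinity(0xc, 0x3, &dest));	/* prints -EINVAL */

		/* cores 1,2 requested, cores 0,1 online -> core 1 programmed */
		if (!set_affinity(0x6, 0x3, &dest))
			printf("dest mask 0x%lx\n", dest);	/* prints 0x2 */
		return 0;
	}

The real callback additionally re-arms round-robin distribution via idu_set_mode(), which the sketch leaves out.
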
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index a3d186211ed3..cabde9dc0696 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -47,6 +47,7 @@ static void read_arc_build_cfg_regs(void)
struct bcr_perip uncached_space;
struct bcr_generic bcr;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+ unsigned long perip_space;
FIX_PTR(cpu);
READ_BCR(AUX_IDENTITY, cpu->core);
@@ -56,7 +57,12 @@ static void read_arc_build_cfg_regs(void)
cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
- BUG_ON((uncached_space.start << 24) != ARC_UNCACHED_ADDR_SPACE);
+ if (uncached_space.ver < 3)
+ perip_space = uncached_space.start << 24;
+ else
+ perip_space = read_aux_reg(AUX_NON_VOL) & 0xF0000000;
+
+ BUG_ON(perip_space != ARC_UNCACHED_ADDR_SPACE);
READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
@@ -142,17 +148,22 @@ static void read_arc_build_cfg_regs(void)
}
static const struct cpuinfo_data arc_cpu_tbl[] = {
+#ifdef CONFIG_ISA_ARCOMPACT
{ {0x20, "ARC 600" }, 0x2F},
{ {0x30, "ARC 700" }, 0x33},
{ {0x34, "ARC 700 R4.10"}, 0x34},
{ {0x35, "ARC 700 R4.11"}, 0x35},
- { {0x50, "ARC HS38" }, 0x51},
+#else
+ { {0x50, "ARC HS38 R2.0"}, 0x51},
+ { {0x52, "ARC HS38 R2.1"}, 0x52},
+#endif
{ {0x00, NULL } }
};
-#define IS_AVAIL1(v, str) ((v) ? str : "")
-#define IS_USED(cfg) (IS_ENABLED(cfg) ? "" : "(not used) ")
-#define IS_AVAIL2(v, str, cfg) IS_AVAIL1(v, str), IS_AVAIL1(v, IS_USED(cfg))
+#define IS_AVAIL1(v, s) ((v) ? s : "")
+#define IS_USED_RUN(v) ((v) ? "" : "(not used) ")
+#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg))
+#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{
@@ -226,7 +237,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt);
}
n += scnprintf(buf + n, len - n, "%s",
- IS_USED(CONFIG_ARC_HAS_HW_MPY));
+ IS_USED_CFG(CONFIG_ARC_HAS_HW_MPY));
}
n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
@@ -325,6 +336,10 @@ static void arc_chk_core_config(void)
pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n");
else if (!cpu->extn.fpu_dp && fpu_enabled)
panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
+
+ if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
+ !IS_ENABLED(CONFIG_ARC_STAR_9000923308))
+ panic("llock/scond livelock workaround missing\n");
}
/*
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 3364d2bbc515..4294761a2b3e 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -203,34 +203,24 @@ static int arc_clkevent_set_next_event(unsigned long delta,
return 0;
}
-static void arc_clkevent_set_mode(enum clock_event_mode mode,
- struct clock_event_device *dev)
+static int arc_clkevent_set_periodic(struct clock_event_device *dev)
{
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- /*
- * At X Hz, 1 sec = 1000ms -> X cycles;
- * 10ms -> X / 100 cycles
- */
- arc_timer_event_setup(arc_get_core_freq() / HZ);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- break;
- default:
- break;
- }
-
- return;
+ /*
+ * At X Hz, 1 sec = 1000ms -> X cycles;
+ * 10ms -> X / 100 cycles
+ */
+ arc_timer_event_setup(arc_get_core_freq() / HZ);
+ return 0;
}
static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
- .name = "ARC Timer0",
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
- .mode = CLOCK_EVT_MODE_UNUSED,
- .rating = 300,
- .irq = TIMER0_IRQ, /* hardwired, no need for resources */
- .set_next_event = arc_clkevent_set_next_event,
- .set_mode = arc_clkevent_set_mode,
+ .name = "ARC Timer0",
+ .features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERIODIC,
+ .rating = 300,
+ .irq = TIMER0_IRQ, /* hardwired, no need for resources */
+ .set_next_event = arc_clkevent_set_next_event,
+ .set_state_periodic = arc_clkevent_set_periodic,
};
static irqreturn_t timer_irq_handler(int irq, void *dev_id)
@@ -240,7 +230,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
* irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
*/
struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
- int irq_reenable = evt->mode == CLOCK_EVT_MODE_PERIODIC;
+ int irq_reenable = clockevent_state_periodic(evt);
/*
* Any write to CTRL reg ACks the interrupt, we rewrite the
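
The arithmetic in the comment carried over into arc_clkevent_set_periodic() is simply core_freq / HZ cycles per tick. A worked instance, assuming the 90 MHz default mentioned in the AXS103 hunk later in this patch and HZ=100 (both values are assumptions for the example):

	#include <stdio.h>

	int main(void)
	{
		unsigned long core_freq = 90000000;	/* 90 MHz, per the AXS103 DT default */
		unsigned long hz = 100;			/* assumed CONFIG_HZ */

		/* cycles loaded into TIMER0 for each periodic tick */
		unsigned long cycles = core_freq / hz;

		printf("%lu cycles per %lu ms tick\n", cycles, 1000 / hz);
		/* prints: 900000 cycles per 10 ms tick */
		return 0;
	}
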
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 807f7d61d7a7..a6f91e88ce36 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -58,7 +58,6 @@ static void show_callee_regs(struct callee_regs *cregs)
static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
{
- struct path path;
char *path_nm = NULL;
struct mm_struct *mm;
struct file *exe_file;
diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
index 1b2b3acfed52..0cab0b8a57c5 100644
--- a/arch/arc/lib/memcpy-archs.S
+++ b/arch/arc/lib/memcpy-archs.S
@@ -206,7 +206,7 @@ unalignedOffby3:
ld.ab r6, [r1, 4]
prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
- prefetch [r3, 32] ;Prefetch the next write location
+ prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 8)
or r7, r7, r5
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
index 92d573c734b5..365b18364815 100644
--- a/arch/arc/lib/memset-archs.S
+++ b/arch/arc/lib/memset-archs.S
@@ -10,12 +10,6 @@
#undef PREALLOC_NOT_AVAIL
-#ifdef PREALLOC_NOT_AVAIL
-#define PREWRITE(A,B) prefetchw [(A),(B)]
-#else
-#define PREWRITE(A,B) prealloc [(A),(B)]
-#endif
-
ENTRY(memset)
prefetchw [r0] ; Prefetch the write location
mov.f 0, r2
@@ -51,9 +45,15 @@ ENTRY(memset)
;;; Convert len to Dwords, unfold x8
lsr.f lp_count, lp_count, 6
+
lpnz @.Lset64bytes
;; LOOP START
- PREWRITE(r3, 64) ;Prefetch the next write location
+#ifdef PREALLOC_NOT_AVAIL
+ prefetchw [r3, 64] ;Prefetch the next write location
+#else
+ prealloc [r3, 64]
+#endif
+#ifdef CONFIG_ARC_HAS_LL64
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
@@ -62,16 +62,45 @@ ENTRY(memset)
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
+#else
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+#endif
.Lset64bytes:
lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
lpnz .Lset32bytes
;; LOOP START
prefetchw [r3, 32] ;Prefetch the next write location
+#ifdef CONFIG_ARC_HAS_LL64
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
+#else
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+#endif
.Lset32bytes:
and.f lp_count, r2, 0x1F ;Last remaining 31 bytes
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index b29d62ed4f7e..1cd6695b6ab5 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -468,10 +468,18 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
+	/*
+	 * The SLC is shared between all cores, so concurrent aux operations
+	 * from multiple cores must be serialized with a spinlock. Otherwise a
+	 * concurrent operation can be silently ignored and/or the old/new
+	 * operation can remain incomplete forever (lockup in the SLC_CTRL_BUSY
+	 * loop below). (A user-space sketch of this hazard follows this file's
+	 * diff.)
+	 */
+ static DEFINE_SPINLOCK(lock);
unsigned long flags;
unsigned int ctrl;
- local_irq_save(flags);
+ spin_lock_irqsave(&lock, flags);
/*
* The Region Flush operation is specified by CTRL.RGN_OP[11..9]
@@ -504,7 +512,7 @@ noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&lock, flags);
#endif
}
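
The new comment in slc_op() is the key point: the SLC aux registers are one resource shared by all cores, so disabling interrupts on the local core (the old local_irq_save()) cannot exclude the other core; only a lock shared between cores can. A user-space pthread model of the same hazard and fix (the flag and function names are made up; build with -pthread):

	#include <assert.h>
	#include <pthread.h>
	#include <stdio.h>

	/* Fake shared cache controller: one in-flight region op at a time. */
	static int slc_busy;			/* models SLC_CTRL_BUSY */
	static pthread_mutex_t slc_lock = PTHREAD_MUTEX_INITIALIZER;

	static void slc_op_model(int op)
	{
		pthread_mutex_lock(&slc_lock);	/* the spin_lock_irqsave() in the patch */

		assert(!slc_busy);		/* a concurrent op would trip this */
		slc_busy = 1;
		(void)op;			/* program start/end/ctrl registers here */
		slc_busy = 0;			/* hardware clears BUSY on completion */

		pthread_mutex_unlock(&slc_lock);
	}

	static void *worker(void *arg)
	{
		for (int i = 0; i < 100000; i++)
			slc_op_model((int)(long)arg);
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		pthread_create(&a, NULL, worker, (void *)1L);
		pthread_create(&b, NULL, worker, (void *)2L);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		puts("all SLC ops serialized");
		return 0;
	}

Blocking signals in one thread would be the analogue of local_irq_save() here, and it would do nothing to keep the second worker out of slc_op_model().
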
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 74a637a1cfc4..57706a9c6948 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -60,8 +60,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
/* This is kernel Virtual address (0x7000_0000 based) */
kvaddr = ioremap_nocache((unsigned long)paddr, size);
- if (kvaddr != NULL)
- memset(kvaddr, 0, size);
+ if (kvaddr == NULL)
+ return NULL;
/* This is bus address, platform dependent */
*dma_handle = (dma_addr_t)paddr;
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index 99f7da513a48..e7769c3ab5f2 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -389,6 +389,21 @@ axs103_set_freq(unsigned int id, unsigned int fd, unsigned int od)
static void __init axs103_early_init(void)
{
+	/*
+	 * The AXS103 SMP and QUAD configurations share a device tree which
+	 * defaults to 90 MHz. However, recent failures of the Quad config
+	 * revealed P&R timing violations, so clamp it down to a safe 50 MHz.
+	 * Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack.
+	 *
+	 * This is still a hack for now. Fix it properly by getting the number
+	 * of cores as the return value of the platform's early SMP callback.
+	 */
+#ifdef CONFIG_ARC_MCIP
+ unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
+ if (num_cores > 2)
+ arc_set_core_freq(50 * 1000000);
+#endif
+
switch (arc_get_core_freq()/1000000) {
case 33:
axs103_set_freq(1, 1, 1);
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 07ab3d203916..7451b447cc2d 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -312,6 +312,9 @@ INSTALL_TARGETS = zinstall uinstall install
PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+bootpImage uImage: zImage
+zImage: Image
+
$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
diff --git a/arch/arm/boot/dts/am335x-pepper.dts b/arch/arm/boot/dts/am335x-pepper.dts
index 0d35ab64641c..7106114c7464 100644
--- a/arch/arm/boot/dts/am335x-pepper.dts
+++ b/arch/arm/boot/dts/am335x-pepper.dts
@@ -74,6 +74,7 @@
audio_codec: tlv320aic3106@1b {
compatible = "ti,tlv320aic3106";
reg = <0x1b>;
+ ai3x-micbias-vg = <0x2>;
};
accel: lis331dlh@1d {
@@ -153,7 +154,7 @@
ti,audio-routing =
"Headphone Jack", "HPLOUT",
"Headphone Jack", "HPROUT",
- "LINE1L", "Line In";
+ "MIC3L", "Mic3L Switch";
};
&mcasp0 {
@@ -438,41 +439,50 @@
regulators {
dcdc1_reg: regulator@0 {
/* VDD_1V8 system supply */
+ regulator-always-on;
};
dcdc2_reg: regulator@1 {
/* VDD_CORE voltage limits 0.95V - 1.26V with +/-4% tolerance */
regulator-name = "vdd_core";
regulator-min-microvolt = <925000>;
- regulator-max-microvolt = <1325000>;
+ regulator-max-microvolt = <1150000>;
regulator-boot-on;
+ regulator-always-on;
};
dcdc3_reg: regulator@2 {
/* VDD_MPU voltage limits 0.95V - 1.1V with +/-4% tolerance */
regulator-name = "vdd_mpu";
regulator-min-microvolt = <925000>;
- regulator-max-microvolt = <1150000>;
+ regulator-max-microvolt = <1325000>;
regulator-boot-on;
+ regulator-always-on;
};
ldo1_reg: regulator@3 {
/* VRTC 1.8V always-on supply */
+ regulator-name = "vrtc,vdds";
regulator-always-on;
};
ldo2_reg: regulator@4 {
/* 3.3V rail */
+ regulator-name = "vdd_3v3aux";
+ regulator-always-on;
};
ldo3_reg: regulator@5 {
/* VDD_3V3A 3.3V rail */
+ regulator-name = "vdd_3v3a";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
ldo4_reg: regulator@6 {
/* VDD_3V3B 3.3V rail */
+ regulator-name = "vdd_3v3b";
+ regulator-always-on;
};
};
};
diff --git a/arch/arm/boot/dts/cros-ec-keyboard.dtsi b/arch/arm/boot/dts/cros-ec-keyboard.dtsi
index 9c7fb0acae79..4e42f30cb318 100644
--- a/arch/arm/boot/dts/cros-ec-keyboard.dtsi
+++ b/arch/arm/boot/dts/cros-ec-keyboard.dtsi
@@ -22,6 +22,7 @@
MATRIX_KEY(0x00, 0x02, KEY_F1)
MATRIX_KEY(0x00, 0x03, KEY_B)
MATRIX_KEY(0x00, 0x04, KEY_F10)
+ MATRIX_KEY(0x00, 0x05, KEY_RO)
MATRIX_KEY(0x00, 0x06, KEY_N)
MATRIX_KEY(0x00, 0x08, KEY_EQUAL)
MATRIX_KEY(0x00, 0x0a, KEY_RIGHTALT)
@@ -34,6 +35,7 @@
MATRIX_KEY(0x01, 0x08, KEY_APOSTROPHE)
MATRIX_KEY(0x01, 0x09, KEY_F9)
MATRIX_KEY(0x01, 0x0b, KEY_BACKSPACE)
+ MATRIX_KEY(0x01, 0x0c, KEY_HENKAN)
MATRIX_KEY(0x02, 0x00, KEY_LEFTCTRL)
MATRIX_KEY(0x02, 0x01, KEY_TAB)
@@ -45,6 +47,7 @@
MATRIX_KEY(0x02, 0x07, KEY_102ND)
MATRIX_KEY(0x02, 0x08, KEY_LEFTBRACE)
MATRIX_KEY(0x02, 0x09, KEY_F8)
+ MATRIX_KEY(0x02, 0x0a, KEY_YEN)
MATRIX_KEY(0x03, 0x01, KEY_GRAVE)
MATRIX_KEY(0x03, 0x02, KEY_F2)
@@ -53,6 +56,7 @@
MATRIX_KEY(0x03, 0x06, KEY_6)
MATRIX_KEY(0x03, 0x08, KEY_MINUS)
MATRIX_KEY(0x03, 0x0b, KEY_BACKSLASH)
+ MATRIX_KEY(0x03, 0x0c, KEY_MUHENKAN)
MATRIX_KEY(0x04, 0x00, KEY_RIGHTCTRL)
MATRIX_KEY(0x04, 0x01, KEY_A)
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index aa465904f6cc..096f68be99e2 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -686,7 +686,8 @@
&dcan1 {
status = "ok";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&dcan1_pins_default>;
+ pinctrl-names = "default", "sleep", "active";
+ pinctrl-0 = <&dcan1_pins_sleep>;
pinctrl-1 = <&dcan1_pins_sleep>;
+ pinctrl-2 = <&dcan1_pins_default>;
};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 8f1e25bcecbd..1e29ccf77ea2 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -116,7 +116,7 @@
ranges = <0 0x2000 0x2000>;
scm_conf: scm_conf@0 {
- compatible = "syscon";
+ compatible = "syscon", "simple-bus";
reg = <0x0 0x1400>;
#address-cells = <1>;
#size-cells = <1>;
@@ -1140,6 +1140,7 @@
ctrl-module = <&omap_control_sata>;
clocks = <&sys_clkin1>, <&sata_ref_clk>;
clock-names = "sysclk", "refclk";
+ syscon-pllreset = <&scm_conf 0x3fc>;
#phy-cells = <0>;
};
diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts
index 4e1b60581782..803738414086 100644
--- a/arch/arm/boot/dts/dra72-evm.dts
+++ b/arch/arm/boot/dts/dra72-evm.dts
@@ -587,9 +587,10 @@
&dcan1 {
status = "ok";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&dcan1_pins_default>;
+ pinctrl-names = "default", "sleep", "active";
+ pinctrl-0 = <&dcan1_pins_sleep>;
pinctrl-1 = <&dcan1_pins_sleep>;
+ pinctrl-2 = <&dcan1_pins_default>;
};
&qspi {
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index d7201333e3bc..2db99433e17f 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -138,8 +138,8 @@
mipi_phy: video-phy@10020710 {
compatible = "samsung,s5pv210-mipi-video-phy";
- reg = <0x10020710 8>;
#phy-cells = <1>;
+ syscon = <&pmu_system_controller>;
};
pd_cam: cam-power-domain@10023C00 {
diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts
index e0abfc3324d1..e050d85cdacd 100644
--- a/arch/arm/boot/dts/exynos4210-origen.dts
+++ b/arch/arm/boot/dts/exynos4210-origen.dts
@@ -127,6 +127,10 @@
};
};
+&cpu0 {
+ cpu0-supply = <&buck1_reg>;
+};
+
&fimd {
pinctrl-0 = <&lcd_en &lcd_clk &lcd_data24 &pwm0_out>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index 98f3ce65cb9a..ba34886f8b65 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -188,6 +188,10 @@
};
};
+&cpu0 {
+ cpu0-supply = <&varm_breg>;
+};
+
&dsi_0 {
vddcore-supply = <&vusb_reg>;
vddio-supply = <&vmipi_reg>;
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index d4f2b11319dd..775892b2cc6a 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -548,6 +548,10 @@
};
};
+&cpu0 {
+ cpu0-supply = <&vdd_arm_reg>;
+};
+
&pinctrl_1 {
hdmi_hpd: hdmi-hpd {
samsung,pins = "gpx3-7";
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index 10d3c173396e..3e5ba665d200 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -40,6 +40,18 @@
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0x900>;
+ clocks = <&clock CLK_ARM_CLK>;
+ clock-names = "cpu";
+ clock-latency = <160000>;
+
+ operating-points = <
+ 1200000 1250000
+ 1000000 1150000
+ 800000 1075000
+ 500000 975000
+ 400000 975000
+ 200000 950000
+ >;
cooling-min-level = <4>;
cooling-max-level = <2>;
#cooling-cells = <2>; /* min followed by max */
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
index c892d58e8dad..b995333ea22b 100644
--- a/arch/arm/boot/dts/imx23.dtsi
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -468,6 +468,7 @@
interrupts = <36 37 38 39 40 41 42 43 44>;
status = "disabled";
clocks = <&clks 26>;
+ #io-channel-cells = <1>;
};
spdif@80054000 {
diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts
index dd45e6971bc3..9351296356dc 100644
--- a/arch/arm/boot/dts/imx25-pdk.dts
+++ b/arch/arm/boot/dts/imx25-pdk.dts
@@ -10,6 +10,7 @@
*/
/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include "imx25.dtsi"
@@ -114,8 +115,8 @@
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
- cd-gpios = <&gpio2 1 0>;
- wp-gpios = <&gpio2 0 0>;
+ cd-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index bc215e4b75fd..b69be5c499cf 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -108,7 +108,7 @@
};
gpt1: timer@10003000 {
- compatible = "fsl,imx27-gpt", "fsl,imx1-gpt";
+ compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
reg = <0x10003000 0x1000>;
interrupts = <26>;
clocks = <&clks IMX27_CLK_GPT1_IPG_GATE>,
@@ -117,7 +117,7 @@
};
gpt2: timer@10004000 {
- compatible = "fsl,imx27-gpt", "fsl,imx1-gpt";
+ compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
reg = <0x10004000 0x1000>;
interrupts = <25>;
clocks = <&clks IMX27_CLK_GPT2_IPG_GATE>,
@@ -126,7 +126,7 @@
};
gpt3: timer@10005000 {
- compatible = "fsl,imx27-gpt", "fsl,imx1-gpt";
+ compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
reg = <0x10005000 0x1000>;
interrupts = <24>;
clocks = <&clks IMX27_CLK_GPT3_IPG_GATE>,
@@ -376,7 +376,7 @@
};
gpt4: timer@10019000 {
- compatible = "fsl,imx27-gpt", "fsl,imx1-gpt";
+ compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
reg = <0x10019000 0x1000>;
interrupts = <4>;
clocks = <&clks IMX27_CLK_GPT4_IPG_GATE>,
@@ -385,7 +385,7 @@
};
gpt5: timer@1001a000 {
- compatible = "fsl,imx27-gpt", "fsl,imx1-gpt";
+ compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
reg = <0x1001a000 0x1000>;
interrupts = <3>;
clocks = <&clks IMX27_CLK_GPT5_IPG_GATE>,
@@ -436,7 +436,7 @@
};
gpt6: timer@1001f000 {
- compatible = "fsl,imx27-gpt", "fsl,imx1-gpt";
+ compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
reg = <0x1001f000 0x1000>;
interrupts = <2>;
clocks = <&clks IMX27_CLK_GPT6_IPG_GATE>,
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index b6478e97d6a7..e6540b5cfa4c 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -286,8 +286,8 @@
can1: can@53fe4000 {
compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
reg = <0x53fe4000 0x1000>;
- clocks = <&clks 33>;
- clock-names = "ipg";
+ clocks = <&clks 33>, <&clks 33>;
+ clock-names = "ipg", "per";
interrupts = <43>;
status = "disabled";
};
@@ -295,8 +295,8 @@
can2: can@53fe8000 {
compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
reg = <0x53fe8000 0x1000>;
- clocks = <&clks 34>;
- clock-names = "ipg";
+ clocks = <&clks 34>, <&clks 34>;
+ clock-names = "ipg", "per";
interrupts = <44>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx51-apf51dev.dts b/arch/arm/boot/dts/imx51-apf51dev.dts
index 93d3ea12328c..0f3fe29b816e 100644
--- a/arch/arm/boot/dts/imx51-apf51dev.dts
+++ b/arch/arm/boot/dts/imx51-apf51dev.dts
@@ -98,7 +98,7 @@
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
- cd-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio2 29 GPIO_ACTIVE_LOW>;
bus-width = <4>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
index e9337ad52f59..3bc18835fb4b 100644
--- a/arch/arm/boot/dts/imx53-ard.dts
+++ b/arch/arm/boot/dts/imx53-ard.dts
@@ -103,8 +103,8 @@
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
- cd-gpios = <&gpio1 1 0>;
- wp-gpios = <&gpio1 9 0>;
+ cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx53-m53evk.dts b/arch/arm/boot/dts/imx53-m53evk.dts
index d0e0f57eb432..53f40885c530 100644
--- a/arch/arm/boot/dts/imx53-m53evk.dts
+++ b/arch/arm/boot/dts/imx53-m53evk.dts
@@ -124,8 +124,8 @@
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
- cd-gpios = <&gpio1 1 0>;
- wp-gpios = <&gpio1 9 0>;
+ cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
index 181ae5ebf23f..b0d5542ac829 100644
--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
@@ -147,8 +147,8 @@
&esdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc3>;
- cd-gpios = <&gpio3 11 0>;
- wp-gpios = <&gpio3 12 0>;
+ cd-gpios = <&gpio3 11 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio3 12 GPIO_ACTIVE_HIGH>;
bus-width = <8>;
status = "okay";
};
@@ -295,9 +295,10 @@
&tve {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_vga_sync>;
+ ddc-i2c-bus = <&i2c2>;
fsl,tve-mode = "vga";
- fsl,hsync-pin = <4>;
- fsl,vsync-pin = <6>;
+ fsl,hsync-pin = <7>; /* IPU DI1 PIN7 via EIM_OE */
+ fsl,vsync-pin = <8>; /* IPU DI1 PIN8 via EIM_RW */
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts
index 1d325576bcc0..fc89ce1e5763 100644
--- a/arch/arm/boot/dts/imx53-smd.dts
+++ b/arch/arm/boot/dts/imx53-smd.dts
@@ -41,8 +41,8 @@
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
- cd-gpios = <&gpio3 13 0>;
- wp-gpios = <&gpio4 11 0>;
+ cd-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio4 11 GPIO_ACTIVE_HIGH>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx53-tqma53.dtsi b/arch/arm/boot/dts/imx53-tqma53.dtsi
index 4f1f0e2868bf..e03373a58760 100644
--- a/arch/arm/boot/dts/imx53-tqma53.dtsi
+++ b/arch/arm/boot/dts/imx53-tqma53.dtsi
@@ -41,8 +41,8 @@
pinctrl-0 = <&pinctrl_esdhc2>,
<&pinctrl_esdhc2_cdwp>;
vmmc-supply = <&reg_3p3v>;
- wp-gpios = <&gpio1 2 0>;
- cd-gpios = <&gpio1 4 0>;
+ wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx53-tx53.dtsi b/arch/arm/boot/dts/imx53-tx53.dtsi
index 704bd72cbfec..d3e50b22064f 100644
--- a/arch/arm/boot/dts/imx53-tx53.dtsi
+++ b/arch/arm/boot/dts/imx53-tx53.dtsi
@@ -183,7 +183,7 @@
};
&esdhc1 {
- cd-gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
fsl,wp-controller;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
@@ -191,7 +191,7 @@
};
&esdhc2 {
- cd-gpios = <&gpio3 25 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
fsl,wp-controller;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc2>;
diff --git a/arch/arm/boot/dts/imx53-voipac-bsb.dts b/arch/arm/boot/dts/imx53-voipac-bsb.dts
index c17d3ad6dba5..fc51b87ad208 100644
--- a/arch/arm/boot/dts/imx53-voipac-bsb.dts
+++ b/arch/arm/boot/dts/imx53-voipac-bsb.dts
@@ -119,8 +119,8 @@
&esdhc2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc2>;
- cd-gpios = <&gpio3 25 0>;
- wp-gpios = <&gpio2 19 0>;
+ cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts
index 43cb3fd76be7..5111f5170d53 100644
--- a/arch/arm/boot/dts/imx6dl-riotboard.dts
+++ b/arch/arm/boot/dts/imx6dl-riotboard.dts
@@ -305,8 +305,8 @@
&usdhc2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2>;
- cd-gpios = <&gpio1 4 0>;
- wp-gpios = <&gpio1 2 0>;
+ cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
@@ -314,8 +314,8 @@
&usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
- cd-gpios = <&gpio7 0 0>;
- wp-gpios = <&gpio7 1 0>;
+ cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6q-arm2.dts b/arch/arm/boot/dts/imx6q-arm2.dts
index 78df05e9d1ce..d6515f7a56c4 100644
--- a/arch/arm/boot/dts/imx6q-arm2.dts
+++ b/arch/arm/boot/dts/imx6q-arm2.dts
@@ -11,6 +11,7 @@
*/
/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
#include "imx6q.dtsi"
/ {
@@ -196,8 +197,8 @@
};
&usdhc3 {
- cd-gpios = <&gpio6 11 0>;
- wp-gpios = <&gpio6 14 0>;
+ cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio6 14 GPIO_ACTIVE_HIGH>;
vmmc-supply = <&reg_3p3v>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3
diff --git a/arch/arm/boot/dts/imx6q-gk802.dts b/arch/arm/boot/dts/imx6q-gk802.dts
index 703539cf36d3..00bd63e63d0c 100644
--- a/arch/arm/boot/dts/imx6q-gk802.dts
+++ b/arch/arm/boot/dts/imx6q-gk802.dts
@@ -7,6 +7,7 @@
*/
/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
#include "imx6q.dtsi"
/ {
@@ -161,7 +162,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
bus-width = <4>;
- cd-gpios = <&gpio6 11 0>;
+ cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6q-tbs2910.dts b/arch/arm/boot/dts/imx6q-tbs2910.dts
index a43abfa21e33..5645d52850a7 100644
--- a/arch/arm/boot/dts/imx6q-tbs2910.dts
+++ b/arch/arm/boot/dts/imx6q-tbs2910.dts
@@ -251,7 +251,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2>;
bus-width = <4>;
- cd-gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
@@ -260,7 +260,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
bus-width = <4>;
- cd-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
index e6d9195a1da7..f4d6ae564ead 100644
--- a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
@@ -173,7 +173,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc1>;
vmmc-supply = <&reg_3p3v>;
- cd-gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
status = "okay";
};
@@ -181,7 +181,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2>;
vmmc-supply = <&reg_3p3v>;
- cd-gpios = <&gpio4 8 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio4 8 GPIO_ACTIVE_LOW>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
index 1d85de2befb3..a47a0399a172 100644
--- a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
@@ -392,7 +392,7 @@
&usdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc1>;
- cd-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
no-1-8-v;
status = "okay";
};
@@ -400,7 +400,7 @@
&usdhc2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2>;
- cd-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio4 5 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
no-1-8-v;
status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
index 59e5d15e3ec4..ff41f83551de 100644
--- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
@@ -258,6 +258,6 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>;
vmmc-supply = <&reg_3p3v>;
- cd-gpios = <&gpio1 4 0>;
+ cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
index 2c253d6d20bd..45e7c39e80d5 100644
--- a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
@@ -1,3 +1,5 @@
+#include <dt-bindings/gpio/gpio.h>
+
/ {
regulators {
compatible = "simple-bus";
@@ -181,7 +183,7 @@
&usdhc2 { /* module slot */
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2>;
- cd-gpios = <&gpio2 2 0>;
+ cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
index b5756c21ea1d..4493f6e99330 100644
--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
@@ -318,7 +318,7 @@
&usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
- cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
index 86f03c1b147c..a857d1294609 100644
--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
@@ -324,7 +324,7 @@
&usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
- cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
index 4a8d97f47759..1afe3385e2d2 100644
--- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
@@ -417,7 +417,7 @@
&usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
- cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
index 62a82f3eba88..6dd0b764e036 100644
--- a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
@@ -299,6 +299,6 @@
&pinctrl_hummingboard_usdhc2
>;
vmmc-supply = <&reg_3p3v>;
- cd-gpios = <&gpio1 4 0>;
+ cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
index 3af16dfe417b..d7fe6672d00c 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
@@ -453,7 +453,7 @@
&usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
- cd-gpios = <&gpio7 0 0>;
+ cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
@@ -461,7 +461,7 @@
&usdhc4 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc4>;
- cd-gpios = <&gpio2 6 0>;
+ cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index 1ce6133b67f5..9e6ecd99b472 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -409,8 +409,8 @@
&usdhc2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2>;
- cd-gpios = <&gpio1 4 0>;
- wp-gpios = <&gpio1 2 0>;
+ cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
status = "disabled";
};
@@ -418,7 +418,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3
&pinctrl_usdhc3_cdwp>;
- cd-gpios = <&gpio1 27 0>;
- wp-gpios = <&gpio1 29 0>;
+ cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi
index 488a640796ac..3373fd958e95 100644
--- a/arch/arm/boot/dts/imx6qdl-rex.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi
@@ -342,7 +342,7 @@
pinctrl-0 = <&pinctrl_usdhc2>;
bus-width = <4>;
cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
- wp-gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
status = "okay";
};
@@ -351,6 +351,6 @@
pinctrl-0 = <&pinctrl_usdhc3>;
bus-width = <4>;
cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
- wp-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index 3b24b12651b2..e329ca5c3322 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -467,8 +467,8 @@
pinctrl-0 = <&pinctrl_usdhc3>;
pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
- cd-gpios = <&gpio6 15 0>;
- wp-gpios = <&gpio1 13 0>;
+ cd-gpios = <&gpio6 15 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
index e00c44f6a0df..782379320517 100644
--- a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
@@ -448,8 +448,8 @@
&usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
- cd-gpios = <&gpio7 0 0>;
- wp-gpios = <&gpio7 1 0>;
+ cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
@@ -457,7 +457,7 @@
&usdhc4 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc4>;
- cd-gpios = <&gpio2 6 0>;
+ cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index a626e6dd8022..944eb81cb2b8 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -562,8 +562,8 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2>;
bus-width = <8>;
- cd-gpios = <&gpio2 2 0>;
- wp-gpios = <&gpio2 3 0>;
+ cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
status = "okay";
};
@@ -571,8 +571,8 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
bus-width = <8>;
- cd-gpios = <&gpio2 0 0>;
- wp-gpios = <&gpio2 1 0>;
+ cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-tx6.dtsi b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
index f02b80b41d4f..da08de324e9e 100644
--- a/arch/arm/boot/dts/imx6qdl-tx6.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
@@ -680,7 +680,7 @@
pinctrl-0 = <&pinctrl_usdhc1>;
bus-width = <4>;
no-1-8-v;
- cd-gpios = <&gpio7 2 0>;
+ cd-gpios = <&gpio7 2 GPIO_ACTIVE_LOW>;
fsl,wp-controller;
status = "okay";
};
@@ -690,7 +690,7 @@
pinctrl-0 = <&pinctrl_usdhc2>;
bus-width = <4>;
no-1-8-v;
- cd-gpios = <&gpio7 3 0>;
+ cd-gpios = <&gpio7 3 GPIO_ACTIVE_LOW>;
fsl,wp-controller;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index 5fb091675582..9e096d811bed 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -9,6 +9,8 @@
*
*/
+#include <dt-bindings/gpio/gpio.h>
+
/ {
regulators {
compatible = "simple-bus";
@@ -250,13 +252,13 @@
&usdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc1>;
- cd-gpios = <&gpio1 2 0>;
+ cd-gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
status = "okay";
};
&usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
- cd-gpios = <&gpio3 9 0>;
+ cd-gpios = <&gpio3 9 GPIO_ACTIVE_LOW>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index e6d13592080d..b57033e8c633 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -181,10 +181,10 @@
interrupt-names = "msi";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 1 &gpc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6QDL_CLK_PCIE_AXI>,
<&clks IMX6QDL_CLK_LVDS1_GATE>,
<&clks IMX6QDL_CLK_PCIE_REF_125M>;
diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts
index 945887d3fdb3..b84dff2e94ea 100644
--- a/arch/arm/boot/dts/imx6sl-evk.dts
+++ b/arch/arm/boot/dts/imx6sl-evk.dts
@@ -617,8 +617,8 @@
pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
bus-width = <8>;
- cd-gpios = <&gpio4 7 0>;
- wp-gpios = <&gpio4 6 0>;
+ cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>;
status = "okay";
};
@@ -627,8 +627,8 @@
pinctrl-0 = <&pinctrl_usdhc2>;
pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
- cd-gpios = <&gpio5 0 0>;
- wp-gpios = <&gpio4 29 0>;
+ cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio4 29 GPIO_ACTIVE_HIGH>;
status = "okay";
};
@@ -637,6 +637,6 @@
pinctrl-0 = <&pinctrl_usdhc3>;
pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
- cd-gpios = <&gpio3 22 0>;
+ cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
index e3c0b63c2205..115f3fd78971 100644
--- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
+++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
@@ -49,7 +49,7 @@
pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
bus-width = <8>;
- cd-gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
keep-power-in-suspend;
enable-sdio-wakeup;
@@ -61,7 +61,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc4>;
bus-width = <8>;
- cd-gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>;
no-1-8-v;
keep-power-in-suspend;
enable-sdio-wakup;
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
index cef04cef3a80..ac88c3467078 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
@@ -293,7 +293,7 @@
pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
bus-width = <8>;
- cd-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
keep-power-in-suspend;
enable-sdio-wakeup;
@@ -304,7 +304,7 @@
&usdhc4 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc4>;
- cd-gpios = <&gpio6 21 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio6 21 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio6 20 GPIO_ACTIVE_HIGH>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts
index 4d1a4b977d84..fdd1d7c9a5cc 100644
--- a/arch/arm/boot/dts/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/imx7d-sdb.dts
@@ -234,8 +234,8 @@
&usdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc1>;
- cd-gpios = <&gpio5 0 0>;
- wp-gpios = <&gpio5 1 0>;
+ cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
enable-sdio-wakeup;
keep-power-in-suspend;
status = "okay";
diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/k2e-clocks.dtsi
index 4773d6af66a0..d56d68fe7ffc 100644
--- a/arch/arm/boot/dts/k2e-clocks.dtsi
+++ b/arch/arm/boot/dts/k2e-clocks.dtsi
@@ -13,9 +13,8 @@ clocks {
#clock-cells = <0>;
compatible = "ti,keystone,main-pll-clock";
clocks = <&refclksys>;
- reg = <0x02620350 4>, <0x02310110 4>;
- reg-names = "control", "multiplier";
- fixed-postdiv = <2>;
+ reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+ reg-names = "control", "multiplier", "post-divider";
};
papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2e.dtsi b/arch/arm/boot/dts/k2e.dtsi
index 50e555eab50d..675fb8e492c6 100644
--- a/arch/arm/boot/dts/k2e.dtsi
+++ b/arch/arm/boot/dts/k2e.dtsi
@@ -86,7 +86,7 @@
gpio,syscon-dev = <&devctrl 0x240>;
};
- pcie@21020000 {
+ pcie1: pcie@21020000 {
compatible = "ti,keystone-pcie","snps,dw-pcie";
clocks = <&clkpcie1>;
clock-names = "pcie";
@@ -96,6 +96,7 @@
ranges = <0x81000000 0 0 0x23260000 0x4000 0x4000
0x82000000 0 0x60000000 0x60000000 0 0x10000000>;
+ status = "disabled";
device_type = "pci";
num-lanes = <2>;
@@ -130,10 +131,17 @@
<GIC_SPI 376 IRQ_TYPE_EDGE_RISING>;
};
};
+
+ mdio: mdio@24200f00 {
+ compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x24200f00 0x100>;
+ status = "disabled";
+ clocks = <&clkcpgmac>;
+ clock-names = "fck";
+ bus_freq = <2500000>;
+ };
/include/ "k2e-netcp.dtsi"
};
};
-
-&mdio {
- reg = <0x24200f00 0x100>;
-};
diff --git a/arch/arm/boot/dts/k2hk-clocks.dtsi b/arch/arm/boot/dts/k2hk-clocks.dtsi
index d5adee3c0067..af9b7190533a 100644
--- a/arch/arm/boot/dts/k2hk-clocks.dtsi
+++ b/arch/arm/boot/dts/k2hk-clocks.dtsi
@@ -22,9 +22,8 @@ clocks {
#clock-cells = <0>;
compatible = "ti,keystone,main-pll-clock";
clocks = <&refclksys>;
- reg = <0x02620350 4>, <0x02310110 4>;
- reg-names = "control", "multiplier";
- fixed-postdiv = <2>;
+ reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+ reg-names = "control", "multiplier", "post-divider";
};
papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2hk.dtsi b/arch/arm/boot/dts/k2hk.dtsi
index ae6472407b22..d0810a5f2968 100644
--- a/arch/arm/boot/dts/k2hk.dtsi
+++ b/arch/arm/boot/dts/k2hk.dtsi
@@ -98,6 +98,17 @@
#gpio-cells = <2>;
gpio,syscon-dev = <&devctrl 0x25c>;
};
+
+ mdio: mdio@02090300 {
+ compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x02090300 0x100>;
+ status = "disabled";
+ clocks = <&clkcpgmac>;
+ clock-names = "fck";
+ bus_freq = <2500000>;
+ };
/include/ "k2hk-netcp.dtsi"
};
};
diff --git a/arch/arm/boot/dts/k2l-clocks.dtsi b/arch/arm/boot/dts/k2l-clocks.dtsi
index eb1e3e29f073..ef8464bb11ff 100644
--- a/arch/arm/boot/dts/k2l-clocks.dtsi
+++ b/arch/arm/boot/dts/k2l-clocks.dtsi
@@ -22,9 +22,8 @@ clocks {
#clock-cells = <0>;
compatible = "ti,keystone,main-pll-clock";
clocks = <&refclksys>;
- reg = <0x02620350 4>, <0x02310110 4>;
- reg-names = "control", "multiplier";
- fixed-postdiv = <2>;
+ reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+ reg-names = "control", "multiplier", "post-divider";
};
papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2l.dtsi b/arch/arm/boot/dts/k2l.dtsi
index 0e007483615e..49fd414f680c 100644
--- a/arch/arm/boot/dts/k2l.dtsi
+++ b/arch/arm/boot/dts/k2l.dtsi
@@ -29,7 +29,6 @@
};
soc {
-
/include/ "k2l-clocks.dtsi"
uart2: serial@02348400 {
@@ -79,6 +78,17 @@
#gpio-cells = <2>;
gpio,syscon-dev = <&devctrl 0x24c>;
};
+
+ mdio: mdio@26200f00 {
+ compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x26200f00 0x100>;
+ status = "disabled";
+ clocks = <&clkcpgmac>;
+ clock-names = "fck";
+ bus_freq = <2500000>;
+ };
/include/ "k2l-netcp.dtsi"
};
};
@@ -96,7 +106,3 @@
/* Pin muxed. Enabled and configured by Bootloader */
status = "disabled";
};
-
-&mdio {
- reg = <0x26200f00 0x100>;
-};
diff --git a/arch/arm/boot/dts/keystone.dtsi b/arch/arm/boot/dts/keystone.dtsi
index c06542b2c954..72816d65f7ec 100644
--- a/arch/arm/boot/dts/keystone.dtsi
+++ b/arch/arm/boot/dts/keystone.dtsi
@@ -267,17 +267,6 @@
1 0 0x21000A00 0x00000100>;
};
- mdio: mdio@02090300 {
- compatible = "ti,keystone_mdio", "ti,davinci_mdio";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x02090300 0x100>;
- status = "disabled";
- clocks = <&clkpa>;
- clock-names = "fck";
- bus_freq = <2500000>;
- };
-
kirq0: keystone_irq@26202a0 {
compatible = "ti,keystone-irq";
interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;
@@ -286,7 +275,7 @@
ti,syscon-dev = <&devctrl 0x2a0>;
};
- pcie@21800000 {
+ pcie0: pcie@21800000 {
compatible = "ti,keystone-pcie", "snps,dw-pcie";
clocks = <&clkpcie>;
clock-names = "pcie";
@@ -296,6 +285,7 @@
ranges = <0x81000000 0 0 0x23250000 0 0x4000
0x82000000 0 0x50000000 0x50000000 0 0x10000000>;
+ status = "disabled";
device_type = "pci";
num-lanes = <2>;
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index 11a7963be003..2390f387c271 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -51,7 +51,8 @@
};
scm_conf: scm_conf@270 {
- compatible = "syscon";
+ compatible = "syscon",
+ "simple-bus";
reg = <0x270 0x240>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
index 233c69e50ae3..df8908adb0cb 100644
--- a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
@@ -120,7 +120,7 @@
lcd0: display@0 {
compatible = "lgphilips,lb035q02";
- label = "lcd";
+ label = "lcd35";
reg = <1>; /* CS1 */
spi-max-frequency = <10000000>;
diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi
index f5395b7da912..048fd216970a 100644
--- a/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi
@@ -98,7 +98,7 @@
lcd0: display@0 {
compatible = "samsung,lte430wq-f0c", "panel-dpi";
- label = "lcd";
+ label = "lcd43";
pinctrl-names = "default";
pinctrl-0 = <&lte430_pins>;
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index f884d6adb71e..abc4473e6f8a 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -191,7 +191,8 @@
};
omap4_padconf_global: omap4_padconf_global@5a0 {
- compatible = "syscon";
+ compatible = "syscon",
+ "simple-bus";
reg = <0x5a0 0x170>;
#address-cells = <1>;
#size-cells = <1>;
@@ -551,6 +552,7 @@
reg = <0x4a066000 0x100>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "mmu_dsp";
+ #iommu-cells = <0>;
};
mmu_ipu: mmu@55082000 {
@@ -558,6 +560,7 @@
reg = <0x55082000 0x100>;
interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "mmu_ipu";
+ #iommu-cells = <0>;
ti,iommu-bus-err-back;
};
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 7d24ae0306b5..b1a1263e6001 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -180,7 +180,8 @@
};
omap5_padconf_global: omap5_padconf_global@5a0 {
- compatible = "syscon";
+ compatible = "syscon",
+ "simple-bus";
reg = <0x5a0 0xec>;
#address-cells = <1>;
#size-cells = <1>;
@@ -612,6 +613,7 @@
reg = <0x4a066000 0x100>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "mmu_dsp";
+ #iommu-cells = <0>;
};
mmu_ipu: mmu@55082000 {
@@ -619,6 +621,7 @@
reg = <0x55082000 0x100>;
interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "mmu_ipu";
+ #iommu-cells = <0>;
ti,iommu-bus-err-back;
};
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
index 71468a7eb28f..5e17fd147728 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
@@ -60,27 +60,27 @@
rxc-skew-ps = <2000>;
};
-&mmc0 {
- vmmc-supply = <&regulator_3_3v>;
- vqmmc-supply = <&regulator_3_3v>;
-};
-
-&usb1 {
- status = "okay";
-};
-
&gpio2 {
status = "okay";
};
-&i2c1{
+&i2c1 {
status = "okay";
- accel1: accel1@53{
- compatible = "adxl34x";
+ accel1: accelerometer@53 {
+ compatible = "adi,adxl345";
reg = <0x53>;
- interrupt-parent = < &portc >;
+ interrupt-parent = <&portc>;
interrupts = <3 2>;
};
};
+
+&mmc0 {
+ vmmc-supply = <&regulator_3_3v>;
+ vqmmc-supply = <&regulator_3_3v>;
+};
+
+&usb1 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts
index d42c84b1df8d..e48857249ce7 100644
--- a/arch/arm/boot/dts/spear1310-evb.dts
+++ b/arch/arm/boot/dts/spear1310-evb.dts
@@ -1,7 +1,7 @@
/*
* DTS file for SPEAr1310 Evaluation Baord
*
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi
index 9d342920695a..54bc6d3cf290 100644
--- a/arch/arm/boot/dts/spear1310.dtsi
+++ b/arch/arm/boot/dts/spear1310.dtsi
@@ -1,7 +1,7 @@
/*
* DTS file for all SPEAr1310 SoCs
*
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear1340-evb.dts b/arch/arm/boot/dts/spear1340-evb.dts
index b23e05ed1d60..c611f5606dfe 100644
--- a/arch/arm/boot/dts/spear1340-evb.dts
+++ b/arch/arm/boot/dts/spear1340-evb.dts
@@ -1,7 +1,7 @@
/*
* DTS file for SPEAr1340 Evaluation Baord
*
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
index 13e1aa33daa2..df2232d767ed 100644
--- a/arch/arm/boot/dts/spear1340.dtsi
+++ b/arch/arm/boot/dts/spear1340.dtsi
@@ -1,7 +1,7 @@
/*
* DTS file for all SPEAr1340 SoCs
*
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
index 40accc87e3a2..14594ce8c18a 100644
--- a/arch/arm/boot/dts/spear13xx.dtsi
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -1,7 +1,7 @@
/*
* DTS file for all SPEAr13xx SoCs
*
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear300-evb.dts b/arch/arm/boot/dts/spear300-evb.dts
index 5de1431653e4..e859e8288bcd 100644
--- a/arch/arm/boot/dts/spear300-evb.dts
+++ b/arch/arm/boot/dts/spear300-evb.dts
@@ -1,7 +1,7 @@
/*
* DTS file for SPEAr300 Evaluation Baord
*
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear300.dtsi b/arch/arm/boot/dts/spear300.dtsi
index f79b3dfaabe6..f4e92e599729 100644
--- a/arch/arm/boot/dts/spear300.dtsi
+++ b/arch/arm/boot/dts/spear300.dtsi
@@ -1,7 +1,7 @@
/*
* DTS file for SPEAr300 SoC
*
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear310-evb.dts b/arch/arm/boot/dts/spear310-evb.dts
index b09632963d15..070f2c1b7851 100644
--- a/arch/arm/boot/dts/spear310-evb.dts
+++ b/arch/arm/boot/dts/spear310-evb.dts
@@ -1,7 +1,7 @@
/*
* DTS file for SPEAr310 Evaluation Baord
*
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear310.dtsi b/arch/arm/boot/dts/spear310.dtsi
index 95372080eea6..da210b454753 100644
--- a/arch/arm/boot/dts/spear310.dtsi
+++ b/arch/arm/boot/dts/spear310.dtsi
@@ -1,7 +1,7 @@
/*
* DTS file for SPEAr310 SoC
*
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear320-evb.dts b/arch/arm/boot/dts/spear320-evb.dts
index fdedbb514102..1b1034477923 100644
--- a/arch/arm/boot/dts/spear320-evb.dts
+++ b/arch/arm/boot/dts/spear320-evb.dts
@@ -1,7 +1,7 @@
/*
* DTS file for SPEAr320 Evaluation Baord
*
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear320.dtsi b/arch/arm/boot/dts/spear320.dtsi
index ffea342aeec9..22be6e5edaac 100644
--- a/arch/arm/boot/dts/spear320.dtsi
+++ b/arch/arm/boot/dts/spear320.dtsi
@@ -1,7 +1,7 @@
/*
* DTS file for SPEAr320 SoC
*
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear3xx.dtsi b/arch/arm/boot/dts/spear3xx.dtsi
index f0e3fcf8e323..118135d75899 100644
--- a/arch/arm/boot/dts/spear3xx.dtsi
+++ b/arch/arm/boot/dts/spear3xx.dtsi
@@ -1,7 +1,7 @@
/*
* DTS file for all SPEAr3xx SoCs
*
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/ste-ccu8540.dts b/arch/arm/boot/dts/ste-ccu8540.dts
index 32dd55e5f4e6..6eaaf638e52e 100644
--- a/arch/arm/boot/dts/ste-ccu8540.dts
+++ b/arch/arm/boot/dts/ste-ccu8540.dts
@@ -17,6 +17,13 @@
model = "ST-Ericsson U8540 platform with Device Tree";
compatible = "st-ericsson,ccu8540", "st-ericsson,u8540";
+ /* This stabilizes the serial port enumeration */
+ aliases {
+ serial0 = &ux500_serial0;
+ serial1 = &ux500_serial1;
+ serial2 = &ux500_serial2;
+ };
+
memory@0 {
device_type = "memory";
reg = <0x20000000 0x1f000000>, <0xc0000000 0x3f000000>;
diff --git a/arch/arm/boot/dts/ste-ccu9540.dts b/arch/arm/boot/dts/ste-ccu9540.dts
index 651c56d400a4..c8b815819cfe 100644
--- a/arch/arm/boot/dts/ste-ccu9540.dts
+++ b/arch/arm/boot/dts/ste-ccu9540.dts
@@ -16,6 +16,13 @@
model = "ST-Ericsson CCU9540 platform with Device Tree";
compatible = "st-ericsson,ccu9540", "st-ericsson,u9540";
+ /* This stabilizes the serial port enumeration */
+ aliases {
+ serial0 = &ux500_serial0;
+ serial1 = &ux500_serial1;
+ serial2 = &ux500_serial2;
+ };
+
memory {
reg = <0x00000000 0x20000000>;
};
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index 853684ad7773..b8f81fb418ce 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -15,6 +15,33 @@
#include "skeleton.dtsi"
/ {
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ enable-method = "ste,dbx500-smp";
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&CPU0>;
+ };
+ core1 {
+ cpu = <&CPU1>;
+ };
+ };
+ };
+ CPU0: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <0x300>;
+ };
+ CPU1: cpu@301 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <0x301>;
+ };
+ };
+
soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -22,32 +49,6 @@
interrupt-parent = <&intc>;
ranges;
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu-map {
- cluster0 {
- core0 {
- cpu = <&CPU0>;
- };
- core1 {
- cpu = <&CPU1>;
- };
- };
- };
- CPU0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a9";
- reg = <0>;
- };
- CPU1: cpu@1 {
- device_type = "cpu";
- compatible = "arm,cortex-a9";
- reg = <1>;
- };
- };
-
ptm@801ae000 {
compatible = "arm,coresight-etm3x", "arm,primecell";
reg = <0x801ae000 0x1000>;
@@ -971,7 +972,7 @@
power-domains = <&pm_domains DOMAIN_VAPE>;
};
- uart@80120000 {
+ ux500_serial0: uart@80120000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x80120000 0x1000>;
interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
@@ -986,7 +987,7 @@
status = "disabled";
};
- uart@80121000 {
+ ux500_serial1: uart@80121000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x80121000 0x1000>;
interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>;
@@ -1001,7 +1002,7 @@
status = "disabled";
};
- uart@80007000 {
+ ux500_serial2: uart@80007000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x80007000 0x1000>;
interrupts = <0 26 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index 744c1e3a744d..6d8ce154347e 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -32,11 +32,11 @@
status = "okay";
};
+ /* This UART is unused and thus left disabled */
uart@80121000 {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&uart1_default_mode>;
pinctrl-1 = <&uart1_sleep_mode>;
- status = "okay";
};
uart@80007000 {
diff --git a/arch/arm/boot/dts/ste-hrefprev60-stuib.dts b/arch/arm/boot/dts/ste-hrefprev60-stuib.dts
index 2b1cb5b584b6..18e9795a94f9 100644
--- a/arch/arm/boot/dts/ste-hrefprev60-stuib.dts
+++ b/arch/arm/boot/dts/ste-hrefprev60-stuib.dts
@@ -17,6 +17,13 @@
model = "ST-Ericsson HREF (pre-v60) and ST UIB";
compatible = "st-ericsson,mop500", "st-ericsson,u8500";
+ /* This stabilizes the serial port enumeration */
+ aliases {
+ serial0 = &ux500_serial0;
+ serial1 = &ux500_serial1;
+ serial2 = &ux500_serial2;
+ };
+
soc {
/* Reset line for the BU21013 touchscreen */
i2c@80110000 {
diff --git a/arch/arm/boot/dts/ste-hrefprev60-tvk.dts b/arch/arm/boot/dts/ste-hrefprev60-tvk.dts
index 59523f866812..24739914e689 100644
--- a/arch/arm/boot/dts/ste-hrefprev60-tvk.dts
+++ b/arch/arm/boot/dts/ste-hrefprev60-tvk.dts
@@ -16,4 +16,11 @@
/ {
model = "ST-Ericsson HREF (pre-v60) and TVK1281618 UIB";
compatible = "st-ericsson,mop500", "st-ericsson,u8500";
+
+ /* This stabilizes the serial port enumeration */
+ aliases {
+ serial0 = &ux500_serial0;
+ serial1 = &ux500_serial1;
+ serial2 = &ux500_serial2;
+ };
};
diff --git a/arch/arm/boot/dts/ste-hrefprev60.dtsi b/arch/arm/boot/dts/ste-hrefprev60.dtsi
index 7f3975b58d16..b0278f4c486c 100644
--- a/arch/arm/boot/dts/ste-hrefprev60.dtsi
+++ b/arch/arm/boot/dts/ste-hrefprev60.dtsi
@@ -23,6 +23,11 @@
};
soc {
+ /* Enable UART1 on this board */
+ uart@80121000 {
+ status = "okay";
+ };
+
i2c@80004000 {
tps61052@33 {
compatible = "tps61052";
diff --git a/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts b/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts
index 8c6a2de56cf1..c2e1ba019a2f 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts
+++ b/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts
@@ -19,6 +19,13 @@
model = "ST-Ericsson HREF (v60+) and ST UIB";
compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500";
+ /* This stabilizes the serial port enumeration */
+ aliases {
+ serial0 = &ux500_serial0;
+ serial1 = &ux500_serial1;
+ serial2 = &ux500_serial2;
+ };
+
soc {
/* Reset line for the BU21013 touchscreen */
i2c@80110000 {
diff --git a/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts b/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
index d53cccdce776..ebd8547e98f1 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
+++ b/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
@@ -18,4 +18,11 @@
/ {
model = "ST-Ericsson HREF (v60+) and TVK1281618 UIB";
compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500";
+
+ /* This stabilizes the serial port enumeration */
+ aliases {
+ serial0 = &ux500_serial0;
+ serial1 = &ux500_serial1;
+ serial2 = &ux500_serial2;
+ };
};
diff --git a/arch/arm/boot/dts/ste-hrefv60plus.dtsi b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
index a4bc9e77d640..810cda743b6d 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus.dtsi
+++ b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
@@ -43,15 +43,26 @@
<&vaudio_hf_hrefv60_mode>,
<&gbf_hrefv60_mode>,
<&hdtv_hrefv60_mode>,
- <&touch_hrefv60_mode>;
+ <&touch_hrefv60_mode>,
+ <&gpios_hrefv60_mode>;
sdi0 {
- /* SD card detect GPIO pin, extend default state */
sdi0_default_mode: sdi0_default {
+ /* SD card detect GPIO pin, extend default state */
default_hrefv60_cfg1 {
pins = "GPIO95_E8";
ste,config = <&gpio_in_pu>;
};
+ /* VMMCI level-shifter enable */
+ default_hrefv60_cfg2 {
+ pins = "GPIO169_D22";
+ ste,config = <&gpio_out_lo>;
+ };
+ /* VMMCI level-shifter voltage select */
+ default_hrefv60_cfg3 {
+ pins = "GPIO5_AG6";
+ ste,config = <&gpio_out_hi>;
+ };
};
};
ipgpio {
@@ -213,6 +224,16 @@
};
};
};
+ gpios {
+ /* Dangling GPIO pins */
+ gpios_hrefv60_mode: gpios_hrefv60 {
+ default_cfg1 {
+ /* Normally UART1 RXD, now dangling */
+ pins = "GPIO4_AH6";
+ ste,config = <&in_pu>;
+ };
+ };
+ };
};
};
};
diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
index 3d0b8755caee..3d25dba143a5 100644
--- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts
+++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
@@ -17,6 +17,7 @@
};
aliases {
+ serial1 = &uart1;
stmpe-i2c0 = &stmpe0;
stmpe-i2c1 = &stmpe1;
};
diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts
index 85d3b95dfdba..3c140d05f796 100644
--- a/arch/arm/boot/dts/ste-nomadik-s8815.dts
+++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts
@@ -15,6 +15,10 @@
bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk";
};
+ aliases {
+ serial1 = &uart1;
+ };
+
src@101e0000 {
/* These chrystal drivers are not used on this board */
disable-sxtalo;
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index 9a5f2ba139b7..ef794a33b4dc 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -757,6 +757,7 @@
clock-names = "uartclk", "apb_pclk";
pinctrl-names = "default";
pinctrl-0 = <&uart0_default_mux>;
+ status = "disabled";
};
uart1: uart@101fb000 {
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index 9edadc37719f..32a5ccb14e7e 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -18,6 +18,13 @@
model = "Calao Systems Snowball platform with device tree";
compatible = "calaosystems,snowball-a9500", "st-ericsson,u9500";
+ /* This stabilizes the serial port enumeration */
+ aliases {
+ serial0 = &ux500_serial0;
+ serial1 = &ux500_serial1;
+ serial2 = &ux500_serial2;
+ };
+
memory {
reg = <0x00000000 0x20000000>;
};
@@ -223,11 +230,11 @@
status = "okay";
};
+ /* This UART is unused and thus left disabled */
uart@80121000 {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&uart1_default_mode>;
pinctrl-1 = <&uart1_sleep_mode>;
- status = "okay";
};
uart@80007000 {
@@ -452,7 +459,21 @@
pins = "GPIO21_AB3"; /* DAT31DIR */
ste,config = <&out_hi>;
};
-
+ /* SD card detect GPIO pin, extend default state */
+ snowball_cfg2 {
+ pins = "GPIO218_AH11";
+ ste,config = <&gpio_in_pu>;
+ };
+ /* VMMCI level-shifter enable */
+ snowball_cfg3 {
+ pins = "GPIO217_AH12";
+ ste,config = <&gpio_out_lo>;
+ };
+ /* VMMCI level-shifter voltage select */
+ snowball_cfg4 {
+ pins = "GPIO228_AJ6";
+ ste,config = <&gpio_out_hi>;
+ };
};
};
ssp0 {
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 83c50193626c..30b3bc1666d2 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -13,6 +13,7 @@ generic-y += kdebug.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += msgbuf.h
generic-y += param.h
generic-y += parport.h
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 6f225acc07c5..b7f6fb462ea0 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -286,7 +286,7 @@ extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
*/
static inline phys_addr_t __virt_to_idmap(unsigned long x)
{
- if (arch_virt_to_idmap)
+ if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap)
return arch_virt_to_idmap(x);
else
return __virt_to_phys(x);
diff --git a/arch/arm/include/asm/mm-arch-hooks.h b/arch/arm/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 7056660c7cc4..000000000000
--- a/arch/arm/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_ARM_MM_ARCH_HOOKS_H
-#define _ASM_ARM_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_ARM_MM_ARCH_HOOKS_H */
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 92828a1dec80..b48dd4f37f80 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -61,6 +61,7 @@ work_pending:
movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
ldmia sp, {r0 - r6} @ have to reload r0 - r6
b local_restart @ ... and off we go
+ENDPROC(ret_fast_syscall)
/*
* "slow" syscall return path. "why" tells us if this was a real syscall.
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index bd755d97e459..29e2991465cb 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -399,6 +399,9 @@ ENTRY(secondary_startup)
sub lr, r4, r5 @ mmu has been enabled
add r3, r7, lr
ldrd r4, [r3, #0] @ get secondary_data.pgdir
+ARM_BE8(eor r4, r4, r5) @ Swap r5 and r4 in BE:
+ARM_BE8(eor r5, r4, r5) @ it can be done in 3 steps
+ARM_BE8(eor r4, r4, r5) @ without using a temp reg.
ldr r8, [r3, #8] @ get secondary_data.swapper_pg_dir
badr lr, __enable_mmu @ return address
mov r13, r12 @ __secondary_switched address
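Note: the three-EOR sequence added above is the classic in-place XOR swap, used here because no scratch register is free at that point. A minimal C sketch of the same identity (illustrative only, not kernel code):

        unsigned long a = 0x1111, b = 0x2222;
        a ^= b;         /* a = A ^ B            */
        b ^= a;         /* b = B ^ (A ^ B) = A  */
        a ^= b;         /* a = (A ^ B) ^ A = B  */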
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 357f57ea83f4..54272e0be713 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -818,12 +818,13 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
break;
- of_node_put(dn);
if (cpu >= nr_cpu_ids) {
pr_warn("Failed to find logical CPU for %s\n",
dn->name);
+ of_node_put(dn);
break;
}
+ of_node_put(dn);
irqs[i] = cpu;
cpumask_set_cpu(cpu, &pmu->supported_cpus);
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
index 1a4d232796be..38269358fd25 100644
--- a/arch/arm/kernel/reboot.c
+++ b/arch/arm/kernel/reboot.c
@@ -50,7 +50,7 @@ static void __soft_restart(void *addr)
flush_cache_all();
/* Switch to the identity mapping. */
- phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+ phys_reset = (phys_reset_t)(unsigned long)virt_to_idmap(cpu_reset);
phys_reset((unsigned long)addr);
/* Should never get here. */
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index efe17dd9b921..54a5aeab988d 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -296,7 +296,6 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
*/
void update_vsyscall(struct timekeeper *tk)
{
- struct timespec xtime_coarse;
struct timespec64 *wtm = &tk->wall_to_monotonic;
if (!cntvct_ok) {
@@ -308,10 +307,10 @@ void update_vsyscall(struct timekeeper *tk)
vdso_write_begin(vdso_data);
- xtime_coarse = __current_kernel_time();
vdso_data->tk_is_cntvct = tk_is_cntvct(tk);
- vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
- vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+ vdso_data->xtime_coarse_sec = tk->xtime_sec;
+ vdso_data->xtime_coarse_nsec = (u32)(tk->tkr_mono.xtime_nsec >>
+ tk->tkr_mono.shift);
vdso_data->wtm_clock_sec = wtm->tv_sec;
vdso_data->wtm_clock_nsec = wtm->tv_nsec;
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 3e58d710013c..4b39af2dfda9 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -96,7 +96,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
}
/* the mmap semaphore is taken only if not in an atomic context */
- atomic = in_atomic();
+ atomic = faulthandler_disabled();
if (!atomic)
down_read(&current->mm->mmap_sem);
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 6001f1c9d136..4a87e86dec45 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -146,9 +146,8 @@ static __init int exynos4_pm_init_power_domain(void)
pd->base = of_iomap(np, 0);
if (!pd->base) {
pr_warn("%s: failed to map memory\n", __func__);
- kfree(pd->pd.name);
+ kfree_const(pd->pd.name);
kfree(pd);
- of_node_put(np);
continue;
}
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index 80bad29d609a..8c4467fad837 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -291,8 +291,6 @@ void __init imx_gpc_check_dt(void)
}
}
-#ifdef CONFIG_PM_GENERIC_DOMAINS
-
static void _imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
{
int iso, iso2sw;
@@ -399,7 +397,6 @@ static struct genpd_onecell_data imx_gpc_onecell_data = {
static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
{
struct clk *clk;
- bool is_off;
int i;
imx6q_pu_domain.reg = pu_reg;
@@ -416,18 +413,13 @@ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
}
imx6q_pu_domain.num_clks = i;
- is_off = IS_ENABLED(CONFIG_PM);
- if (is_off) {
- _imx6q_pm_pu_power_off(&imx6q_pu_domain.base);
- } else {
- /*
- * Enable power if compiled without CONFIG_PM in case the
- * bootloader disabled it.
- */
- imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
- }
+ /* Always enable power in case the bootloader disabled it. */
+ imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
+
+ if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
+ return 0;
- pm_genpd_init(&imx6q_pu_domain.base, NULL, is_off);
+ pm_genpd_init(&imx6q_pu_domain.base, NULL, false);
return of_genpd_add_provider_onecell(dev->of_node,
&imx_gpc_onecell_data);
@@ -437,13 +429,6 @@ clk_err:
return -EINVAL;
}
-#else
-static inline int imx_gpc_genpd_init(struct device *dev, struct regulator *reg)
-{
- return 0;
-}
-#endif /* CONFIG_PM_GENERIC_DOMAINS */
-
static int imx_gpc_probe(struct platform_device *pdev)
{
struct regulator *pu_reg;
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index ecc04ff13e95..4a023e8d1bdb 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -60,6 +60,7 @@ config SOC_AM43XX
select ARM_GIC
select MACH_OMAP_GENERIC
select MIGHT_HAVE_CACHE_L2X0
+ select HAVE_ARM_SCU
config SOC_DRA7XX
bool "TI DRA7XX"
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 8e52621b5a6b..e1d2e991d17a 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -392,6 +392,7 @@ static struct irq_chip wakeupgen_chip = {
.irq_mask = wakeupgen_mask,
.irq_unmask = wakeupgen_unmask,
.irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = irq_chip_set_type_parent,
.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index d78c12e7cb5e..486cc4ded190 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct device_node *np,
* registers. This address is needed early so the OCP registers that
* are part of the device's address space can be ioremapped properly.
*
+ * If SYSC access is not needed, the registers will not be remapped
+ * and non-availability of MPU access is not treated as an error.
+ *
* Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
* -ENXIO on absent or invalid register target address space.
*/
@@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
_save_mpu_port_index(oh);
+ /* if we don't need sysc access we don't need to ioremap */
+ if (!oh->class->sysc)
+ return 0;
+
+ /* we can't continue without MPU PORT if we need sysc access */
if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
return -ENXIO;
@@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
oh->name);
/* Extract the IO space from device tree blob */
- if (!np)
+ if (!np) {
+ pr_err("omap_hwmod: %s: no dt node\n", oh->name);
return -ENXIO;
+ }
va_start = of_iomap(np, index + oh->mpu_rt_idx);
} else {
@@ -2456,13 +2466,11 @@ static int __init _init(struct omap_hwmod *oh, void *data)
oh->name, np->name);
}
- if (oh->class->sysc) {
- r = _init_mpu_rt_base(oh, NULL, index, np);
- if (r < 0) {
- WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
- oh->name);
- return 0;
- }
+ r = _init_mpu_rt_base(oh, NULL, index, np);
+ if (r < 0) {
+ WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
+ oh->name);
+ return 0;
}
r = _init_clocks(oh, NULL);
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 2606c6608bd8..562247bced49 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -827,8 +827,7 @@ static struct omap_hwmod_class_sysconfig dra7xx_gpmc_sysc = {
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
@@ -844,7 +843,7 @@ static struct omap_hwmod dra7xx_gpmc_hwmod = {
.class = &dra7xx_gpmc_hwmod_class,
.clkdm_name = "l3main1_clkdm",
/* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */
- .flags = HWMOD_SWSUP_SIDLE | DEBUG_OMAP_GPMC_HWMOD_FLAGS,
+ .flags = DEBUG_OMAP_GPMC_HWMOD_FLAGS,
.main_clk = "l3_iclk_div",
.prcm = {
.omap4 = {
diff --git a/arch/arm/mach-pxa/capc7117.c b/arch/arm/mach-pxa/capc7117.c
index c092730749b9..bf366b39fa61 100644
--- a/arch/arm/mach-pxa/capc7117.c
+++ b/arch/arm/mach-pxa/capc7117.c
@@ -24,6 +24,7 @@
#include <linux/ata_platform.h>
#include <linux/serial_8250.h>
#include <linux/gpio.h>
+#include <linux/regulator/machine.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -144,6 +145,8 @@ static void __init capc7117_init(void)
capc7117_uarts_init();
capc7117_ide_init();
+
+ regulator_has_full_constraints();
}
MACHINE_START(CAPC7117,
diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c
index bb99f59a36d8..a17a91eb8e9a 100644
--- a/arch/arm/mach-pxa/cm-x2xx.c
+++ b/arch/arm/mach-pxa/cm-x2xx.c
@@ -13,6 +13,7 @@
#include <linux/syscore_ops.h>
#include <linux/irq.h>
#include <linux/gpio.h>
+#include <linux/regulator/machine.h>
#include <linux/dm9000.h>
#include <linux/leds.h>
@@ -466,6 +467,8 @@ static void __init cmx2xx_init(void)
cmx2xx_init_ac97();
cmx2xx_init_touchscreen();
cmx2xx_init_leds();
+
+ regulator_has_full_constraints();
}
static void __init cmx2xx_init_irq(void)
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index 4d3588d26c2a..5851f4c254c1 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -835,6 +835,8 @@ static void __init cm_x300_init(void)
cm_x300_init_ac97();
cm_x300_init_wi2wi();
cm_x300_init_bl();
+
+ regulator_has_full_constraints();
}
static void __init cm_x300_fixup(struct tag *tags, char **cmdline)
diff --git a/arch/arm/mach-pxa/colibri-pxa270.c b/arch/arm/mach-pxa/colibri-pxa270.c
index 5f9d9303b346..3503826333c7 100644
--- a/arch/arm/mach-pxa/colibri-pxa270.c
+++ b/arch/arm/mach-pxa/colibri-pxa270.c
@@ -18,6 +18,7 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
+#include <linux/regulator/machine.h>
#include <linux/ucb1400.h>
#include <asm/mach/arch.h>
@@ -294,6 +295,8 @@ static void __init colibri_pxa270_init(void)
printk(KERN_ERR "Illegal colibri_pxa270_baseboard type %d\n",
colibri_pxa270_baseboard);
}
+
+ regulator_has_full_constraints();
}
/* The "Income s.r.o. SH-Dmaster PXA270 SBC" board can be booted either
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 51531ecffca8..9d7072b04045 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -1306,6 +1306,8 @@ static void __init em_x270_init(void)
em_x270_init_i2c();
em_x270_init_camera();
em_x270_userspace_consumers_init();
+
+ regulator_has_full_constraints();
}
MACHINE_START(EM_X270, "Compulab EM-X270")
diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
index c98511c5abd1..9b0eb0252af6 100644
--- a/arch/arm/mach-pxa/icontrol.c
+++ b/arch/arm/mach-pxa/icontrol.c
@@ -26,6 +26,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/can/platform/mcp251x.h>
+#include <linux/regulator/machine.h>
#include "generic.h"
@@ -185,6 +186,8 @@ static void __init icontrol_init(void)
mxm_8x10_mmc_init();
icontrol_can_init();
+
+ regulator_has_full_constraints();
}
MACHINE_START(ICONTROL, "iControl/SafeTcam boards using Embedian MXM-8x10 CoM")
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index 872dcb20e757..066e3a250ee0 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -26,6 +26,7 @@
#include <linux/dm9000.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/partitions.h>
+#include <linux/regulator/machine.h>
#include <linux/i2c/pxa-i2c.h>
#include <asm/types.h>
@@ -534,6 +535,8 @@ static void __init trizeps4_init(void)
BCR_writew(trizeps_conxs_bcr);
board_backlight_power(1);
+
+ regulator_has_full_constraints();
}
static void __init trizeps4_map_io(void)
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index aa89488f961e..54122a983ae3 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -24,6 +24,7 @@
#include <linux/dm9000.h>
#include <linux/ucb1400.h>
#include <linux/ata_platform.h>
+#include <linux/regulator/machine.h>
#include <linux/regulator/max1586.h>
#include <linux/i2c/pxa-i2c.h>
@@ -711,6 +712,8 @@ static void __init vpac270_init(void)
vpac270_ts_init();
vpac270_rtc_init();
vpac270_ide_init();
+
+ regulator_has_full_constraints();
}
MACHINE_START(VPAC270, "Voipac PXA270")
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index ac2ae5c71ab4..6158566fa0f7 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -868,6 +868,8 @@ static void __init zeus_init(void)
i2c_register_board_info(0, ARRAY_AND_SIZE(zeus_i2c_devices));
pxa2xx_set_spi_info(3, &pxa2xx_spi_ssp3_master_info);
spi_register_board_info(zeus_spi_board_info, ARRAY_SIZE(zeus_spi_board_info));
+
+ regulator_has_full_constraints();
}
static struct map_desc zeus_io_desc[] __initdata = {
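Note: the regulator_has_full_constraints() calls added to these PXA board files all follow one pattern: boards that register no regulators declare their constraints complete, so consumers calling regulator_get() receive a dummy supply instead of deferring probe forever. A minimal sketch, with a hypothetical board init function:

        /* Hypothetical legacy board: no regulators are registered at all */
        static void __init example_board_init(void)
        {
                /* register platform devices here, as the boards above do */

                /* Tell the regulator core all constraints are known,
                 * so missing supplies resolve to dummy regulators. */
                regulator_has_full_constraints();
        }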
diff --git a/arch/arm/mach-spear/generic.h b/arch/arm/mach-spear/generic.h
index a99d90a4d09c..06640914d9a0 100644
--- a/arch/arm/mach-spear/generic.h
+++ b/arch/arm/mach-spear/generic.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2009-2012 ST Microelectronics
* Rajeev Kumar <rajeev-dlh.kumar@st.com>
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/include/mach/irqs.h b/arch/arm/mach-spear/include/mach/irqs.h
index 92da0a8c6bce..7058720c5278 100644
--- a/arch/arm/mach-spear/include/mach/irqs.h
+++ b/arch/arm/mach-spear/include/mach/irqs.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2009-2012 ST Microelectronics
* Rajeev Kumar <rajeev-dlh.kumar@st.com>
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/include/mach/misc_regs.h b/arch/arm/mach-spear/include/mach/misc_regs.h
index 935639ce59ba..cfaf7c665b58 100644
--- a/arch/arm/mach-spear/include/mach/misc_regs.h
+++ b/arch/arm/mach-spear/include/mach/misc_regs.h
@@ -4,7 +4,7 @@
* Miscellaneous registers definitions for SPEAr3xx machine family
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/include/mach/spear.h b/arch/arm/mach-spear/include/mach/spear.h
index f2d6a0176575..5ed841ccf8a3 100644
--- a/arch/arm/mach-spear/include/mach/spear.h
+++ b/arch/arm/mach-spear/include/mach/spear.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2009,2012 ST Microelectronics
* Rajeev Kumar<rajeev-dlh.kumar@st.com>
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/include/mach/uncompress.h b/arch/arm/mach-spear/include/mach/uncompress.h
index 51b2dc93e4da..8439b9c12edb 100644
--- a/arch/arm/mach-spear/include/mach/uncompress.h
+++ b/arch/arm/mach-spear/include/mach/uncompress.h
@@ -4,7 +4,7 @@
* Serial port stubs for kernel decompress status messages
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/pl080.c b/arch/arm/mach-spear/pl080.c
index cfa1199d0f4a..b4529f3e0ee9 100644
--- a/arch/arm/mach-spear/pl080.c
+++ b/arch/arm/mach-spear/pl080.c
@@ -4,7 +4,7 @@
* DMAC pl080 definitions for SPEAr platform
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/pl080.h b/arch/arm/mach-spear/pl080.h
index eb6590ded40d..608dec6725ae 100644
--- a/arch/arm/mach-spear/pl080.h
+++ b/arch/arm/mach-spear/pl080.h
@@ -4,7 +4,7 @@
* DMAC pl080 definitions for SPEAr platform
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/restart.c b/arch/arm/mach-spear/restart.c
index ce5e098c4888..b4342155a783 100644
--- a/arch/arm/mach-spear/restart.c
+++ b/arch/arm/mach-spear/restart.c
@@ -4,7 +4,7 @@
* SPEAr platform specific restart functions
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear1310.c b/arch/arm/mach-spear/spear1310.c
index d9ce4d8000f0..cd5d375d91f0 100644
--- a/arch/arm/mach-spear/spear1310.c
+++ b/arch/arm/mach-spear/spear1310.c
@@ -4,7 +4,7 @@
* SPEAr1310 machine source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear1340.c b/arch/arm/mach-spear/spear1340.c
index 3f3c0f124bd3..94594d5a446c 100644
--- a/arch/arm/mach-spear/spear1340.c
+++ b/arch/arm/mach-spear/spear1340.c
@@ -4,7 +4,7 @@
* SPEAr1340 machine source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear13xx.c b/arch/arm/mach-spear/spear13xx.c
index 2e463a93468d..b7afce6795f4 100644
--- a/arch/arm/mach-spear/spear13xx.c
+++ b/arch/arm/mach-spear/spear13xx.c
@@ -4,7 +4,7 @@
* SPEAr13XX machines common source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear300.c b/arch/arm/mach-spear/spear300.c
index b52e48f342f4..5b32edda2276 100644
--- a/arch/arm/mach-spear/spear300.c
+++ b/arch/arm/mach-spear/spear300.c
@@ -4,7 +4,7 @@
* SPEAr300 machine source file
*
* Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear310.c b/arch/arm/mach-spear/spear310.c
index ed2029db391f..86a44ac7ff67 100644
--- a/arch/arm/mach-spear/spear310.c
+++ b/arch/arm/mach-spear/spear310.c
@@ -4,7 +4,7 @@
* SPEAr310 machine source file
*
* Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear320.c b/arch/arm/mach-spear/spear320.c
index bf634b32a930..d45d751926c5 100644
--- a/arch/arm/mach-spear/spear320.c
+++ b/arch/arm/mach-spear/spear320.c
@@ -4,7 +4,7 @@
* SPEAr320 machine source file
*
* Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear3xx.c b/arch/arm/mach-spear/spear3xx.c
index bf3b1fd8cb23..23394ac76cf2 100644
--- a/arch/arm/mach-spear/spear3xx.c
+++ b/arch/arm/mach-spear/spear3xx.c
@@ -4,7 +4,7 @@
* SPEAr3XX machines common source file
*
* Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1ced8a0f7a52..cba12f34ff77 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1971,7 +1971,7 @@ static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
{
int next_bitmap;
- if (mapping->nr_bitmaps > mapping->extensions)
+ if (mapping->nr_bitmaps >= mapping->extensions)
return -EINVAL;
next_bitmap = mapping->nr_bitmaps;
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 0716bbe19872..de2b246fed38 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -274,7 +274,10 @@ __v7_ca15mp_setup:
__v7_b15mp_setup:
__v7_ca17mp_setup:
mov r10, #0
-1:
+1: adr r12, __v7_setup_stack @ the local stack
+ stmia r12, {r0-r5, lr} @ v7_invalidate_l1 touches r0-r6
+ bl v7_invalidate_l1
+ ldmia r12, {r0-r5, lr}
#ifdef CONFIG_SMP
ALT_SMP(mrc p15, 0, r0, c1, c0, 1)
ALT_UP(mov r0, #(1 << 6)) @ fake it for UP
@@ -283,7 +286,7 @@ __v7_ca17mp_setup:
orreq r0, r0, r10 @ Enable CPU-specific SMP bits
mcreq p15, 0, r0, c1, c0, 1
#endif
- b __v7_setup
+ b __v7_setup_cont
/*
* Errata:
@@ -413,10 +416,11 @@ __v7_pj4b_setup:
__v7_setup:
adr r12, __v7_setup_stack @ the local stack
- stmia r12, {r0-r5, r7, r9, r11, lr}
+ stmia r12, {r0-r5, lr} @ v7_invalidate_l1 touches r0-r6
bl v7_invalidate_l1
- ldmia r12, {r0-r5, r7, r9, r11, lr}
+ ldmia r12, {r0-r5, lr}
+__v7_setup_cont:
and r0, r9, #0xff000000 @ ARM?
teq r0, #0x41000000
bne __errata_finish
@@ -480,7 +484,7 @@ ENDPROC(__v7_setup)
.align 2
__v7_setup_stack:
- .space 4 * 11 @ 11 registers
+ .space 4 * 7 @ 7 registers
__INITDATA
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 4550d247e308..c011e2296cb1 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -74,32 +74,52 @@ struct jit_ctx {
int bpf_jit_enable __read_mostly;
-static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
+static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
+ unsigned int size)
+{
+ void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
+
+ if (!ptr)
+ return -EFAULT;
+ memcpy(ret, ptr, size);
+ return 0;
+}
+
+static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
{
u8 ret;
int err;
- err = skb_copy_bits(skb, offset, &ret, 1);
+ if (offset < 0)
+ err = call_neg_helper(skb, offset, &ret, 1);
+ else
+ err = skb_copy_bits(skb, offset, &ret, 1);
return (u64)err << 32 | ret;
}
-static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
{
u16 ret;
int err;
- err = skb_copy_bits(skb, offset, &ret, 2);
+ if (offset < 0)
+ err = call_neg_helper(skb, offset, &ret, 2);
+ else
+ err = skb_copy_bits(skb, offset, &ret, 2);
return (u64)err << 32 | ntohs(ret);
}
-static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
{
u32 ret;
int err;
- err = skb_copy_bits(skb, offset, &ret, 4);
+ if (offset < 0)
+ err = call_neg_helper(skb, offset, &ret, 4);
+ else
+ err = skb_copy_bits(skb, offset, &ret, 4);
return (u64)err << 32 | ntohl(ret);
}
@@ -536,9 +556,6 @@ static int build_body(struct jit_ctx *ctx)
case BPF_LD | BPF_B | BPF_ABS:
load_order = 0;
load:
- /* the interpreter will deal with the negative K */
- if ((int)k < 0)
- return -ENOTSUPP;
emit_mov_i(r_off, k, ctx);
load_common:
ctx->seen |= SEEN_DATA | SEEN_CALL;
@@ -547,12 +564,24 @@ load_common:
emit(ARM_SUB_I(r_scratch, r_skb_hl,
1 << load_order), ctx);
emit(ARM_CMP_R(r_scratch, r_off), ctx);
- condt = ARM_COND_HS;
+ condt = ARM_COND_GE;
} else {
emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
condt = ARM_COND_HI;
}
+ /*
+ * test for negative offset, only if we are
+ * currently scheduled to take the fast
+ * path. this will update the flags so that
+ * the slow-path instructions are ignored if the
+ * offset is negative.
+ *
+ * for load_order == 0 the HI condition will
+ * make loads at offset 0 take the slow path too.
+ */
+ _emit(condt, ARM_CMP_I(r_off, 0), ctx);
+
_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
ctx);
@@ -860,9 +889,11 @@ b_epilogue:
off = offsetof(struct sk_buff, vlan_tci);
emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
- OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
- else
- OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
+ OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
+ else {
+ OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
+ OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
+ }
break;
case BPF_ANC | SKF_AD_QUEUE:
ctx->seen |= SEEN_SKB;
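Note: the VLAN ancillary loads as now emitted correspond roughly to this C (a sketch of the emitted semantics, relying on VLAN_TAG_PRESENT being bit 12 of vlan_tci, which is what the LSR #12 above encodes):

        u32 tci = skb->vlan_tci;
        u32 vlan_tag     = tci & ~VLAN_TAG_PRESENT;   /* BPF_ANC | SKF_AD_VLAN_TAG */
        u32 vlan_present = (tci >> 12) & 0x1;         /* BPF_ANC | SKF_AD_VLAN_TAG_PRESENT */

The negative-offset handling above likewise mirrors the interpreter: offsets below zero are routed through bpf_internal_load_pointer_neg_helper() instead of rejecting the program with -ENOTSUPP.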
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index 9d259d94e429..1160434eece0 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -14,7 +14,7 @@ VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
VDSO_LDFLAGS += -nostdlib -shared
VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
-VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd)
+VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
obj-$(CONFIG_VDSO) += vdso.o
extra-$(CONFIG_VDSO) += vdso.lds
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index 0689c3fb56e3..58093edeea2e 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -823,7 +823,7 @@
device_type = "dma";
reg = <0x0 0x1f270000 0x0 0x10000>,
<0x0 0x1f200000 0x0 0x10000>,
- <0x0 0x1b008000 0x0 0x2000>,
+ <0x0 0x1b000000 0x0 0x400000>,
<0x0 0x1054a000 0x0 0x100>;
interrupts = <0x0 0x82 0x4>,
<0x0 0xb8 0x4>,
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index b112a39834d0..70fd9ffb58cf 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -25,6 +25,7 @@ generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += msi.h
diff --git a/arch/arm64/include/asm/mm-arch-hooks.h b/arch/arm64/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 562b655f5ba9..000000000000
--- a/arch/arm64/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_ARM64_MM_ARCH_HOOKS_H
-#define _ASM_ARM64_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_ARM64_MM_ARCH_HOOKS_H */
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 9d4aa18f2a82..e8ca6eaedd02 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -122,12 +122,12 @@ static int __init uefi_init(void)
/* Show what we know for posterity */
c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
- sizeof(vendor));
+ sizeof(vendor) * sizeof(efi_char16_t));
if (c16) {
for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
vendor[i] = c16[i];
vendor[i] = '\0';
- early_memunmap(c16, sizeof(vendor));
+ early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
}
pr_info("EFI v%u.%.02u by %s\n",
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index f860bfda454a..e16351819fed 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -585,7 +585,8 @@ ENDPROC(el0_irq)
*
*/
ENTRY(cpu_switch_to)
- add x8, x0, #THREAD_CPU_CONTEXT
+ mov x10, #THREAD_CPU_CONTEXT
+ add x8, x0, x10
mov x9, sp
stp x19, x20, [x8], #16 // store callee-saved registers
stp x21, x22, [x8], #16
@@ -594,7 +595,7 @@ ENTRY(cpu_switch_to)
stp x27, x28, [x8], #16
stp x29, x9, [x8], #16
str lr, [x8]
- add x8, x1, #THREAD_CPU_CONTEXT
+ add x8, x1, x10
ldp x19, x20, [x8], #16 // restore callee-saved registers
ldp x21, x22, [x8], #16
ldp x23, x24, [x8], #16
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 240b75c0e94f..463fa2e7e34c 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -61,7 +61,7 @@ void __init init_IRQ(void)
static bool migrate_one_irq(struct irq_desc *desc)
{
struct irq_data *d = irq_desc_get_irq_data(desc);
- const struct cpumask *affinity = d->affinity;
+ const struct cpumask *affinity = irq_data_get_affinity_mask(d);
struct irq_chip *c;
bool ret = false;
@@ -81,7 +81,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
if (!c->irq_set_affinity)
pr_debug("IRQ%u: unable to set affinity\n", d->irq);
else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
- cpumask_copy(d->affinity, affinity);
+ cpumask_copy(irq_data_get_affinity_mask(d), affinity);
return ret;
}
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 1670f15ef69e..948f0ad2de23 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
* Other callers might not initialize the si_lsb field,
* so check explicitely for the right codes here.
*/
- if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+ if (from->si_signo == SIGBUS &&
+ (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
break;
@@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
- memset(to, 0, sizeof *to);
-
if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE))
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index ec37ab3f524f..97bc68f4c689 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -199,16 +199,15 @@ up_fail:
*/
void update_vsyscall(struct timekeeper *tk)
{
- struct timespec xtime_coarse;
u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");
++vdso_data->tb_seq_count;
smp_wmb();
- xtime_coarse = __current_kernel_time();
vdso_data->use_syscall = use_syscall;
- vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
- vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+ vdso_data->xtime_coarse_sec = tk->xtime_sec;
+ vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
+ tk->tkr_mono.shift;
vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
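Note: both vdso updates in this series (the 32-bit ARM hunk earlier and this arm64 one) derive the coarse time from the timekeeper handed to update_vsyscall() instead of taking a separate __current_kernel_time() snapshot, so the coarse fields are filled from the same state as the rest of the vdso data. The timekeeper keeps its leftover nanoseconds left-shifted by tkr_mono.shift, hence the conversion (sketch consolidating both hunks):

        /* tkr_mono.xtime_nsec holds nanoseconds << tkr_mono.shift */
        vdso_data->xtime_coarse_sec  = tk->xtime_sec;
        vdso_data->xtime_coarse_nsec = (u32)(tk->tkr_mono.xtime_nsec >>
                                             tk->tkr_mono.shift);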
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index 1d66afdfac07..f61f2dd67464 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -12,6 +12,7 @@ generic-y += irq_work.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += param.h
generic-y += percpu.h
generic-y += preempt.h
diff --git a/arch/avr32/include/asm/mm-arch-hooks.h b/arch/avr32/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 145452ffbdad..000000000000
--- a/arch/avr32/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_AVR32_MM_ARCH_HOOKS_H
-#define _ASM_AVR32_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_AVR32_MM_ARCH_HOOKS_H */
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index d0f771be9e96..a124c55733db 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -18,6 +18,7 @@
#include <mach/pm.h>
+static bool disable_cpu_idle_poll;
static cycle_t read_cycle_count(struct clocksource *cs)
{
@@ -80,45 +81,45 @@ static int comparator_next_event(unsigned long delta,
return 0;
}
-static void comparator_mode(enum clock_event_mode mode,
- struct clock_event_device *evdev)
+static int comparator_shutdown(struct clock_event_device *evdev)
{
- switch (mode) {
- case CLOCK_EVT_MODE_ONESHOT:
- pr_debug("%s: start\n", evdev->name);
- /* FALLTHROUGH */
- case CLOCK_EVT_MODE_RESUME:
+ pr_debug("%s: %s\n", __func__, evdev->name);
+ sysreg_write(COMPARE, 0);
+
+ if (disable_cpu_idle_poll) {
+ disable_cpu_idle_poll = false;
/*
- * If we're using the COUNT and COMPARE registers we
- * need to force idle poll.
+ * Only disable idle poll if we have forced that
+ * in a previous call.
*/
- cpu_idle_poll_ctrl(true);
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- sysreg_write(COMPARE, 0);
- pr_debug("%s: stop\n", evdev->name);
- if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
- evdev->mode == CLOCK_EVT_MODE_RESUME) {
- /*
- * Only disable idle poll if we have forced that
- * in a previous call.
- */
- cpu_idle_poll_ctrl(false);
- }
- break;
- default:
- BUG();
+ cpu_idle_poll_ctrl(false);
}
+ return 0;
+}
+
+static int comparator_set_oneshot(struct clock_event_device *evdev)
+{
+ pr_debug("%s: %s\n", __func__, evdev->name);
+
+ disable_cpu_idle_poll = true;
+ /*
+ * If we're using the COUNT and COMPARE registers we
+ * need to force idle poll.
+ */
+ cpu_idle_poll_ctrl(true);
+
+ return 0;
}
static struct clock_event_device comparator = {
- .name = "avr32_comparator",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .shift = 16,
- .rating = 50,
- .set_next_event = comparator_next_event,
- .set_mode = comparator_mode,
+ .name = "avr32_comparator",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .shift = 16,
+ .rating = 50,
+ .set_next_event = comparator_next_event,
+ .set_state_shutdown = comparator_shutdown,
+ .set_state_oneshot = comparator_set_oneshot,
+ .tick_resume = comparator_set_oneshot,
};
void read_persistent_clock(struct timespec *ts)
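The hunk above replaces the single set_mode() switch with discrete set_state_shutdown/set_state_oneshot/tick_resume callbacks. A minimal userspace model of that refactoring, with stand-in names rather than the kernel clockevent API:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-ins for the clockevent callbacks (not kernel API). */
struct toy_clock_event {
	const char *name;
	int (*set_state_shutdown)(struct toy_clock_event *);
	int (*set_state_oneshot)(struct toy_clock_event *);
};

static bool idle_poll_forced;

static int toy_shutdown(struct toy_clock_event *evt)
{
	printf("%s: shutdown\n", evt->name);
	if (idle_poll_forced) {
		idle_poll_forced = false;   /* undo only what oneshot forced */
		printf("%s: idle poll released\n", evt->name);
	}
	return 0;
}

static int toy_oneshot(struct toy_clock_event *evt)
{
	printf("%s: oneshot\n", evt->name);
	idle_poll_forced = true;            /* mirror cpu_idle_poll_ctrl(true) */
	return 0;
}

int main(void)
{
	struct toy_clock_event evt = {
		.name = "toy_comparator",
		.set_state_shutdown = toy_shutdown,
		.set_state_oneshot = toy_oneshot,
	};

	evt.set_state_oneshot(&evt);   /* core calls one hook per state */
	evt.set_state_shutdown(&evt);  /* no mode switch statement needed */
	return 0;
}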
diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c
index 23b1a97fae7a..52c179bec0cc 100644
--- a/arch/avr32/mach-at32ap/clock.c
+++ b/arch/avr32/mach-at32ap/clock.c
@@ -80,6 +80,9 @@ int clk_enable(struct clk *clk)
{
unsigned long flags;
+ if (!clk)
+ return 0;
+
spin_lock_irqsave(&clk_lock, flags);
__clk_enable(clk);
spin_unlock_irqrestore(&clk_lock, flags);
@@ -106,6 +109,9 @@ void clk_disable(struct clk *clk)
{
unsigned long flags;
+ if (IS_ERR_OR_NULL(clk))
+ return;
+
spin_lock_irqsave(&clk_lock, flags);
__clk_disable(clk);
spin_unlock_irqrestore(&clk_lock, flags);
@@ -117,6 +123,9 @@ unsigned long clk_get_rate(struct clk *clk)
unsigned long flags;
unsigned long rate;
+ if (!clk)
+ return 0;
+
spin_lock_irqsave(&clk_lock, flags);
rate = clk->get_rate(clk);
spin_unlock_irqrestore(&clk_lock, flags);
@@ -129,6 +138,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
{
unsigned long flags, actual_rate;
+ if (!clk)
+ return 0;
+
if (!clk->set_rate)
return -ENOSYS;
@@ -145,6 +157,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
unsigned long flags;
long ret;
+ if (!clk)
+ return 0;
+
if (!clk->set_rate)
return -ENOSYS;
@@ -161,6 +176,9 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
unsigned long flags;
int ret;
+ if (!clk)
+ return 0;
+
if (!clk->set_parent)
return -ENOSYS;
@@ -174,7 +192,7 @@ EXPORT_SYMBOL(clk_set_parent);
struct clk *clk_get_parent(struct clk *clk)
{
- return clk->parent;
+ return !clk ? NULL : clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);
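Each hunk in this file adds an early return so the clk operations tolerate a NULL (or error) handle, matching the convention that a NULL clk means "no clock, succeed quietly". A small self-contained sketch of that guard pattern (toy struct and helpers are assumptions for illustration):

#include <stdio.h>
#include <stddef.h>

struct toy_clk { unsigned long rate; };           /* illustrative only */

static int toy_clk_enable(struct toy_clk *clk)
{
	if (!clk)
		return 0;                         /* NULL clk: nothing to do */
	printf("enabled clk @ %lu Hz\n", clk->rate);
	return 0;
}

static unsigned long toy_clk_get_rate(struct toy_clk *clk)
{
	if (!clk)
		return 0;                         /* NULL clk: report 0 Hz */
	return clk->rate;
}

int main(void)
{
	struct toy_clk uart = { .rate = 12000000 };

	toy_clk_enable(&uart);
	toy_clk_enable(NULL);                     /* safe: no dereference */
	printf("rate(NULL) = %lu\n", toy_clk_get_rate(NULL));
	return 0;
}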
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 07051a63415d..61cd1e786a14 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -21,6 +21,7 @@ generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += mutex.h
diff --git a/arch/blackfin/include/asm/mm-arch-hooks.h b/arch/blackfin/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 1c5211ec338f..000000000000
--- a/arch/blackfin/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_BLACKFIN_MM_ARCH_HOOKS_H
-#define _ASM_BLACKFIN_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_BLACKFIN_MM_ARCH_HOOKS_H */
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index 7aeb32272975..f17c4dc6050c 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -26,6 +26,7 @@ generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += local.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += mman.h
generic-y += mmu.h
generic-y += mmu_context.h
diff --git a/arch/c6x/include/asm/mm-arch-hooks.h b/arch/c6x/include/asm/mm-arch-hooks.h
deleted file mode 100644
index bb3c4a6ce8e9..000000000000
--- a/arch/c6x/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_C6X_MM_ARCH_HOOKS_H
-#define _ASM_C6X_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_C6X_MM_ARCH_HOOKS_H */
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index d294f6aaff1d..ad2244f35bca 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -18,6 +18,7 @@ generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += module.h
generic-y += percpu.h
generic-y += preempt.h
diff --git a/arch/cris/include/asm/mm-arch-hooks.h b/arch/cris/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 314f774db2b0..000000000000
--- a/arch/cris/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_CRIS_MM_ARCH_HOOKS_H
-#define _ASM_CRIS_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_CRIS_MM_ARCH_HOOKS_H */
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild
index 30edce31e5c2..8e47b832cc76 100644
--- a/arch/frv/include/asm/Kbuild
+++ b/arch/frv/include/asm/Kbuild
@@ -4,5 +4,6 @@ generic-y += cputime.h
generic-y += exec.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += preempt.h
generic-y += trace_clock.h
diff --git a/arch/frv/include/asm/mm-arch-hooks.h b/arch/frv/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 51d13a870404..000000000000
--- a/arch/frv/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_FRV_MM_ARCH_HOOKS_H
-#define _ASM_FRV_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_FRV_MM_ARCH_HOOKS_H */
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index 00379d64f707..70e6ae1e7006 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -33,6 +33,7 @@ generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += mman.h
generic-y += mmu.h
generic-y += mmu_context.h
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 5ade4a163558..daee37bd0999 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -28,6 +28,7 @@ generic-y += kmap_types.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += pci.h
diff --git a/arch/hexagon/include/asm/mm-arch-hooks.h b/arch/hexagon/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 05e8b939e416..000000000000
--- a/arch/hexagon/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_HEXAGON_MM_ARCH_HOOKS_H
-#define _ASM_HEXAGON_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_HEXAGON_MM_ARCH_HOOKS_H */
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index ccff13d33fa2..9de3ba12f6b9 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -4,6 +4,7 @@ generic-y += exec.h
generic-y += irq_work.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += preempt.h
generic-y += trace_clock.h
generic-y += vtime.h
diff --git a/arch/ia64/include/asm/mm-arch-hooks.h b/arch/ia64/include/asm/mm-arch-hooks.h
deleted file mode 100644
index ab4b5c698322..000000000000
--- a/arch/ia64/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_IA64_MM_ARCH_HOOKS_H
-#define _ASM_IA64_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_IA64_MM_ARCH_HOOKS_H */
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index ba1cdc018731..e0eb704ca1fa 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -4,6 +4,7 @@ generic-y += cputime.h
generic-y += exec.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += module.h
generic-y += preempt.h
generic-y += sections.h
diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h
index 0c3f25ee3381..f8de767ce2bc 100644
--- a/arch/m32r/include/asm/io.h
+++ b/arch/m32r/include/asm/io.h
@@ -174,6 +174,11 @@ static inline void _writel(unsigned long l, unsigned long addr)
#define iowrite16 writew
#define iowrite32 writel
+#define ioread16be(addr) be16_to_cpu(readw(addr))
+#define ioread32be(addr) be32_to_cpu(readl(addr))
+#define iowrite16be(v, addr) writew(cpu_to_be16(v), (addr))
+#define iowrite32be(v, addr) writel(cpu_to_be32(v), (addr))
+
#define mmiowb()
#define flush_write_buffers() do { } while (0) /* M32R_FIXME */
diff --git a/arch/m32r/include/asm/mm-arch-hooks.h b/arch/m32r/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 6d60b4750f41..000000000000
--- a/arch/m32r/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_M32R_MM_ARCH_HOOKS_H
-#define _ASM_M32R_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_M32R_MM_ARCH_HOOKS_H */
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 33013dfcd3e1..c496d48a8c8d 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -125,6 +125,13 @@ endif # M68KCLASSIC
if COLDFIRE
+choice
+ prompt "ColdFire SoC type"
+ default M520x
+ help
+ Select the type of ColdFire System-on-Chip (SoC) that you want
+ to build for.
+
config M5206
bool "MCF5206"
depends on !MMU
@@ -174,9 +181,6 @@ config M525x
help
Freescale (Motorola) Coldfire 5251/5253 processor support.
-config M527x
- bool
-
config M5271
bool "MCF5271"
depends on !MMU
@@ -223,9 +227,6 @@ config M5307
help
Motorola ColdFire 5307 processor support.
-config M53xx
- bool
-
config M532x
bool "MCF532x"
depends on !MMU
@@ -251,9 +252,6 @@ config M5407
help
Motorola ColdFire 5407 processor support.
-config M54xx
- bool
-
config M547x
bool "MCF547x"
select M54xx
@@ -280,6 +278,17 @@ config M5441x
help
Freescale Coldfire 54410/54415/54416/54417/54418 processor support.
+endchoice
+
+config M527x
+ bool
+
+config M53xx
+ bool
+
+config M54xx
+ bool
+
endif # COLDFIRE
@@ -416,22 +425,18 @@ config HAVE_MBAR
config HAVE_IPSBAR
bool
-config CLOCK_SET
- bool "Enable setting the CPU clock frequency"
- depends on COLDFIRE
- default n
- help
- On some CPU's you do not need to know what the core CPU clock
- frequency is. On these you can disable clock setting. On some
- traditional 68K parts, and on all ColdFire parts you need to set
- the appropriate CPU clock frequency. On these devices many of the
- onboard peripherals derive their timing from the master CPU clock
- frequency.
-
config CLOCK_FREQ
int "Set the core clock frequency"
+ default "25000000" if M5206
+ default "54000000" if M5206e
+ default "166666666" if M520x
+ default "140000000" if M5249
+ default "150000000" if M527x || M523x
+ default "90000000" if M5307
+ default "50000000" if M5407
+ default "266000000" if M54xx
default "66666666"
- depends on CLOCK_SET
+ depends on COLDFIRE
help
Define the CPU clock frequency in use. This is the core clock
frequency, it may or may not be the same as the external clock
diff --git a/arch/m68k/configs/m5208evb_defconfig b/arch/m68k/configs/m5208evb_defconfig
index e7292f460af4..4c7b7938d53a 100644
--- a/arch/m68k/configs/m5208evb_defconfig
+++ b/arch/m68k/configs/m5208evb_defconfig
@@ -1,10 +1,6 @@
-# CONFIG_MMU is not set
-CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
@@ -16,17 +12,12 @@ CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
-CONFIG_M520x=y
-CONFIG_CLOCK_SET=y
-CONFIG_CLOCK_FREQ=166666666
-CONFIG_CLOCK_DIV=2
-CONFIG_M5208EVB=y
+# CONFIG_MMU is not set
# CONFIG_4KSTACKS is not set
CONFIG_RAMBASE=0x40000000
CONFIG_RAMSIZE=0x2000000
CONFIG_VECTORBASE=0x40000000
CONFIG_KERNELBASE=0x40020000
-CONFIG_RAM16BIT=y
CONFIG_BINFMT_FLAT=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -40,24 +31,19 @@ CONFIG_INET=y
# CONFIG_IPV6 is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_RAM=y
CONFIG_MTD_UCLINUX=y
CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_FEC=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
+# CONFIG_UNIX98_PTYS is not set
CONFIG_SERIAL_MCF=y
CONFIG_SERIAL_MCF_BAUDRATE=115200
CONFIG_SERIAL_MCF_CONSOLE=y
-# CONFIG_UNIX98_PTYS is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
@@ -68,8 +54,6 @@ CONFIG_EXT2_FS=y
CONFIG_ROMFS_FS=y
CONFIG_ROMFS_BACKED_BY_MTD=y
# CONFIG_NETWORK_FILESYSTEMS is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_FULLDEBUG=y
CONFIG_BOOTPARAM=y
CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
+CONFIG_FULLDEBUG=y
diff --git a/arch/m68k/configs/m5249evb_defconfig b/arch/m68k/configs/m5249evb_defconfig
index 0cd4b39f325b..a782f368650f 100644
--- a/arch/m68k/configs/m5249evb_defconfig
+++ b/arch/m68k/configs/m5249evb_defconfig
@@ -1,10 +1,6 @@
-# CONFIG_MMU is not set
-CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
@@ -16,10 +12,8 @@ CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_MMU is not set
CONFIG_M5249=y
-CONFIG_CLOCK_SET=y
-CONFIG_CLOCK_FREQ=140000000
-CONFIG_CLOCK_DIV=2
CONFIG_M5249C3=y
CONFIG_RAMBASE=0x00000000
CONFIG_RAMSIZE=0x00800000
@@ -38,23 +32,18 @@ CONFIG_INET=y
# CONFIG_IPV6 is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_RAM=y
CONFIG_MTD_UCLINUX=y
CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
+# CONFIG_UNIX98_PTYS is not set
CONFIG_SERIAL_MCF=y
CONFIG_SERIAL_MCF_CONSOLE=y
-# CONFIG_UNIX98_PTYS is not set
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
@@ -62,7 +51,5 @@ CONFIG_EXT2_FS=y
CONFIG_ROMFS_FS=y
CONFIG_ROMFS_BACKED_BY_MTD=y
# CONFIG_NETWORK_FILESYSTEMS is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_BOOTPARAM=y
CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
-# CONFIG_CRC32 is not set
diff --git a/arch/m68k/configs/m5272c3_defconfig b/arch/m68k/configs/m5272c3_defconfig
index a60cb3509135..6f5fb92f5cbf 100644
--- a/arch/m68k/configs/m5272c3_defconfig
+++ b/arch/m68k/configs/m5272c3_defconfig
@@ -1,10 +1,6 @@
-# CONFIG_MMU is not set
-CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
@@ -16,8 +12,8 @@ CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_MMU is not set
CONFIG_M5272=y
-CONFIG_CLOCK_SET=y
CONFIG_M5272C3=y
CONFIG_RAMBASE=0x00000000
CONFIG_RAMSIZE=0x00800000
@@ -36,23 +32,18 @@ CONFIG_INET=y
# CONFIG_IPV6 is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_RAM=y
CONFIG_MTD_UCLINUX=y
CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_FEC=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
+# CONFIG_UNIX98_PTYS is not set
CONFIG_SERIAL_MCF=y
CONFIG_SERIAL_MCF_CONSOLE=y
-# CONFIG_UNIX98_PTYS is not set
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
@@ -61,6 +52,5 @@ CONFIG_EXT2_FS=y
CONFIG_ROMFS_FS=y
CONFIG_ROMFS_BACKED_BY_MTD=y
# CONFIG_NETWORK_FILESYSTEMS is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_BOOTPARAM=y
CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
diff --git a/arch/m68k/configs/m5275evb_defconfig b/arch/m68k/configs/m5275evb_defconfig
index e6502ab7cb2f..b5d7cd1ce856 100644
--- a/arch/m68k/configs/m5275evb_defconfig
+++ b/arch/m68k/configs/m5275evb_defconfig
@@ -1,10 +1,6 @@
-# CONFIG_MMU is not set
-CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
@@ -16,11 +12,8 @@ CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_MMU is not set
CONFIG_M5275=y
-CONFIG_CLOCK_SET=y
-CONFIG_CLOCK_FREQ=150000000
-CONFIG_CLOCK_DIV=2
-CONFIG_M5275EVB=y
# CONFIG_4KSTACKS is not set
CONFIG_RAMBASE=0x00000000
CONFIG_RAMSIZE=0x00000000
@@ -39,24 +32,19 @@ CONFIG_INET=y
# CONFIG_IPV6 is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_RAM=y
CONFIG_MTD_UCLINUX=y
CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_FEC=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
+# CONFIG_UNIX98_PTYS is not set
CONFIG_SERIAL_MCF=y
CONFIG_SERIAL_MCF_CONSOLE=y
-# CONFIG_UNIX98_PTYS is not set
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
@@ -65,8 +53,5 @@ CONFIG_EXT2_FS=y
CONFIG_ROMFS_FS=y
CONFIG_ROMFS_BACKED_BY_MTD=y
# CONFIG_NETWORK_FILESYSTEMS is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_BOOTPARAM=y
CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
-# CONFIG_CRC32 is not set
diff --git a/arch/m68k/configs/m5307c3_defconfig b/arch/m68k/configs/m5307c3_defconfig
index 023812abd2e6..1b4c09461c40 100644
--- a/arch/m68k/configs/m5307c3_defconfig
+++ b/arch/m68k/configs/m5307c3_defconfig
@@ -1,10 +1,6 @@
-# CONFIG_MMU is not set
-CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
@@ -16,10 +12,8 @@ CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_MMU is not set
CONFIG_M5307=y
-CONFIG_CLOCK_SET=y
-CONFIG_CLOCK_FREQ=90000000
-CONFIG_CLOCK_DIV=2
CONFIG_M5307C3=y
CONFIG_RAMBASE=0x00000000
CONFIG_RAMSIZE=0x00800000
@@ -38,16 +32,11 @@ CONFIG_INET=y
# CONFIG_IPV6 is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_RAM=y
CONFIG_MTD_UCLINUX=y
CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=y
CONFIG_SLIP=y
CONFIG_SLIP_COMPRESSED=y
@@ -56,21 +45,17 @@ CONFIG_SLIP_COMPRESSED=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_MCF=y
CONFIG_SERIAL_MCF_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
-# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
# CONFIG_DNOTIFY is not set
CONFIG_ROMFS_FS=y
CONFIG_ROMFS_BACKED_BY_MTD=y
# CONFIG_NETWORK_FILESYSTEMS is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_FULLDEBUG=y
CONFIG_BOOTPARAM=y
CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
-# CONFIG_CRC32 is not set
+CONFIG_FULLDEBUG=y
diff --git a/arch/m68k/configs/m5407c3_defconfig b/arch/m68k/configs/m5407c3_defconfig
index 557b39f3be90..275ad543d4bc 100644
--- a/arch/m68k/configs/m5407c3_defconfig
+++ b/arch/m68k/configs/m5407c3_defconfig
@@ -1,10 +1,6 @@
-# CONFIG_MMU is not set
-CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
@@ -17,9 +13,8 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_MMU is not set
CONFIG_M5407=y
-CONFIG_CLOCK_SET=y
-CONFIG_CLOCK_FREQ=50000000
CONFIG_M5407C3=y
CONFIG_RAMBASE=0x00000000
CONFIG_RAMSIZE=0x00000000
@@ -38,22 +33,17 @@ CONFIG_INET=y
# CONFIG_IPV6 is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_RAM=y
CONFIG_MTD_UCLINUX=y
CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=y
# CONFIG_INPUT is not set
# CONFIG_VT is not set
+# CONFIG_UNIX98_PTYS is not set
CONFIG_SERIAL_MCF=y
CONFIG_SERIAL_MCF_CONSOLE=y
-# CONFIG_UNIX98_PTYS is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
@@ -63,8 +53,5 @@ CONFIG_EXT2_FS=y
CONFIG_ROMFS_FS=y
CONFIG_ROMFS_BACKED_BY_MTD=y
# CONFIG_NETWORK_FILESYSTEMS is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_BOOTPARAM=y
CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
-# CONFIG_CRC32 is not set
diff --git a/arch/m68k/configs/m5475evb_defconfig b/arch/m68k/configs/m5475evb_defconfig
index c5018a68819b..4f4ccd13c11b 100644
--- a/arch/m68k/configs/m5475evb_defconfig
+++ b/arch/m68k/configs/m5475evb_defconfig
@@ -1,11 +1,7 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_SYSCTL_SYSCALL=y
# CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
@@ -20,19 +16,16 @@ CONFIG_MODULES=y
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_COLDFIRE=y
-CONFIG_M547x=y
-CONFIG_CLOCK_SET=y
-CONFIG_CLOCK_FREQ=266000000
# CONFIG_4KSTACKS is not set
CONFIG_RAMBASE=0x0
CONFIG_RAMSIZE=0x2000000
CONFIG_VECTORBASE=0x0
CONFIG_MBAR=0xff000000
CONFIG_KERNELBASE=0x20000
+CONFIG_PCI=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 1555bc189c7d..eb85bd9c6180 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -18,6 +18,7 @@ generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += mman.h
generic-y += mutex.h
generic-y += percpu.h
diff --git a/arch/m68k/include/asm/coldfire.h b/arch/m68k/include/asm/coldfire.h
index c94557b91448..50aa4dac9ca2 100644
--- a/arch/m68k/include/asm/coldfire.h
+++ b/arch/m68k/include/asm/coldfire.h
@@ -19,7 +19,7 @@
* in any case new boards come along from time to time that have yet
* another different clocking frequency.
*/
-#ifdef CONFIG_CLOCK_SET
+#ifdef CONFIG_CLOCK_FREQ
#define MCF_CLK CONFIG_CLOCK_FREQ
#else
#error "Don't know what your ColdFire CPU clock frequency is??"
diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
index 618c85d3c786..f55cad529400 100644
--- a/arch/m68k/include/asm/io_mm.h
+++ b/arch/m68k/include/asm/io_mm.h
@@ -413,7 +413,8 @@ static inline void isa_delay(void)
#define writew(val, addr) out_le16((addr), (val))
#endif /* CONFIG_ATARI_ROM_ISA */
-#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA)
+#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA) && \
+ !(defined(CONFIG_PCI) && defined(CONFIG_COLDFIRE))
/*
* We need to define dummy functions for GENERIC_IOMAP support.
*/
diff --git a/arch/m68k/include/asm/mm-arch-hooks.h b/arch/m68k/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 7e8709bc90ae..000000000000
--- a/arch/m68k/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_M68K_MM_ARCH_HOOKS_H
-#define _ASM_M68K_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_M68K_MM_ARCH_HOOKS_H */
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild
index 199320f3c345..df31353fd200 100644
--- a/arch/metag/include/asm/Kbuild
+++ b/arch/metag/include/asm/Kbuild
@@ -25,6 +25,7 @@ generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += param.h
diff --git a/arch/metag/include/asm/mm-arch-hooks.h b/arch/metag/include/asm/mm-arch-hooks.h
deleted file mode 100644
index b0072b2eb0de..000000000000
--- a/arch/metag/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_METAG_MM_ARCH_HOOKS_H
-#define _ASM_METAG_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_METAG_MM_ARCH_HOOKS_H */
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 9989ddb169ca..2f222f355c4b 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += device.h
generic-y += exec.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += preempt.h
generic-y += syscalls.h
generic-y += trace_clock.h
diff --git a/arch/microblaze/include/asm/mm-arch-hooks.h b/arch/microblaze/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 5c4065911bda..000000000000
--- a/arch/microblaze/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_MICROBLAZE_MM_ARCH_HOOKS_H
-#define _ASM_MICROBLAZE_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_MICROBLAZE_MM_ARCH_HOOKS_H */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index aab7e46cadd5..199a8357838c 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -151,7 +151,6 @@ config BMIPS_GENERIC
select BCM7120_L2_IRQ
select BRCMSTB_L2_IRQ
select IRQ_MIPS_CPU
- select RAW_IRQ_ACCESSORS
select DMA_NONCOHERENT
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -1427,6 +1426,7 @@ config CPU_MIPS64_R6
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_MSA
select GENERIC_CSUM
+ select MIPS_O32_FP64_SUPPORT if MIPS32_O32
help
Choose this option to build a kernel for release 6 or later of the
MIPS64 architecture. New MIPS processors, starting with the Warrior
@@ -2262,11 +2262,6 @@ config MIPS_CM
config MIPS_CPC
bool
-config SB1_PASS_1_WORKAROUNDS
- bool
- depends on CPU_SB1_PASS_1
- default y
-
config SB1_PASS_2_WORKAROUNDS
bool
depends on CPU_SB1 && (CPU_SB1_PASS_2_2 || CPU_SB1_PASS_2)
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index ae2dd59050f7..252e347958f3 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -181,13 +181,6 @@ cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,)
cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,)
cflags-$(CONFIG_CPU_DADDI_WORKAROUNDS) += $(call cc-option,-mno-daddi,)
-ifdef CONFIG_CPU_SB1
-ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
-KBUILD_AFLAGS_MODULE += -msb1-pass1-workarounds
-KBUILD_CFLAGS_MODULE += -msb1-pass1-workarounds
-endif
-endif
-
# For smartmips configurations, there are hundreds of warnings due to ISA overrides
# in assembly and header files. smartmips is only supported for MIPS32r1 onwards
# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 01a644f174dd..1ba21204ebe0 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -190,6 +190,7 @@ int get_c0_perfcount_int(void)
{
return ATH79_MISC_IRQ(5);
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 56f5d080ef9d..b7fa9ae28c36 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -42,7 +42,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);
if (action & SMP_CALL_FUNCTION)
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 7fe5c61a3cb8..1f8546081d20 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -7,6 +7,7 @@ generic-y += emergency-restart.h
generic-y += irq_work.h
generic-y += local64.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += mutex.h
generic-y += parport.h
generic-y += percpu.h
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 084780b355aa..1b0625189835 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -74,7 +74,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
goto fr_common;
case FPU_64BIT:
-#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \
+#if !(defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) \
|| defined(CONFIG_64BIT))
/* we only have a 32-bit FPU */
return SIGFPE;
diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
deleted file mode 100644
index 11d3b572b1b3..000000000000
--- a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-
-#include <asm/bmips.h>
-
-#define plat_post_dma_flush bmips_post_dma_flush
-
-#include <asm/mach-generic/dma-coherence.h>
-
-#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-sibyte/war.h b/arch/mips/include/asm/mach-sibyte/war.h
index 0a227d426b9c..520f8fc2c806 100644
--- a/arch/mips/include/asm/mach-sibyte/war.h
+++ b/arch/mips/include/asm/mach-sibyte/war.h
@@ -13,8 +13,7 @@
#define R4600_V2_HIT_CACHEOP_WAR 0
#define R5432_CP0_INTERRUPT_WAR 0
-#if defined(CONFIG_SB1_PASS_1_WORKAROUNDS) || \
- defined(CONFIG_SB1_PASS_2_WORKAROUNDS)
+#if defined(CONFIG_SB1_PASS_2_WORKAROUNDS)
#ifndef __ASSEMBLY__
extern int sb1250_m3_workaround_needed(void);
diff --git a/arch/mips/include/asm/mm-arch-hooks.h b/arch/mips/include/asm/mm-arch-hooks.h
deleted file mode 100644
index b5609fe8e475..000000000000
--- a/arch/mips/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_MIPS_MM_ARCH_HOOKS_H
-#define _ASM_MIPS_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_MIPS_MM_ARCH_HOOKS_H */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 9d8106758142..ae8569475264 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
+#ifdef CONFIG_SMP
+ /*
+ * For SMP, multiple CPUs can race, so we need to do
+ * this atomically.
+ */
+#ifdef CONFIG_64BIT
+#define LL_INSN "lld"
+#define SC_INSN "scd"
+#else /* CONFIG_32BIT */
+#define LL_INSN "ll"
+#define SC_INSN "sc"
+#endif
+ unsigned long page_global = _PAGE_GLOBAL;
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+ " .set push\n"
+ " .set noreorder\n"
+ "1: " LL_INSN " %[tmp], %[buddy]\n"
+ " bnez %[tmp], 2f\n"
+ " or %[tmp], %[tmp], %[global]\n"
+ " " SC_INSN " %[tmp], %[buddy]\n"
+ " beqz %[tmp], 1b\n"
+ " nop\n"
+ "2:\n"
+ " .set pop"
+ : [buddy] "+m" (buddy->pte),
+ [tmp] "=&r" (tmp)
+ : [global] "r" (page_global));
+#else /* !CONFIG_SMP */
if (pte_none(*buddy))
pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
}
#endif
}
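The LL/SC loop added above retries until it stores _PAGE_GLOBAL into a buddy PTE that is still none (zero), so a racing CPU's update is never overwritten. In portable terms that is a compare-and-swap of 0 with the global bit; a hedged C11 model of the same logic (plain atomics standing in for the MIPS ll/sc pair, toy bit value assumed):

#include <stdio.h>
#include <stdatomic.h>
#include <stdint.h>

#define TOY_PAGE_GLOBAL 0x1ULL    /* illustrative bit, not the real layout */

/* Mark an empty buddy PTE global, atomically; do nothing if it is in use. */
static void toy_mark_buddy_global(_Atomic uint64_t *buddy)
{
	uint64_t expected = 0;

	/*
	 * Same effect as the ll/sc loop above: only a pte_none() buddy
	 * (value 0) receives the global bit, and the update is a single
	 * atomic compare-and-swap, so a concurrent writer is not clobbered.
	 */
	atomic_compare_exchange_strong(buddy, &expected, TOY_PAGE_GLOBAL);
}

int main(void)
{
	_Atomic uint64_t empty = 0, busy = 0x2000;

	toy_mark_buddy_global(&empty);   /* becomes TOY_PAGE_GLOBAL */
	toy_mark_buddy_global(&busy);    /* left untouched          */
	printf("empty -> 0x%llx, busy -> 0x%llx\n",
	       (unsigned long long)atomic_load(&empty),
	       (unsigned long long)atomic_load(&busy));
	return 0;
}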
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 16f1ea9ab191..03722d4326a1 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -83,8 +83,6 @@ static inline void __cpu_die(unsigned int cpu)
extern void play_dead(void);
#endif
-extern asmlinkage void smp_call_function_interrupt(void);
-
static inline void arch_send_call_function_single_ipi(int cpu)
{
extern struct plat_smp_ops *mp_ops; /* private */
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 28d6d9364bd1..a71da576883c 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -152,6 +152,31 @@
.set noreorder
bltz k0, 8f
move k1, sp
+#ifdef CONFIG_EVA
+ /*
+ * Flush interAptiv's Return Prediction Stack (RPS) by writing
+ * EntryHi. Toggling Config7.RPS is slower and less portable.
+ *
+ * The RPS isn't automatically flushed when exceptions are
+ * taken, which can result in kernel mode speculative accesses
+ * to user addresses if the RPS mispredicts. That's harmless
+ * when user and kernel share the same address space, but with
+ * EVA the same user segments may be unmapped to kernel mode,
+ * even containing sensitive MMIO regions or invalid memory.
+ *
+ * This can happen when the kernel sets the return address to
+ * ret_from_* and jr's to the exception handler, which looks
+ * more like a tail call than a function call. If nested calls
+ * don't evict the last user address in the RPS, it will
+ * mispredict the return and fetch from a user controlled
+ * address into the icache.
+ *
+ * More recent EVA-capable cores with MAAR to restrict
+ * speculative accesses aren't affected.
+ */
+ MFC0 k0, CP0_ENTRYHI
+ MTC0 k0, CP0_ENTRYHI
+#endif
.set reorder
/* Called from user mode, new stack. */
get_saved_sp
diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h
index 6c9906f59c6e..9081d88ae44f 100644
--- a/arch/mips/include/uapi/asm/sigcontext.h
+++ b/arch/mips/include/uapi/asm/sigcontext.h
@@ -16,7 +16,7 @@
/*
* Keep this struct definition in sync with the sigcontext fragment
- * in arch/mips/tools/offset.c
+ * in arch/mips/kernel/asm-offsets.c
*/
struct sigcontext {
unsigned int sc_regmask; /* Unused */
@@ -46,7 +46,7 @@ struct sigcontext {
#include <linux/posix_types.h>
/*
* Keep this struct definition in sync with the sigcontext fragment
- * in arch/mips/tools/offset.c
+ * in arch/mips/kernel/asm-offsets.c
*
* Warning: this structure was ill-defined with sc_badvaddr being just an unsigned
* int so it was changed to unsigned long in 2.6.0-test1. This may break
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index beabe19ff8e5..072fab13645d 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -1,5 +1,5 @@
/*
- * offset.c: Calculate pt_regs and task_struct offsets.
+ * asm-offsets.c: Calculate pt_regs and task_struct offsets.
*
* Copyright (C) 1996 David S. Miller
* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index af42e7003f12..baa7b6fc0a60 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -407,7 +407,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set noat
SAVE_ALL
FEXPORT(handle_\exception\ext)
- __BUILD_clear_\clear
+ __build_clear_\clear
.set at
__BUILD_\verbose \exception
move a0, sp
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 3e4491aa6d6b..789d7bf4fef3 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr)
{
unsigned int real_len;
- cpumask_t mask;
+ cpumask_t allowed, mask;
int retval;
struct task_struct *p;
@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
if (retval)
goto out_unlock;
- cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
+ cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
+ cpumask_and(&mask, &allowed, cpu_active_mask);
out_unlock:
read_unlock(&tasklist_lock);
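The getaffinity fix reports the union of the FPU affinity mask and the task's scheduler mask, then intersects it with the CPUs that are actually active. A tiny bitmask model of that calculation (plain integers standing in for cpumask_t, values assumed):

#include <stdio.h>

int main(void)
{
	/* One bit per CPU; values are illustrative. */
	unsigned int user_cpus_allowed = 0x3;  /* CPUs 0-1 (FPU affinity)   */
	unsigned int cpus_allowed      = 0xc;  /* CPUs 2-3 (sched affinity) */
	unsigned int cpu_active_mask   = 0x7;  /* CPUs 0-2 are online       */

	/* cpumask_or() then cpumask_and(), as in the hunk above. */
	unsigned int allowed = user_cpus_allowed | cpus_allowed;
	unsigned int mask    = allowed & cpu_active_mask;

	printf("reported affinity mask = 0x%x\n", mask);  /* 0x7 here */
	return 0;
}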
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index b130033838ba..5fcec3032f38 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -38,7 +38,7 @@ char *mips_get_machine_name(void)
return mips_machine_name;
}
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
return add_memory_region(base, size, BOOT_MEM_RAM);
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index 74bab9ddd0e1..c6bbf2165051 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -24,7 +24,7 @@ LEAF(relocate_new_kernel)
process_entry:
PTR_L s2, (s0)
- PTR_ADD s0, s0, SZREG
+ PTR_ADDIU s0, s0, SZREG
/*
* In case of a kdump/crash kernel, the indirection page is not
@@ -61,9 +61,9 @@ copy_word:
/* copy page word by word */
REG_L s5, (s2)
REG_S s5, (s4)
- PTR_ADD s4, s4, SZREG
- PTR_ADD s2, s2, SZREG
- LONG_SUB s6, s6, 1
+ PTR_ADDIU s4, s4, SZREG
+ PTR_ADDIU s2, s2, SZREG
+ LONG_ADDIU s6, s6, -1
beq s6, zero, process_entry
b copy_word
b process_entry
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index ad4d44635c76..a6f6b762c47a 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -80,7 +80,7 @@ syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
- daddiu a1, v0, __NR_64_Linux
+ move a1, v0
jal syscall_trace_enter
bltz v0, 2f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 446cc654da56..4b2010654c46 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -72,7 +72,7 @@ n32_syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
- daddiu a1, v0, __NR_N32_Linux
+ move a1, v0
jal syscall_trace_enter
bltz v0, 2f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 19a7705f2a01..5d7f2634996f 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
- memset(to, 0, sizeof *to);
-
if (copy_from_user(to, from, 3*sizeof(int)) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 336708ae5c5b..78cf8c2f1de0 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -284,7 +284,7 @@ static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
if (action == 0)
scheduler_ipi();
else
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
@@ -336,7 +336,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
if (action & SMP_CALL_FUNCTION)
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index d0744cc77ea7..a31896c33716 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -192,16 +192,6 @@ asmlinkage void start_secondary(void)
cpu_startup_entry(CPUHP_ONLINE);
}
-/*
- * Call into both interrupt handlers, as we share the IPI for them
- */
-void __irq_entry smp_call_function_interrupt(void)
-{
- irq_enter();
- generic_smp_call_function_interrupt();
- irq_exit();
-}
-
static void stop_this_cpu(void *dummy)
{
/*
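* ... */

With the shared smp_call_function_interrupt() wrapper removed here, each IPI handler calls generic_smp_call_function_interrupt() directly: handlers that already run under the generic IRQ path just call it, while handlers invoked straight from low-level exception code must bracket it with irq_enter()/irq_exit() themselves, as the later loongson, ip27 and sibyte hunks do. A hedged sketch of that division of responsibility (stub functions standing in for the kernel primitives, illustrative only):

#include <stdio.h>

/* Stubs standing in for the kernel primitives (illustrative only). */
static int hardirq_depth;
static void irq_enter(void) { hardirq_depth++; }
static void irq_exit(void)  { hardirq_depth--; }
static void generic_smp_call_function_interrupt(void)
{
	printf("running queued cross-CPU calls (in_irq=%d)\n",
	       hardirq_depth > 0);
}

/* Handler registered through the IRQ core: context already set up. */
static void ipi_call_irq_handler(void)
{
	generic_smp_call_function_interrupt();
}

/* Handler invoked from raw exception code: must mark IRQ context itself. */
static void mailbox_exception_handler(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

int main(void)
{
	irq_enter();                 /* IRQ core path */
	ipi_call_irq_handler();
	irq_exit();

	mailbox_exception_handler(); /* raw exception path */
	return 0;
}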
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e207a43b5f8f..8ea28e6ab37d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task,
void show_stack(struct task_struct *task, unsigned long *sp)
{
struct pt_regs regs;
+ mm_segment_t old_fs = get_fs();
if (sp) {
regs.regs[29] = (unsigned long)sp;
regs.regs[31] = 0;
@@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp)
prepare_frametrace(&regs);
}
}
+ /*
+ * show_stack() deals exclusively with kernel mode, so be sure to access
+ * the stack in the kernel (not user) address space.
+ */
+ set_fs(KERNEL_DS);
show_stacktrace(task, &regs);
+ set_fs(old_fs);
}
static void show_code(unsigned int __user *pc)
@@ -1519,6 +1526,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
const int field = 2 * sizeof(unsigned long);
int multi_match = regs->cp0_status & ST0_TS;
enum ctx_state prev_state;
+ mm_segment_t old_fs = get_fs();
prev_state = exception_enter();
show_regs(regs);
@@ -1540,8 +1548,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
dump_tlb_all();
}
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
+
show_code((unsigned int __user *) regs->cp0_epc);
+ set_fs(old_fs);
+
/*
* Some chips may have other causes of machine check (e.g. SB1
* graduation timer)
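*/

Both traps.c hunks follow the same save/override/restore discipline: remember get_fs(), widen the address limit to KERNEL_DS only while dumping kernel-mode state, then put the old limit back so user accesses are policed again. A tiny model of that discipline with a stand-in "address limit" variable (names assumed, not the kernel API):

#include <stdio.h>

enum toy_seg { TOY_USER_DS, TOY_KERNEL_DS };

static enum toy_seg current_limit = TOY_USER_DS;

static enum toy_seg get_limit(void)   { return current_limit; }
static void set_limit(enum toy_seg s) { current_limit = s; }

static void dump_kernel_stack(void)
{
	printf("dumping with limit=%s\n",
	       current_limit == TOY_KERNEL_DS ? "KERNEL_DS" : "USER_DS");
}

int main(void)
{
	enum toy_seg old = get_limit();   /* save, like old_fs = get_fs() */

	set_limit(TOY_KERNEL_DS);         /* widen only for the dump      */
	dump_kernel_stack();
	set_limit(old);                   /* always restore afterwards    */

	printf("restored limit=%s\n",
	       current_limit == TOY_USER_DS ? "USER_DS" : "KERNEL_DS");
	return 0;
}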
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index af84bef0c90d..eb3efd137fd1 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -438,7 +438,7 @@ do { \
: "memory"); \
} while(0)
-#define StoreDW(addr, value, res) \
+#define _StoreDW(addr, value, res) \
do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 6ab10573490d..2c218c3bbca5 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -293,7 +293,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
@@ -466,6 +466,7 @@ int get_c0_perfcount_int(void)
{
return ltq_perfcount_irq;
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
index 509877c6e9d9..1a4738a8f2d3 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
@@ -266,8 +266,11 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
- if (action & SMP_CALL_FUNCTION)
- smp_call_function_interrupt();
+ if (action & SMP_CALL_FUNCTION) {
+ irq_enter();
+ generic_smp_call_function_interrupt();
+ irq_exit();
+ }
if (action & SMP_ASK_C0COUNT) {
BUG_ON(cpu != 0);
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 77d96db8253c..aab218c36e0d 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -160,18 +160,18 @@ static inline void setup_protection_map(void)
protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+ protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
- protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+ protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
- protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+ protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
- protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
+ protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
} else {
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 36c0f26fac6b..852a41c6da45 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -133,7 +133,8 @@ good_area:
#endif
goto bad_area;
}
- if (!(vma->vm_flags & VM_READ)) {
+ if (!(vma->vm_flags & VM_READ) &&
+ exception_epc(regs) != address) {
#if 0
pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
raw_smp_processor_id(),
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index d1392f8f5811..fa8f591f3713 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -222,7 +222,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 5625b190edc0..b7bf721eabf5 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -154,6 +154,7 @@ int get_c0_perfcount_int(void)
return mips_cpu_perf_irq;
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
@@ -171,14 +172,17 @@ unsigned int get_c0_compare_int(void)
static void __init init_rtc(void)
{
- /* stop the clock whilst setting it up */
- CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
+ unsigned char freq, ctrl;
- /* 32KHz time base */
- CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
+ /* Set 32KHz time base if not already set */
+ freq = CMOS_READ(RTC_FREQ_SELECT);
+ if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
+ CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
- /* start the clock */
- CMOS_WRITE(RTC_24H, RTC_CONTROL);
+ /* Ensure SET bit is clear so RTC can run */
+ ctrl = CMOS_READ(RTC_CONTROL);
+ if (ctrl & RTC_SET)
+ CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
}
void __init plat_time_init(void)
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index e1d69895fb1d..a120b7a5a8fe 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -77,6 +77,7 @@ int get_c0_perfcount_int(void)
return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
return -1;
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index dc3e327fbbac..f5fff228b347 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -86,7 +86,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
{
clear_c0_eimr(irq);
ack_c0_eirr(irq);
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
set_c0_eimr(irq);
}
diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c
index 42181c7105df..f8d3e081b2eb 100644
--- a/arch/mips/paravirt/paravirt-smp.c
+++ b/arch/mips/paravirt/paravirt-smp.c
@@ -114,7 +114,7 @@ static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id)
static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id)
{
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c
index 7c73fcb92a10..8a377346f0ca 100644
--- a/arch/mips/pistachio/time.c
+++ b/arch/mips/pistachio/time.c
@@ -26,6 +26,7 @@ int get_c0_perfcount_int(void)
{
return gic_get_c0_perfcount_int();
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
int get_c0_fdc_int(void)
{
diff --git a/arch/mips/pmcs-msp71xx/msp_smp.c b/arch/mips/pmcs-msp71xx/msp_smp.c
index 10170580a2de..ffa0f7101a97 100644
--- a/arch/mips/pmcs-msp71xx/msp_smp.c
+++ b/arch/mips/pmcs-msp71xx/msp_smp.c
@@ -44,7 +44,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 53707aacc0f8..8c624a8b9ea2 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -89,6 +89,7 @@ int get_c0_perfcount_int(void)
{
return rt_perfcount_irq;
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 3fbaef97a1b8..16ec4e12daa3 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -107,10 +107,14 @@ static void ip27_do_irq_mask0(void)
scheduler_ipi();
} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
- smp_call_function_interrupt();
+ irq_enter();
+ generic_smp_call_function_interrupt();
+ irq_exit();
} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
- smp_call_function_interrupt();
+ irq_enter();
+ generic_smp_call_function_interrupt();
+ irq_exit();
} else
#endif
{
diff --git a/arch/mips/sibyte/Kconfig b/arch/mips/sibyte/Kconfig
index a8bb972fd9fd..cb9a095f5c5e 100644
--- a/arch/mips/sibyte/Kconfig
+++ b/arch/mips/sibyte/Kconfig
@@ -81,11 +81,6 @@ choice
prompt "SiByte SOC Stepping"
depends on SIBYTE_SB1xxx_SOC
-config CPU_SB1_PASS_1
- bool "1250 Pass1"
- depends on SIBYTE_SB1250
- select CPU_HAS_PREFETCH
-
config CPU_SB1_PASS_2_1250
bool "1250 An"
depends on SIBYTE_SB1250
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index af7d44edd9a8..4c71aea25663 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -29,8 +29,6 @@
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_int.h>
-extern void smp_call_function_interrupt(void);
-
/*
* These are routines for dealing with the bcm1480 smp capabilities
* independent of board/firmware
@@ -184,6 +182,9 @@ void bcm1480_mailbox_interrupt(void)
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
- if (action & SMP_CALL_FUNCTION)
- smp_call_function_interrupt();
+ if (action & SMP_CALL_FUNCTION) {
+ irq_enter();
+ generic_smp_call_function_interrupt();
+ irq_exit();
+ }
}
diff --git a/arch/mips/sibyte/common/bus_watcher.c b/arch/mips/sibyte/common/bus_watcher.c
index 5581844c9194..41a1d2242211 100644
--- a/arch/mips/sibyte/common/bus_watcher.c
+++ b/arch/mips/sibyte/common/bus_watcher.c
@@ -81,10 +81,7 @@ void check_bus_watcher(void)
{
u32 status, l2_err, memio_err;
-#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
- /* Destructive read, clears register and interrupt */
- status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS));
-#elif defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250)
+#if defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250)
/* Use non-destructive register */
status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS_DEBUG));
#elif defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c
index 3c02b2a77ae9..9d3c24efdf4a 100644
--- a/arch/mips/sibyte/sb1250/setup.c
+++ b/arch/mips/sibyte/sb1250/setup.c
@@ -202,12 +202,10 @@ void __init sb1250_setup(void)
switch (war_pass) {
case K_SYS_REVISION_BCM1250_PASS1:
-#ifndef CONFIG_SB1_PASS_1_WORKAROUNDS
printk("@@@@ This is a BCM1250 A0-A2 (Pass 1) board, "
"and the kernel doesn't have the proper "
"workarounds compiled in. @@@@\n");
bad_config = 1;
-#endif
break;
case K_SYS_REVISION_BCM1250_PASS2:
/* Pass 2 - easiest as default for now - so many numbers */
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index c0c4b3f88a08..1cf66f5ff23d 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -172,6 +172,9 @@ void sb1250_mailbox_interrupt(void)
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
- if (action & SMP_CALL_FUNCTION)
- smp_call_function_interrupt();
+ if (action & SMP_CALL_FUNCTION) {
+ irq_enter();
+ generic_smp_call_function_interrupt();
+ irq_exit();
+ }
}
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
index de30b0c88796..6edb9ee6128e 100644
--- a/arch/mn10300/include/asm/Kbuild
+++ b/arch/mn10300/include/asm/Kbuild
@@ -5,6 +5,7 @@ generic-y += cputime.h
generic-y += exec.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += preempt.h
generic-y += sections.h
generic-y += trace_clock.h
diff --git a/arch/mn10300/include/asm/mm-arch-hooks.h b/arch/mn10300/include/asm/mm-arch-hooks.h
deleted file mode 100644
index e2029a652f4c..000000000000
--- a/arch/mn10300/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_MN10300_MM_ARCH_HOOKS_H
-#define _ASM_MN10300_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_MN10300_MM_ARCH_HOOKS_H */
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index 434639d510b3..914864eb5a25 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -30,6 +30,7 @@ generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += mman.h
generic-y += module.h
generic-y += msgbuf.h
diff --git a/arch/nios2/include/asm/mm-arch-hooks.h b/arch/nios2/include/asm/mm-arch-hooks.h
deleted file mode 100644
index d7290dc68558..000000000000
--- a/arch/nios2/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_NIOS2_MM_ARCH_HOOKS_H
-#define _ASM_NIOS2_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_NIOS2_MM_ARCH_HOOKS_H */
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index e5a693b16da2..443f44de1020 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -17,6 +17,7 @@ config OPENRISC
select GENERIC_IRQ_SHOW
select GENERIC_IOMAP
select GENERIC_CPU_DEVICES
+ select HAVE_UID16
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
select GENERIC_STRNCPY_FROM_USER
@@ -31,9 +32,6 @@ config MMU
config HAVE_DMA_ATTRS
def_bool y
-config UID16
- def_bool y
-
config RWSEM_GENERIC_SPINLOCK
def_bool y
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 2a2e39b8109a..2832f031fb11 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -36,6 +36,7 @@ generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += mman.h
generic-y += module.h
generic-y += msgbuf.h
diff --git a/arch/openrisc/include/asm/mm-arch-hooks.h b/arch/openrisc/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 6d33cb555fe1..000000000000
--- a/arch/openrisc/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_OPENRISC_MM_ARCH_HOOKS_H
-#define _ASM_OPENRISC_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_OPENRISC_MM_ARCH_HOOKS_H */
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index 12b341d04f88..f9b3a81aefcd 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -15,6 +15,7 @@ generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += mutex.h
generic-y += param.h
generic-y += percpu.h
diff --git a/arch/parisc/include/asm/mm-arch-hooks.h b/arch/parisc/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 654ec63b0ee9..000000000000
--- a/arch/parisc/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_PARISC_MM_ARCH_HOOKS_H
-#define _ASM_PARISC_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_PARISC_MM_ARCH_HOOKS_H */
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index 3a08eae3318f..3edbb9fc91b4 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -72,7 +72,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
- if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
+ if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
/*
* This is the permanent pmd attached to the pgd;
* cannot free it.
@@ -81,6 +81,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
*/
mm_inc_nr_pmds(mm);
return;
+ }
free_pages((unsigned long)pmd, PMD_ORDER);
}
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index d3a831ac0f92..da50e0c9c57e 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
{
- memset(to, 0, sizeof *to);
-
if (copy_from_user(to, from, 3*sizeof(int)) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 5cf5e6ea213b..7cf0df859d05 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -1478,7 +1478,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
}
/* Unmask the event */
- if (eeh_enabled())
+ if (ret == EEH_NEXT_ERR_NONE && eeh_enabled())
enable_irq(eeh_event_irq);
return ret;
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 5738d315248b..85cbc96eff6c 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2220,7 +2220,7 @@ static void pnv_pci_ioda_setup_opal_tce_kill(struct pnv_phb *phb)
static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
unsigned levels, unsigned long limit,
- unsigned long *current_offset)
+ unsigned long *current_offset, unsigned long *total_allocated)
{
struct page *tce_mem = NULL;
__be64 *addr, *tmp;
@@ -2236,6 +2236,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
}
addr = page_address(tce_mem);
memset(addr, 0, allocated);
+ *total_allocated += allocated;
--levels;
if (!levels) {
@@ -2245,7 +2246,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
for (i = 0; i < entries; ++i) {
tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
- levels, limit, current_offset);
+ levels, limit, current_offset, total_allocated);
if (!tmp)
break;
@@ -2267,7 +2268,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
struct iommu_table *tbl)
{
void *addr;
- unsigned long offset = 0, level_shift;
+ unsigned long offset = 0, level_shift, total_allocated = 0;
const unsigned window_shift = ilog2(window_size);
unsigned entries_shift = window_shift - page_shift;
unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT);
@@ -2286,7 +2287,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
/* Allocate TCE table */
addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
- levels, tce_table_size, &offset);
+ levels, tce_table_size, &offset, &total_allocated);
/* addr==NULL means that the first level allocation failed */
if (!addr)
@@ -2308,7 +2309,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
page_shift);
tbl->it_level_size = 1ULL << (level_shift - 3);
tbl->it_indirect_levels = levels - 1;
- tbl->it_allocated_size = offset;
+ tbl->it_allocated_size = total_allocated;
pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
window_size, tce_table_size, bus_offset);
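
The pci-ioda.c change threads a total_allocated counter through the recursive TCE-table allocator so that it_allocated_size accounts for every level, not just the leaf-level offset. The underlying pattern - an accumulator pointer passed down each recursion step - looks roughly like this generic sketch (names and sizes are illustrative, not the powerpc ones):

#include <stdlib.h>

/* Recursively allocate one level of a multi-level table, accounting every
 * byte in *total, including intermediate levels whose size would otherwise
 * not show up in the caller's running offset. */
static void *alloc_level(unsigned levels, size_t level_bytes, size_t *total)
{
	void *level = calloc(1, level_bytes);

	if (!level)
		return NULL;
	*total += level_bytes;

	if (levels > 1) {
		/* Each entry of a non-leaf level points to a lower table. */
		void **entries = level;
		size_t i, n = level_bytes / sizeof(void *);

		for (i = 0; i < n; i++) {
			entries[i] = alloc_level(levels - 1, level_bytes, total);
			if (!entries[i])
				break;
		}
	}
	return level;
}
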
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index dc5385ebb071..5ad26dd94d77 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -3,5 +3,6 @@
generic-y += clkdev.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += preempt.h
generic-y += trace_clock.h
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index cfad7fca01d6..d7697ab802f6 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -57,7 +57,10 @@ union ctlreg0 {
unsigned long lap : 1; /* Low-address-protection control */
unsigned long : 4;
unsigned long edat : 1; /* Enhanced-DAT-enablement control */
- unsigned long : 23;
+ unsigned long : 4;
+ unsigned long afp : 1; /* AFP-register control */
+ unsigned long vx : 1; /* Vector enablement control */
+ unsigned long : 17;
};
};
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 0130d0379edd..d9be7c0c1291 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -14,6 +14,7 @@
#define is_hugepage_only_range(mm, addr, len) 0
#define hugetlb_free_pgd_range free_pgd_range
+#define hugepages_supported() (MACHINE_HAS_HPAGE)
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
diff --git a/arch/s390/include/asm/mm-arch-hooks.h b/arch/s390/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 07680b2f3c59..000000000000
--- a/arch/s390/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_S390_MM_ARCH_HOOKS_H
-#define _ASM_S390_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_S390_MM_ARCH_HOOKS_H */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index dd345238d9a7..53eacbd4f09b 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -17,10 +17,7 @@
#define PAGE_DEFAULT_ACC 0
#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
-#include <asm/setup.h>
-#ifndef __ASSEMBLY__
-
-extern int HPAGE_SHIFT;
+#define HPAGE_SHIFT 20
#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
@@ -30,6 +27,9 @@ extern int HPAGE_SHIFT;
#define ARCH_HAS_PREPARE_HUGEPAGE
#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH
+#include <asm/setup.h>
+#ifndef __ASSEMBLY__
+
static inline void storage_key_init_range(unsigned long start, unsigned long end)
{
#if PAGE_DEFAULT_KEY
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 4cb19fe76dd9..f897ec73dc8c 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -87,7 +87,15 @@ struct sf_raw_sample {
} __packed;
/* Perf hardware reserve and release functions */
+#ifdef CONFIG_PERF_EVENTS
int perf_reserve_sampling(void);
void perf_release_sampling(void);
+#else /* CONFIG_PERF_EVENTS */
+static inline int perf_reserve_sampling(void)
+{
+ return 0;
+}
+static inline void perf_release_sampling(void) {}
+#endif /* CONFIG_PERF_EVENTS */
#endif /* _ASM_S390_PERF_EVENT_H */
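
The perf_event.h hunk above is the usual stub-header idiom: the real prototypes are visible only when CONFIG_PERF_EVENTS is set, and empty static inline stand-ins are provided otherwise so callers (such as the oprofile init code changed below) build without scattering #ifdefs. A generic sketch of the idiom, with hypothetical names:

/* Hypothetical feature_reserve()/feature_release(): the same shape as the
 * perf_reserve_sampling()/perf_release_sampling() stubs above. */
#ifdef CONFIG_MY_FEATURE
int feature_reserve(void);
void feature_release(void);
#else /* !CONFIG_MY_FEATURE */
static inline int feature_reserve(void)
{
	/* Report success so callers proceed; there is nothing to reserve. */
	return 0;
}
static inline void feature_release(void) {}
#endif /* CONFIG_MY_FEATURE */
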
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index c7d1b9d09011..a2da259d9327 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -23,15 +23,15 @@
int main(void)
{
- DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
- DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
- DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
- BLANK();
+ DEFINE(__TASK_thread_info, offsetof(struct task_struct, stack));
+ DEFINE(__TASK_thread, offsetof(struct task_struct, thread));
DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
BLANK();
- DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause));
- DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address));
- DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid));
+ DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp));
+ DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause));
+ DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address));
+ DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid));
+ DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb));
BLANK();
DEFINE(__TI_task, offsetof(struct thread_info, task));
DEFINE(__TI_flags, offsetof(struct thread_info, flags));
@@ -176,7 +176,6 @@ int main(void)
DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
- DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb));
DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index bff5e3b6d822..8ba32436effe 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -138,6 +138,8 @@ int init_cache_level(unsigned int cpu)
union cache_topology ct;
enum cache_type ctype;
+ if (!test_facility(34))
+ return -EOPNOTSUPP;
if (!this_cpu_ci)
return -EINVAL;
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3238893c9d4f..84062e7a77da 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -178,17 +178,21 @@ _PIF_WORK = (_PIF_PER_TRAP)
*/
ENTRY(__switch_to)
stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
- stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev
- lg %r4,__THREAD_info(%r2) # get thread_info of prev
- lg %r5,__THREAD_info(%r3) # get thread_info of next
+ lgr %r1,%r2
+ aghi %r1,__TASK_thread # thread_struct of prev task
+ lg %r4,__TASK_thread_info(%r2) # get thread_info of prev
+ lg %r5,__TASK_thread_info(%r3) # get thread_info of next
+ stg %r15,__THREAD_ksp(%r1) # store kernel stack of prev
+ lgr %r1,%r3
+ aghi %r1,__TASK_thread # thread_struct of next task
lgr %r15,%r5
aghi %r15,STACK_INIT # end of kernel stack of next
stg %r3,__LC_CURRENT # store task struct of next
stg %r5,__LC_THREAD_INFO # store thread info of next
stg %r15,__LC_KERNEL_STACK # store end of kernel stack
+ lg %r15,__THREAD_ksp(%r1) # load kernel stack of next
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
- lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
br %r14
@@ -417,6 +421,7 @@ ENTRY(pgm_check_handler)
LAST_BREAK %r14
lg %r15,__LC_KERNEL_STACK
lg %r14,__TI_task(%r12)
+ aghi %r14,__TASK_thread # pointer to thread_struct
lghi %r13,__LC_PGM_TDB
tm __LC_PGM_ILC+2,0x02 # check for transaction abort
jz 2f
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 505c17c0ae1a..56b550893593 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -21,6 +21,7 @@
#include <asm/nmi.h>
#include <asm/crw.h>
#include <asm/switch_to.h>
+#include <asm/ctl_reg.h>
struct mcck_struct {
int kill_task;
@@ -129,26 +130,30 @@ static int notrace s390_revalidate_registers(struct mci *mci)
} else
asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
- asm volatile(
- " ld 0,0(%0)\n"
- " ld 1,8(%0)\n"
- " ld 2,16(%0)\n"
- " ld 3,24(%0)\n"
- " ld 4,32(%0)\n"
- " ld 5,40(%0)\n"
- " ld 6,48(%0)\n"
- " ld 7,56(%0)\n"
- " ld 8,64(%0)\n"
- " ld 9,72(%0)\n"
- " ld 10,80(%0)\n"
- " ld 11,88(%0)\n"
- " ld 12,96(%0)\n"
- " ld 13,104(%0)\n"
- " ld 14,112(%0)\n"
- " ld 15,120(%0)\n"
- : : "a" (fpt_save_area));
- /* Revalidate vector registers */
- if (MACHINE_HAS_VX && current->thread.vxrs) {
+ if (!MACHINE_HAS_VX) {
+ /* Revalidate floating point registers */
+ asm volatile(
+ " ld 0,0(%0)\n"
+ " ld 1,8(%0)\n"
+ " ld 2,16(%0)\n"
+ " ld 3,24(%0)\n"
+ " ld 4,32(%0)\n"
+ " ld 5,40(%0)\n"
+ " ld 6,48(%0)\n"
+ " ld 7,56(%0)\n"
+ " ld 8,64(%0)\n"
+ " ld 9,72(%0)\n"
+ " ld 10,80(%0)\n"
+ " ld 11,88(%0)\n"
+ " ld 12,96(%0)\n"
+ " ld 13,104(%0)\n"
+ " ld 14,112(%0)\n"
+ " ld 15,120(%0)\n"
+ : : "a" (fpt_save_area));
+ } else {
+ /* Revalidate vector registers */
+ union ctlreg0 cr0;
+
if (!mci->vr) {
/*
* Vector registers can't be restored and therefore
@@ -156,8 +161,12 @@ static int notrace s390_revalidate_registers(struct mci *mci)
*/
kill_task = 1;
}
+ cr0.val = S390_lowcore.cregs_save_area[0];
+ cr0.afp = cr0.vx = 1;
+ __ctl_load(cr0.val, 0, 0);
restore_vx_regs((__vector128 *)
- S390_lowcore.vector_save_area_addr);
+ &S390_lowcore.vector_save_area);
+ __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
}
/* Revalidate access registers */
asm volatile(
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index dc5edc29b73a..8f587d871b9f 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -163,7 +163,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
asmlinkage void execve_tail(void)
{
current->thread.fp_regs.fpc = 0;
- asm volatile("sfpc %0,%0" : : "d" (0));
+ asm volatile("sfpc %0" : : "d" (0));
}
/*
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index 43c3169ea49c..ada0c07fe1a8 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -270,6 +270,8 @@ ENTRY(_sclp_print_early)
jno .Lesa2
ahi %r15,-80
stmh %r6,%r15,96(%r15) # store upper register halves
+ basr %r13,0
+ lmh %r0,%r15,.Lzeroes-.(%r13) # clear upper register halves
.Lesa2:
lr %r10,%r2 # save string pointer
lhi %r2,0
@@ -291,6 +293,8 @@ ENTRY(_sclp_print_early)
.Lesa3:
lm %r6,%r15,120(%r15) # restore registers
br %r14
+.Lzeroes:
+ .fill 64,4,0
.LwritedataS4:
.long 0x00760005 # SCLP command for write data
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index f7f027caaaaa..ca070d260af2 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -885,8 +885,6 @@ void __init setup_arch(char **cmdline_p)
*/
setup_hwcaps();
- HPAGE_SHIFT = MACHINE_HAS_HPAGE ? 20 : 0;
-
/*
* Create kernel page tables and switch to virtual addressing.
*/
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 4d96c9f53455..7bea81d8a363 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -259,7 +259,7 @@ void vector_exception(struct pt_regs *regs)
}
/* get vector interrupt code from fpc */
- asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
+ asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
vic = (current->thread.fp_regs.fpc & 0xf00) >> 8;
switch (vic) {
case 1: /* invalid vector operation */
@@ -297,7 +297,7 @@ void data_exception(struct pt_regs *regs)
location = get_trap_ip(regs);
- asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
+ asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
/* Check for vector register enablement */
if (MACHINE_HAS_VX && !current->thread.vxrs &&
(current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2078f92d15ac..f32f843a3631 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1742,10 +1742,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
- if (!vcpu->requests)
- return 0;
retry:
kvm_s390_vcpu_request_handled(vcpu);
+ if (!vcpu->requests)
+ return 0;
/*
* We use MMU_RELOAD just to re-arm the ipte notifier for the
* guest prefix page. gmap_ipte_notify will wait on the ptl lock.
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 33082d0d101b..b33f66110ca9 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -31,8 +31,6 @@
#define ALLOC_ORDER 2
#define FRAG_MASK 0x03
-int HPAGE_SHIFT;
-
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index fee782acc2ee..8d2e5165865f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -448,13 +448,13 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
BPF_REG_1, offsetof(struct sk_buff, data));
}
- /* BPF compatibility: clear A (%b7) and X (%b8) registers */
- if (REG_SEEN(BPF_REG_7))
- /* lghi %b7,0 */
- EMIT4_IMM(0xa7090000, BPF_REG_7, 0);
- if (REG_SEEN(BPF_REG_8))
- /* lghi %b8,0 */
- EMIT4_IMM(0xa7090000, BPF_REG_8, 0);
+ /* BPF compatibility: clear A (%b0) and X (%b7) registers */
+ if (REG_SEEN(BPF_REG_A))
+ /* lghi %ba,0 */
+ EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
+ if (REG_SEEN(BPF_REG_X))
+ /* lghi %bx,0 */
+ EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
}
/*
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index bc927a09a172..9cfa2ffaa9d6 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -16,6 +16,7 @@
#include <linux/fs.h>
#include <linux/module.h>
#include <asm/processor.h>
+#include <asm/perf_event.h>
#include "../../../drivers/oprofile/oprof.h"
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index 138fb3db45ba..92ffe397b893 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -7,6 +7,7 @@ generic-y += clkdev.h
generic-y += cputime.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += preempt.h
generic-y += sections.h
generic-y += trace_clock.h
diff --git a/arch/score/include/asm/mm-arch-hooks.h b/arch/score/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 5e38689f189a..000000000000
--- a/arch/score/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_SCORE_MM_ARCH_HOOKS_H
-#define _ASM_SCORE_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_SCORE_MM_ARCH_HOOKS_H */
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 9ac4626e7284..aac452b26aa8 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += param.h
diff --git a/arch/sh/include/asm/mm-arch-hooks.h b/arch/sh/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 18087298b728..000000000000
--- a/arch/sh/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_SH_MM_ARCH_HOOKS_H
-#define _ASM_SH_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_SH_MM_ARCH_HOOKS_H */
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 2b2a69dcc467..e928618838bc 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -12,6 +12,7 @@ generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += module.h
generic-y += mutex.h
generic-y += preempt.h
diff --git a/arch/sparc/include/asm/mm-arch-hooks.h b/arch/sparc/include/asm/mm-arch-hooks.h
deleted file mode 100644
index b89ba44c16f1..000000000000
--- a/arch/sparc/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_SPARC_MM_ARCH_HOOKS_H
-#define _ASM_SPARC_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_SPARC_MM_ARCH_HOOKS_H */
diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
index 1f0aa2024e94..6424249d5f78 100644
--- a/arch/sparc/include/asm/visasm.h
+++ b/arch/sparc/include/asm/visasm.h
@@ -28,16 +28,10 @@
* Must preserve %o5 between VISEntryHalf and VISExitHalf */
#define VISEntryHalf \
- rd %fprs, %o5; \
- andcc %o5, FPRS_FEF, %g0; \
- be,pt %icc, 297f; \
- sethi %hi(298f), %g7; \
- sethi %hi(VISenterhalf), %g1; \
- jmpl %g1 + %lo(VISenterhalf), %g0; \
- or %g7, %lo(298f), %g7; \
- clr %o5; \
-297: wr %o5, FPRS_FEF, %fprs; \
-298:
+ VISEntry
+
+#define VISExitHalf \
+ VISExit
#define VISEntryHalfFast(fail_label) \
rd %fprs, %o5; \
@@ -47,7 +41,7 @@
ba,a,pt %xcc, fail_label; \
297: wr %o5, FPRS_FEF, %fprs;
-#define VISExitHalf \
+#define VISExitHalfFast \
wr %o5, 0, %fprs;
#ifndef __ASSEMBLY__
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 140527a20e7d..83aeeb1dffdb 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -240,8 +240,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
add %o0, 0x40, %o0
bne,pt %icc, 1b
LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
+#ifdef NON_USER_COPY
+ VISExitHalfFast
+#else
VISExitHalf
-
+#endif
brz,pn %o2, .Lexit
cmp %o2, 19
ble,pn %icc, .Lsmall_unaligned
diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
index b320ae9e2e2e..a063d84336d6 100644
--- a/arch/sparc/lib/VISsave.S
+++ b/arch/sparc/lib/VISsave.S
@@ -44,9 +44,8 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
stx %g3, [%g6 + TI_GSR]
2: add %g6, %g1, %g3
- cmp %o5, FPRS_DU
- be,pn %icc, 6f
- sll %g1, 3, %g1
+ mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5
+ sll %g1, 3, %g1
stb %o5, [%g3 + TI_FPSAVED]
rd %gsr, %g2
add %g6, %g1, %g3
@@ -80,65 +79,3 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
.align 32
80: jmpl %g7 + %g0, %g0
nop
-
-6: ldub [%g3 + TI_FPSAVED], %o5
- or %o5, FPRS_DU, %o5
- add %g6, TI_FPREGS+0x80, %g2
- stb %o5, [%g3 + TI_FPSAVED]
-
- sll %g1, 5, %g1
- add %g6, TI_FPREGS+0xc0, %g3
- wr %g0, FPRS_FEF, %fprs
- membar #Sync
- stda %f32, [%g2 + %g1] ASI_BLK_P
- stda %f48, [%g3 + %g1] ASI_BLK_P
- membar #Sync
- ba,pt %xcc, 80f
- nop
-
- .align 32
-80: jmpl %g7 + %g0, %g0
- nop
-
- .align 32
-VISenterhalf:
- ldub [%g6 + TI_FPDEPTH], %g1
- brnz,a,pn %g1, 1f
- cmp %g1, 1
- stb %g0, [%g6 + TI_FPSAVED]
- stx %fsr, [%g6 + TI_XFSR]
- clr %o5
- jmpl %g7 + %g0, %g0
- wr %g0, FPRS_FEF, %fprs
-
-1: bne,pn %icc, 2f
- srl %g1, 1, %g1
- ba,pt %xcc, vis1
- sub %g7, 8, %g7
-2: addcc %g6, %g1, %g3
- sll %g1, 3, %g1
- andn %o5, FPRS_DU, %g2
- stb %g2, [%g3 + TI_FPSAVED]
-
- rd %gsr, %g2
- add %g6, %g1, %g3
- stx %g2, [%g3 + TI_GSR]
- add %g6, %g1, %g2
- stx %fsr, [%g2 + TI_XFSR]
- sll %g1, 5, %g1
-3: andcc %o5, FPRS_DL, %g0
- be,pn %icc, 4f
- add %g6, TI_FPREGS, %g2
-
- add %g6, TI_FPREGS+0x40, %g3
- membar #Sync
- stda %f0, [%g2 + %g1] ASI_BLK_P
- stda %f16, [%g3 + %g1] ASI_BLK_P
- membar #Sync
- ba,pt %xcc, 4f
- nop
-
- .align 32
-4: and %o5, FPRS_DU, %o5
- jmpl %g7 + %g0, %g0
- wr %o5, FPRS_FEF, %fprs
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 1d649a95660c..8069ce12f20b 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page);
void VISenter(void);
EXPORT_SYMBOL(VISenter);
-/* CRYPTO code needs this */
-void VISenterhalf(void);
-EXPORT_SYMBOL(VISenterhalf);
-
extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
unsigned long *);
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index d53654488c2c..d8a843163471 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -19,6 +19,7 @@ generic-y += irq_regs.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += param.h
diff --git a/arch/tile/include/asm/mm-arch-hooks.h b/arch/tile/include/asm/mm-arch-hooks.h
deleted file mode 100644
index d1709ea774f7..000000000000
--- a/arch/tile/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_TILE_MM_ARCH_HOOKS_H
-#define _ASM_TILE_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_TILE_MM_ARCH_HOOKS_H */
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index e8c2c04143cd..c667e104a0c2 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
return -EFAULT;
- memset(to, 0, sizeof(*to));
-
err = __get_user(to->si_signo, &from->si_signo);
err |= __get_user(to->si_errno, &from->si_errno);
err |= __get_user(to->si_code, &from->si_code);
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 99c9ff87e018..6b755d125783 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1139,7 +1139,7 @@ static void __init load_hv_initrd(void)
void __init free_initrd_mem(unsigned long begin, unsigned long end)
{
- free_bootmem(__pa(begin), end - begin);
+ free_bootmem_late(__pa(begin), end - begin);
}
static int __init setup_initrd(char *str)
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 3d63ff6f583f..149ec55f9c46 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += mutex.h
generic-y += param.h
generic-y += pci.h
diff --git a/arch/um/include/asm/mm-arch-hooks.h b/arch/um/include/asm/mm-arch-hooks.h
deleted file mode 100644
index a7c8b0dfdd4e..000000000000
--- a/arch/um/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_UM_MM_ARCH_HOOKS_H
-#define _ASM_UM_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_UM_MM_ARCH_HOOKS_H */
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index d12b377b5a8b..1fc7a286dc6f 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -26,6 +26,7 @@ generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += local.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += mman.h
generic-y += module.h
generic-y += msgbuf.h
diff --git a/arch/unicore32/include/asm/mm-arch-hooks.h b/arch/unicore32/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 4d79a850c509..000000000000
--- a/arch/unicore32/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_UNICORE32_MM_ARCH_HOOKS_H
-#define _ASM_UNICORE32_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_UNICORE32_MM_ARCH_HOOKS_H */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 3dbb7e7909ca..b3a1a5d77d92 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -41,6 +41,7 @@ config X86
select ARCH_USE_CMPXCHG_LOCKREF if X86_64
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
+ select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_IPC_PARSE_VERSION if X86_32
select ARCH_WANT_OPTIONAL_GPIOLIB
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index a15893d17c55..d8c0d3266173 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -297,6 +297,18 @@ config OPTIMIZE_INLINING
If unsure, say N.
+config DEBUG_ENTRY
+ bool "Debug low-level entry code"
+ depends on DEBUG_KERNEL
+ ---help---
+ This option enables sanity checks in x86's low-level entry code.
+ Some of these sanity checks may slow down kernel entries and
+ exits or otherwise impact performance.
+
+ This is currently used to help test NMI code.
+
+ If unsure, say N.
+
config DEBUG_NMI_SELFTEST
bool "NMI Selftest"
depends on DEBUG_KERNEL && X86_LOCAL_APIC
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 2c82bd150d43..7d69afd8b6fa 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1193,6 +1193,10 @@ static efi_status_t setup_e820(struct boot_params *params,
unsigned int e820_type = 0;
unsigned long m = efi->efi_memmap;
+#ifdef CONFIG_X86_64
+ m |= (u64)efi->efi_memmap_hi << 32;
+#endif
+
d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size));
switch (d->type) {
case EFI_RESERVED_TYPE:
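
The eboot.c fix above ORs the high 32 bits of the memory-map address back in on 64-bit builds, since the EFI boot params split the pointer across efi_memmap and efi_memmap_hi. The recombination pattern, as a generic sketch rather than the firmware-specific code:

#include <stdint.h>

/* Recombine a 64-bit value stored as two 32-bit halves (low word first),
 * as with boot_params-style efi_memmap / efi_memmap_hi pairs. */
static inline uint64_t combine_hi_lo(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}
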
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 3bb2c4302df1..8cb3e438f21e 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1237,11 +1237,12 @@ ENTRY(nmi)
* If the variable is not set and the stack is not the NMI
* stack then:
* o Set the special variable on the stack
- * o Copy the interrupt frame into a "saved" location on the stack
- * o Copy the interrupt frame into a "copy" location on the stack
+ * o Copy the interrupt frame into an "outermost" location on the
+ * stack
+ * o Copy the interrupt frame into an "iret" location on the stack
* o Continue processing the NMI
* If the variable is set or the previous stack is the NMI stack:
- * o Modify the "copy" location to jump to the repeate_nmi
+ * o Modify the "iret" location to jump to the repeat_nmi
* o return back to the first NMI
*
* Now on exit of the first NMI, we first clear the stack variable
@@ -1250,31 +1251,151 @@ ENTRY(nmi)
* a nested NMI that updated the copy interrupt stack frame, a
* jump will be made to the repeat_nmi code that will handle the second
* NMI.
+ *
+ * However, espfix prevents us from directly returning to userspace
+ * with a single IRET instruction. Similarly, IRET to user mode
+ * can fault. We therefore handle NMIs from user space like
+ * other IST entries.
*/
/* Use %rdx as our temp variable throughout */
pushq %rdx
+ testb $3, CS-RIP+8(%rsp)
+ jz .Lnmi_from_kernel
+
+ /*
+ * NMI from user mode. We need to run on the thread stack, but we
+ * can't go through the normal entry paths: NMIs are masked, and
+ * we don't want to enable interrupts, because then we'll end
+ * up in an awkward situation in which IRQs are on but NMIs
+ * are off.
+ */
+
+ SWAPGS
+ cld
+ movq %rsp, %rdx
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+ pushq 5*8(%rdx) /* pt_regs->ss */
+ pushq 4*8(%rdx) /* pt_regs->rsp */
+ pushq 3*8(%rdx) /* pt_regs->flags */
+ pushq 2*8(%rdx) /* pt_regs->cs */
+ pushq 1*8(%rdx) /* pt_regs->rip */
+ pushq $-1 /* pt_regs->orig_ax */
+ pushq %rdi /* pt_regs->di */
+ pushq %rsi /* pt_regs->si */
+ pushq (%rdx) /* pt_regs->dx */
+ pushq %rcx /* pt_regs->cx */
+ pushq %rax /* pt_regs->ax */
+ pushq %r8 /* pt_regs->r8 */
+ pushq %r9 /* pt_regs->r9 */
+ pushq %r10 /* pt_regs->r10 */
+ pushq %r11 /* pt_regs->r11 */
+ pushq %rbx /* pt_regs->rbx */
+ pushq %rbp /* pt_regs->rbp */
+ pushq %r12 /* pt_regs->r12 */
+ pushq %r13 /* pt_regs->r13 */
+ pushq %r14 /* pt_regs->r14 */
+ pushq %r15 /* pt_regs->r15 */
+
+ /*
+ * At this point we no longer need to worry about stack damage
+ * due to nesting -- we're on the normal thread stack and we're
+ * done with the NMI stack.
+ */
+
+ movq %rsp, %rdi
+ movq $-1, %rsi
+ call do_nmi
+
+ /*
+ * Return back to user mode. We must *not* do the normal exit
+ * work, because we don't want to enable interrupts. Fortunately,
+ * do_nmi doesn't modify pt_regs.
+ */
+ SWAPGS
+ jmp restore_c_regs_and_iret
+
+.Lnmi_from_kernel:
+ /*
+ * Here's what our stack frame will look like:
+ * +---------------------------------------------------------+
+ * | original SS |
+ * | original Return RSP |
+ * | original RFLAGS |
+ * | original CS |
+ * | original RIP |
+ * +---------------------------------------------------------+
+ * | temp storage for rdx |
+ * +---------------------------------------------------------+
+ * | "NMI executing" variable |
+ * +---------------------------------------------------------+
+ * | iret SS } Copied from "outermost" frame |
+ * | iret Return RSP } on each loop iteration; overwritten |
+ * | iret RFLAGS } by a nested NMI to force another |
+ * | iret CS } iteration if needed. |
+ * | iret RIP } |
+ * +---------------------------------------------------------+
+ * | outermost SS } initialized in first_nmi; |
+ * | outermost Return RSP } will not be changed before |
+ * | outermost RFLAGS } NMI processing is done. |
+ * | outermost CS } Copied to "iret" frame on each |
+ * | outermost RIP } iteration. |
+ * +---------------------------------------------------------+
+ * | pt_regs |
+ * +---------------------------------------------------------+
+ *
+ * The "original" frame is used by hardware. Before re-enabling
+ * NMIs, we need to be done with it, and we need to leave enough
+ * space for the asm code here.
+ *
+ * We return by executing IRET while RSP points to the "iret" frame.
+ * That will either return for real or it will loop back into NMI
+ * processing.
+ *
+ * The "outermost" frame is copied to the "iret" frame on each
+ * iteration of the loop, so each iteration starts with the "iret"
+ * frame pointing to the final return target.
+ */
+
/*
- * If %cs was not the kernel segment, then the NMI triggered in user
- * space, which means it is definitely not nested.
+ * Determine whether we're a nested NMI.
+ *
+ * If we interrupted kernel code between repeat_nmi and
+ * end_repeat_nmi, then we are a nested NMI. We must not
+ * modify the "iret" frame because it's being written by
+ * the outer NMI. That's okay; the outer NMI handler is
+ * about to call do_nmi anyway, so we can just
+ * resume the outer NMI.
*/
- cmpl $__KERNEL_CS, 16(%rsp)
- jne first_nmi
+
+ movq $repeat_nmi, %rdx
+ cmpq 8(%rsp), %rdx
+ ja 1f
+ movq $end_repeat_nmi, %rdx
+ cmpq 8(%rsp), %rdx
+ ja nested_nmi_out
+1:
/*
- * Check the special variable on the stack to see if NMIs are
- * executing.
+ * Now check "NMI executing". If it's set, then we're nested.
+ * This will not detect if we interrupted an outer NMI just
+ * before IRET.
*/
cmpl $1, -8(%rsp)
je nested_nmi
/*
- * Now test if the previous stack was an NMI stack.
- * We need the double check. We check the NMI stack to satisfy the
- * race when the first NMI clears the variable before returning.
- * We check the variable because the first NMI could be in a
- * breakpoint routine using a breakpoint stack.
+ * Now test if the previous stack was an NMI stack. This covers
+ * the case where we interrupt an outer NMI after it clears
+ * "NMI executing" but before IRET. We need to be careful, though:
+ * there is one case in which RSP could point to the NMI stack
+ * despite there being no NMI active: naughty userspace controls
+ * RSP at the very beginning of the SYSCALL targets. We can
+ * pull a fast one on naughty userspace, though: we program
+ * SYSCALL to mask DF, so userspace cannot cause DF to be set
+ * if it controls the kernel's RSP. We set DF before we clear
+ * "NMI executing".
*/
lea 6*8(%rsp), %rdx
/* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
@@ -1286,25 +1407,20 @@ ENTRY(nmi)
cmpq %rdx, 4*8(%rsp)
/* If it is below the NMI stack, it is a normal NMI */
jb first_nmi
- /* Ah, it is within the NMI stack, treat it as nested */
+
+ /* Ah, it is within the NMI stack. */
+
+ testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
+ jz first_nmi /* RSP was user controlled. */
+
+ /* This is a nested NMI. */
nested_nmi:
/*
- * Do nothing if we interrupted the fixup in repeat_nmi.
- * It's about to repeat the NMI handler, so we are fine
- * with ignoring this one.
+ * Modify the "iret" frame to point to repeat_nmi, forcing another
+ * iteration of NMI handling.
*/
- movq $repeat_nmi, %rdx
- cmpq 8(%rsp), %rdx
- ja 1f
- movq $end_repeat_nmi, %rdx
- cmpq 8(%rsp), %rdx
- ja nested_nmi_out
-
-1:
- /* Set up the interrupted NMIs stack to jump to repeat_nmi */
- leaq -1*8(%rsp), %rdx
- movq %rdx, %rsp
+ subq $8, %rsp
leaq -10*8(%rsp), %rdx
pushq $__KERNEL_DS
pushq %rdx
@@ -1318,61 +1434,42 @@ nested_nmi:
nested_nmi_out:
popq %rdx
- /* No need to check faults here */
+ /* We are returning to kernel mode, so this cannot result in a fault. */
INTERRUPT_RETURN
first_nmi:
- /*
- * Because nested NMIs will use the pushed location that we
- * stored in rdx, we must keep that space available.
- * Here's what our stack frame will look like:
- * +-------------------------+
- * | original SS |
- * | original Return RSP |
- * | original RFLAGS |
- * | original CS |
- * | original RIP |
- * +-------------------------+
- * | temp storage for rdx |
- * +-------------------------+
- * | NMI executing variable |
- * +-------------------------+
- * | copied SS |
- * | copied Return RSP |
- * | copied RFLAGS |
- * | copied CS |
- * | copied RIP |
- * +-------------------------+
- * | Saved SS |
- * | Saved Return RSP |
- * | Saved RFLAGS |
- * | Saved CS |
- * | Saved RIP |
- * +-------------------------+
- * | pt_regs |
- * +-------------------------+
- *
- * The saved stack frame is used to fix up the copied stack frame
- * that a nested NMI may change to make the interrupted NMI iret jump
- * to the repeat_nmi. The original stack frame and the temp storage
- * is also used by nested NMIs and can not be trusted on exit.
- */
- /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
+ /* Restore rdx. */
movq (%rsp), %rdx
- /* Set the NMI executing variable on the stack. */
- pushq $1
+ /* Make room for "NMI executing". */
+ pushq $0
- /* Leave room for the "copied" frame */
+ /* Leave room for the "iret" frame */
subq $(5*8), %rsp
- /* Copy the stack frame to the Saved frame */
+ /* Copy the "original" frame to the "outermost" frame */
.rept 5
pushq 11*8(%rsp)
.endr
/* Everything up to here is safe from nested NMIs */
+#ifdef CONFIG_DEBUG_ENTRY
+ /*
+ * For ease of testing, unmask NMIs right away. Disabled by
+ * default because IRET is very expensive.
+ */
+ pushq $0 /* SS */
+ pushq %rsp /* RSP (minus 8 because of the previous push) */
+ addq $8, (%rsp) /* Fix up RSP */
+ pushfq /* RFLAGS */
+ pushq $__KERNEL_CS /* CS */
+ pushq $1f /* RIP */
+ INTERRUPT_RETURN /* continues at repeat_nmi below */
+1:
+#endif
+
+repeat_nmi:
/*
* If there was a nested NMI, the first NMI's iret will return
* here. But NMIs are still enabled and we can take another
@@ -1381,16 +1478,20 @@ first_nmi:
* it will just return, as we are about to repeat an NMI anyway.
* This makes it safe to copy to the stack frame that a nested
* NMI will update.
+ *
+ * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
+ * we're repeating an NMI, gsbase has the same value that it had on
+ * the first iteration. paranoid_entry will load the kernel
+ * gsbase if needed before we call do_nmi. "NMI executing"
+ * is zero.
*/
-repeat_nmi:
+ movq $1, 10*8(%rsp) /* Set "NMI executing". */
+
/*
- * Update the stack variable to say we are still in NMI (the update
- * is benign for the non-repeat case, where 1 was pushed just above
- * to this very stack slot).
+ * Copy the "outermost" frame to the "iret" frame. NMIs that nest
+ * here must not modify the "iret" frame while we're writing to
+ * it or it will end up containing garbage.
*/
- movq $1, 10*8(%rsp)
-
- /* Make another copy, this one may be modified by nested NMIs */
addq $(10*8), %rsp
.rept 5
pushq -6*8(%rsp)
@@ -1399,9 +1500,9 @@ repeat_nmi:
end_repeat_nmi:
/*
- * Everything below this point can be preempted by a nested
- * NMI if the first NMI took an exception and reset our iret stack
- * so that we repeat another NMI.
+ * Everything below this point can be preempted by a nested NMI.
+ * If this happens, then the inner NMI will change the "iret"
+ * frame to point back to repeat_nmi.
*/
pushq $-1 /* ORIG_RAX: no syscall to restart */
ALLOC_PT_GPREGS_ON_STACK
@@ -1415,28 +1516,11 @@ end_repeat_nmi:
*/
call paranoid_entry
- /*
- * Save off the CR2 register. If we take a page fault in the NMI then
- * it could corrupt the CR2 value. If the NMI preempts a page fault
- * handler before it was able to read the CR2 register, and then the
- * NMI itself takes a page fault, the page fault that was preempted
- * will read the information from the NMI page fault and not the
- * origin fault. Save it off and restore it if it changes.
- * Use the r12 callee-saved register.
- */
- movq %cr2, %r12
-
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp, %rdi
movq $-1, %rsi
call do_nmi
- /* Did the NMI take a page fault? Restore cr2 if it did */
- movq %cr2, %rcx
- cmpq %rcx, %r12
- je 1f
- movq %r12, %cr2
-1:
testl %ebx, %ebx /* swapgs needed? */
jnz nmi_restore
nmi_swapgs:
@@ -1444,11 +1528,26 @@ nmi_swapgs:
nmi_restore:
RESTORE_EXTRA_REGS
RESTORE_C_REGS
- /* Pop the extra iret frame at once */
+
+ /* Point RSP at the "iret" frame. */
REMOVE_PT_GPREGS_FROM_STACK 6*8
- /* Clear the NMI executing stack variable */
- movq $0, 5*8(%rsp)
+ /*
+ * Clear "NMI executing". Set DF first so that we can easily
+ * distinguish the remaining code between here and IRET from
+ * the SYSCALL entry and exit paths. On a native kernel, we
+ * could just inspect RIP, but, on paravirt kernels,
+ * INTERRUPT_RETURN can translate into a jump into a
+ * hypercall page.
+ */
+ std
+ movq $0, 5*8(%rsp) /* clear "NMI executing" */
+
+ /*
+ * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
+ * stack in a single instruction. We are returning to kernel
+ * mode, so this cannot result in a fault.
+ */
INTERRUPT_RETURN
END(nmi)
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index bb187a6a877c..a7e257d9cb90 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -140,6 +140,7 @@ sysexit_from_sys_call:
*/
andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
movl RIP(%rsp), %ecx /* User %eip */
+ movq RAX(%rsp), %rax
RESTORE_RSI_RDI
xorl %edx, %edx /* Do not leak kernel information */
xorq %r8, %r8
@@ -205,7 +206,6 @@ sysexit_from_sys_call:
movl RDX(%rsp), %edx /* arg3 */
movl RSI(%rsp), %ecx /* arg4 */
movl RDI(%rsp), %r8d /* arg5 */
- movl %ebp, %r9d /* arg6 */
.endm
.macro auditsys_exit exit
@@ -220,7 +220,6 @@ sysexit_from_sys_call:
1: setbe %al /* 1 if error, 0 if not */
movzbl %al, %edi /* zero-extend that into %edi */
call __audit_syscall_exit
- movq RAX(%rsp), %rax /* reload syscall return value */
movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
@@ -236,6 +235,7 @@ sysexit_from_sys_call:
sysenter_auditsys:
auditsys_entry_common
+ movl %ebp, %r9d /* reload 6th syscall arg */
jmp sysenter_dispatch
sysexit_audit:
@@ -336,7 +336,7 @@ ENTRY(entry_SYSCALL_compat)
* 32-bit zero extended:
*/
ASM_STAC
-1: movl (%r8), %ebp
+1: movl (%r8), %r9d
_ASM_EXTABLE(1b, ia32_badarg)
ASM_CLAC
orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
@@ -346,7 +346,7 @@ ENTRY(entry_SYSCALL_compat)
cstar_do_call:
/* 32-bit syscall -> 64-bit C ABI argument conversion */
movl %edi, %r8d /* arg5 */
- movl %ebp, %r9d /* arg6 */
+ /* r9 already loaded */ /* arg6 */
xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */
movl %ebx, %edi /* arg1 */
movl %edx, %edx /* arg3 (zero extension) */
@@ -358,7 +358,6 @@ cstar_dispatch:
call *ia32_sys_call_table(, %rax, 8)
movq %rax, RAX(%rsp)
1:
- movl RCX(%rsp), %ebp
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
@@ -369,6 +368,7 @@ sysretl_from_sys_call:
RESTORE_RSI_RDI_RDX
movl RIP(%rsp), %ecx
movl EFLAGS(%rsp), %r11d
+ movq RAX(%rsp), %rax
xorq %r10, %r10
xorq %r9, %r9
xorq %r8, %r8
@@ -392,7 +392,9 @@ sysretl_from_sys_call:
#ifdef CONFIG_AUDITSYSCALL
cstar_auditsys:
+ movl %r9d, R9(%rsp) /* register to be clobbered by call */
auditsys_entry_common
+ movl R9(%rsp), %r9d /* reload 6th syscall arg */
jmp cstar_dispatch
sysretl_audit:
@@ -404,14 +406,16 @@ cstar_tracesys:
testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jz cstar_auditsys
#endif
+ xchgl %r9d, %ebp
SAVE_EXTRA_REGS
xorl %eax, %eax /* Do not leak kernel information */
movq %rax, R11(%rsp)
movq %rax, R10(%rsp)
- movq %rax, R9(%rsp)
+ movq %r9, R9(%rsp)
movq %rax, R8(%rsp)
movq %rsp, %rdi /* &pt_regs -> arg1 */
call syscall_trace_enter
+ movl R9(%rsp), %r9d
/* Reload arg registers from stack. (see sysenter_tracesys) */
movl RCX(%rsp), %ecx
@@ -421,6 +425,7 @@ cstar_tracesys:
movl %eax, %eax /* zero extension */
RESTORE_EXTRA_REGS
+ xchgl %ebp, %r9d
jmp cstar_do_call
END(entry_SYSCALL_compat)
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 4dd1f2d770af..aeac434c9feb 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -9,3 +9,4 @@ generic-y += cputime.h
generic-y += dma-contiguous.h
generic-y += early_ioremap.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index a0bf89fd2647..4e10d73cf018 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -280,21 +280,6 @@ static inline void clear_LDT(void)
set_ldt(NULL, 0);
}
-/*
- * load one particular LDT into the current CPU
- */
-static inline void load_LDT_nolock(mm_context_t *pc)
-{
- set_ldt(pc->ldt, pc->size);
-}
-
-static inline void load_LDT(mm_context_t *pc)
-{
- preempt_disable();
- load_LDT_nolock(pc);
- preempt_enable();
-}
-
static inline unsigned long get_desc_base(const struct desc_struct *desc)
{
return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 0637826292de..c49c5173158e 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -189,6 +189,7 @@ union fpregs_state {
struct fxregs_state fxsave;
struct swregs_state soft;
struct xregs_state xsave;
+ u8 __padding[PAGE_SIZE];
};
/*
@@ -198,40 +199,6 @@ union fpregs_state {
*/
struct fpu {
/*
- * @state:
- *
- * In-memory copy of all FPU registers that we save/restore
- * over context switches. If the task is using the FPU then
- * the registers in the FPU are more recent than this state
- * copy. If the task context-switches away then they get
- * saved here and represent the FPU state.
- *
- * After context switches there may be a (short) time period
- * during which the in-FPU hardware registers are unchanged
- * and still perfectly match this state, if the tasks
- * scheduled afterwards are not using the FPU.
- *
- * This is the 'lazy restore' window of optimization, which
- * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
- *
- * We detect whether a subsequent task uses the FPU via setting
- * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
- *
- * During this window, if the task gets scheduled again, we
- * might be able to skip having to do a restore from this
- * memory buffer to the hardware registers - at the cost of
- * incurring the overhead of #NM fault traps.
- *
- * Note that on modern CPUs that support the XSAVEOPT (or other
- * optimized XSAVE instructions), we don't use #NM traps anymore,
- * as the hardware can track whether FPU registers need saving
- * or not. On such CPUs we activate the non-lazy ('eagerfpu')
- * logic, which unconditionally saves/restores all FPU state
- * across context switches. (if FPU state exists.)
- */
- union fpregs_state state;
-
- /*
* @last_cpu:
*
* Records the last CPU on which this context was loaded into
@@ -288,6 +255,43 @@ struct fpu {
* deal with bursty apps that only use the FPU for a short time:
*/
unsigned char counter;
+ /*
+ * @state:
+ *
+ * In-memory copy of all FPU registers that we save/restore
+ * over context switches. If the task is using the FPU then
+ * the registers in the FPU are more recent than this state
+ * copy. If the task context-switches away then they get
+ * saved here and represent the FPU state.
+ *
+ * After context switches there may be a (short) time period
+ * during which the in-FPU hardware registers are unchanged
+ * and still perfectly match this state, if the tasks
+ * scheduled afterwards are not using the FPU.
+ *
+ * This is the 'lazy restore' window of optimization, which
+ * we track through 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
+ *
+ * We detect whether a subsequent task uses the FPU via setting
+ * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
+ *
+ * During this window, if the task gets scheduled again, we
+ * might be able to skip having to do a restore from this
+ * memory buffer to the hardware registers - at the cost of
+ * incurring the overhead of #NM fault traps.
+ *
+ * Note that on modern CPUs that support the XSAVEOPT (or other
+ * optimized XSAVE instructions), we don't use #NM traps anymore,
+ * as the hardware can track whether FPU registers need saving
+ * or not. On such CPUs we activate the non-lazy ('eagerfpu')
+ * logic, which unconditionally saves/restores all FPU state
+ * across context switches. (if FPU state exists.)
+ */
+ union fpregs_state state;
+ /*
+ * WARNING: 'state' is dynamically-sized. Do not put
+ * anything after it here.
+ */
};
#endif /* _ASM_X86_FPU_H */
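
Moving 'state' to the end of struct fpu here (and, in the processor.h hunk below, moving struct fpu to the end of thread_struct) follows the trailing variably-sized member rule: nothing may be placed after the dynamically-sized field, and the enclosing object must be allocated from its real run-time size rather than sizeof(). A generic C sketch of that rule, with hypothetical names standing in for the kernel's arch-sized task allocation:

#include <stddef.h>
#include <stdlib.h>

struct task_like {			/* stands in for the enclosing struct */
	int id;
	/* ... fixed-size fields ... */
	size_t state_size;
	unsigned char state[];		/* dynamically sized; MUST be last */
};

static struct task_like *task_like_alloc(size_t xstate_size)
{
	/* Size the allocation from the offset of the trailing member plus
	 * the run-time state size, never from sizeof(struct task_like). */
	struct task_like *t = calloc(1, offsetof(struct task_like, state) +
					xstate_size);

	if (t)
		t->state_size = xstate_size;
	return t;
}
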
diff --git a/arch/x86/include/asm/intel_pmc_ipc.h b/arch/x86/include/asm/intel_pmc_ipc.h
index 200ec2e7821d..cd0310e186f4 100644
--- a/arch/x86/include/asm/intel_pmc_ipc.h
+++ b/arch/x86/include/asm/intel_pmc_ipc.h
@@ -25,36 +25,9 @@
#if IS_ENABLED(CONFIG_INTEL_PMC_IPC)
-/*
- * intel_pmc_ipc_simple_command
- * @cmd: command
- * @sub: sub type
- */
int intel_pmc_ipc_simple_command(int cmd, int sub);
-
-/*
- * intel_pmc_ipc_raw_cmd
- * @cmd: command
- * @sub: sub type
- * @in: input data
- * @inlen: input length in bytes
- * @out: output data
- * @outlen: output length in dwords
- * @sptr: data writing to SPTR register
- * @dptr: data writing to DPTR register
- */
int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen,
u32 *out, u32 outlen, u32 dptr, u32 sptr);
-
-/*
- * intel_pmc_ipc_command
- * @cmd: command
- * @sub: sub type
- * @in: input data
- * @inlen: input length in bytes
- * @out: output data
- * @outlen: output length in dwords
- */
int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
u32 *out, u32 outlen);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2a7f5d782c33..49ec9038ec14 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -604,6 +604,8 @@ struct kvm_arch {
bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
atomic_t noncoherent_dma_count;
+#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
+ atomic_t assigned_device_count;
struct kvm_pic *vpic;
struct kvm_ioapic *vioapic;
struct kvm_pit *vpit;
diff --git a/arch/x86/include/asm/mm-arch-hooks.h b/arch/x86/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 4e881a342236..000000000000
--- a/arch/x86/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_X86_MM_ARCH_HOOKS_H
-#define _ASM_X86_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_X86_MM_ARCH_HOOKS_H */
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 09b9620a73b4..364d27481a52 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -9,8 +9,7 @@
* we put the segment information here.
*/
typedef struct {
- void *ldt;
- int size;
+ struct ldt_struct *ldt;
#ifdef CONFIG_X86_64
/* True if mm supports a task running in 32 bit compatibility mode. */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 5e8daee7c5c9..984abfe47edc 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -23,7 +23,7 @@ extern struct static_key rdpmc_always_available;
static inline void load_mm_cr4(struct mm_struct *mm)
{
- if (static_key_true(&rdpmc_always_available) ||
+ if (static_key_false(&rdpmc_always_available) ||
atomic_read(&mm->context.perf_rdpmc_allowed))
cr4_set_bits(X86_CR4_PCE);
else
@@ -34,6 +34,50 @@ static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif
/*
+ * ldt_structs can be allocated, used, and freed, but they are never
+ * modified while live.
+ */
+struct ldt_struct {
+ /*
+ * Xen requires page-aligned LDTs with special permissions. This is
+ * needed to prevent us from installing evil descriptors such as
+ * call gates. On native, we could merge the ldt_struct and LDT
+ * allocations, but it's not worth trying to optimize.
+ */
+ struct desc_struct *entries;
+ int size;
+};
+
+static inline void load_mm_ldt(struct mm_struct *mm)
+{
+ struct ldt_struct *ldt;
+
+ /* lockless_dereference synchronizes with smp_store_release */
+ ldt = lockless_dereference(mm->context.ldt);
+
+ /*
+ * Any change to mm->context.ldt is followed by an IPI to all
+ * CPUs with the mm active. The LDT will not be freed until
+ * after the IPI is handled by all such CPUs. This means that,
+ * if the ldt_struct changes before we return, the values we see
+ * will be safe, and the new values will be loaded before we run
+ * any user code.
+ *
+ * NB: don't try to convert this to use RCU without extreme care.
+ * We would still need IRQs off, because we don't want to change
+ * the local LDT after an IPI loaded a newer value than the one
+ * that we can see.
+ */
+
+ if (unlikely(ldt))
+ set_ldt(ldt->entries, ldt->size);
+ else
+ clear_LDT();
+
+ DEBUG_LOCKS_WARN_ON(preemptible());
+}
+
+/*
* Used for LDT copy/destruction.
*/
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
@@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* was called and then modify_ldt changed
* prev->context.ldt but suppressed an IPI to this CPU.
* In this case, prev->context.ldt != NULL, because we
- * never free an LDT while the mm still exists. That
- * means that next->context.ldt != prev->context.ldt,
- * because mms never share an LDT.
+ * never set context.ldt to NULL while the mm still
+ * exists. That means that next->context.ldt !=
+ * prev->context.ldt, because mms never share an LDT.
*/
if (unlikely(prev->context.ldt != next->context.ldt))
- load_LDT_nolock(&next->context);
+ load_mm_ldt(next);
}
#ifdef CONFIG_SMP
else {
@@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
load_cr3(next->pgd);
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
load_mm_cr4(next);
- load_LDT_nolock(&next->context);
+ load_mm_ldt(next);
}
}
#endif
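
load_mm_ldt() above pairs a lockless_dereference() with the smp_store_release() done when a new LDT is installed (see the ldt.c hunk further down). The same publish/consume pattern can be sketched in ordinary C11; the snippet below is illustrative userspace code only, with C11 atomics standing in for the kernel primitives and nothing standing in for the IPI that delays freeing.

/* Illustrative sketch, not kernel code.  Build with: cc -std=c11 -pthread demo.c */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct table {
        int size;
        int entries[16];
};

static _Atomic(struct table *) current_table;

static void publish_table(struct table *t)
{
        /* Everything that initialised *t is visible before the pointer is. */
        atomic_store_explicit(&current_table, t, memory_order_release);
}

static void *reader(void *arg)
{
        (void)arg;
        /* Sees either NULL or a fully initialised table, never a torn one. */
        struct table *t = atomic_load_explicit(&current_table,
                                               memory_order_acquire);
        if (t)
                printf("reader saw %d entries, first = %d\n",
                       t->size, t->entries[0]);
        return NULL;
}

int main(void)
{
        pthread_t tid;
        struct table *t = calloc(1, sizeof(*t));

        t->size = 1;
        t->entries[0] = 42;
        publish_table(t);

        pthread_create(&tid, NULL, reader, NULL);
        pthread_join(tid, NULL);
        free(t);
        return 0;
}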
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 43e6519df0d5..944f1785ed0d 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -390,9 +390,6 @@ struct thread_struct {
#endif
unsigned long gs;
- /* Floating point and extended processor state */
- struct fpu fpu;
-
/* Save middle states of ptrace breakpoints */
struct perf_event *ptrace_bps[HBP_NUM];
/* Debug status used for traps, single steps, etc... */
@@ -418,6 +415,13 @@ struct thread_struct {
unsigned long iopl;
/* Max allowed port in the bitmap, in bytes: */
unsigned io_bitmap_max;
+
+ /* Floating point and extended processor state */
+ struct fpu fpu;
+ /*
+ * WARNING: 'fpu' is dynamically-sized. It *MUST* be at
+ * the end.
+ */
};
/*
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 6fe6b182c998..9dfce4e0417d 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -57,9 +57,9 @@ struct sigcontext {
unsigned long ip;
unsigned long flags;
unsigned short cs;
- unsigned short __pad2; /* Was called gs, but was always zero. */
- unsigned short __pad1; /* Was called fs, but was always zero. */
- unsigned short ss;
+ unsigned short gs;
+ unsigned short fs;
+ unsigned short __pad0;
unsigned long err;
unsigned long trapno;
unsigned long oldmask;
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 751bf4b7bf11..d7f3b3b78ac3 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -79,12 +79,12 @@ do { \
#else /* CONFIG_X86_32 */
/* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
+#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
#define __EXTRA_CLOBBER \
, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
- "r12", "r13", "r14", "r15", "flags"
+ "r12", "r13", "r14", "r15"
#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary \
@@ -100,11 +100,7 @@ do { \
#define __switch_canary_iparam
#endif /* CC_STACKPROTECTOR */
-/*
- * There is no need to save or restore flags, because flags are always
- * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL
- * has no effect.
- */
+/* Save restore flags to clear handle leaking NT */
#define switch_to(prev, next, last) \
asm volatile(SAVE_CONTEXT \
"movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 8fba544e9cc4..f36d56bd7632 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -108,6 +108,8 @@
#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE (1 << 4)
/* Support for a virtual guest idle state is available */
#define HV_X64_GUEST_IDLE_STATE_AVAILABLE (1 << 5)
+/* Guest crash data handler available */
+#define HV_X64_GUEST_CRASH_MSR_AVAILABLE (1 << 10)
/*
* Implementation recommendations. Indicates which behaviors the hypervisor
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index a4ae82eb82aa..cd54147cb365 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -354,7 +354,7 @@ struct kvm_xcrs {
struct kvm_sync_regs {
};
-#define KVM_QUIRK_LINT0_REENABLED (1 << 0)
-#define KVM_QUIRK_CD_NW_CLEARED (1 << 1)
+#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
+#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
#endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h
index 0e8a973de9ee..40836a9a7250 100644
--- a/arch/x86/include/uapi/asm/sigcontext.h
+++ b/arch/x86/include/uapi/asm/sigcontext.h
@@ -177,24 +177,9 @@ struct sigcontext {
__u64 rip;
__u64 eflags; /* RFLAGS */
__u16 cs;
-
- /*
- * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"),
- * Linux saved and restored fs and gs in these slots. This
- * was counterproductive, as fsbase and gsbase were never
- * saved, so arch_prctl was presumably unreliable.
- *
- * If these slots are ever needed for any other purpose, there
- * is some risk that very old 64-bit binaries could get
- * confused. I doubt that many such binaries still work,
- * though, since the same patch in 2.5.64 also removed the
- * 64-bit set_thread_area syscall, so it appears that there is
- * no TLS API that works in both pre- and post-2.5.64 kernels.
- */
- __u16 __pad2; /* Was gs. */
- __u16 __pad1; /* Was fs. */
-
- __u16 ss;
+ __u16 gs;
+ __u16 fs;
+ __u16 __pad0;
__u64 err;
__u64 trapno;
__u64 oldmask;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 845dc0df2002..206052e55517 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -943,7 +943,7 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
*/
if (irq < nr_legacy_irqs() && data->count == 1) {
if (info->ioapic_trigger != data->trigger)
- mp_register_handler(irq, data->trigger);
+ mp_register_handler(irq, info->ioapic_trigger);
data->entry.trigger = data->trigger = info->ioapic_trigger;
data->entry.polarity = data->polarity = info->ioapic_polarity;
}
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index f813261d9740..2683f36e4e0a 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -322,7 +322,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
irq_data->chip = &lapic_controller;
irq_data->chip_data = data;
irq_data->hwirq = virq + i;
- err = assign_irq_vector_policy(virq, irq_data->node, data,
+ err = assign_irq_vector_policy(virq + i, irq_data->node, data,
info);
if (err)
goto error;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 922c5e0cea4c..cb9e5df42dd2 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1410,7 +1410,7 @@ void cpu_init(void)
load_sp0(t, &current->thread);
set_tss_desc(cpu, t);
load_TR_desc();
- load_LDT(&init_mm.context);
+ load_mm_ldt(&init_mm);
clear_all_debug_regs();
dbg_restore_debug_regs();
@@ -1459,7 +1459,7 @@ void cpu_init(void)
load_sp0(t, thread);
set_tss_desc(cpu, t);
load_TR_desc();
- load_LDT(&init_mm.context);
+ load_mm_ldt(&init_mm);
t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 3658de47900f..9469dfa55607 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -2179,21 +2179,25 @@ static unsigned long get_segment_base(unsigned int segment)
int idx = segment >> 3;
if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+ struct ldt_struct *ldt;
+
if (idx > LDT_ENTRIES)
return 0;
- if (idx > current->active_mm->context.size)
+ /* IRQs are off, so this synchronizes with smp_store_release */
+ ldt = lockless_dereference(current->active_mm->context.ldt);
+ if (!ldt || idx > ldt->size)
return 0;
- desc = current->active_mm->context.ldt;
+ desc = &ldt->entries[idx];
} else {
if (idx > GDT_ENTRIES)
return 0;
- desc = raw_cpu_ptr(gdt_page.gdt);
+ desc = raw_cpu_ptr(gdt_page.gdt) + idx;
}
- return get_desc_base(desc + idx);
+ return get_desc_base(desc);
}
#ifdef CONFIG_COMPAT
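
As background for the `segment >> 3` and SEGMENT_TI_MASK handling in get_segment_base() above: a selector is just a 13-bit descriptor index, a table-indicator bit and a 2-bit RPL. A tiny stand-alone decoder (constants are spelled out here for illustration, not taken from the kernel headers):

/* Decode an x86 segment selector: index (bits 15:3), TI (bit 2), RPL (bits 1:0). */
#include <stdio.h>
#include <stdint.h>

#define SEL_RPL_MASK    0x3     /* requested privilege level */
#define SEL_TI_MASK     0x4     /* 0 = GDT, 1 = LDT */

static void decode_selector(uint16_t sel)
{
        unsigned int idx = sel >> 3;    /* descriptor index within GDT/LDT */

        printf("selector 0x%04x: index %u, table %s, RPL %d\n",
               (unsigned int)sel, idx,
               (sel & SEL_TI_MASK) ? "LDT" : "GDT",
               sel & SEL_RPL_MASK);
}

int main(void)
{
        decode_selector(0x0007);        /* LDT entry 0, RPL 3 */
        decode_selector(0x0033);        /* GDT entry 6, RPL 3 */
        return 0;
}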
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index b9826a981fb2..6326ae24e4d5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2534,7 +2534,7 @@ static int intel_pmu_cpu_prepare(int cpu)
if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
cpuc->shared_regs = allocate_shared_regs(cpu);
if (!cpuc->shared_regs)
- return NOTIFY_BAD;
+ goto err;
}
if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
@@ -2542,18 +2542,27 @@ static int intel_pmu_cpu_prepare(int cpu)
cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
if (!cpuc->constraint_list)
- return NOTIFY_BAD;
+ goto err_shared_regs;
cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
- if (!cpuc->excl_cntrs) {
- kfree(cpuc->constraint_list);
- kfree(cpuc->shared_regs);
- return NOTIFY_BAD;
- }
+ if (!cpuc->excl_cntrs)
+ goto err_constraint_list;
+
cpuc->excl_thread_id = 0;
}
return NOTIFY_OK;
+
+err_constraint_list:
+ kfree(cpuc->constraint_list);
+ cpuc->constraint_list = NULL;
+
+err_shared_regs:
+ kfree(cpuc->shared_regs);
+ cpuc->shared_regs = NULL;
+
+err:
+ return NOTIFY_BAD;
}
static void intel_pmu_cpu_starting(int cpu)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 188076161c1b..377e8f8ed391 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -952,6 +952,14 @@ static u64 intel_cqm_event_count(struct perf_event *event)
return 0;
/*
+ * Getting up-to-date values requires an SMP IPI which is not
+ * possible if we're being called in interrupt context. Return
+ * the cached values instead.
+ */
+ if (unlikely(in_interrupt()))
+ goto out;
+
+ /*
* Notice that we don't perform the reading of an RMID
* atomically, because we can't hold a spin lock across the
* IPIs.
@@ -1247,7 +1255,7 @@ static inline void cqm_pick_event_reader(int cpu)
cpumask_set_cpu(cpu, &cqm_cpumask);
}
-static void intel_cqm_cpu_prepare(unsigned int cpu)
+static void intel_cqm_cpu_starting(unsigned int cpu)
{
struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1288,13 +1296,11 @@ static int intel_cqm_cpu_notifier(struct notifier_block *nb,
unsigned int cpu = (unsigned long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- intel_cqm_cpu_prepare(cpu);
- break;
case CPU_DOWN_PREPARE:
intel_cqm_cpu_exit(cpu);
break;
case CPU_STARTING:
+ intel_cqm_cpu_starting(cpu);
cqm_pick_event_reader(cpu);
break;
}
@@ -1365,7 +1371,7 @@ static int __init intel_cqm_init(void)
goto out;
for_each_online_cpu(i) {
- intel_cqm_cpu_prepare(i);
+ intel_cqm_cpu_starting(i);
cqm_pick_event_reader(i);
}
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 79de954626fd..d25097c3fc1d 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -270,7 +270,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
dst_fpu->fpregs_active = 0;
dst_fpu->last_cpu = -1;
- if (src_fpu->fpstate_active)
+ if (src_fpu->fpstate_active && cpu_has_fpu)
fpu_copy(dst_fpu, src_fpu);
return 0;
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 32826791e675..d14e9ac3235a 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -4,6 +4,8 @@
#include <asm/fpu/internal.h>
#include <asm/tlbflush.h>
+#include <linux/sched.h>
+
/*
* Initialize the TS bit in CR0 according to the style of context-switches
* we are using:
@@ -38,7 +40,12 @@ static void fpu__init_cpu_generic(void)
write_cr0(cr0);
/* Flush out any pending x87 state: */
- asm volatile ("fninit");
+#ifdef CONFIG_MATH_EMULATION
+ if (!cpu_has_fpu)
+ fpstate_init_soft(&current->thread.fpu.state.soft);
+ else
+#endif
+ asm volatile ("fninit");
}
/*
@@ -136,6 +143,43 @@ static void __init fpu__init_system_generic(void)
unsigned int xstate_size;
EXPORT_SYMBOL_GPL(xstate_size);
+/* Enforce that 'MEMBER' is the last field of 'TYPE': */
+#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
+ BUILD_BUG_ON(sizeof(TYPE) != offsetofend(TYPE, MEMBER))
+
+/*
+ * We append the 'struct fpu' to the task_struct:
+ */
+static void __init fpu__init_task_struct_size(void)
+{
+ int task_size = sizeof(struct task_struct);
+
+ /*
+ * Subtract off the static size of the register state.
+ * It potentially has a bunch of padding.
+ */
+ task_size -= sizeof(((struct task_struct *)0)->thread.fpu.state);
+
+ /*
+ * Add back the dynamically-calculated register state
+ * size.
+ */
+ task_size += xstate_size;
+
+ /*
+ * We dynamically size 'struct fpu', so we require that
+ * it be at the end of 'thread_struct' and that
+ * 'thread_struct' be at the end of 'task_struct'. If
+ * you hit a compile error here, check the structure to
+ * see if something got added to the end.
+ */
+ CHECK_MEMBER_AT_END_OF(struct fpu, state);
+ CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu);
+ CHECK_MEMBER_AT_END_OF(struct task_struct, thread);
+
+ arch_task_struct_size = task_size;
+}
+
/*
* Set up the xstate_size based on the legacy FPU context size.
*
@@ -287,6 +331,7 @@ void __init fpu__init_system(struct cpuinfo_x86 *c)
fpu__init_system_generic();
fpu__init_system_xstate_size_legacy();
fpu__init_system_xstate();
+ fpu__init_task_struct_size();
fpu__init_system_ctx_switch();
}
@@ -311,9 +356,15 @@ static int __init x86_noxsave_setup(char *s)
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+ setup_clear_cpu_cap(X86_FEATURE_XSAVEC);
setup_clear_cpu_cap(X86_FEATURE_XSAVES);
setup_clear_cpu_cap(X86_FEATURE_AVX);
setup_clear_cpu_cap(X86_FEATURE_AVX2);
+ setup_clear_cpu_cap(X86_FEATURE_AVX512F);
+ setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
+ setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
+ setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
+ setup_clear_cpu_cap(X86_FEATURE_MPX);
return 1;
}
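
The end-of-structure check used by fpu__init_task_struct_size() can be reproduced outside the kernel with C11 _Static_assert. A compile-only sketch; offsetofend() is defined locally here because it is a kernel helper:

/* "MEMBER must be the very last byte of TYPE" as a build-time assertion. */
#include <stddef.h>

#define offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
        _Static_assert(sizeof(TYPE) == offsetofend(TYPE, MEMBER), \
                       #MEMBER " must be the last field of " #TYPE)

struct fake_fpu {
        int flags;
        char state[64];         /* stands in for the dynamically-sized state */
};

CHECK_MEMBER_AT_END_OF(struct fake_fpu, state);  /* passes */
/* Any field added after 'state' (or trailing padding) breaks the build. */

int main(void) { return 0; }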
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index c37886d759cc..2bcc0525f1c1 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
+#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
@@ -20,82 +21,82 @@
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
-#ifdef CONFIG_SMP
+/* context.lock is held for us, so we don't need any locking. */
static void flush_ldt(void *current_mm)
{
- if (current->active_mm == current_mm)
- load_LDT(&current->active_mm->context);
+ mm_context_t *pc;
+
+ if (current->active_mm != current_mm)
+ return;
+
+ pc = &current->active_mm->context;
+ set_ldt(pc->ldt->entries, pc->ldt->size);
}
-#endif
-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
+static struct ldt_struct *alloc_ldt_struct(int size)
{
- void *oldldt, *newldt;
- int oldsize;
-
- if (mincount <= pc->size)
- return 0;
- oldsize = pc->size;
- mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
- (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
- if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
- newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
+ struct ldt_struct *new_ldt;
+ int alloc_size;
+
+ if (size > LDT_ENTRIES)
+ return NULL;
+
+ new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
+ if (!new_ldt)
+ return NULL;
+
+ BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
+ alloc_size = size * LDT_ENTRY_SIZE;
+
+ /*
+ * Xen is very picky: it requires a page-aligned LDT that has no
+ * trailing nonzero bytes in any page that contains LDT descriptors.
+ * Keep it simple: zero the whole allocation and never allocate less
+ * than PAGE_SIZE.
+ */
+ if (alloc_size > PAGE_SIZE)
+ new_ldt->entries = vzalloc(alloc_size);
else
- newldt = (void *)__get_free_page(GFP_KERNEL);
-
- if (!newldt)
- return -ENOMEM;
+ new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (oldsize)
- memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
- oldldt = pc->ldt;
- memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
- (mincount - oldsize) * LDT_ENTRY_SIZE);
+ if (!new_ldt->entries) {
+ kfree(new_ldt);
+ return NULL;
+ }
- paravirt_alloc_ldt(newldt, mincount);
+ new_ldt->size = size;
+ return new_ldt;
+}
-#ifdef CONFIG_X86_64
- /* CHECKME: Do we really need this ? */
- wmb();
-#endif
- pc->ldt = newldt;
- wmb();
- pc->size = mincount;
- wmb();
-
- if (reload) {
-#ifdef CONFIG_SMP
- preempt_disable();
- load_LDT(pc);
- if (!cpumask_equal(mm_cpumask(current->mm),
- cpumask_of(smp_processor_id())))
- smp_call_function(flush_ldt, current->mm, 1);
- preempt_enable();
-#else
- load_LDT(pc);
-#endif
- }
- if (oldsize) {
- paravirt_free_ldt(oldldt, oldsize);
- if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
- vfree(oldldt);
- else
- put_page(virt_to_page(oldldt));
- }
- return 0;
+/* After calling this, the LDT is immutable. */
+static void finalize_ldt_struct(struct ldt_struct *ldt)
+{
+ paravirt_alloc_ldt(ldt->entries, ldt->size);
}
-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+/* context.lock is held */
+static void install_ldt(struct mm_struct *current_mm,
+ struct ldt_struct *ldt)
{
- int err = alloc_ldt(new, old->size, 0);
- int i;
+ /* Synchronizes with lockless_dereference in load_mm_ldt. */
+ smp_store_release(&current_mm->context.ldt, ldt);
+
+ /* Activate the LDT for all CPUs using current_mm. */
+ on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
+}
- if (err < 0)
- return err;
+static void free_ldt_struct(struct ldt_struct *ldt)
+{
+ if (likely(!ldt))
+ return;
- for (i = 0; i < old->size; i++)
- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
- return 0;
+ paravirt_free_ldt(ldt->entries, ldt->size);
+ if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
+ vfree(ldt->entries);
+ else
+ kfree(ldt->entries);
+ kfree(ldt);
}
/*
@@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
*/
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
+ struct ldt_struct *new_ldt;
struct mm_struct *old_mm;
int retval = 0;
mutex_init(&mm->context.lock);
- mm->context.size = 0;
old_mm = current->mm;
- if (old_mm && old_mm->context.size > 0) {
- mutex_lock(&old_mm->context.lock);
- retval = copy_ldt(&mm->context, &old_mm->context);
- mutex_unlock(&old_mm->context.lock);
+ if (!old_mm) {
+ mm->context.ldt = NULL;
+ return 0;
}
+
+ mutex_lock(&old_mm->context.lock);
+ if (!old_mm->context.ldt) {
+ mm->context.ldt = NULL;
+ goto out_unlock;
+ }
+
+ new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
+ if (!new_ldt) {
+ retval = -ENOMEM;
+ goto out_unlock;
+ }
+
+ memcpy(new_ldt->entries, old_mm->context.ldt->entries,
+ new_ldt->size * LDT_ENTRY_SIZE);
+ finalize_ldt_struct(new_ldt);
+
+ mm->context.ldt = new_ldt;
+
+out_unlock:
+ mutex_unlock(&old_mm->context.lock);
return retval;
}
@@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
*/
void destroy_context(struct mm_struct *mm)
{
- if (mm->context.size) {
-#ifdef CONFIG_X86_32
- /* CHECKME: Can this ever happen ? */
- if (mm == current->active_mm)
- clear_LDT();
-#endif
- paravirt_free_ldt(mm->context.ldt, mm->context.size);
- if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
- vfree(mm->context.ldt);
- else
- put_page(virt_to_page(mm->context.ldt));
- mm->context.size = 0;
- }
+ free_ldt_struct(mm->context.ldt);
+ mm->context.ldt = NULL;
}
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
- int err;
+ int retval;
unsigned long size;
struct mm_struct *mm = current->mm;
- if (!mm->context.size)
- return 0;
+ mutex_lock(&mm->context.lock);
+
+ if (!mm->context.ldt) {
+ retval = 0;
+ goto out_unlock;
+ }
+
if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
- mutex_lock(&mm->context.lock);
- size = mm->context.size * LDT_ENTRY_SIZE;
+ size = mm->context.ldt->size * LDT_ENTRY_SIZE;
if (size > bytecount)
size = bytecount;
- err = 0;
- if (copy_to_user(ptr, mm->context.ldt, size))
- err = -EFAULT;
- mutex_unlock(&mm->context.lock);
- if (err < 0)
- goto error_return;
+ if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
+ retval = -EFAULT;
+ goto out_unlock;
+ }
+
if (size != bytecount) {
- /* zero-fill the rest */
- if (clear_user(ptr + size, bytecount - size) != 0) {
- err = -EFAULT;
- goto error_return;
+ /* Zero-fill the rest and pretend we read bytecount bytes. */
+ if (clear_user(ptr + size, bytecount - size)) {
+ retval = -EFAULT;
+ goto out_unlock;
}
}
- return bytecount;
-error_return:
- return err;
+ retval = bytecount;
+
+out_unlock:
+ mutex_unlock(&mm->context.lock);
+ return retval;
}
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
@@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
struct desc_struct ldt;
int error;
struct user_desc ldt_info;
+ int oldsize, newsize;
+ struct ldt_struct *new_ldt, *old_ldt;
error = -EINVAL;
if (bytecount != sizeof(ldt_info))
@@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
goto out;
}
- mutex_lock(&mm->context.lock);
- if (ldt_info.entry_number >= mm->context.size) {
- error = alloc_ldt(&current->mm->context,
- ldt_info.entry_number + 1, 1);
- if (error < 0)
- goto out_unlock;
- }
-
- /* Allow LDTs to be cleared by the user. */
- if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
- if (oldmode || LDT_empty(&ldt_info)) {
- memset(&ldt, 0, sizeof(ldt));
- goto install;
+ if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
+ LDT_empty(&ldt_info)) {
+ /* The user wants to clear the entry. */
+ memset(&ldt, 0, sizeof(ldt));
+ } else {
+ if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+ error = -EINVAL;
+ goto out;
}
+
+ fill_ldt(&ldt, &ldt_info);
+ if (oldmode)
+ ldt.avl = 0;
}
- if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
- error = -EINVAL;
+ mutex_lock(&mm->context.lock);
+
+ old_ldt = mm->context.ldt;
+ oldsize = old_ldt ? old_ldt->size : 0;
+ newsize = max((int)(ldt_info.entry_number + 1), oldsize);
+
+ error = -ENOMEM;
+ new_ldt = alloc_ldt_struct(newsize);
+ if (!new_ldt)
goto out_unlock;
- }
- fill_ldt(&ldt, &ldt_info);
- if (oldmode)
- ldt.avl = 0;
+ if (old_ldt)
+ memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
+ new_ldt->entries[ldt_info.entry_number] = ldt;
+ finalize_ldt_struct(new_ldt);
- /* Install the new entry ... */
-install:
- write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
+ install_ldt(mm, new_ldt);
+ free_ldt_struct(old_ldt);
error = 0;
out_unlock:
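
The rewrite above changes only the kernel-internal representation; the modify_ldt(2) interface served by read_ldt()/write_ldt() is unchanged. A minimal Linux/x86 sketch of that interface (illustrative, no error handling; function code 0x11 is the "write, new mode" case and 0 is the raw read):

/* Install one flat data descriptor in the LDT and read the raw LDT back. */
#include <asm/ldt.h>            /* struct user_desc, LDT_ENTRY_SIZE */
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static long xmodify_ldt(int func, void *ptr, unsigned long bytecount)
{
        return syscall(SYS_modify_ldt, func, ptr, bytecount);
}

int main(void)
{
        struct user_desc desc;
        unsigned char raw[LDT_ENTRY_SIZE * 2];
        long ret;

        memset(&desc, 0, sizeof(desc));
        desc.entry_number   = 1;        /* the kernel sizes the LDT to cover this */
        desc.base_addr      = 0;
        desc.limit          = 0xfffff;
        desc.seg_32bit      = 1;
        desc.limit_in_pages = 1;
        desc.useable        = 1;

        ret = xmodify_ldt(0x11, &desc, sizeof(desc));
        printf("write_ldt: %ld\n", ret);

        ret = xmodify_ldt(0, raw, sizeof(raw));
        printf("read_ldt: %ld bytes, entry 1: %02x %02x %02x %02x %02x %02x %02x %02x\n",
               ret, raw[8], raw[9], raw[10], raw[11],
               raw[12], raw[13], raw[14], raw[15]);
        return 0;
}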
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index c3e985d1751c..d05bd2e2ee91 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -408,15 +408,15 @@ static void default_do_nmi(struct pt_regs *regs)
NOKPROBE_SYMBOL(default_do_nmi);
/*
- * NMIs can hit breakpoints which will cause it to lose its
- * NMI context with the CPU when the breakpoint does an iret.
- */
-#ifdef CONFIG_X86_32
-/*
- * For i386, NMIs use the same stack as the kernel, and we can
- * add a workaround to the iret problem in C (preventing nested
- * NMIs if an NMI takes a trap). Simply have 3 states the NMI
- * can be in:
+ * NMIs can page fault or hit breakpoints which will cause them to lose
+ * their NMI context with the CPU when the breakpoint or page fault does an IRET.
+ *
+ * As a result, NMIs can nest if NMIs get unmasked due to an IRET during
+ * NMI processing. On x86_64, the asm glue protects us from nested NMIs
+ * if the outer NMI came from kernel mode, but we can still nest if the
+ * outer NMI came from user mode.
+ *
+ * To handle these nested NMIs, we have three states:
*
* 1) not running
* 2) executing
@@ -430,15 +430,14 @@ NOKPROBE_SYMBOL(default_do_nmi);
* (Note, the latch is binary, thus multiple NMIs triggering,
* when one is running, are ignored. Only one NMI is restarted.)
*
- * If an NMI hits a breakpoint that executes an iret, another
- * NMI can preempt it. We do not want to allow this new NMI
- * to run, but we want to execute it when the first one finishes.
- * We set the state to "latched", and the exit of the first NMI will
- * perform a dec_return, if the result is zero (NOT_RUNNING), then
- * it will simply exit the NMI handler. If not, the dec_return
- * would have set the state to NMI_EXECUTING (what we want it to
- * be when we are running). In this case, we simply jump back
- * to rerun the NMI handler again, and restart the 'latched' NMI.
+ * If an NMI executes an iret, another NMI can preempt it. We do not
+ * want to allow this new NMI to run, but we want to execute it when the
+ * first one finishes. We set the state to "latched", and the exit of
+ * the first NMI will perform a dec_return, if the result is zero
+ * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
+ * dec_return would have set the state to NMI_EXECUTING (what we want it
+ * to be when we are running). In this case, we simply jump back to
+ * rerun the NMI handler again, and restart the 'latched' NMI.
*
* No trap (breakpoint or page fault) should be hit before nmi_restart,
* thus there is no race between the first check of state for NOT_RUNNING
@@ -461,49 +460,36 @@ enum nmi_states {
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
static DEFINE_PER_CPU(unsigned long, nmi_cr2);
-#define nmi_nesting_preprocess(regs) \
- do { \
- if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { \
- this_cpu_write(nmi_state, NMI_LATCHED); \
- return; \
- } \
- this_cpu_write(nmi_state, NMI_EXECUTING); \
- this_cpu_write(nmi_cr2, read_cr2()); \
- } while (0); \
- nmi_restart:
-
-#define nmi_nesting_postprocess() \
- do { \
- if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) \
- write_cr2(this_cpu_read(nmi_cr2)); \
- if (this_cpu_dec_return(nmi_state)) \
- goto nmi_restart; \
- } while (0)
-#else /* x86_64 */
+#ifdef CONFIG_X86_64
/*
- * In x86_64 things are a bit more difficult. This has the same problem
- * where an NMI hitting a breakpoint that calls iret will remove the
- * NMI context, allowing a nested NMI to enter. What makes this more
- * difficult is that both NMIs and breakpoints have their own stack.
- * When a new NMI or breakpoint is executed, the stack is set to a fixed
- * point. If an NMI is nested, it will have its stack set at that same
- * fixed address that the first NMI had, and will start corrupting the
- * stack. This is handled in entry_64.S, but the same problem exists with
- * the breakpoint stack.
+ * In x86_64, we need to handle breakpoint -> NMI -> breakpoint. Without
+ * some care, the inner breakpoint will clobber the outer breakpoint's
+ * stack.
*
- * If a breakpoint is being processed, and the debug stack is being used,
- * if an NMI comes in and also hits a breakpoint, the stack pointer
- * will be set to the same fixed address as the breakpoint that was
- * interrupted, causing that stack to be corrupted. To handle this case,
- * check if the stack that was interrupted is the debug stack, and if
- * so, change the IDT so that new breakpoints will use the current stack
- * and not switch to the fixed address. On return of the NMI, switch back
- * to the original IDT.
+ * If a breakpoint is being processed, and the debug stack is being
+ * used, if an NMI comes in and also hits a breakpoint, the stack
+ * pointer will be set to the same fixed address as the breakpoint that
+ * was interrupted, causing that stack to be corrupted. To handle this
+ * case, check if the stack that was interrupted is the debug stack, and
+ * if so, change the IDT so that new breakpoints will use the current
+ * stack and not switch to the fixed address. On return of the NMI,
+ * switch back to the original IDT.
*/
static DEFINE_PER_CPU(int, update_debug_stack);
+#endif
-static inline void nmi_nesting_preprocess(struct pt_regs *regs)
+dotraplinkage notrace void
+do_nmi(struct pt_regs *regs, long error_code)
{
+ if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
+ this_cpu_write(nmi_state, NMI_LATCHED);
+ return;
+ }
+ this_cpu_write(nmi_state, NMI_EXECUTING);
+ this_cpu_write(nmi_cr2, read_cr2());
+nmi_restart:
+
+#ifdef CONFIG_X86_64
/*
* If we interrupted a breakpoint, it is possible that
* the nmi handler will have breakpoints too. We need to
@@ -514,22 +500,8 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs)
debug_stack_set_zero();
this_cpu_write(update_debug_stack, 1);
}
-}
-
-static inline void nmi_nesting_postprocess(void)
-{
- if (unlikely(this_cpu_read(update_debug_stack))) {
- debug_stack_reset();
- this_cpu_write(update_debug_stack, 0);
- }
-}
#endif
-dotraplinkage notrace void
-do_nmi(struct pt_regs *regs, long error_code)
-{
- nmi_nesting_preprocess(regs);
-
nmi_enter();
inc_irq_stat(__nmi_count);
@@ -539,8 +511,17 @@ do_nmi(struct pt_regs *regs, long error_code)
nmi_exit();
- /* On i386, may loop back to preprocess */
- nmi_nesting_postprocess();
+#ifdef CONFIG_X86_64
+ if (unlikely(this_cpu_read(update_debug_stack))) {
+ debug_stack_reset();
+ this_cpu_write(update_debug_stack, 0);
+ }
+#endif
+
+ if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
+ write_cr2(this_cpu_read(nmi_cr2));
+ if (this_cpu_dec_return(nmi_state))
+ goto nmi_restart;
}
NOKPROBE_SYMBOL(do_nmi);
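
The three-state latch that do_nmi() now open-codes can be modelled in a few lines of plain C. This is only a toy (no per-CPU state; the nested NMI is simulated by a recursive call), showing why a latched NMI is replayed exactly once:

#include <stdio.h>

enum { NMI_NOT_RUNNING = 0, NMI_EXECUTING, NMI_LATCHED };

static int nmi_state = NMI_NOT_RUNNING;
static int handled;

static void do_nmi(void);

static void handle_one_nmi(void)
{
        handled++;
        if (handled == 1)
                do_nmi();       /* a second NMI arrives mid-handler: it gets latched */
}

static void do_nmi(void)
{
        if (nmi_state != NMI_NOT_RUNNING) {
                nmi_state = NMI_LATCHED;        /* remember it, don't nest */
                return;
        }
        nmi_state = NMI_EXECUTING;
nmi_restart:
        handle_one_nmi();

        /* LATCHED (2) decrements to EXECUTING (1): run once more.
         * EXECUTING (1) decrements to NOT_RUNNING (0): done. */
        if (--nmi_state)
                goto nmi_restart;
}

int main(void)
{
        do_nmi();
        printf("NMI handler ran %d times\n", handled);  /* prints 2 */
        return 0;
}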
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 9cad694ed7c4..c27cad726765 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -81,7 +81,7 @@ EXPORT_SYMBOL_GPL(idle_notifier_unregister);
*/
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
- *dst = *src;
+ memcpy(dst, src, arch_task_struct_size);
return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}
@@ -408,6 +408,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
static void mwait_idle(void)
{
if (!current_set_polling_and_test()) {
+ trace_cpu_idle_rcuidle(1, smp_processor_id());
if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
smp_mb(); /* quirk */
clflush((void *)&current_thread_info()->flags);
@@ -419,6 +420,7 @@ static void mwait_idle(void)
__sti_mwait(0, 0);
else
local_irq_enable();
+ trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
} else {
local_irq_enable();
}
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 71d7849a07f7..f6b916387590 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -121,11 +121,11 @@ void __show_regs(struct pt_regs *regs, int all)
void release_thread(struct task_struct *dead_task)
{
if (dead_task->mm) {
- if (dead_task->mm->context.size) {
+ if (dead_task->mm->context.ldt) {
pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
dead_task->comm,
dead_task->mm->context.ldt,
- dead_task->mm->context.size);
+ dead_task->mm->context.ldt->size);
BUG();
}
}
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 206996c1669d..71820c42b6ce 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -93,8 +93,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
COPY(r15);
#endif /* CONFIG_X86_64 */
+#ifdef CONFIG_X86_32
COPY_SEG_CPL3(cs);
COPY_SEG_CPL3(ss);
+#else /* !CONFIG_X86_32 */
+ /* The kernel saves and restores only the CS segment register on signals,
+ * which is the bare minimum needed to allow mixed 32/64-bit code.
+ * The application's signal handler can save/restore other segments if needed. */
+ COPY_SEG_CPL3(cs);
+#endif /* CONFIG_X86_32 */
get_user_ex(tmpflags, &sc->flags);
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@@ -154,9 +161,8 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
#else /* !CONFIG_X86_32 */
put_user_ex(regs->flags, &sc->flags);
put_user_ex(regs->cs, &sc->cs);
- put_user_ex(0, &sc->__pad2);
- put_user_ex(0, &sc->__pad1);
- put_user_ex(regs->ss, &sc->ss);
+ put_user_ex(0, &sc->gs);
+ put_user_ex(0, &sc->fs);
#endif /* CONFIG_X86_32 */
put_user_ex(fpstate, &sc->fpstate);
@@ -451,19 +457,9 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
regs->sp = (unsigned long)frame;
- /*
- * Set up the CS and SS registers to run signal handlers in
- * 64-bit mode, even if the handler happens to be interrupting
- * 32-bit or 16-bit code.
- *
- * SS is subtle. In 64-bit mode, we don't need any particular
- * SS descriptor, but we do need SS to be valid. It's possible
- * that the old SS is entirely bogus -- this can happen if the
- * signal we're trying to deliver is #GP or #SS caused by a bad
- * SS value.
- */
+ /* Set up the CS register to run signal handlers in 64-bit mode,
+ * even if the handler happens to be interrupting 32-bit code. */
regs->cs = __USER_CS;
- regs->ss = __USER_DS;
return 0;
}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index d3010aa79daf..b1f3ed9c7a9e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -992,8 +992,17 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
common_cpu_up(cpu, tidle);
+ /*
+ * We have to walk the irq descriptors to setup the vector
+ * space for the cpu which comes online. Prevent irq
+ * alloc/free across the bringup.
+ */
+ irq_lock_sparse();
+
err = do_boot_cpu(apicid, cpu, tidle);
+
if (err) {
+ irq_unlock_sparse();
pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
return -EIO;
}
@@ -1011,6 +1020,8 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
touch_nmi_watchdog();
}
+ irq_unlock_sparse();
+
return 0;
}
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 9b4d51d0c0d0..0ccb53a9fcd9 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -5,6 +5,7 @@
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <asm/desc.h>
+#include <asm/mmu_context.h>
unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
@@ -27,13 +28,14 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
struct desc_struct *desc;
unsigned long base;
- seg &= ~7UL;
+ seg >>= 3;
mutex_lock(&child->mm->context.lock);
- if (unlikely((seg >> 3) >= child->mm->context.size))
+ if (unlikely(!child->mm->context.ldt ||
+ seg >= child->mm->context.ldt->size))
addr = -1L; /* bogus selector, access would fault */
else {
- desc = child->mm->context.ldt + seg;
+ desc = &child->mm->context.ldt->entries[seg];
base = get_desc_base(desc);
/* 16-bit code segment? */
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 64dd46793099..2fbea2544f24 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -98,6 +98,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu);
+ if (vcpu->arch.eager_fpu)
+ kvm_x86_ops->fpu_activate(vcpu);
/*
* The existing code assumes virtual address is 48-bit in the canonical
diff --git a/arch/x86/kvm/iommu.c b/arch/x86/kvm/iommu.c
index 7dbced309ddb..5c520ebf6343 100644
--- a/arch/x86/kvm/iommu.c
+++ b/arch/x86/kvm/iommu.c
@@ -200,6 +200,7 @@ int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev)
goto out_unmap;
}
+ kvm_arch_start_assignment(kvm);
pci_set_dev_assigned(pdev);
dev_info(&pdev->dev, "kvm assign device\n");
@@ -224,6 +225,7 @@ int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev)
iommu_detach_device(domain, &pdev->dev);
pci_clear_dev_assigned(pdev);
+ kvm_arch_end_assignment(kvm);
dev_info(&pdev->dev, "kvm deassign device\n");
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 954e98a8c2e3..2a5ca97c263b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1595,7 +1595,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
for (i = 0; i < APIC_LVT_NUM; i++)
apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
apic_update_lvtt(apic);
- if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_LINT0_REENABLED))
+ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
apic_set_reg(apic, APIC_LVT0,
SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f807496b62c2..44171462bd2a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2479,6 +2479,14 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
return 0;
}
+static bool kvm_is_mmio_pfn(pfn_t pfn)
+{
+ if (pfn_valid(pfn))
+ return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+
+ return true;
+}
+
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
unsigned pte_access, int level,
gfn_t gfn, pfn_t pfn, bool speculative,
@@ -2506,7 +2514,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
spte |= PT_PAGE_SIZE_MASK;
if (tdp_enabled)
spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
- kvm_is_reserved_pfn(pfn));
+ kvm_is_mmio_pfn(pfn));
if (host_writable)
spte |= SPTE_HOST_WRITEABLE;
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index de1d2d8062e2..9e8bf13572e6 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -120,6 +120,16 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
}
+static u8 mtrr_disabled_type(void)
+{
+ /*
+ * Intel SDM 11.11.2.2: all MTRRs are disabled when
+ * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
+ * memory type is applied to all of physical memory.
+ */
+ return MTRR_TYPE_UNCACHABLE;
+}
+
/*
* Three terms are used in the following code:
* - segment, it indicates the address segments covered by fixed MTRRs.
@@ -434,6 +444,8 @@ struct mtrr_iter {
/* output fields. */
int mem_type;
+ /* mtrr is completely disabled? */
+ bool mtrr_disabled;
/* [start, end) is not fully covered in MTRRs? */
bool partial_map;
@@ -549,7 +561,7 @@ static void mtrr_lookup_var_next(struct mtrr_iter *iter)
static void mtrr_lookup_start(struct mtrr_iter *iter)
{
if (!mtrr_is_enabled(iter->mtrr_state)) {
- iter->partial_map = true;
+ iter->mtrr_disabled = true;
return;
}
@@ -563,6 +575,7 @@ static void mtrr_lookup_init(struct mtrr_iter *iter,
iter->mtrr_state = mtrr_state;
iter->start = start;
iter->end = end;
+ iter->mtrr_disabled = false;
iter->partial_map = false;
iter->fixed = false;
iter->range = NULL;
@@ -656,15 +669,19 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
return MTRR_TYPE_WRBACK;
}
- /* It is not covered by MTRRs. */
- if (iter.partial_map) {
- /*
- * We just check one page, partially covered by MTRRs is
- * impossible.
- */
- WARN_ON(type != -1);
- type = mtrr_default_type(mtrr_state);
- }
+ if (iter.mtrr_disabled)
+ return mtrr_disabled_type();
+
+ /* not contained in any MTRRs. */
+ if (type == -1)
+ return mtrr_default_type(mtrr_state);
+
+ /*
+ * We just check one page, partially covered by MTRRs is
+ * impossible.
+ */
+ WARN_ON(iter.partial_map);
+
return type;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
@@ -689,6 +706,9 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
return false;
}
+ if (iter.mtrr_disabled)
+ return true;
+
if (!iter.partial_map)
return true;
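
Reduced to its essentials, the lookup now distinguishes "MTRRs disabled" from "enabled but no range matches". A simplified sketch (MTRR_TYPE_* values follow the architectural encoding; the range matching itself is assumed to happen elsewhere):

#include <stdio.h>

#define MTRR_TYPE_UNCACHABLE    0
#define MTRR_TYPE_WRBACK        6

/* deftype is IA32_MTRR_DEF_TYPE.Type; matched is the fixed/variable-range
 * result, or -1 when no range covers the page. */
static int guest_memory_type(int mtrr_enabled, int deftype, int matched)
{
        if (!mtrr_enabled)
                return MTRR_TYPE_UNCACHABLE;    /* MTRRs off: everything is UC */
        if (matched == -1)
                return deftype;                 /* not covered: default type */
        return matched;
}

int main(void)
{
        printf("%d\n", guest_memory_type(0, MTRR_TYPE_WRBACK, -1));  /* 0: UC */
        printf("%d\n", guest_memory_type(1, MTRR_TYPE_WRBACK, -1));  /* 6: WB */
        printf("%d\n", guest_memory_type(1, MTRR_TYPE_WRBACK, 4));   /* 4: matched type */
        return 0;
}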
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 602b974a60a6..8e0c0844c6b9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -865,6 +865,64 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}
+#define MTRR_TYPE_UC_MINUS 7
+#define MTRR2PROTVAL_INVALID 0xff
+
+static u8 mtrr2protval[8];
+
+static u8 fallback_mtrr_type(int mtrr)
+{
+ /*
+ * WT and WP aren't always available in the host PAT. Treat
+ * them as UC and UC- respectively. Everything else should be
+ * there.
+ */
+ switch (mtrr)
+ {
+ case MTRR_TYPE_WRTHROUGH:
+ return MTRR_TYPE_UNCACHABLE;
+ case MTRR_TYPE_WRPROT:
+ return MTRR_TYPE_UC_MINUS;
+ default:
+ BUG();
+ }
+}
+
+static void build_mtrr2protval(void)
+{
+ int i;
+ u64 pat;
+
+ for (i = 0; i < 8; i++)
+ mtrr2protval[i] = MTRR2PROTVAL_INVALID;
+
+ /* Ignore the invalid MTRR types. */
+ mtrr2protval[2] = 0;
+ mtrr2protval[3] = 0;
+
+ /*
+ * Use host PAT value to figure out the mapping from guest MTRR
+ * values to nested page table PAT/PCD/PWT values. We do not
+ * want to change the host PAT value every time we enter the
+ * guest.
+ */
+ rdmsrl(MSR_IA32_CR_PAT, pat);
+ for (i = 0; i < 8; i++) {
+ u8 mtrr = pat >> (8 * i);
+
+ if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID)
+ mtrr2protval[mtrr] = __cm_idx2pte(i);
+ }
+
+ for (i = 0; i < 8; i++) {
+ if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) {
+ u8 fallback = fallback_mtrr_type(i);
+ mtrr2protval[i] = mtrr2protval[fallback];
+ BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID);
+ }
+ }
+}
+
static __init int svm_hardware_setup(void)
{
int cpu;
@@ -931,6 +989,7 @@ static __init int svm_hardware_setup(void)
} else
kvm_disable_tdp();
+ build_mtrr2protval();
return 0;
err:
@@ -1085,6 +1144,39 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
return target_tsc - tsc;
}
+static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat)
+{
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+
+ /* Unlike Intel, AMD takes the guest's CR0.CD into account.
+ *
+ * AMD does not have IPAT. To emulate it for the case of guests
+ * with no assigned devices, just set everything to WB. If guests
+ * have assigned devices, however, we cannot force WB for RAM
+ * pages only, so use the guest PAT directly.
+ */
+ if (!kvm_arch_has_assigned_device(vcpu->kvm))
+ *g_pat = 0x0606060606060606;
+ else
+ *g_pat = vcpu->arch.pat;
+}
+
+static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+{
+ u8 mtrr;
+
+ /*
+ * 1. MMIO: trust guest MTRR, so same as item 3.
+ * 2. No passthrough: always map as WB, and force guest PAT to WB as well
+ * 3. Passthrough: can't guarantee the result, try to trust guest.
+ */
+ if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm))
+ return 0;
+
+ mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
+ return mtrr2protval[mtrr];
+}
+
static void init_vmcb(struct vcpu_svm *svm, bool init_event)
{
struct vmcb_control_area *control = &svm->vmcb->control;
@@ -1180,6 +1272,7 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
clr_cr_intercept(svm, INTERCEPT_CR3_READ);
clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
save->g_pat = svm->vcpu.arch.pat;
+ svm_set_guest_pat(svm, &save->g_pat);
save->cr3 = 0;
save->cr4 = 0;
}
@@ -1579,7 +1672,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
* does not do it - this results in some delay at
* reboot
*/
- if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_CD_NW_CLEARED))
+ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
svm->vmcb->save.cr0 = cr0;
mark_dirty(svm->vmcb, VMCB_CR);
@@ -3254,6 +3347,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
case MSR_VM_IGNNE:
vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
break;
+ case MSR_IA32_CR_PAT:
+ if (npt_enabled) {
+ if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
+ return 1;
+ vcpu->arch.pat = data;
+ svm_set_guest_pat(svm, &svm->vmcb->save.g_pat);
+ mark_dirty(svm->vmcb, VMCB_NPT);
+ break;
+ }
+ /* fall through */
default:
return kvm_set_msr_common(vcpu, msr);
}
@@ -4088,11 +4191,6 @@ static bool svm_has_high_real_mode_segbase(void)
return true;
}
-static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
-{
- return 0;
-}
-
static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
}
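
The core of build_mtrr2protval() is a reverse lookup from memory type to the first PAT slot that provides it. A stand-alone sketch; the 0x0007010600070106 value mirrors the usual Linux WB/WC/UC-/UC arrangement and is used here purely as an example:

#include <stdio.h>
#include <stdint.h>

#define PAT_SLOTS 8
#define INVALID   0xff

int main(void)
{
        /* One memory type per byte: WB(6), WC(1), UC-(7), UC(0), repeated. */
        uint64_t pat = 0x0007010600070106ULL;
        uint8_t type2idx[8];
        int i;

        for (i = 0; i < 8; i++)
                type2idx[i] = INVALID;

        /* The first PAT slot providing a given type wins. */
        for (i = 0; i < PAT_SLOTS; i++) {
                uint8_t type = (pat >> (8 * i)) & 0x7;  /* type is 3 bits */

                if (type2idx[type] == INVALID)
                        type2idx[type] = i;
        }

        for (i = 0; i < 8; i++)
                printf("memory type %d -> PAT index %d\n",
                       i, type2idx[i] == INVALID ? -1 : type2idx[i]);
        return 0;
}

Types with no slot in this layout (WT and WP) come out as -1, which is the gap fallback_mtrr_type() closes by treating them as UC and UC- respectively.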
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e856dd566f4c..83b7b5cd75d5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8632,22 +8632,17 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
u64 ipat = 0;
/* For VT-d and EPT combination
- * 1. MMIO: always map as UC
+ * 1. MMIO: guest may want to apply WC, trust it.
* 2. EPT with VT-d:
* a. VT-d without snooping control feature: can't guarantee the
- * result, try to trust guest.
+ * result, try to trust guest. So the same as item 1.
* b. VT-d with snooping control feature: snooping control feature of
* VT-d engine can guarantee the cache correctness. Just set it
* to WB to keep consistent with host. So the same as item 3.
* 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
* consistent with host MTRR
*/
- if (is_mmio) {
- cache = MTRR_TYPE_UNCACHABLE;
- goto exit;
- }
-
- if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
+ if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
ipat = VMX_EPT_IPAT_BIT;
cache = MTRR_TYPE_WRBACK;
goto exit;
@@ -8655,7 +8650,10 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
ipat = VMX_EPT_IPAT_BIT;
- cache = MTRR_TYPE_UNCACHABLE;
+ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
+ cache = MTRR_TYPE_WRBACK;
+ else
+ cache = MTRR_TYPE_UNCACHABLE;
goto exit;
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bbaf44e8f0d3..8f0f6eca69da 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2105,7 +2105,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (guest_cpuid_has_tsc_adjust(vcpu)) {
if (!msr_info->host_initiated) {
s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
- kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
+ adjust_tsc_offset_guest(vcpu, adj);
}
vcpu->arch.ia32_tsc_adjust_msr = data;
}
@@ -3157,8 +3157,7 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
cpuid_count(XSTATE_CPUID, index,
&size, &offset, &ecx, &edx);
memcpy(dest, src + offset, size);
- } else
- WARN_ON_ONCE(1);
+ }
valid -= feature;
}
@@ -6328,6 +6327,7 @@ static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf)
static void process_smi(struct kvm_vcpu *vcpu)
{
struct kvm_segment cs, ds;
+ struct desc_ptr dt;
char buf[512];
u32 cr0;
@@ -6360,6 +6360,10 @@ static void process_smi(struct kvm_vcpu *vcpu)
kvm_x86_ops->set_cr4(vcpu, 0);
+ /* Undocumented: IDT limit is set to zero on entry to SMM. */
+ dt.address = dt.size = 0;
+ kvm_x86_ops->set_idt(vcpu, &dt);
+
__kvm_set_dr(vcpu, 7, DR7_FIXED_1);
cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
@@ -7315,11 +7319,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu = kvm_x86_ops->vcpu_create(kvm, id);
- /*
- * Activate fpu unconditionally in case the guest needs eager FPU. It will be
- * deactivated soon if it doesn't.
- */
- kvm_x86_ops->fpu_activate(vcpu);
return vcpu;
}
@@ -8218,6 +8217,24 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
kvm_x86_ops->interrupt_allowed(vcpu);
}
+void kvm_arch_start_assignment(struct kvm *kvm)
+{
+ atomic_inc(&kvm->arch.assigned_device_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
+
+void kvm_arch_end_assignment(struct kvm *kvm)
+{
+ atomic_dec(&kvm->arch.assigned_device_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
+
+bool kvm_arch_has_assigned_device(struct kvm *kvm)
+{
+ return atomic_read(&kvm->arch.assigned_device_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
+
void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
atomic_inc(&kvm->arch.noncoherent_dma_count);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index edc8cdcd786b..0ca2f3e4803c 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -147,6 +147,11 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
return kvm_register_write(vcpu, reg, val);
}
+static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
+{
+ return !(kvm->arch.disabled_quirks & quirk);
+}
+
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index f37e84ab49f3..3d8f2e421466 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -29,7 +29,6 @@
#include <asm/uaccess.h>
#include <asm/traps.h>
-#include <asm/desc.h>
#include <asm/user.h>
#include <asm/fpu/internal.h>
@@ -181,7 +180,7 @@ void math_emulate(struct math_emu_info *info)
math_abort(FPU_info, SIGILL);
}
- code_descriptor = LDT_DESCRIPTOR(FPU_CS);
+ code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
if (SEG_D_SIZE(code_descriptor)) {
/* The above test may be wrong, the book is not clear */
/* Segmented 32 bit protected mode */
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index 9ccecb61a4fa..5e044d506b7a 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -16,9 +16,24 @@
#include <linux/kernel.h>
#include <linux/mm.h>
-/* s is always from a cpu register, and the cpu does bounds checking
- * during register load --> no further bounds checks needed */
-#define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
+#include <asm/desc.h>
+#include <asm/mmu_context.h>
+
+static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
+{
+ static struct desc_struct zero_desc;
+ struct desc_struct ret = zero_desc;
+
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ seg >>= 3;
+ mutex_lock(&current->mm->context.lock);
+ if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
+ ret = current->mm->context.ldt->entries[seg];
+ mutex_unlock(&current->mm->context.lock);
+#endif
+ return ret;
+}
+
#define SEG_D_SIZE(x) ((x).b & (3 << 21))
#define SEG_G_BIT(x) ((x).b & (1 << 23))
#define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 4096 : 1)
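
FPU_get_ldt_descriptor() returns a raw 8-byte descriptor and the SEG_* macros then pick it apart. The field layout being decoded is the standard one; a stand-alone sketch (the example value is a flat 4 GiB 32-bit data segment, chosen only for illustration):

/* Decode base and limit from the two 32-bit words of an x86 segment descriptor. */
#include <stdio.h>
#include <stdint.h>

struct raw_desc { uint32_t a, b; };     /* low word, high word */

static uint32_t desc_base(struct raw_desc d)
{
        return (d.a >> 16) | ((d.b & 0xff) << 16) | (d.b & 0xff000000);
}

static uint32_t desc_limit(struct raw_desc d)
{
        uint32_t limit = (d.a & 0xffff) | (d.b & 0x000f0000);

        if (d.b & (1u << 23))           /* G bit: limit counted in 4 KiB pages */
                limit = (limit << 12) | 0xfff;
        return limit;
}

int main(void)
{
        struct raw_desc d = { 0x0000ffff, 0x00cf9200 };

        printf("base=%#x limit=%#x\n", desc_base(d), desc_limit(d));
        return 0;
}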
diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c
index 6ef5e99380f9..8300db71c2a6 100644
--- a/arch/x86/math-emu/get_address.c
+++ b/arch/x86/math-emu/get_address.c
@@ -20,7 +20,6 @@
#include <linux/stddef.h>
#include <asm/uaccess.h>
-#include <asm/desc.h>
#include "fpu_system.h"
#include "exception.h"
@@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment,
addr->selector = PM_REG_(segment);
}
- descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
+ descriptor = FPU_get_ldt_descriptor(addr->selector);
base_address = SEG_BASE_ADDR(descriptor);
address = base_address + offset;
limit = base_address
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index cc5ccc415cc0..b9c78f3bcd67 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -63,8 +63,6 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
!PageReserved(pfn_to_page(start_pfn + i)))
return 1;
- WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
-
return 0;
}
@@ -94,7 +92,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
pgprot_t prot;
int retval;
void __iomem *ret_addr;
- int ram_region;
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
@@ -117,23 +114,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
/*
* Don't allow anybody to remap normal RAM that we're using..
*/
- /* First check if whole region can be identified as RAM or not */
- ram_region = region_is_ram(phys_addr, size);
- if (ram_region > 0) {
- WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
- (unsigned long int)phys_addr,
- (unsigned long int)last_addr);
+ pfn = phys_addr >> PAGE_SHIFT;
+ last_pfn = last_addr >> PAGE_SHIFT;
+ if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+ __ioremap_check_ram) == 1) {
+ WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+ &phys_addr, &last_addr);
return NULL;
}
- /* If could not be identified(-1), check page by page */
- if (ram_region < 0) {
- pfn = phys_addr >> PAGE_SHIFT;
- last_pfn = last_addr >> PAGE_SHIFT;
- if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
- __ioremap_check_ram) == 1)
- return NULL;
- }
/*
* Mappings have to be page-aligned
*/
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 9d518d693b4b..844b06d67df4 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -126,3 +126,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
}
}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_MPX)
+ return "[mpx]";
+ return NULL;
+}
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 7a657f58bbea..db1b0bc5017c 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -20,20 +20,6 @@
#define CREATE_TRACE_POINTS
#include <asm/trace/mpx.h>
-static const char *mpx_mapping_name(struct vm_area_struct *vma)
-{
- return "[mpx]";
-}
-
-static struct vm_operations_struct mpx_vma_ops = {
- .name = mpx_mapping_name,
-};
-
-static int is_mpx_vma(struct vm_area_struct *vma)
-{
- return (vma->vm_ops == &mpx_vma_ops);
-}
-
static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
{
if (is_64bit_mm(mm))
@@ -53,9 +39,6 @@ static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
/*
* This is really a simplified "vm_mmap". it only handles MPX
* bounds tables (the bounds directory is user-allocated).
- *
- * Later on, we use the vma->vm_ops to uniquely identify these
- * VMAs.
*/
static unsigned long mpx_mmap(unsigned long len)
{
@@ -101,7 +84,6 @@ static unsigned long mpx_mmap(unsigned long len)
ret = -ENOMEM;
goto out;
}
- vma->vm_ops = &mpx_vma_ops;
if (vm_flags & VM_LOCKED) {
up_write(&mm->mmap_sem);
@@ -812,7 +794,7 @@ static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
* so stop immediately and return an error. This
* probably results in a SIGSEGV.
*/
- if (!is_mpx_vma(vma))
+ if (!(vma->vm_flags & VM_MPX))
return -EINVAL;
len = min(vma->vm_end, end) - addr;
@@ -945,9 +927,9 @@ static int try_unmap_single_bt(struct mm_struct *mm,
* lots of tables even though we have no actual table
* entries in use.
*/
- while (next && is_mpx_vma(next))
+ while (next && (next->vm_flags & VM_MPX))
next = next->vm_next;
- while (prev && is_mpx_vma(prev))
+ while (prev && (prev->vm_flags & VM_MPX))
prev = prev->vm_prev;
/*
* We know 'start' and 'end' lie within an area controlled
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 3250f2371aea..90b924acd982 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -117,7 +117,7 @@ static void flush_tlb_func(void *info)
} else {
unsigned long addr;
unsigned long nr_pages =
- f->flush_end - f->flush_start / PAGE_SIZE;
+ (f->flush_end - f->flush_start) / PAGE_SIZE;
addr = f->flush_start;
while (addr < f->flush_end) {
__flush_tlb_single(addr);
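
The tlb.c hunk is purely an operator-precedence fix; a two-line check makes the difference obvious (addresses are arbitrary):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        unsigned long start = 0x100000, end = 0x104000;         /* 4 pages apart */

        printf("buggy: %lu\n", end - start / PAGE_SIZE);        /* end minus 256, not a page count */
        printf("fixed: %lu\n", (end - start) / PAGE_SIZE);      /* 4 */
        return 0;
}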
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 579a8fd74be0..be2e7a2b10d7 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -269,7 +269,7 @@ static void emit_bpf_tail_call(u8 **pprog)
EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */
offsetof(struct bpf_array, map.max_entries));
EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */
-#define OFFSET1 44 /* number of bytes to jump */
+#define OFFSET1 47 /* number of bytes to jump */
EMIT2(X86_JBE, OFFSET1); /* jbe out */
label1 = cnt;
@@ -278,15 +278,15 @@ static void emit_bpf_tail_call(u8 **pprog)
*/
EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
-#define OFFSET2 33
+#define OFFSET2 36
EMIT2(X86_JA, OFFSET2); /* ja out */
label2 = cnt;
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
/* prog = array->prog[index]; */
- EMIT4(0x48, 0x8D, 0x44, 0xD6); /* lea rax, [rsi + rdx * 8 + 0x50] */
- EMIT1(offsetof(struct bpf_array, prog));
+ EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
+ offsetof(struct bpf_array, prog));
EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */
/* if (prog == NULL)
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index cfba30f27392..e4308fe6afe8 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -972,6 +972,11 @@ u64 efi_mem_attributes(unsigned long phys_addr)
static int __init arch_parse_efi_cmdline(char *str)
{
+ if (!str) {
+ pr_warn("need at least one option\n");
+ return -EINVAL;
+ }
+
if (parse_option_str(str, "old_map"))
set_bit(EFI_OLD_MEMMAP, &efi.flags);
if (parse_option_str(str, "debug"))
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 0d7dd1f5ac36..9ab52791fed5 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -22,6 +22,7 @@
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
+#include <asm/mmu_context.h>
#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
@@ -153,7 +154,7 @@ static void fix_processor_context(void)
syscall_init(); /* This sets MSR_*STAR and related */
#endif
load_TR_desc(); /* This does ltr */
- load_LDT(&current->active_mm->context); /* This does lldt */
+ load_mm_ldt(current->active_mm); /* This does lldt */
fpu__resume_cpu();
}
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index e88fda867a33..484145368a24 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -8,7 +8,7 @@ config XEN
select PARAVIRT_CLOCK
select XEN_HAVE_PVMMU
depends on X86_64 || (X86_32 && X86_PAE)
- depends on X86_TSC
+ depends on X86_LOCAL_APIC && X86_TSC
help
This is the Linux Xen port. Enabling this will allow the
kernel to boot in a paravirtualized environment under the
@@ -17,7 +17,7 @@ config XEN
config XEN_DOM0
def_bool y
depends on XEN && PCI_XEN && SWIOTLB_XEN
- depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
+ depends on X86_IO_APIC && ACPI && PCI
config XEN_PVHVM
def_bool y
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 7322755f337a..4b6e29ac0968 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -13,13 +13,13 @@ CFLAGS_mmu.o := $(nostackp)
obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
time.o xen-asm.o xen-asm_$(BITS).o \
grant-table.o suspend.o platform-pci-unplug.o \
- p2m.o
+ p2m.o apic.o
obj-$(CONFIG_EVENT_TRACING) += trace.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
-obj-$(CONFIG_XEN_DOM0) += apic.o vga.o
+obj-$(CONFIG_XEN_DOM0) += vga.o
obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o
obj-$(CONFIG_XEN_EFI) += efi.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 0b95c9b8283f..11d6fb4e8483 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
pte_t pte;
unsigned long pfn;
struct page *page;
+ unsigned char dummy;
ptep = lookup_address((unsigned long)v, &level);
BUG_ON(ptep == NULL);
@@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
pte = pfn_pte(pfn, prot);
+ /*
+ * Careful: update_va_mapping() will fail if the virtual address
+ * we're poking isn't populated in the page tables. We don't
+ * need to worry about the direct map (that's always in the page
+ * tables), but we need to be careful about vmap space. In
+ * particular, the top level page table can lazily propagate
+ * entries between processes, so if we've switched mms since we
+ * vmapped the target in the first place, we might not have the
+ * top-level page table entry populated.
+ *
+ * We disable preemption because we want the same mm active when
+ * we probe the target and when we issue the hypercall. We'll
+ * have the same nominal mm, but if we're a kernel thread, lazy
+ * mm dropping could change our pgd.
+ *
+ * Out of an abundance of caution, this uses __get_user() to fault
+ * in the target address just in case there's some obscure case
+ * in which the target address isn't readable.
+ */
+
+ preempt_disable();
+
+ pagefault_disable(); /* Avoid warnings due to being atomic. */
+ __get_user(dummy, (unsigned char __user __force *)v);
+ pagefault_enable();
+
if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
BUG();
@@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
BUG();
} else
kmap_flush_unused();
+
+ preempt_enable();
}
static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
@@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
int i;
+ /*
+ * We need to mark all the aliases of the LDT pages RO. We
+ * don't need to call vm_flush_aliases(), though, since that's
+ * only responsible for flushing aliases out of the TLBs, not the
+ * page tables, and Xen will flush the TLB for us if needed.
+ *
+ * To avoid confusing future readers: none of this is necessary
+ * to load the LDT. The hypervisor only checks this when the
+ * LDT is faulted in due to subsequent descriptor access.
+ */
+
for(i = 0; i < entries; i += entries_per_page)
set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}
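
The comment added to set_aliased_prot() above describes a fault-in-before-critical-operation pattern: touch the target byte while a fault is still tolerable, so the later hypercall runs against a populated mapping. A rough userspace analogue of the idea only (plain malloc/memset stand in for the vmap target and the hypercall; none of the Xen, preemption, or pagefault APIs are modeled):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        size_t len = 4096;
        unsigned char *target = malloc(len);
        if (!target)
            return 1;

        /* "Probe" the target first, while a fault is still harmless,
         * so the operation below runs against a present mapping. */
        volatile unsigned char dummy = target[0];
        (void)dummy;

        /* The "real" operation, standing in for the hypercall that
         * cannot tolerate an unpopulated mapping. */
        memset(target, 0xaa, len);

        free(target);
        return 0;
    }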
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index c20fe29e65f4..2292721b1d10 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -101,17 +101,15 @@ struct dom0_vga_console_info;
#ifdef CONFIG_XEN_DOM0
void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
-void __init xen_init_apic(void);
#else
static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
size_t size)
{
}
-static inline void __init xen_init_apic(void)
-{
-}
#endif
+void __init xen_init_apic(void);
+
#ifdef CONFIG_XEN_EFI
extern void xen_efi_init(void);
#else
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 14d15bf1a95b..5b478accd5fc 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -19,6 +19,7 @@ generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += resource.h
diff --git a/arch/xtensa/include/asm/mm-arch-hooks.h b/arch/xtensa/include/asm/mm-arch-hooks.h
deleted file mode 100644
index d2e5cfd3dd02..000000000000
--- a/arch/xtensa/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_XTENSA_MM_ARCH_HOOKS_H
-#define _ASM_XTENSA_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_XTENSA_MM_ARCH_HOOKS_H */
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 0436c21db7f2..719b7152aed1 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -51,7 +51,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
unsigned long idx = BIO_POOL_NONE;
unsigned inline_vecs;
- if (!bs) {
+ if (!bs || !bs->bio_integrity_pool) {
bip = kmalloc(sizeof(struct bio_integrity_payload) +
sizeof(struct bio_vec) * nr_vecs, gfp_mask);
inline_vecs = nr_vecs;
@@ -104,7 +104,7 @@ void bio_integrity_free(struct bio *bio)
kfree(page_address(bip->bip_vec->bv_page) +
bip->bip_vec->bv_offset);
- if (bs) {
+ if (bs && bs->bio_integrity_pool) {
if (bip->bip_slab != BIO_POOL_NONE)
bvec_free(bs->bvec_integrity_pool, bip->bip_vec,
bip->bip_slab);
diff --git a/block/bio.c b/block/bio.c
index 2a00d349cd68..d6e5ba3399f0 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1831,8 +1831,9 @@ EXPORT_SYMBOL(bio_endio);
* Allocates and returns a new bio which represents @sectors from the start of
* @bio, and updates @bio to represent the remaining sectors.
*
- * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
- * responsibility to ensure that @bio is not freed before the split.
+ * Unless this is a discard request the newly allocated bio will point
+ * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
+ * @bio is not freed before the split.
*/
struct bio *bio_split(struct bio *bio, int sectors,
gfp_t gfp, struct bio_set *bs)
@@ -1842,7 +1843,15 @@ struct bio *bio_split(struct bio *bio, int sectors,
BUG_ON(sectors <= 0);
BUG_ON(sectors >= bio_sectors(bio));
- split = bio_clone_fast(bio, gfp, bs);
+ /*
+ * Discards need a mutable bio_vec to accommodate the payload
+ * required by the DSM TRIM and UNMAP commands.
+ */
+ if (bio->bi_rw & REQ_DISCARD)
+ split = bio_clone_bioset(bio, gfp, bs);
+ else
+ split = bio_clone_fast(bio, gfp, bs);
+
if (!split)
return NULL;
@@ -2009,6 +2018,7 @@ int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
bio->bi_css = blkcg_css;
return 0;
}
+EXPORT_SYMBOL_GPL(bio_associate_blkcg);
/**
* bio_associate_current - associate a bio with %current
@@ -2039,6 +2049,7 @@ int bio_associate_current(struct bio *bio)
bio->bi_css = task_get_css(current, blkio_cgrp_id);
return 0;
}
+EXPORT_SYMBOL_GPL(bio_associate_current);
/**
* bio_disassociate_task - undo bio_associate_current()
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 9f97da52d006..d6283b3f5db5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -29,6 +29,14 @@
#define MAX_KEY_LEN 100
+/*
+ * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
+ * blkcg_pol_register_mutex nests outside of it and synchronizes entire
+ * policy [un]register operations including cgroup file additions /
+ * removals. Putting cgroup file registration outside blkcg_pol_mutex
+ * allows grabbing it from cgroup callbacks.
+ */
+static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);
struct blkcg blkcg_root;
@@ -38,6 +46,8 @@ struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
+static LIST_HEAD(all_blkcgs); /* protected by blkcg_pol_mutex */
+
static bool blkcg_policy_enabled(struct request_queue *q,
const struct blkcg_policy *pol)
{
@@ -453,20 +463,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
struct blkcg_gq *blkg;
int i;
- /*
- * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex
- * which ends up putting cgroup's internal cgroup_tree_mutex under
- * it; however, cgroup_tree_mutex is nested above cgroup file
- * active protection and grabbing blkcg_pol_mutex from a cgroup
- * file operation creates a possible circular dependency. cgroup
- * internal locking is planned to go through further simplification
- * and this issue should go away soon. For now, let's trylock
- * blkcg_pol_mutex and restart the write on failure.
- *
- * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com
- */
- if (!mutex_trylock(&blkcg_pol_mutex))
- return restart_syscall();
+ mutex_lock(&blkcg_pol_mutex);
spin_lock_irq(&blkcg->lock);
/*
@@ -721,8 +718,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
return -EINVAL;
disk = get_gendisk(MKDEV(major, minor), &part);
- if (!disk || part)
+ if (!disk)
+ return -EINVAL;
+ if (part) {
+ put_disk(disk);
return -EINVAL;
+ }
rcu_read_lock();
spin_lock_irq(disk->queue->queue_lock);
@@ -822,8 +823,17 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
{
struct blkcg *blkcg = css_to_blkcg(css);
- if (blkcg != &blkcg_root)
+ mutex_lock(&blkcg_pol_mutex);
+ list_del(&blkcg->all_blkcgs_node);
+ mutex_unlock(&blkcg_pol_mutex);
+
+ if (blkcg != &blkcg_root) {
+ int i;
+
+ for (i = 0; i < BLKCG_MAX_POLS; i++)
+ kfree(blkcg->pd[i]);
kfree(blkcg);
+ }
}
static struct cgroup_subsys_state *
@@ -833,6 +843,8 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
struct cgroup_subsys_state *ret;
int i;
+ mutex_lock(&blkcg_pol_mutex);
+
if (!parent_css) {
blkcg = &blkcg_root;
goto done;
@@ -875,14 +887,17 @@ done:
#ifdef CONFIG_CGROUP_WRITEBACK
INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
+ list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
+
+ mutex_unlock(&blkcg_pol_mutex);
return &blkcg->css;
free_pd_blkcg:
for (i--; i >= 0; i--)
kfree(blkcg->pd[i]);
-
free_blkcg:
kfree(blkcg);
+ mutex_unlock(&blkcg_pol_mutex);
return ret;
}
@@ -1037,10 +1052,8 @@ int blkcg_activate_policy(struct request_queue *q,
const struct blkcg_policy *pol)
{
LIST_HEAD(pds);
- LIST_HEAD(cpds);
struct blkcg_gq *blkg;
struct blkg_policy_data *pd, *nd;
- struct blkcg_policy_data *cpd, *cnd;
int cnt = 0, ret;
if (blkcg_policy_enabled(q, pol))
@@ -1053,10 +1066,7 @@ int blkcg_activate_policy(struct request_queue *q,
cnt++;
spin_unlock_irq(q->queue_lock);
- /*
- * Allocate per-blkg and per-blkcg policy data
- * for all existing blkgs.
- */
+ /* allocate per-blkg policy data for all existing blkgs */
while (cnt--) {
pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
if (!pd) {
@@ -1064,15 +1074,6 @@ int blkcg_activate_policy(struct request_queue *q,
goto out_free;
}
list_add_tail(&pd->alloc_node, &pds);
-
- if (!pol->cpd_size)
- continue;
- cpd = kzalloc_node(pol->cpd_size, GFP_KERNEL, q->node);
- if (!cpd) {
- ret = -ENOMEM;
- goto out_free;
- }
- list_add_tail(&cpd->alloc_node, &cpds);
}
/*
@@ -1082,32 +1083,17 @@ int blkcg_activate_policy(struct request_queue *q,
spin_lock_irq(q->queue_lock);
list_for_each_entry(blkg, &q->blkg_list, q_node) {
- if (WARN_ON(list_empty(&pds)) ||
- WARN_ON(pol->cpd_size && list_empty(&cpds))) {
+ if (WARN_ON(list_empty(&pds))) {
/* umm... this shouldn't happen, just abort */
ret = -ENOMEM;
goto out_unlock;
}
- cpd = list_first_entry(&cpds, struct blkcg_policy_data,
- alloc_node);
- list_del_init(&cpd->alloc_node);
pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
list_del_init(&pd->alloc_node);
/* grab blkcg lock too while installing @pd on @blkg */
spin_lock(&blkg->blkcg->lock);
- if (!pol->cpd_size)
- goto no_cpd;
- if (!blkg->blkcg->pd[pol->plid]) {
- /* Per-policy per-blkcg data */
- blkg->blkcg->pd[pol->plid] = cpd;
- cpd->plid = pol->plid;
- pol->cpd_init_fn(blkg->blkcg);
- } else { /* must free it as it has already been extracted */
- kfree(cpd);
- }
-no_cpd:
blkg->pd[pol->plid] = pd;
pd->blkg = blkg;
pd->plid = pol->plid;
@@ -1124,8 +1110,6 @@ out_free:
blk_queue_bypass_end(q);
list_for_each_entry_safe(pd, nd, &pds, alloc_node)
kfree(pd);
- list_for_each_entry_safe(cpd, cnd, &cpds, alloc_node)
- kfree(cpd);
return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
@@ -1162,8 +1146,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
kfree(blkg->pd[pol->plid]);
blkg->pd[pol->plid] = NULL;
- kfree(blkg->blkcg->pd[pol->plid]);
- blkg->blkcg->pd[pol->plid] = NULL;
spin_unlock(&blkg->blkcg->lock);
}
@@ -1182,11 +1164,13 @@ EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
*/
int blkcg_policy_register(struct blkcg_policy *pol)
{
+ struct blkcg *blkcg;
int i, ret;
if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
return -EINVAL;
+ mutex_lock(&blkcg_pol_register_mutex);
mutex_lock(&blkcg_pol_mutex);
/* find an empty slot */
@@ -1195,19 +1179,49 @@ int blkcg_policy_register(struct blkcg_policy *pol)
if (!blkcg_policy[i])
break;
if (i >= BLKCG_MAX_POLS)
- goto out_unlock;
+ goto err_unlock;
- /* register and update blkgs */
+ /* register @pol */
pol->plid = i;
- blkcg_policy[i] = pol;
+ blkcg_policy[pol->plid] = pol;
+
+ /* allocate and install cpd's */
+ if (pol->cpd_size) {
+ list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
+ struct blkcg_policy_data *cpd;
+
+ cpd = kzalloc(pol->cpd_size, GFP_KERNEL);
+ if (!cpd) {
+ mutex_unlock(&blkcg_pol_mutex);
+ goto err_free_cpds;
+ }
+
+ blkcg->pd[pol->plid] = cpd;
+ cpd->plid = pol->plid;
+ pol->cpd_init_fn(blkcg);
+ }
+ }
+
+ mutex_unlock(&blkcg_pol_mutex);
/* everything is in place, add intf files for the new policy */
if (pol->cftypes)
WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys,
pol->cftypes));
- ret = 0;
-out_unlock:
+ mutex_unlock(&blkcg_pol_register_mutex);
+ return 0;
+
+err_free_cpds:
+ if (pol->cpd_size) {
+ list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
+ kfree(blkcg->pd[pol->plid]);
+ blkcg->pd[pol->plid] = NULL;
+ }
+ }
+ blkcg_policy[pol->plid] = NULL;
+err_unlock:
mutex_unlock(&blkcg_pol_mutex);
+ mutex_unlock(&blkcg_pol_register_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
@@ -1220,7 +1234,9 @@ EXPORT_SYMBOL_GPL(blkcg_policy_register);
*/
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
- mutex_lock(&blkcg_pol_mutex);
+ struct blkcg *blkcg;
+
+ mutex_lock(&blkcg_pol_register_mutex);
if (WARN_ON(blkcg_policy[pol->plid] != pol))
goto out_unlock;
@@ -1229,9 +1245,19 @@ void blkcg_policy_unregister(struct blkcg_policy *pol)
if (pol->cftypes)
cgroup_rm_cftypes(pol->cftypes);
- /* unregister and update blkgs */
+ /* remove cpds and unregister */
+ mutex_lock(&blkcg_pol_mutex);
+
+ if (pol->cpd_size) {
+ list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
+ kfree(blkcg->pd[pol->plid]);
+ blkcg->pd[pol->plid] = NULL;
+ }
+ }
blkcg_policy[pol->plid] = NULL;
-out_unlock:
+
mutex_unlock(&blkcg_pol_mutex);
+out_unlock:
+ mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
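
The new blkcg_pol_register_mutex introduced above nests strictly outside blkcg_pol_mutex, so cgroup-file registration can happen without blkcg_pol_mutex held while every path still acquires the two locks in the same order. A generic pthreads sketch of that "outer lock always taken first" discipline (the names and printf bodies are illustrative, not the kernel API):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t register_mutex = PTHREAD_MUTEX_INITIALIZER; /* outer */
    static pthread_mutex_t policy_mutex   = PTHREAD_MUTEX_INITIALIZER; /* inner */
    static int policies;

    /* Long operation: hold the outer lock for the whole registration,
     * but drop the inner lock before doing the "file" work. */
    static void register_policy(void)
    {
        pthread_mutex_lock(&register_mutex);

        pthread_mutex_lock(&policy_mutex);
        policies++;                      /* update the shared policy table */
        pthread_mutex_unlock(&policy_mutex);

        printf("add interface files without policy_mutex held\n");

        pthread_mutex_unlock(&register_mutex);
    }

    /* Short operation: only needs the inner lock, never the outer one. */
    static void reset_stats(void)
    {
        pthread_mutex_lock(&policy_mutex);
        printf("reset stats, %d policies\n", policies);
        pthread_mutex_unlock(&policy_mutex);
    }

    int main(void)
    {
        register_policy();
        reset_stats();
        return 0;
    }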
diff --git a/block/blk-core.c b/block/blk-core.c
index 82819e68f58b..627ed0c593fb 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3370,7 +3370,7 @@ EXPORT_SYMBOL(blk_post_runtime_resume);
int __init blk_dev_init(void)
{
BUILD_BUG_ON(__REQ_NR_BITS > 8 *
- sizeof(((struct request *)0)->cmd_flags));
+ FIELD_SIZEOF(struct request, cmd_flags));
/* used for unplugging and affects IO latency/throughput - HIGHPRI */
kblockd_workqueue = alloc_workqueue("kblockd",
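
FIELD_SIZEOF(), used in the blk-core.c hunk above, is just a readable spelling of the "sizeof a member through a null pointer" idiom that the old line wrote by hand (later kernels renamed the helper sizeof_field()). A standalone illustration with a hypothetical struct; the 32 in the static assert is an arbitrary stand-in for __REQ_NR_BITS:

    #include <stdio.h>

    /* Same definition the kernel uses for FIELD_SIZEOF()/sizeof_field(). */
    #define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

    struct request_like {            /* hypothetical stand-in for struct request */
        unsigned long long cmd_flags;
        int tag;
    };

    int main(void)
    {
        /* Equivalent expressions; the macro just names the idiom. */
        printf("by hand: %zu\n", sizeof(((struct request_like *)0)->cmd_flags));
        printf("macro:   %zu\n", FIELD_SIZEOF(struct request_like, cmd_flags));

        /* Compile-time check in the spirit of the BUILD_BUG_ON() above. */
        _Static_assert(8 * FIELD_SIZEOF(struct request_like, cmd_flags) >= 32,
                       "cmd_flags too small for the request flag bits");
        return 0;
    }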
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f53779692c77..7d842db59699 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1998,7 +1998,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
goto err_hctxs;
setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
- blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30000);
+ blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
q->nr_queues = nr_cpu_ids;
q->nr_hw_queues = set->nr_hw_queues;
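
blk_queue_rq_timeout() takes jiffies, so the literal 30000 only meant 30 seconds on a HZ=1000 kernel; 30 * HZ expresses the intended 30 seconds for any tick rate. A quick arithmetic check for a few common HZ values (pure arithmetic, no kernel headers):

    #include <stdio.h>

    int main(void)
    {
        int hz_values[] = { 100, 250, 300, 1000 };

        for (unsigned i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
            int hz = hz_values[i];
            /* What the old literal 30000 jiffies really meant, in seconds. */
            double old_secs = 30000.0 / hz;
            /* What 30 * HZ means: always 30 seconds' worth of jiffies. */
            int new_jiffies = 30 * hz;

            printf("HZ=%4d: 30000 jiffies = %6.1f s, 30*HZ = %5d jiffies = 30 s\n",
                   hz, old_secs, new_jiffies);
        }
        return 0;
    }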
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 12600bfffca9..e0057d035200 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
* Description:
* Enables a low level driver to set a hard upper limit,
* max_hw_sectors, on the size of requests. max_hw_sectors is set by
- * the device driver based upon the combined capabilities of I/O
- * controller and storage device.
+ * the device driver based upon the capabilities of the I/O
+ * controller.
*
* max_sectors is a soft limit imposed by the block layer for
* filesystem type requests. This value can be overridden on a
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index a3da6770bc9e..b8efe36ce114 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -393,8 +393,6 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
struct scatterlist *cipher = areq_ctx->cipher;
struct scatterlist *hsg = areq_ctx->hsg;
struct scatterlist *tsg = areq_ctx->tsg;
- struct scatterlist *assoc1;
- struct scatterlist *assoc2;
unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
unsigned int cryptlen = req->cryptlen;
struct page *dstp;
@@ -412,27 +410,19 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
cryptlen += ivsize;
}
- if (sg_is_last(assoc))
- return -EINVAL;
-
- assoc1 = assoc + 1;
- if (sg_is_last(assoc1))
- return -EINVAL;
-
- assoc2 = assoc + 2;
- if (!sg_is_last(assoc2))
+ if (assoc->length < 12)
return -EINVAL;
sg_init_table(hsg, 2);
- sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
- sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+ sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+ sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
sg_init_table(tsg, 1);
- sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+ sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
areq_ctx->cryptlen = cryptlen;
- areq_ctx->headlen = assoc->length + assoc2->length;
- areq_ctx->trailen = assoc1->length;
+ areq_ctx->headlen = 8;
+ areq_ctx->trailen = 4;
areq_ctx->sg = dst;
areq_ctx->complete = authenc_esn_geniv_ahash_done;
@@ -563,8 +553,6 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
struct scatterlist *cipher = areq_ctx->cipher;
struct scatterlist *hsg = areq_ctx->hsg;
struct scatterlist *tsg = areq_ctx->tsg;
- struct scatterlist *assoc1;
- struct scatterlist *assoc2;
unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
struct page *srcp;
u8 *vsrc;
@@ -580,27 +568,19 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
cryptlen += ivsize;
}
- if (sg_is_last(assoc))
- return -EINVAL;
-
- assoc1 = assoc + 1;
- if (sg_is_last(assoc1))
- return -EINVAL;
-
- assoc2 = assoc + 2;
- if (!sg_is_last(assoc2))
+ if (assoc->length < 12)
return -EINVAL;
sg_init_table(hsg, 2);
- sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
- sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+ sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+ sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
sg_init_table(tsg, 1);
- sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+ sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
areq_ctx->cryptlen = cryptlen;
- areq_ctx->headlen = assoc->length + assoc2->length;
- areq_ctx->trailen = assoc1->length;
+ areq_ctx->headlen = 8;
+ areq_ctx->trailen = 4;
areq_ctx->sg = src;
areq_ctx->complete = authenc_esn_verify_ahash_done;
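
Both authencesn.c hunks stop walking a three-entry scatterlist and instead slice one 12-byte associated-data buffer at fixed offsets: bytes 0-3 and 8-11 go into the "head" list (the 8 bytes hashed before the payload) and bytes 4-7 into the "trail" list (the 4 bytes hashed after it), matching the headlen/trailen values set just below. A plain-C sketch of that slicing; the interpretation of the three fields (SPI, sequence-high, sequence-low) is an assumption for illustration, not taken from this diff:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Hypothetical 12-byte ESN associated data: SPI | seq_hi | seq_lo. */
        unsigned char assoc[12] = {
            0x00, 0x00, 0x12, 0x34,   /* bytes 0-3:  SPI (assumed)    */
            0x00, 0x00, 0x00, 0x01,   /* bytes 4-7:  seq_hi (assumed) */
            0xde, 0xad, 0xbe, 0xef,   /* bytes 8-11: seq_lo (assumed) */
        };

        unsigned char head[8], trail[4];

        /* Mirrors the sg_set_page() calls: head = bytes 0-3 plus 8-11. */
        memcpy(head,     assoc,     4);
        memcpy(head + 4, assoc + 8, 4);
        /* trail = bytes 4-7, hashed after the payload. */
        memcpy(trail,    assoc + 4, 4);

        printf("headlen=%zu trailen=%zu\n", sizeof(head), sizeof(trail));
        return 0;
    }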
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 717afcdb5f4a..88dbbb115285 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -231,7 +231,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
dev_warn(&device->dev, "Failed to change power state to %s\n",
acpi_power_state_string(state));
} else {
- device->power.state = state;
+ device->power.state = target_state;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Device [%s] transitioned to %s\n",
device->pnp.bus_id,
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 8244f013f210..f1c966e05078 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -193,6 +193,7 @@ static bool acpi_decode_space(struct resource_win *win,
u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16;
bool wp = addr->info.mem.write_protect;
u64 len = attr->address_length;
+ u64 start, end, offset = 0;
struct resource *res = &win->res;
/*
@@ -204,9 +205,6 @@ static bool acpi_decode_space(struct resource_win *win,
pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n",
addr->min_address_fixed, addr->max_address_fixed, len);
- res->start = attr->minimum;
- res->end = attr->maximum;
-
/*
* For bridges that translate addresses across the bridge,
* translation_offset is the offset that must be added to the
@@ -214,12 +212,22 @@ static bool acpi_decode_space(struct resource_win *win,
* primary side. Non-bridge devices must list 0 for all Address
* Translation offset bits.
*/
- if (addr->producer_consumer == ACPI_PRODUCER) {
- res->start += attr->translation_offset;
- res->end += attr->translation_offset;
- } else if (attr->translation_offset) {
+ if (addr->producer_consumer == ACPI_PRODUCER)
+ offset = attr->translation_offset;
+ else if (attr->translation_offset)
pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n",
attr->translation_offset);
+ start = attr->minimum + offset;
+ end = attr->maximum + offset;
+
+ win->offset = offset;
+ res->start = start;
+ res->end = end;
+ if (sizeof(resource_size_t) < sizeof(u64) &&
+ (offset != win->offset || start != res->start || end != res->end)) {
+ pr_warn("acpi resource window ([%#llx-%#llx] ignored, not CPU addressable)\n",
+ attr->minimum, attr->maximum);
+ return false;
}
switch (addr->resource_type) {
@@ -236,8 +244,6 @@ static bool acpi_decode_space(struct resource_win *win,
return false;
}
- win->offset = attr->translation_offset;
-
if (addr->producer_consumer == ACPI_PRODUCER)
res->flags |= IORESOURCE_WINDOW;
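
The new check in acpi_decode_space() relies on a round-trip test: the 64-bit values are stored into resource_size_t fields and read back, and any mismatch means the window does not fit in the CPU's resource type (for example a 32-bit resource_size_t). The same trick in isolation, with a deliberately narrow 32-bit type standing in for resource_size_t:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t start = 0x1ffff0000ULL;   /* hypothetical window above 4 GiB */
        uint32_t narrow = start;           /* plays the role of resource_size_t */

        if ((uint64_t)narrow != start)
            printf("window %#llx not CPU addressable, ignore it\n",
                   (unsigned long long)start);
        else
            printf("window fits\n");
        return 0;
    }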
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 815f75ef2411..2922f1f252d5 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include <acpi/video.h>
ACPI_MODULE_NAME("video");
@@ -41,6 +42,7 @@ void acpi_video_unregister_backlight(void);
static bool backlight_notifier_registered;
static struct notifier_block backlight_nb;
+static struct work_struct backlight_notify_work;
static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef;
@@ -262,6 +264,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
{ },
};
+/* This uses a workqueue to avoid various lock ordering issues */
+static void acpi_video_backlight_notify_work(struct work_struct *work)
+{
+ if (acpi_video_get_backlight_type() != acpi_backlight_video)
+ acpi_video_unregister_backlight();
+}
+
static int acpi_video_backlight_notify(struct notifier_block *nb,
unsigned long val, void *bd)
{
@@ -269,9 +278,8 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,
/* A raw bl registering may change video -> native */
if (backlight->props.type == BACKLIGHT_RAW &&
- val == BACKLIGHT_REGISTERED &&
- acpi_video_get_backlight_type() != acpi_backlight_video)
- acpi_video_unregister_backlight();
+ val == BACKLIGHT_REGISTERED)
+ schedule_work(&backlight_notify_work);
return NOTIFY_OK;
}
@@ -304,6 +312,8 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, find_video, NULL,
&video_caps, NULL);
+ INIT_WORK(&backlight_notify_work,
+ acpi_video_backlight_notify_work);
backlight_nb.notifier_call = acpi_video_backlight_notify;
backlight_nb.priority = 0;
if (backlight_register_notifier(&backlight_nb) == 0)
diff --git a/drivers/ata/ahci_brcmstb.c b/drivers/ata/ahci_brcmstb.c
index ce1e3a885981..14b7305d2ba0 100644
--- a/drivers/ata/ahci_brcmstb.c
+++ b/drivers/ata/ahci_brcmstb.c
@@ -92,7 +92,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
* Other architectures (e.g., ARM) either do not support big endian, or
* else leave I/O in little endian mode.
*/
- if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
return __raw_readl(addr);
else
return readl_relaxed(addr);
@@ -101,7 +101,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
static inline void brcm_sata_writereg(u32 val, void __iomem *addr)
{
/* See brcm_sata_readreg() comments */
- if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
__raw_writel(val, addr);
else
writel_relaxed(val, addr);
@@ -209,6 +209,7 @@ static void brcm_sata_init(struct brcm_ahci_priv *priv)
priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
}
+#ifdef CONFIG_PM_SLEEP
static int brcm_ahci_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
@@ -231,6 +232,7 @@ static int brcm_ahci_resume(struct device *dev)
brcm_sata_phys_enable(priv);
return ahci_platform_resume(dev);
}
+#endif
static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
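
IS_ENABLED() only yields 1 for macros that are defined to exactly 1 (or to a *_MODULE counterpart), which is how Kconfig emits CONFIG_* symbols; __BIG_ENDIAN is defined to a byte-order constant such as 4321 rather than to 1, so the old test in ahci_brcmstb.c never evaluated true even on the big-endian builds it was meant to catch. A simplified userspace copy of the kernel's macro trick showing the difference (the DEMO_* names are made up):

    #include <stdio.h>

    /* Simplified version of the kernel's IS_ENABLED() machinery. */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define IS_ENABLED(option) ___is_defined(option)

    #define DEMO_CONFIG_CPU_BIG_ENDIAN 1   /* Kconfig-style: defined to 1 */
    #define DEMO_BIG_ENDIAN 4321           /* byte-order constant, like __BIG_ENDIAN */
    /* DEMO_CONFIG_MISSING is intentionally not defined at all. */

    int main(void)
    {
        printf("defined to 1:    %d\n", IS_ENABLED(DEMO_CONFIG_CPU_BIG_ENDIAN)); /* 1 */
        printf("defined to 4321: %d\n", IS_ENABLED(DEMO_BIG_ENDIAN));            /* 0 */
        printf("not defined:     %d\n", IS_ENABLED(DEMO_CONFIG_MISSING));        /* 0 */
        return 0;
    }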
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index e83fc3d0da9c..19bcb80b2031 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -694,11 +694,11 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
* RETURNS:
* Block address read from @tf.
*/
-u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
+u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
u64 block = 0;
- if (!dev || tf->flags & ATA_TFLAG_LBA) {
+ if (tf->flags & ATA_TFLAG_LBA) {
if (tf->flags & ATA_TFLAG_LBA48) {
block |= (u64)tf->hob_lbah << 40;
block |= (u64)tf->hob_lbam << 32;
@@ -2147,24 +2147,6 @@ static int ata_dev_config_ncq(struct ata_device *dev,
return 0;
}
-static void ata_dev_config_sense_reporting(struct ata_device *dev)
-{
- unsigned int err_mask;
-
- if (!ata_id_has_sense_reporting(dev->id))
- return;
-
- if (ata_id_sense_reporting_enabled(dev->id))
- return;
-
- err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
- if (err_mask) {
- ata_dev_dbg(dev,
- "failed to enable Sense Data Reporting, Emask 0x%x\n",
- err_mask);
- }
-}
-
/**
* ata_dev_configure - Configure the specified ATA/ATAPI device
* @dev: Target device to configure
@@ -2387,7 +2369,7 @@ int ata_dev_configure(struct ata_device *dev)
dev->devslp_timing[i] = sata_setting[j];
}
}
- ata_dev_config_sense_reporting(dev);
+
dev->cdb_len = 16;
}
@@ -2478,6 +2460,10 @@ int ata_dev_configure(struct ata_device *dev)
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
dev->max_sectors);
+ if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
+ dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
+ dev->max_sectors);
+
if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
dev->max_sectors = ATA_MAX_SECTORS_LBA48;
@@ -4146,6 +4132,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
{ "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
+ /*
+ * Causes silent data corruption with higher max sects.
+ * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
+ */
+ { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
+
/* Devices we expect to fail diagnostics */
/* Devices where NCQ should be avoided */
@@ -4174,9 +4166,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
- /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
+ /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
{ "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
{ "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+ { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
@@ -4229,7 +4222,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
- { "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+ { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4238,6 +4231,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ /* devices that don't properly handle TRIM commands */
+ { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
+
/*
* As defined, the DRAT (Deterministic Read After Trim) and RZAT
* (Return Zero After Trim) flags in the ATA Command Set are
@@ -4501,7 +4497,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
else /* In the ancient relic department - skip all of this */
return 0;
- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ /* On some disks, this command causes spin-up, so we need a longer timeout */
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
DPRINTK("EXIT, err_mask=%x\n", err_mask);
return err_mask;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 7465031a893c..cb0508af1459 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1592,8 +1592,6 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
tf->hob_lbah = buf[10];
tf->nsect = buf[12];
tf->hob_nsect = buf[13];
- if (ata_id_has_ncq_autosense(dev->id))
- tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
return 0;
}
@@ -1630,70 +1628,6 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
}
/**
- * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
- * @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
- * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
- * @dfl_sense_key: default sense key to use
- *
- * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
- * SENSE. This function is EH helper.
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- *
- * RETURNS:
- * encoded sense data on success, 0 on failure or if sense data
- * is not available.
- */
-static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
- struct scsi_cmnd *cmd)
-{
- struct ata_device *dev = qc->dev;
- struct ata_taskfile tf;
- unsigned int err_mask;
-
- if (!cmd)
- return 0;
-
- DPRINTK("ATA request sense\n");
- ata_dev_warn(dev, "request sense\n");
- if (!ata_id_sense_reporting_enabled(dev->id)) {
- ata_dev_warn(qc->dev, "sense data reporting disabled\n");
- return 0;
- }
- ata_tf_init(dev, &tf);
-
- tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
- tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
- tf.command = ATA_CMD_REQ_SENSE_DATA;
- tf.protocol = ATA_PROT_NODATA;
-
- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
- /*
- * ACS-4 states:
- * The device may set the SENSE DATA AVAILABLE bit to one in the
- * STATUS field and clear the ERROR bit to zero in the STATUS field
- * to indicate that the command returned completion without an error
- * and the sense data described in table 306 is available.
- *
- * IOW the 'ATA_SENSE' bit might not be set even though valid
- * sense data is available.
- * So check for both.
- */
- if ((tf.command & ATA_SENSE) ||
- tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
- ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
- qc->flags |= ATA_QCFLAG_SENSE_VALID;
- ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
- tf.lbah, tf.lbam, tf.lbal);
- } else {
- ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
- tf.command, err_mask);
- }
- return err_mask;
-}
-
-/**
* atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
* @dev: device to perform REQUEST_SENSE to
* @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
@@ -1855,19 +1789,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
memcpy(&qc->result_tf, &tf, sizeof(tf));
qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
- if (qc->result_tf.auxiliary) {
- char sense_key, asc, ascq;
-
- sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
- asc = (qc->result_tf.auxiliary >> 8) & 0xff;
- ascq = qc->result_tf.auxiliary & 0xff;
- ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
- sense_key, asc, ascq);
- ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
- ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
- qc->flags |= ATA_QCFLAG_SENSE_VALID;
- }
-
ehc->i.err_mask &= ~AC_ERR_DEV;
}
@@ -1897,27 +1818,6 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
return ATA_EH_RESET;
}
- /*
- * Sense data reporting does not work if the
- * device fault bit is set.
- */
- if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
- !(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
- if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
- tmp = ata_eh_request_sense(qc, qc->scsicmd);
- if (tmp)
- qc->err_mask |= tmp;
- else
- ata_scsi_set_sense_information(qc->scsicmd, tf);
- } else {
- ata_dev_warn(qc->dev, "sense data available but port frozen\n");
- }
- }
-
- /* Set by NCQ autosense or request sense above */
- if (qc->flags & ATA_QCFLAG_SENSE_VALID)
- return 0;
-
if (stat & (ATA_ERR | ATA_DF))
qc->err_mask |= AC_ERR_DEV;
else
@@ -2661,15 +2561,14 @@ static void ata_eh_link_report(struct ata_link *link)
#ifdef CONFIG_ATA_VERBOSE_ERROR
if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
- ATA_SENSE | ATA_ERR)) {
+ ATA_ERR)) {
if (res->command & ATA_BUSY)
ata_dev_err(qc->dev, "status: { Busy }\n");
else
- ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
+ ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
res->command & ATA_DRDY ? "DRDY " : "",
res->command & ATA_DF ? "DF " : "",
res->command & ATA_DRQ ? "DRQ " : "",
- res->command & ATA_SENSE ? "SENSE " : "",
res->command & ATA_ERR ? "ERR " : "");
}
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 7ccc084bf1df..85aa76116a30 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
ATA_LFLAG_NO_SRST |
ATA_LFLAG_ASSUME_ATA;
}
+ } else if (vendor == 0x11ab && devid == 0x4140) {
+ /* Marvell 4140 quirks */
+ ata_for_each_link(link, ap, EDGE) {
+ /* port 4 is for SEMB device and it doesn't like SRST */
+ if (link->pmp == 4)
+ link->flags |= ATA_LFLAG_DISABLED;
+ }
}
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 3131adcc1f87..0d7f0da3a269 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -270,28 +270,13 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
ata_scsi_park_show, ata_scsi_park_store);
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
-void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
{
- if (!cmd)
- return;
-
cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
}
-void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf)
-{
- u64 information;
-
- if (!cmd)
- return;
-
- information = ata_tf_read_block(tf, NULL);
- scsi_set_sense_information(cmd->sense_buffer, information);
-}
-
static ssize_t
ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -1792,9 +1777,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
((cdb[2] & 0x20) || need_sense)) {
ata_gen_passthru_sense(qc);
} else {
- if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
- cmd->result = SAM_STAT_CHECK_CONDITION;
- } else if (!need_sense) {
+ if (!need_sense) {
cmd->result = SAM_STAT_GOOD;
} else {
/* TODO: decide which descriptor format to use
@@ -2568,7 +2551,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
rbuf[14] = (lowest_aligned >> 8) & 0x3f;
rbuf[15] = lowest_aligned;
- if (ata_id_has_trim(args->id)) {
+ if (ata_id_has_trim(args->id) &&
+ !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
rbuf[14] |= 0x80; /* LBPME */
if (ata_id_has_zero_after_trim(args->id) &&
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index d6c37bcd416d..e2d94972962d 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -569,6 +569,8 @@ show_ata_dev_trim(struct device *dev,
if (!ata_id_has_trim(ata_dev->id))
mode = "unsupported";
+ else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM)
+ mode = "forced_unsupported";
else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM)
mode = "forced_unqueued";
else if (ata_fpdma_dsm_supported(ata_dev))
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index a998a175f9f1..f840ca18a7c0 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -67,8 +67,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
unsigned int tag);
-extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
- struct ata_device *dev);
+extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
extern unsigned ata_exec_internal(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, void *buf, unsigned int buflen,
@@ -138,9 +137,6 @@ extern int ata_scsi_add_hosts(struct ata_host *host,
struct scsi_host_template *sht);
extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
extern int ata_scsi_offline_dev(struct ata_device *dev);
-extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
-extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf);
extern void ata_scsi_media_change_notify(struct ata_device *dev);
extern void ata_scsi_hotplug(struct work_struct *work);
extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index a9b0c820f2eb..5d9ee99c2148 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -4,7 +4,7 @@
* Arasan Compact Flash host controller source file
*
* Copyright (C) 2011 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -968,7 +968,7 @@ static struct platform_driver arasan_cf_driver = {
module_platform_driver(arasan_cf_driver);
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 3a18a8a719b4..fab504fd9cfd 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -1238,8 +1238,12 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
readl(mmio + PDC_SDRAM_CONTROL);
/* Turn on for ECC */
- pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
- PDC_DIMM_SPD_TYPE, &spd0);
+ if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+ PDC_DIMM_SPD_TYPE, &spd0)) {
+ pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+ PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+ return 1;
+ }
if (spd0 == 0x02) {
data |= (0x01 << 16);
writel(data, mmio + PDC_SDRAM_CONTROL);
@@ -1380,8 +1384,12 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
/* ECC initialization. */
- pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
- PDC_DIMM_SPD_TYPE, &spd0);
+ if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+ PDC_DIMM_SPD_TYPE, &spd0)) {
+ pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+ PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+ return 1;
+ }
if (spd0 == 0x02) {
void *buf;
VPRINTK("Start ECC initialization\n");
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 81751a49d8bf..56486d92c4e7 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
if (!blk)
return -ENOMEM;
- present = krealloc(rbnode->cache_present,
- BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
- if (!present) {
- kfree(blk);
- return -ENOMEM;
+ if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
+ present = krealloc(rbnode->cache_present,
+ BITS_TO_LONGS(blklen) * sizeof(*present),
+ GFP_KERNEL);
+ if (!present) {
+ kfree(blk);
+ return -ENOMEM;
+ }
+
+ memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
+ (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
+ * sizeof(*present));
+ } else {
+ present = rbnode->cache_present;
}
/* insert the register value in the correct place in the rbnode block */
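
The regcache-rbtree fix above only reallocates the "present" bitmap when the block actually gains register slots, and then explicitly zeroes the newly added longs, since krealloc() leaves the grown tail uninitialized. The same grow-and-clear pattern with plain realloc() (sizes are arbitrary):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        size_t old_longs = 2, new_longs = 5;
        unsigned long *present = calloc(old_longs, sizeof(*present));
        if (!present)
            return 1;
        present[0] = 0x3;                       /* some registers already cached */

        if (new_longs > old_longs) {
            unsigned long *tmp = realloc(present, new_longs * sizeof(*present));
            if (!tmp) {
                free(present);
                return 1;                       /* keep the old block on failure */
            }
            present = tmp;
            /* realloc(), like krealloc(), does not zero the new tail. */
            memset(present + old_longs, 0,
                   (new_longs - old_longs) * sizeof(*present));
        }

        for (size_t i = 0; i < new_longs; i++)
            printf("present[%zu] = %#lx\n", i, present[i]);
        free(present);
        return 0;
    }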
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 69de41a87b74..3177b245d2bd 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -240,19 +240,19 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
while ((entry = llist_del_all(&cq->list)) != NULL) {
entry = llist_reverse_order(entry);
do {
+ struct request_queue *q = NULL;
+
cmd = container_of(entry, struct nullb_cmd, ll_list);
entry = entry->next;
+ if (cmd->rq)
+ q = cmd->rq->q;
end_cmd(cmd);
- if (cmd->rq) {
- struct request_queue *q = cmd->rq->q;
-
- if (!q->mq_ops && blk_queue_stopped(q)) {
- spin_lock(q->queue_lock);
- if (blk_queue_stopped(q))
- blk_start_queue(q);
- spin_unlock(q->queue_lock);
- }
+ if (q && !q->mq_ops && blk_queue_stopped(q)) {
+ spin_lock(q->queue_lock);
+ if (blk_queue_stopped(q))
+ blk_start_queue(q);
+ spin_unlock(q->queue_lock);
}
} while (entry);
}
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index d1d6141920d3..7920c2741b47 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -2108,8 +2108,17 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
goto out_free_disk;
add_disk(ns->disk);
- if (ns->ms)
- revalidate_disk(ns->disk);
+ if (ns->ms) {
+ struct block_device *bd = bdget_disk(ns->disk, 0);
+ if (!bd)
+ return;
+ if (blkdev_get(bd, FMODE_READ, NULL)) {
+ bdput(bd);
+ return;
+ }
+ blkdev_reread_part(bd);
+ blkdev_put(bd, FMODE_READ);
+ }
return;
out_free_disk:
kfree(disk);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d94529d5c8e9..bc67a93aa4f4 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -523,6 +523,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
# define rbd_assert(expr) ((void) 0)
#endif /* !RBD_DEBUG */
+static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
@@ -1818,6 +1819,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
obj_request_done_set(obj_request);
}
+static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
+{
+ dout("%s: obj %p\n", __func__, obj_request);
+
+ if (obj_request_img_data_test(obj_request))
+ rbd_osd_copyup_callback(obj_request);
+ else
+ obj_request_done_set(obj_request);
+}
+
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
struct ceph_msg *msg)
{
@@ -1866,6 +1877,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
rbd_osd_discard_callback(obj_request);
break;
case CEPH_OSD_OP_CALL:
+ rbd_osd_call_callback(obj_request);
+ break;
case CEPH_OSD_OP_NOTIFY_ACK:
case CEPH_OSD_OP_WATCH:
rbd_osd_trivial_callback(obj_request);
@@ -2530,13 +2543,15 @@ out_unwind:
}
static void
-rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
+rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request;
struct rbd_device *rbd_dev;
struct page **pages;
u32 page_count;
+ dout("%s: obj %p\n", __func__, obj_request);
+
rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
obj_request->type == OBJ_REQUEST_NODATA);
rbd_assert(obj_request_img_data_test(obj_request));
@@ -2563,9 +2578,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
if (!obj_request->result)
obj_request->xferred = obj_request->length;
- /* Finish up with the normal image object callback */
-
- rbd_img_obj_callback(obj_request);
+ obj_request_done_set(obj_request);
}
static void
@@ -2650,7 +2663,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
/* All set, send it off. */
- orig_request->callback = rbd_img_obj_copyup_callback;
osdc = &rbd_dev->rbd_client->client->osdc;
img_result = rbd_obj_request_submit(osdc, orig_request);
if (!img_result)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index ced96777b677..954c0029fb3b 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -369,8 +369,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
return;
}
- if (work_pending(&blkif->persistent_purge_work)) {
- pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
+ if (work_busy(&blkif->persistent_purge_work)) {
+ pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
return;
}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 6d89ed35d80c..7a8a73f1fc04 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -179,6 +179,7 @@ static DEFINE_SPINLOCK(minor_lock);
((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
static int blkfront_setup_indirect(struct blkfront_info *info);
+static int blkfront_gather_backend_features(struct blkfront_info *info);
static int get_id_from_freelist(struct blkfront_info *info)
{
@@ -1128,8 +1129,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
* Add the used indirect page back to the list of
* available pages for indirect grefs.
*/
- indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
- list_add(&indirect_page->lru, &info->indirect_pages);
+ if (!info->feature_persistent) {
+ indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
+ list_add(&indirect_page->lru, &info->indirect_pages);
+ }
s->indirect_grants[i]->gref = GRANT_INVALID_REF;
list_add_tail(&s->indirect_grants[i]->node, &info->grants);
}
@@ -1519,7 +1522,7 @@ static int blkif_recover(struct blkfront_info *info)
info->shadow_free = info->ring.req_prod_pvt;
info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
- rc = blkfront_setup_indirect(info);
+ rc = blkfront_gather_backend_features(info);
if (rc) {
kfree(copy);
return rc;
@@ -1720,20 +1723,13 @@ static void blkfront_setup_discard(struct blkfront_info *info)
static int blkfront_setup_indirect(struct blkfront_info *info)
{
- unsigned int indirect_segments, segs;
+ unsigned int segs;
int err, i;
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-max-indirect-segments", "%u", &indirect_segments,
- NULL);
- if (err) {
- info->max_indirect_segments = 0;
+ if (info->max_indirect_segments == 0)
segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
- } else {
- info->max_indirect_segments = min(indirect_segments,
- xen_blkif_max_segments);
+ else
segs = info->max_indirect_segments;
- }
err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info));
if (err)
@@ -1797,6 +1793,68 @@ out_of_memory:
}
/*
+ * Gather all backend feature-*
+ */
+static int blkfront_gather_backend_features(struct blkfront_info *info)
+{
+ int err;
+ int barrier, flush, discard, persistent;
+ unsigned int indirect_segments;
+
+ info->feature_flush = 0;
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-barrier", "%d", &barrier,
+ NULL);
+
+ /*
+ * If there's no "feature-barrier" defined, then it means
+ * we're dealing with a very old backend which writes
+ * synchronously; nothing to do.
+ *
+ * If there are barriers, then we use flush.
+ */
+ if (!err && barrier)
+ info->feature_flush = REQ_FLUSH | REQ_FUA;
+ /*
+ * And if there is "feature-flush-cache" use that above
+ * barriers.
+ */
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-flush-cache", "%d", &flush,
+ NULL);
+
+ if (!err && flush)
+ info->feature_flush = REQ_FLUSH;
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-discard", "%d", &discard,
+ NULL);
+
+ if (!err && discard)
+ blkfront_setup_discard(info);
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-persistent", "%u", &persistent,
+ NULL);
+ if (err)
+ info->feature_persistent = 0;
+ else
+ info->feature_persistent = persistent;
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-max-indirect-segments", "%u", &indirect_segments,
+ NULL);
+ if (err)
+ info->max_indirect_segments = 0;
+ else
+ info->max_indirect_segments = min(indirect_segments,
+ xen_blkif_max_segments);
+
+ return blkfront_setup_indirect(info);
+}
+
+/*
* Invoked when the backend is finally 'ready' (and has produced
* the details about the physical device - #sectors, size, etc).
*/
@@ -1807,7 +1865,6 @@ static void blkfront_connect(struct blkfront_info *info)
unsigned int physical_sector_size;
unsigned int binfo;
int err;
- int barrier, flush, discard, persistent;
switch (info->connected) {
case BLKIF_STATE_CONNECTED:
@@ -1864,48 +1921,7 @@ static void blkfront_connect(struct blkfront_info *info)
if (err != 1)
physical_sector_size = sector_size;
- info->feature_flush = 0;
-
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-barrier", "%d", &barrier,
- NULL);
-
- /*
- * If there's no "feature-barrier" defined, then it means
- * we're dealing with a very old backend which writes
- * synchronously; nothing to do.
- *
- * If there are barriers, then we use flush.
- */
- if (!err && barrier)
- info->feature_flush = REQ_FLUSH | REQ_FUA;
- /*
- * And if there is "feature-flush-cache" use that above
- * barriers.
- */
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-flush-cache", "%d", &flush,
- NULL);
-
- if (!err && flush)
- info->feature_flush = REQ_FLUSH;
-
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-discard", "%d", &discard,
- NULL);
-
- if (!err && discard)
- blkfront_setup_discard(info);
-
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-persistent", "%u", &persistent,
- NULL);
- if (err)
- info->feature_persistent = 0;
- else
- info->feature_persistent = persistent;
-
- err = blkfront_setup_indirect(info);
+ err = blkfront_gather_backend_features(info);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
info->xbdev->otherend);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index fb655e8d1e3b..763301c7828c 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -496,10 +496,9 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
kfree(meta);
}
-static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
+static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
{
size_t num_pages;
- char pool_name[8];
struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
if (!meta)
@@ -512,7 +511,6 @@ static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
goto out_error;
}
- snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
if (!meta->mem_pool) {
pr_err("Error creating memory pool\n");
@@ -1031,7 +1029,7 @@ static ssize_t disksize_store(struct device *dev,
return -EINVAL;
disksize = PAGE_ALIGN(disksize);
- meta = zram_meta_alloc(zram->disk->first_minor, disksize);
+ meta = zram_meta_alloc(zram->disk->disk_name, disksize);
if (!meta)
return -ENOMEM;
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 1e1a4323a71f..9ceb8ac68fdc 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -472,12 +472,11 @@ int btbcm_setup_apple(struct hci_dev *hdev)
/* Read Verbose Config Version Info */
skb = btbcm_read_verbose_config(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
- get_unaligned_le16(skb->data + 5));
- kfree_skb(skb);
+ if (!IS_ERR(skb)) {
+ BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
+ get_unaligned_le16(skb->data + 5));
+ kfree_skb(skb);
+ }
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index da8faf78536a..5643b65cee20 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused)
static void start_khwrngd(void)
{
hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
- if (hwrng_fill == ERR_PTR(-ENOMEM)) {
+ if (IS_ERR(hwrng_fill)) {
pr_err("hwrng_fill thread creation failed");
hwrng_fill = NULL;
}
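
The hw_random fix above matters because kthread_run() can fail with errors other than -ENOMEM; IS_ERR() catches any encoded error pointer, not just ERR_PTR(-ENOMEM). A userspace copy of the kernel's error-pointer encoding that shows why the broader check is the right one (MAX_ERRNO and the casts follow linux/err.h):

    #include <errno.h>
    #include <stdio.h>

    /* Userspace rendition of the kernel's ERR_PTR()/IS_ERR() helpers. */
    #define MAX_ERRNO 4095
    #define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); }

    int main(void)
    {
        void *oom  = ERR_PTR(-ENOMEM);   /* what the old check looked for */
        void *intr = ERR_PTR(-EINTR);    /* another possible kthread_run() failure */

        /* Comparing against ERR_PTR(-ENOMEM) misses the second case ... */
        printf("oom  == ERR_PTR(-ENOMEM): %d\n", oom == ERR_PTR(-ENOMEM));
        printf("intr == ERR_PTR(-ENOMEM): %d\n", intr == ERR_PTR(-ENOMEM));
        /* ... while IS_ERR() catches both and PTR_ERR() recovers the errno. */
        printf("IS_ERR(oom)=%d (%ld), IS_ERR(intr)=%d (%ld)\n",
               IS_ERR(oom), PTR_ERR(oom), IS_ERR(intr), PTR_ERR(intr));
        return 0;
    }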
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 283f00a7f036..1082d4bb016a 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -129,8 +129,9 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
device_initialize(&chip->dev);
- chip->cdev.owner = chip->pdev->driver->owner;
cdev_init(&chip->cdev, &tpm_fops);
+ chip->cdev.owner = chip->pdev->driver->owner;
+ chip->cdev.kobj.parent = &chip->dev.kobj;
return chip;
}
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 44f9d20c19ac..1267322595da 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -233,6 +233,14 @@ static int crb_acpi_add(struct acpi_device *device)
return -ENODEV;
}
+ /* At least some versions of AMI BIOS have a bug where the TPM2 table has
+ * a zero address for the control area, and in that case we must fail.
+ */
+ if (!buf->control_area_pa) {
+ dev_err(dev, "TPM2 ACPI table has a zero address for the control area\n");
+ return -EINVAL;
+ }
+
if (buf->hdr.length < sizeof(struct acpi_tpm2)) {
dev_err(dev, "TPM2 ACPI table has wrong size");
return -EINVAL;
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
index 4b93a1efb36d..ac03ba49e9d1 100644
--- a/drivers/clk/pxa/clk-pxa3xx.c
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -126,7 +126,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
-#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
div_hp, bit, is_lp, flags) \
PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp, \
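
The CKEN_AB() fix above swaps the two branches of the ternary: clock-enable bits 0-31 live in the CKENA register and bits 32 and up in CKENB, so a bit number greater than 31 must select &CKENB. A minimal userspace sketch of that bank-selection logic, using plain variables in place of the CKENA/CKENB registers:

#include <stdint.h>
#include <stdio.h>

/* Model of two 32-bit clock-enable banks: bits 0-31 in bank A, 32-63 in bank B. */
static uint32_t cken_a, cken_b;

/* Select the bank that holds a given clock-enable bit, mirroring the fixed
 * CKEN_AB() logic: bit numbers above 31 belong to the second bank. */
static uint32_t *cken_bank(int bit)
{
	return (bit > 31) ? &cken_b : &cken_a;
}

static void cken_set(int bit)
{
	*cken_bank(bit) |= 1u << (bit & 31);
}

int main(void)
{
	cken_set(12);	/* lands in bank A */
	cken_set(40);	/* lands in bank B */
	printf("CKENA=%08x CKENB=%08x\n", (unsigned)cken_a, (unsigned)cken_b);
	return 0;
}
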
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
index bdfb4421c643..f271c350ef94 100644
--- a/drivers/clk/spear/clk-aux-synth.c
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
index dffd4ce6c8b5..58d678b5b40a 100644
--- a/drivers/clk/spear/clk-frac-synth.c
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
index 1afc18c4effc..1a722e99e76e 100644
--- a/drivers/clk/spear/clk-gpt-synth.c
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
index 1b9b65bca51e..5ebddc528145 100644
--- a/drivers/clk/spear/clk-vco-pll.c
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk.c b/drivers/clk/spear/clk.c
index 628b6d5ed3d9..157fe099ea6a 100644
--- a/drivers/clk/spear/clk.c
+++ b/drivers/clk/spear/clk.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h
index 931737677dfa..9834944f08b1 100644
--- a/drivers/clk/spear/clk.h
+++ b/drivers/clk/spear/clk.h
@@ -2,7 +2,7 @@
* Clock framework definitions for SPEAr platform
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 4daa5977793a..222ce108b41a 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -4,7 +4,7 @@
* SPEAr1310 machine clock framework source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index 5a5c6648308d..973c9d3fbcf8 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -4,7 +4,7 @@
* SPEAr1340 machine clock framework source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index bb5f387774e2..404a55edd613 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -2,7 +2,7 @@
* SPEAr3xx machines clock framework source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index 4f649c9cb094..231061fa73a4 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -2,7 +2,7 @@
* SPEAr6xx machines clock framework source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index b8ff3c64cc45..c96de14036a0 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -661,6 +661,9 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
+ if (!ch->cs_enabled)
+ return;
+
sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
}
@@ -669,6 +672,9 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
+ if (!ch->cs_enabled)
+ return;
+
pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
sh_cmt_start(ch, FLAG_CLOCKSOURCE);
}
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 2d59038dec43..86c7eb66bdfb 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -462,6 +462,7 @@ void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
BUG_ON(!imxtm->base);
imxtm->type = type;
+ imxtm->irq = irq;
_mxc_timer_init(imxtm);
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b612411655f9..7a3c30c4336f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -169,6 +169,15 @@ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
+struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+
+ return policy && !policy_is_inactive(policy) ?
+ policy->freq_table : NULL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
+
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
u64 idle_time;
@@ -993,7 +1002,7 @@ static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
int ret = 0;
/* Some related CPUs might not be present (physically hotplugged) */
- for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+ for_each_cpu(j, policy->real_cpus) {
if (j == policy->kobj_cpu)
continue;
@@ -1010,7 +1019,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
unsigned int j;
/* Some related CPUs might not be present (physically hotplugged) */
- for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+ for_each_cpu(j, policy->real_cpus) {
if (j == policy->kobj_cpu)
continue;
@@ -1132,6 +1141,7 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
down_write(&policy->rwsem);
policy->cpu = cpu;
+ policy->governor = NULL;
up_write(&policy->rwsem);
}
@@ -1153,11 +1163,14 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
goto err_free_cpumask;
+ if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
+ goto err_free_rcpumask;
+
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
"cpufreq");
if (ret) {
pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
- goto err_free_rcpumask;
+ goto err_free_real_cpus;
}
INIT_LIST_HEAD(&policy->policy_list);
@@ -1174,6 +1187,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
return policy;
+err_free_real_cpus:
+ free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
free_cpumask_var(policy->related_cpus);
err_free_cpumask:
@@ -1224,6 +1239,7 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
cpufreq_policy_put_kobj(policy, notify);
+ free_cpumask_var(policy->real_cpus);
free_cpumask_var(policy->related_cpus);
free_cpumask_var(policy->cpus);
kfree(policy);
@@ -1248,14 +1264,17 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
pr_debug("adding CPU %u\n", cpu);
- /*
- * Only possible if 'cpu' wasn't physically present earlier and we are
- * here from subsys_interface add callback. A hotplug notifier will
- * follow and we will handle it like logical CPU hotplug then. For now,
- * just create the sysfs link.
- */
- if (cpu_is_offline(cpu))
- return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);
+ if (cpu_is_offline(cpu)) {
+ /*
+ * Only possible if we are here from the subsys_interface add
+ * callback. A hotplug notifier will follow and we will handle
+ * it as CPU online then. For now, just create the sysfs link,
+ * unless there is no policy or the link is already present.
+ */
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
+ ? add_cpu_dev_symlink(policy, cpu) : 0;
+ }
if (!down_read_trylock(&cpufreq_rwsem))
return 0;
@@ -1297,6 +1316,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
/* related cpus should at least have policy->cpus */
cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+ /* Remember which CPUs have been present at policy creation time. */
+ if (!recover_policy)
+ cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
+
/*
* affected cpus must always be the ones which are online. We aren't
* managing offline cpus here.
@@ -1410,8 +1433,7 @@ nomem_out:
return ret;
}
-static int __cpufreq_remove_dev_prepare(struct device *dev,
- struct subsys_interface *sif)
+static int __cpufreq_remove_dev_prepare(struct device *dev)
{
unsigned int cpu = dev->id;
int ret = 0;
@@ -1427,10 +1449,8 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
if (has_target()) {
ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
- if (ret) {
+ if (ret)
pr_err("%s: Failed to stop governor\n", __func__);
- return ret;
- }
}
down_write(&policy->rwsem);
@@ -1463,8 +1483,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
return ret;
}
-static int __cpufreq_remove_dev_finish(struct device *dev,
- struct subsys_interface *sif)
+static int __cpufreq_remove_dev_finish(struct device *dev)
{
unsigned int cpu = dev->id;
int ret;
@@ -1482,10 +1501,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
/* If cpu is last user of policy, free policy */
if (has_target()) {
ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
- if (ret) {
+ if (ret)
pr_err("%s: Failed to exit governor\n", __func__);
- return ret;
- }
}
/*
@@ -1496,10 +1513,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
- /* Free the policy only if the driver is getting removed. */
- if (sif)
- cpufreq_policy_free(policy, true);
-
return 0;
}
@@ -1511,42 +1524,41 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
unsigned int cpu = dev->id;
- int ret;
-
- /*
- * Only possible if 'cpu' is getting physically removed now. A hotplug
- * notifier should have already been called and we just need to remove
- * link or free policy here.
- */
- if (cpu_is_offline(cpu)) {
- struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
- struct cpumask mask;
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
- if (!policy)
- return 0;
+ if (!policy)
+ return 0;
- cpumask_copy(&mask, policy->related_cpus);
- cpumask_clear_cpu(cpu, &mask);
+ if (cpu_online(cpu)) {
+ __cpufreq_remove_dev_prepare(dev);
+ __cpufreq_remove_dev_finish(dev);
+ }
- /*
- * Free policy only if all policy->related_cpus are removed
- * physically.
- */
- if (cpumask_intersects(&mask, cpu_present_mask)) {
- remove_cpu_dev_symlink(policy, cpu);
- return 0;
- }
+ cpumask_clear_cpu(cpu, policy->real_cpus);
+ if (cpumask_empty(policy->real_cpus)) {
cpufreq_policy_free(policy, true);
return 0;
}
- ret = __cpufreq_remove_dev_prepare(dev, sif);
+ if (cpu != policy->kobj_cpu) {
+ remove_cpu_dev_symlink(policy, cpu);
+ } else {
+ /*
+ * The CPU owning the policy object is going away. Move it to
+ * another suitable CPU.
+ */
+ unsigned int new_cpu = cpumask_first(policy->real_cpus);
+ struct device *new_dev = get_cpu_device(new_cpu);
- if (!ret)
- ret = __cpufreq_remove_dev_finish(dev, sif);
+ dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu);
- return ret;
+ sysfs_remove_link(&new_dev->kobj, "cpufreq");
+ policy->kobj_cpu = new_cpu;
+ WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
+ }
+
+ return 0;
}
static void handle_update(struct work_struct *work)
@@ -2385,11 +2397,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
break;
case CPU_DOWN_PREPARE:
- __cpufreq_remove_dev_prepare(dev, NULL);
+ __cpufreq_remove_dev_prepare(dev);
break;
case CPU_POST_DEAD:
- __cpufreq_remove_dev_finish(dev, NULL);
+ __cpufreq_remove_dev_finish(dev);
break;
case CPU_DOWN_FAILED:
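
The reworked cpufreq_remove_dev() above keys its teardown off the new real_cpus mask: the departing CPU is cleared from the mask, the policy is freed once the mask becomes empty, and otherwise either the CPU's sysfs symlink is removed or, when the departing CPU owns the policy kobject, ownership is moved to another CPU still present in the mask. A minimal userspace sketch of that bookkeeping, with a plain bitmask and printf() standing in for the sysfs and kobject operations:

#include <stdint.h>
#include <stdio.h>

/* Toy model of a cpufreq policy: which CPUs still reference it (real_cpus)
 * and which CPU currently owns its sysfs kobject (kobj_cpu). */
struct policy {
	uint32_t real_cpus;	/* one bit per CPU */
	int kobj_cpu;
};

static int first_cpu(uint32_t mask)
{
	for (int cpu = 0; cpu < 32; cpu++)
		if (mask & (1u << cpu))
			return cpu;
	return -1;
}

/* Mirrors the flow of the rewritten cpufreq_remove_dev(). */
static void remove_cpu(struct policy *p, int cpu)
{
	p->real_cpus &= ~(1u << cpu);

	if (!p->real_cpus) {
		printf("CPU%d: last user, freeing policy\n", cpu);
		return;
	}

	if (cpu != p->kobj_cpu) {
		printf("CPU%d: removing its cpufreq symlink\n", cpu);
	} else {
		/* The owner is going away: hand the kobject to another CPU
		 * that is still present in real_cpus. */
		p->kobj_cpu = first_cpu(p->real_cpus);
		printf("CPU%d: moving policy kobject to CPU%d\n",
		       cpu, p->kobj_cpu);
	}
}

int main(void)
{
	struct policy p = { .real_cpus = 0xF, .kobj_cpu = 0 };

	remove_cpu(&p, 2);
	remove_cpu(&p, 0);	/* owner leaves, kobject moves */
	remove_cpu(&p, 1);
	remove_cpu(&p, 3);	/* mask empties, policy freed */
	return 0;
}
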
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index ae5b2bd3a978..fa3dd840a837 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -180,7 +180,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
ret = exynos5250_cpufreq_init(exynos_info);
} else {
pr_err("%s: Unknown SoC type\n", __func__);
- return -ENODEV;
+ ret = -ENODEV;
}
if (ret)
@@ -188,12 +188,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
if (exynos_info->set_freq == NULL) {
dev_err(&pdev->dev, "No set_freq function (ERR)\n");
+ ret = -EINVAL;
goto err_vdd_arm;
}
arm_regulator = regulator_get(NULL, "vdd_arm");
if (IS_ERR(arm_regulator)) {
dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
+ ret = -EINVAL;
goto err_vdd_arm;
}
@@ -225,7 +227,7 @@ err_cpufreq_reg:
regulator_put(arm_regulator);
err_vdd_arm:
kfree(exynos_info);
- return -EINVAL;
+ return ret;
}
static struct platform_driver exynos_cpufreq_platdrv = {
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index df14766a8e06..dfbbf981ed56 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -297,15 +297,6 @@ int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
-struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
-
-struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
-{
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
- return policy ? policy->freq_table : NULL;
-}
-EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
-
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("CPUfreq frequency table helpers");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 15ada47bb720..fcb929ec5304 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -681,6 +681,7 @@ static struct cpu_defaults knl_params = {
.get_max = core_get_max_pstate,
.get_min = core_get_min_pstate,
.get_turbo = knl_get_turbo_pstate,
+ .get_scaling = core_get_scaling,
.set = core_set_pstate,
},
};
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index e362860c2b50..cd593c1f66dc 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -20,7 +20,7 @@
#include <asm/clock.h>
#include <asm/idle.h>
-#include <asm/mach-loongson/loongson.h>
+#include <asm/mach-loongson64/loongson.h>
static uint nowait;
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index e8e2775c3821..48b7228563ad 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -112,7 +112,12 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
static void enter_freeze_proper(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index)
{
- tick_freeze();
+ /*
+ * trace_suspend_resume() called by tick_freeze() for the last CPU
+ * executing it contains RCU usage regarded as invalid in the idle
+ * context, so tell RCU about that.
+ */
+ RCU_NONIDLE(tick_freeze());
/*
* The state used here cannot be a "coupled" one, because the "coupled"
* cpuidle mechanism enables interrupts and doing that with timekeeping
@@ -122,7 +127,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
WARN_ON(!irqs_disabled());
/*
* timekeeping_resume() that will be called by tick_unfreeze() for the
- * last CPU executing it calls functions containing RCU read-side
+ * first CPU executing it calls functions containing RCU read-side
* critical sections, so tell RCU about that.
*/
RCU_NONIDLE(tick_unfreeze());
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index dae1e8099969..f9c78751989e 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req)
state->buflen_1;
u32 *sh_desc = ctx->sh_desc_fin, *desc;
dma_addr_t ptr = ctx->sh_desc_fin_dma;
- int sec4_sg_bytes;
+ int sec4_sg_bytes, sec4_sg_src_index;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = 0;
int sh_len;
- sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+ sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+ sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
last_buflen);
- (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+ (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 7ba495f75370..402631a19a11 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -905,7 +905,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
crypt->mode |= NPE_OP_NOT_IN_PLACE;
/* This was never tested by Intel
* for more than one dst buffer, I think. */
- BUG_ON(req->dst->length < nbytes);
req_ctx->dst = NULL;
if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
flags, DMA_FROM_DEVICE))
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 67f80813a06f..e4311ce0cd78 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -494,8 +494,9 @@ out:
static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
struct blkcipher_desc desc;
- u8 *iv = nx_ctx->priv.ccm.iv;
+ u8 *iv = rctx->iv;
iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
@@ -525,8 +526,9 @@ static int ccm_aes_nx_encrypt(struct aead_request *req)
static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
struct blkcipher_desc desc;
- u8 *iv = nx_ctx->priv.ccm.iv;
+ u8 *iv = rctx->iv;
iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index 2617cd4d54dd..dd7e9f3f5b6b 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -72,7 +72,7 @@ static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
if (key_len < CTR_RFC3686_NONCE_SIZE)
return -EINVAL;
- memcpy(nx_ctx->priv.ctr.iv,
+ memcpy(nx_ctx->priv.ctr.nonce,
in_key + key_len - CTR_RFC3686_NONCE_SIZE,
CTR_RFC3686_NONCE_SIZE);
@@ -131,14 +131,15 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
unsigned int nbytes)
{
struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
- u8 *iv = nx_ctx->priv.ctr.iv;
+ u8 iv[16];
+ memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
memcpy(iv + CTR_RFC3686_NONCE_SIZE,
desc->info, CTR_RFC3686_IV_SIZE);
iv[12] = iv[13] = iv[14] = 0;
iv[15] = 1;
- desc->info = nx_ctx->priv.ctr.iv;
+ desc->info = iv;
return ctr_aes_nx_crypt(desc, dst, src, nbytes);
}
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 08ac6d48688c..92c993f08213 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -317,6 +317,7 @@ out:
static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct blkcipher_desc desc;
unsigned int nbytes = req->cryptlen;
@@ -326,7 +327,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- desc.info = nx_ctx->priv.gcm.iv;
+ desc.info = rctx->iv;
/* initialize the counter */
*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
@@ -424,8 +425,8 @@ out:
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
- char *iv = nx_ctx->priv.gcm.iv;
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ char *iv = rctx->iv;
memcpy(iv, req->iv, 12);
@@ -434,8 +435,8 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
static int gcm_aes_nx_decrypt(struct aead_request *req)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
- char *iv = nx_ctx->priv.gcm.iv;
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ char *iv = rctx->iv;
memcpy(iv, req->iv, 12);
@@ -445,7 +446,8 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
- char *iv = nx_ctx->priv.gcm.iv;
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ char *iv = rctx->iv;
char *nonce = nx_ctx->priv.gcm.nonce;
memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
@@ -457,7 +459,8 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req)
static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
- char *iv = nx_ctx->priv.gcm.iv;
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ char *iv = rctx->iv;
char *nonce = nx_ctx->priv.gcm.nonce;
memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index 8c2faffab4a3..c2f7d4befb55 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -42,6 +42,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
unsigned int key_len)
{
struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
switch (key_len) {
case AES_KEYSIZE_128:
@@ -51,7 +52,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
return -EINVAL;
}
- memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);
+ memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);
return 0;
}
@@ -148,32 +149,29 @@ out:
return rc;
}
-static int nx_xcbc_init(struct shash_desc *desc)
+static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
{
- struct xcbc_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
- struct nx_sg *out_sg;
- int len;
+ int err;
- nx_ctx_init(nx_ctx, HCOP_FC_AES);
+ err = nx_crypto_ctx_aes_xcbc_init(tfm);
+ if (err)
+ return err;
- memset(sctx, 0, sizeof *sctx);
+ nx_ctx_init(nx_ctx, HCOP_FC_AES);
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
- memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
- memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key);
-
- len = AES_BLOCK_SIZE;
- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- &len, nx_ctx->ap->sglen);
+ return 0;
+}
- if (len != AES_BLOCK_SIZE)
- return -EINVAL;
+static int nx_xcbc_init(struct shash_desc *desc)
+{
+ struct xcbc_state *sctx = shash_desc_ctx(desc);
- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+ memset(sctx, 0, sizeof *sctx);
return 0;
}
@@ -186,6 +184,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg;
+ struct nx_sg *out_sg;
u32 to_process = 0, leftover, total;
unsigned int max_sg_len;
unsigned long irq_flags;
@@ -213,6 +212,17 @@ static int nx_xcbc_update(struct shash_desc *desc,
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+ data_len = AES_BLOCK_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+ &len, nx_ctx->ap->sglen);
+
+ if (data_len != AES_BLOCK_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
do {
to_process = total - to_process;
to_process = to_process & ~(AES_BLOCK_SIZE - 1);
@@ -235,8 +245,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
(u8 *) sctx->buffer,
&data_len,
max_sg_len);
- if (data_len != sctx->count)
- return -EINVAL;
+ if (data_len != sctx->count) {
+ rc = -EINVAL;
+ goto out;
+ }
}
data_len = to_process - sctx->count;
@@ -245,8 +257,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
&data_len,
max_sg_len);
- if (data_len != to_process - sctx->count)
- return -EINVAL;
+ if (data_len != to_process - sctx->count) {
+ rc = -EINVAL;
+ goto out;
+ }
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg);
@@ -325,15 +339,19 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
&len, nx_ctx->ap->sglen);
- if (len != sctx->count)
- return -EINVAL;
+ if (len != sctx->count) {
+ rc = -EINVAL;
+ goto out;
+ }
len = AES_BLOCK_SIZE;
out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
nx_ctx->ap->sglen);
- if (len != AES_BLOCK_SIZE)
- return -EINVAL;
+ if (len != AES_BLOCK_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
@@ -372,7 +390,7 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
.cra_blocksize = AES_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_aes_xcbc_init,
+ .cra_init = nx_crypto_ctx_aes_xcbc_init2,
.cra_exit = nx_crypto_ctx_exit,
}
};
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 4e91bdb83c59..becb738c897b 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -29,34 +29,28 @@
#include "nx.h"
-static int nx_sha256_init(struct shash_desc *desc)
+static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- struct nx_sg *out_sg;
- int len;
- u32 max_sg_len;
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ int err;
- nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+ err = nx_crypto_ctx_sha_init(tfm);
+ if (err)
+ return err;
- memset(sctx, 0, sizeof *sctx);
+ nx_ctx_init(nx_ctx, HCOP_FC_SHA);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
- max_sg_len = min_t(u64, nx_ctx->ap->sglen,
- nx_driver.of.max_sg_len/sizeof(struct nx_sg));
- max_sg_len = min_t(u64, max_sg_len,
- nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+ return 0;
+}
- len = SHA256_DIGEST_SIZE;
- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- &len, max_sg_len);
- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+static int nx_sha256_init(struct shash_desc *desc) {
+ struct sha256_state *sctx = shash_desc_ctx(desc);
- if (len != SHA256_DIGEST_SIZE)
- return -EINVAL;
+ memset(sctx, 0, sizeof *sctx);
sctx->state[0] = __cpu_to_be32(SHA256_H0);
sctx->state[1] = __cpu_to_be32(SHA256_H1);
@@ -77,7 +71,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct nx_sg *in_sg;
+ struct nx_sg *out_sg;
u64 to_process = 0, leftover, total;
unsigned long irq_flags;
int rc = 0;
@@ -102,24 +96,28 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+ data_len = SHA256_DIGEST_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+ &data_len, max_sg_len);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ if (data_len != SHA256_DIGEST_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
do {
- /*
- * to_process: the SHA256_BLOCK_SIZE data chunk to process in
- * this update. This value is also restricted by the sg list
- * limits.
- */
- to_process = total - to_process;
- to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+ int used_sgs = 0;
+ struct nx_sg *in_sg = nx_ctx->in_sg;
if (buf_len) {
data_len = buf_len;
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
+ in_sg = nx_build_sg_list(in_sg,
(u8 *) sctx->buf,
&data_len,
max_sg_len);
@@ -128,15 +126,27 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
rc = -EINVAL;
goto out;
}
+ used_sgs = in_sg - nx_ctx->in_sg;
}
+ /* to_process: SHA256_BLOCK_SIZE aligned chunk to be
+ * processed in this iteration. This value is restricted
+ * by sg list limits and number of sgs we already used
+ * for leftover data. (see above)
+ * In the ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+ * but because data may not be aligned, we need to account
+ * for that too. */
+ to_process = min_t(u64, total,
+ (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+ to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+
data_len = to_process - buf_len;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- to_process = (data_len + buf_len);
+ to_process = data_len + buf_len;
leftover = total - to_process;
/*
@@ -282,7 +292,7 @@ struct shash_alg nx_shash_sha256_alg = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_sha_init,
+ .cra_init = nx_crypto_ctx_sha256_init,
.cra_exit = nx_crypto_ctx_exit,
}
};
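
The reworked nx_sha256_update() above no longer rounds the whole remaining input down to a block boundary in one go; each iteration's to_process is first clamped to what the scatter-gather list can still describe, accounting for the entries already consumed by buffered leftover data and one spare entry for possibly unaligned user data, and only then rounded down to whole SHA-256 blocks (the SHA-512 path below gets the same treatment). A minimal sketch of that computation, with example values for the limits:

#include <stdint.h>
#include <stdio.h>

#define NX_PAGE_SIZE		4096u
#define SHA256_BLOCK_SIZE	64u

/*
 * Clamp the bytes processed in one iteration to what the sg list can hold:
 * max_sg_len entries total, minus the entries already used for leftover
 * buffered data, minus one spare entry for unaligned user data, then round
 * down to a whole number of hash blocks.
 */
static uint64_t clamp_to_process(uint64_t total, unsigned int max_sg_len,
				 unsigned int used_sgs)
{
	uint64_t room = (uint64_t)(max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE;
	uint64_t to_process = total < room ? total : room;

	return to_process & ~(uint64_t)(SHA256_BLOCK_SIZE - 1);
}

int main(void)
{
	/* e.g. 1 MiB pending, 32 sg entries, 1 already used for leftover */
	printf("to_process = %llu\n",
	       (unsigned long long)clamp_to_process(1 << 20, 32, 1));
	return 0;
}
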
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index e6a58d2ee628..b6e183d58d73 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -28,34 +28,29 @@
#include "nx.h"
-static int nx_sha512_init(struct shash_desc *desc)
+static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- struct nx_sg *out_sg;
- int len;
- u32 max_sg_len;
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ int err;
- nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+ err = nx_crypto_ctx_sha_init(tfm);
+ if (err)
+ return err;
- memset(sctx, 0, sizeof *sctx);
+ nx_ctx_init(nx_ctx, HCOP_FC_SHA);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
- max_sg_len = min_t(u64, nx_ctx->ap->sglen,
- nx_driver.of.max_sg_len/sizeof(struct nx_sg));
- max_sg_len = min_t(u64, max_sg_len,
- nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+ return 0;
+}
- len = SHA512_DIGEST_SIZE;
- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- &len, max_sg_len);
- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+static int nx_sha512_init(struct shash_desc *desc)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
- if (len != SHA512_DIGEST_SIZE)
- return -EINVAL;
+ memset(sctx, 0, sizeof *sctx);
sctx->state[0] = __cpu_to_be64(SHA512_H0);
sctx->state[1] = __cpu_to_be64(SHA512_H1);
@@ -76,7 +71,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct nx_sg *in_sg;
+ struct nx_sg *out_sg;
u64 to_process, leftover = 0, total;
unsigned long irq_flags;
int rc = 0;
@@ -101,25 +96,28 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+ data_len = SHA512_DIGEST_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+ &data_len, max_sg_len);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ if (data_len != SHA512_DIGEST_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
do {
- /*
- * to_process: the SHA512_BLOCK_SIZE data chunk to process in
- * this update. This value is also restricted by the sg list
- * limits.
- */
- to_process = total - leftover;
- to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
- leftover = total - to_process;
+ int used_sgs = 0;
+ struct nx_sg *in_sg = nx_ctx->in_sg;
if (buf_len) {
data_len = buf_len;
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
+ in_sg = nx_build_sg_list(in_sg,
(u8 *) sctx->buf,
&data_len, max_sg_len);
@@ -127,8 +125,20 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
rc = -EINVAL;
goto out;
}
+ used_sgs = in_sg - nx_ctx->in_sg;
}
+ /* to_process: SHA512_BLOCK_SIZE aligned chunk to be
+ * processed in this iteration. This value is restricted
+ * by sg list limits and number of sgs we already used
+ * for leftover data. (see above)
+ * In the ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+ * but because data may not be aligned, we need to account
+ * for that too. */
+ to_process = min_t(u64, total,
+ (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+ to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+
data_len = to_process - buf_len;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
@@ -140,7 +150,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
goto out;
}
- to_process = (data_len + buf_len);
+ to_process = data_len + buf_len;
leftover = total - to_process;
/*
@@ -288,7 +298,7 @@ struct shash_alg nx_shash_sha512_alg = {
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_sha_init,
+ .cra_init = nx_crypto_ctx_sha512_init,
.cra_exit = nx_crypto_ctx_exit,
}
};
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index f6198f29a4a8..436971343ff7 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -713,12 +713,15 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
/* entry points from the crypto tfm initializers */
int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
{
+ crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+ sizeof(struct nx_ccm_rctx));
return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
NX_MODE_AES_CCM);
}
int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
{
+ crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx));
return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
NX_MODE_AES_GCM);
}
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index de3ea8738146..cdff03a42ae7 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -2,6 +2,8 @@
#ifndef __NX_H__
#define __NX_H__
+#include <crypto/ctr.h>
+
#define NX_NAME "nx-crypto"
#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver"
#define NX_VERSION "1.0"
@@ -91,8 +93,11 @@ struct nx_crypto_driver {
#define NX_GCM4106_NONCE_LEN (4)
#define NX_GCM_CTR_OFFSET (12)
-struct nx_gcm_priv {
+struct nx_gcm_rctx {
u8 iv[16];
+};
+
+struct nx_gcm_priv {
u8 iauth_tag[16];
u8 nonce[NX_GCM4106_NONCE_LEN];
};
@@ -100,8 +105,11 @@ struct nx_gcm_priv {
#define NX_CCM_AES_KEY_LEN (16)
#define NX_CCM4309_AES_KEY_LEN (19)
#define NX_CCM4309_NONCE_LEN (3)
-struct nx_ccm_priv {
+struct nx_ccm_rctx {
u8 iv[16];
+};
+
+struct nx_ccm_priv {
u8 b0[16];
u8 iauth_tag[16];
u8 oauth_tag[16];
@@ -113,7 +121,7 @@ struct nx_xcbc_priv {
};
struct nx_ctr_priv {
- u8 iv[16];
+ u8 nonce[CTR_RFC3686_NONCE_SIZE];
};
struct nx_crypto_ctx {
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 46307098f8ba..0a70e46d5416 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -536,9 +536,6 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
dmaengine_terminate_all(dd->dma_lch_in);
dmaengine_terminate_all(dd->dma_lch_out);
- dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
- dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
-
return err;
}
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 067402c7c2a9..df427c0e9e7b 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -73,7 +73,8 @@
ICP_QAT_HW_CIPHER_KEY_CONVERT, \
ICP_QAT_HW_CIPHER_DECRYPT)
-static atomic_t active_dev;
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
struct qat_alg_buf {
uint32_t len;
@@ -1280,7 +1281,10 @@ static struct crypto_alg qat_algs[] = { {
int qat_algs_register(void)
{
- if (atomic_add_return(1, &active_dev) == 1) {
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (++active_devs == 1) {
int i;
for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
@@ -1289,21 +1293,25 @@ int qat_algs_register(void)
CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
- return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
}
- return 0;
+ mutex_unlock(&algs_lock);
+ return ret;
}
int qat_algs_unregister(void)
{
- if (atomic_sub_return(1, &active_dev) == 0)
- return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
- return 0;
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (--active_devs == 0)
+ ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ mutex_unlock(&algs_lock);
+ return ret;
}
int qat_algs_init(void)
{
- atomic_set(&active_dev, 0);
crypto_get_default_rng();
return 0;
}
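
The qat change above serializes registration behind a mutex-protected device count so the crypto algorithms are registered exactly once, when the first device appears, and unregistered only when the last one goes away, with the register/unregister return code propagated to the caller. A minimal userspace model of that gating, using a pthread mutex and stub register/unregister functions in place of crypto_register_algs()/crypto_unregister_algs():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t algs_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int active_devs;

/* Stand-ins for crypto_register_algs()/crypto_unregister_algs(). */
static int register_algs(void)   { printf("algorithms registered\n");   return 0; }
static int unregister_algs(void) { printf("algorithms unregistered\n"); return 0; }

/* Register only when the first device shows up. */
static int algs_register(void)
{
	int ret = 0;

	pthread_mutex_lock(&algs_lock);
	if (++active_devs == 1)
		ret = register_algs();
	pthread_mutex_unlock(&algs_lock);
	return ret;
}

/* Unregister only when the last device goes away. */
static int algs_unregister(void)
{
	int ret = 0;

	pthread_mutex_lock(&algs_lock);
	if (--active_devs == 0)
		ret = unregister_algs();
	pthread_mutex_unlock(&algs_lock);
	return ret;
}

int main(void)
{
	algs_register();	/* first device: registers */
	algs_register();	/* second device: no-op */
	algs_unregister();	/* one device left: no-op */
	algs_unregister();	/* last device: unregisters */
	return 0;
}
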
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 59892126d175..d3629b7482dd 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -48,6 +48,8 @@
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+#define ATC_MAX_DSCR_TRIALS 10
+
/*
* Initial number of descriptors to allocate for each channel. This could
* be increased during dma usage.
@@ -285,28 +287,19 @@ static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
*
* @current_len: the number of bytes left before reading CTRLA
* @ctrla: the value of CTRLA
- * @desc: the descriptor containing the transfer width
*/
-static inline int atc_calc_bytes_left(int current_len, u32 ctrla,
- struct at_desc *desc)
+static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
{
- return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width);
-}
+ u32 btsize = (ctrla & ATC_BTSIZE_MAX);
+ u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);
-/**
- * atc_calc_bytes_left_from_reg - calculates the number of bytes left according
- * to the current value of CTRLA.
- *
- * @current_len: the number of bytes left before reading CTRLA
- * @atchan: the channel to read CTRLA for
- * @desc: the descriptor containing the transfer width
- */
-static inline int atc_calc_bytes_left_from_reg(int current_len,
- struct at_dma_chan *atchan, struct at_desc *desc)
-{
- u32 ctrla = channel_readl(atchan, CTRLA);
-
- return atc_calc_bytes_left(current_len, ctrla, desc);
+ /*
+ * According to the datasheet, when reading the Control A Register
+ * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
+ * number of transfers completed on the Source Interface.
+ * So btsize is always a number of source width transfers.
+ */
+ return current_len - (btsize << src_width);
}
/**
@@ -320,7 +313,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
struct at_desc *desc_first = atc_first_active(atchan);
struct at_desc *desc;
int ret;
- u32 ctrla, dscr;
+ u32 ctrla, dscr, trials;
/*
* If the cookie doesn't match to the currently running transfer then
@@ -346,15 +339,82 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
* the channel's DSCR register and compare it against the value
* of the hardware linked list structure of each child
* descriptor.
+ *
+ * The CTRLA register provides us with the amount of data
+ * already read from the source for the current child
+ * descriptor. So we can compute a more accurate residue by also
+ * removing the number of bytes corresponding to this amount of
+ * data.
+ *
+ * However, the DSCR and CTRLA registers cannot both be read
+ * atomically. Hence a race condition may occur: the first read
+ * register may refer to one child descriptor whereas the second
+ * read may refer to a later child descriptor in the list
+ * because of the DMA transfer progression in between the two
+ * reads.
+ *
+ * One solution could have been to pause the DMA transfer, read
+ * the DSCR and CTRLA then resume the DMA transfer. Nonetheless,
+ * this approach presents some drawbacks:
+ * - If the DMA transfer is paused, RX overruns or TX underruns
+ * are more likely to occur depending on the system latency.
+ * Taking the USART driver as an example, it uses a cyclic DMA
+ * transfer to read data from the Receive Holding Register
+ * (RHR) to avoid RX overruns since the RHR is not protected
+ * by any FIFO on most Atmel SoCs. So pausing the DMA transfer
+ * to compute the residue would break the USART driver design.
+ * - The atc_pause() function masks interrupts but we'd rather
+ * avoid doing so for system latency purposes.
+ *
+ * Then we'd rather use another solution: the DSCR is read a
+ * first time, the CTRLA is read in turn, next the DSCR is read
+ * a second time. If the two consecutive read values of the DSCR
+ * are the same, then we assume both refer to the very same
+ * child descriptor, as does the CTRLA value read in between.
+ * For cyclic transfers, the assumption is that a full loop
+ * is "not so fast".
+ * If the two DSCR values are different, we read the CTRLA again,
+ * then the DSCR, until two consecutive DSCR reads are equal or
+ * the maximum number of trials is reached.
+ * This algorithm is very unlikely not to find a stable value for
+ * DSCR.
*/
- ctrla = channel_readl(atchan, CTRLA);
- rmb(); /* ensure CTRLA is read before DSCR */
dscr = channel_readl(atchan, DSCR);
+ rmb(); /* ensure DSCR is read before CTRLA */
+ ctrla = channel_readl(atchan, CTRLA);
+ for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
+ u32 new_dscr;
+
+ rmb(); /* ensure DSCR is read after CTRLA */
+ new_dscr = channel_readl(atchan, DSCR);
+
+ /*
+ * If the DSCR register value has not changed inside the
+ * DMA controller since the previous read, we assume
+ * that both the dscr and ctrla values refer to the
+ * very same descriptor.
+ */
+ if (likely(new_dscr == dscr))
+ break;
+
+ /*
+ * DSCR has changed inside the DMA controller, so the
+ * previously read value of CTRLA may refer to an already
+ * processed descriptor and hence could be outdated.
+ * We need to update ctrla to match the current
+ * descriptor.
+ */
+ dscr = new_dscr;
+ rmb(); /* ensure DSCR is read before CTRLA */
+ ctrla = channel_readl(atchan, CTRLA);
+ }
+ if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
+ return -ETIMEDOUT;
/* for the first descriptor we can be more accurate */
if (desc_first->lli.dscr == dscr)
- return atc_calc_bytes_left(ret, ctrla, desc_first);
+ return atc_calc_bytes_left(ret, ctrla);
ret -= desc_first->len;
list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
@@ -365,16 +425,14 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
}
/*
- * For the last descriptor in the chain we can calculate
+ * For the current descriptor in the chain we can calculate
* the remaining bytes using the channel's register.
- * Note that the transfer width of the first and last
- * descriptor may differ.
*/
- if (!desc->lli.dscr)
- ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
+ ret = atc_calc_bytes_left(ret, ctrla);
} else {
/* single transfer */
- ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first);
+ ctrla = channel_readl(atchan, CTRLA);
+ ret = atc_calc_bytes_left(ret, ctrla);
}
return ret;
@@ -726,7 +784,6 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
desc->txd.cookie = -EBUSY;
desc->total_len = desc->len = len;
- desc->tx_width = dwidth;
/* set end-of-link to the last link descriptor of the list */
set_desc_eol(desc);
@@ -804,10 +861,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
first->txd.cookie = -EBUSY;
first->total_len = len;
- /* set transfer width for the calculation of the residue */
- first->tx_width = src_width;
- prev->tx_width = src_width;
-
/* set end-of-link to the last link descriptor of the list */
set_desc_eol(desc);
@@ -956,10 +1009,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
first->txd.cookie = -EBUSY;
first->total_len = total_len;
- /* set transfer width for the calculation of the residue */
- first->tx_width = reg_width;
- prev->tx_width = reg_width;
-
/* first link descriptor of the list is responsible for flags */
first->txd.flags = flags; /* client is in control of this ack */
@@ -1077,12 +1126,6 @@ atc_prep_dma_sg(struct dma_chan *chan,
desc->txd.cookie = 0;
desc->len = len;
- /*
- * Although we only need the transfer width for the first and
- * the last descriptor, its easier to set it to all descriptors.
- */
- desc->tx_width = src_width;
-
atc_desc_chain(&first, &prev, desc);
/* update the lengths and addresses for the next loop cycle */
@@ -1256,7 +1299,6 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
/* First descriptor of the chain embeds additional information */
first->txd.cookie = -EBUSY;
first->total_len = buf_len;
- first->tx_width = reg_width;
return &first->txd;
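
The residue logic added to atc_get_bytes_left() above pairs the DSCR and CTRLA reads by re-reading DSCR after CTRLA and retrying until two consecutive DSCR reads agree, giving up after ATC_MAX_DSCR_TRIALS. A minimal userspace sketch of that stabilization loop; read_dscr() and read_ctrla() are hypothetical stand-ins for the channel register reads, fed by a fake advancing transfer so the example runs:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_DSCR_TRIALS 10

/* Stand-ins for channel_readl(atchan, DSCR/CTRLA); here they just return
 * values from a fake, advancing transfer so the example runs. */
static uint32_t fake_progress;
static uint32_t read_dscr(void)  { return fake_progress < 3 ? fake_progress++ : 3; }
static uint32_t read_ctrla(void) { return 0x100 + fake_progress; }

/*
 * Read DSCR and CTRLA as a consistent pair: re-read DSCR after CTRLA and
 * retry until two consecutive DSCR reads match, so both values describe
 * the same child descriptor.
 */
static int read_dscr_ctrla(uint32_t *dscr, uint32_t *ctrla)
{
	uint32_t cur = read_dscr();
	uint32_t ctl = read_ctrla();

	for (int trials = 0; trials < MAX_DSCR_TRIALS; trials++) {
		uint32_t next = read_dscr();

		if (next == cur) {		/* stable: ctl matches cur */
			*dscr = cur;
			*ctrla = ctl;
			return 0;
		}
		/* DSCR moved on; the CTRLA we read may be stale, refresh it. */
		cur = next;
		ctl = read_ctrla();
	}
	return -ETIMEDOUT;
}

int main(void)
{
	uint32_t dscr, ctrla;

	if (read_dscr_ctrla(&dscr, &ctrla))
		printf("could not get a stable DSCR\n");
	else
		printf("dscr=%u ctrla=0x%x\n", (unsigned)dscr, (unsigned)ctrla);
	return 0;
}
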
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index bc8d5ebedd19..7f5a08230f76 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -112,6 +112,7 @@
#define ATC_SRC_WIDTH_BYTE (0x0 << 24)
#define ATC_SRC_WIDTH_HALFWORD (0x1 << 24)
#define ATC_SRC_WIDTH_WORD (0x2 << 24)
+#define ATC_REG_TO_SRC_WIDTH(r) (((r) >> 24) & 0x3)
#define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */
#define ATC_DST_WIDTH(x) ((x) << 28)
#define ATC_DST_WIDTH_BYTE (0x0 << 28)
@@ -182,7 +183,6 @@ struct at_lli {
* @txd: support for the async_tx api
* @desc_node: node on the channed descriptors list
* @len: descriptor byte count
- * @tx_width: transfer width
* @total_len: total transaction byte count
*/
struct at_desc {
@@ -194,7 +194,6 @@ struct at_desc {
struct dma_async_tx_descriptor txd;
struct list_head desc_node;
size_t len;
- u32 tx_width;
size_t total_len;
/* Interleaved data */
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index cf1213de7865..40afa2a16cfc 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -359,18 +359,19 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
* descriptor view 2 since some fields of the configuration register
* depend on transfer size and src/dest addresses.
*/
- if (at_xdmac_chan_is_cyclic(atchan)) {
+ if (at_xdmac_chan_is_cyclic(atchan))
reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
- at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
- } else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) {
+ else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3)
reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
- } else {
- /*
- * No need to write AT_XDMAC_CC reg, it will be done when the
- * descriptor is fecthed.
- */
+ else
reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
- }
+ /*
+ * Even if the register will be updated from the configuration in the
+ * descriptor when using view 2 or higher, the PROT bit won't be set
+ * properly. This bit can be modified only by using the channel
+ * configuration register.
+ */
+ at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
reg |= AT_XDMAC_CNDC_NDDUP
| AT_XDMAC_CNDC_NDSUP
@@ -681,15 +682,16 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
desc->lld.mbr_sa = mem;
desc->lld.mbr_da = atchan->sconfig.dst_addr;
}
- desc->lld.mbr_cfg = atchan->cfg;
- dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
+ dwidth = at_xdmac_get_dwidth(atchan->cfg);
fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
- ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
+ ? dwidth
: AT_XDMAC_CC_DWIDTH_BYTE;
desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */
| AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */
| AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */
| (len >> fixed_dwidth); /* microblock length */
+ desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
+ AT_XDMAC_CC_DWIDTH(fixed_dwidth);
dev_dbg(chan2dev(chan),
"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 4a4cce15f25d..3ff284c8e3d5 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -689,6 +689,10 @@ struct dma_chan *dma_request_slave_channel(struct device *dev,
struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
if (IS_ERR(ch))
return NULL;
+
+ dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
+ ch->device->privatecnt++;
+
return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 1022c2e1a2b0..cf1c87fa1edd 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1746,4 +1746,4 @@ EXPORT_SYMBOL_GPL(dw_dma_enable);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index fbaf1ead2597..f1325f62563e 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -162,10 +162,11 @@ static void mv_chan_set_mode(struct mv_xor_chan *chan,
config &= ~0x7;
config |= op_mode;
- if (IS_ENABLED(__BIG_ENDIAN))
- config |= XOR_DESCRIPTOR_SWAP;
- else
- config &= ~XOR_DESCRIPTOR_SWAP;
+#if defined(__BIG_ENDIAN)
+ config |= XOR_DESCRIPTOR_SWAP;
+#else
+ config &= ~XOR_DESCRIPTOR_SWAP;
+#endif
writel_relaxed(config, XOR_CONFIG(chan));
chan->current_type = type;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index f513f77b1d85..ecab4ea059b4 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2328,7 +2328,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
desc->txd.callback = last->txd.callback;
desc->txd.callback_param = last->txd.callback_param;
}
- last->last = false;
+ desc->last = false;
dma_cookie_assign(&desc->txd);
@@ -2623,6 +2623,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
desc->rqcfg.brst_len = 1;
desc->rqcfg.brst_len = get_burst_len(desc, len);
+ desc->bytes_requested = len;
desc->txd.flags = flags;
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 7d2c17d8d30f..6f80432a3f0a 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_irqsave(&vc->lock, flags);
cookie = dma_cookie_assign(tx);
- list_move_tail(&vd->node, &vc->desc_submitted);
+ list_add_tail(&vd->node, &vc->desc_submitted);
spin_unlock_irqrestore(&vc->lock, flags);
dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -83,10 +83,8 @@ static void vchan_complete(unsigned long arg)
cb_data = vd->tx.callback_param;
list_del(&vd->node);
- if (async_tx_test_ack(&vd->tx))
- list_add(&vd->node, &vc->desc_allocated);
- else
- vc->desc_free(vd);
+
+ vc->desc_free(vd);
if (cb)
cb(cb_data);
@@ -98,13 +96,9 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
while (!list_empty(head)) {
struct virt_dma_desc *vd = list_first_entry(head,
struct virt_dma_desc, node);
- if (async_tx_test_ack(&vd->tx)) {
- list_move_tail(&vd->node, &vc->desc_allocated);
- } else {
- dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
- list_del(&vd->node);
- vc->desc_free(vd);
- }
+ list_del(&vd->node);
+ dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
+ vc->desc_free(vd);
}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -114,7 +108,6 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
dma_cookie_init(&vc->chan);
spin_lock_init(&vc->lock);
- INIT_LIST_HEAD(&vc->desc_allocated);
INIT_LIST_HEAD(&vc->desc_submitted);
INIT_LIST_HEAD(&vc->desc_issued);
INIT_LIST_HEAD(&vc->desc_completed);
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 189e75dbcb15..181b95267866 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -29,7 +29,6 @@ struct virt_dma_chan {
spinlock_t lock;
/* protected by vc.lock */
- struct list_head desc_allocated;
struct list_head desc_submitted;
struct list_head desc_issued;
struct list_head desc_completed;
@@ -56,16 +55,11 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
struct virt_dma_desc *vd, unsigned long tx_flags)
{
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
- unsigned long flags;
dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
vd->tx.flags = tx_flags;
vd->tx.tx_submit = vchan_tx_submit;
- spin_lock_irqsave(&vc->lock, flags);
- list_add_tail(&vd->node, &vc->desc_allocated);
- spin_unlock_irqrestore(&vc->lock, flags);
-
return &vd->tx;
}
@@ -128,8 +122,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
}
/**
- * vchan_get_all_descriptors - obtain all allocated, submitted and issued
- * descriptors
+ * vchan_get_all_descriptors - obtain all submitted and issued descriptors
* vc: virtual channel to get descriptors from
* head: list of descriptors found
*
@@ -141,7 +134,6 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
struct list_head *head)
{
- list_splice_tail_init(&vc->desc_allocated, head);
list_splice_tail_init(&vc->desc_submitted, head);
list_splice_tail_init(&vc->desc_issued, head);
list_splice_tail_init(&vc->desc_completed, head);
@@ -149,14 +141,11 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
- struct virt_dma_desc *vd;
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&vc->lock, flags);
vchan_get_all_descriptors(vc, &head);
- list_for_each_entry(vd, &head, node)
- async_tx_clear_ack(&vd->tx);
spin_unlock_irqrestore(&vc->lock, flags);
vchan_dma_desc_free_list(vc, &head);
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 620fd55ec766..dff22ab01851 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -111,6 +111,7 @@
#define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070
#define XGENE_DMA_BLK_MEM_RDY 0xD074
#define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF
+#define XGENE_DMA_RING_CMD_SM_OFFSET 0x8000
/* X-Gene SoC EFUSE csr register and bit defination */
#define XGENE_SOC_JTAG1_SHADOW 0x18
@@ -1887,6 +1888,8 @@ static int xgene_dma_get_resources(struct platform_device *pdev,
return -ENOMEM;
}
+ pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET;
+
/* Get efuse csr region */
res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
if (!res) {
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 3515b381c131..711d8ad74f11 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -920,7 +920,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
*/
for (row = 0; row < mci->nr_csrows; row++) {
- struct csrow_info *csi = &mci->csrows[row];
+ struct csrow_info *csi = mci->csrows[row];
/*
* Get the configuration settings for this
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 080d5cc27055..eebdf2a33bfe 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -200,7 +200,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev);
if (status) {
dev_err(&pdev->dev, "failed to register extcon device\n");
- kfree(palmas_usb->edev->name);
return status;
}
@@ -214,7 +213,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
palmas_usb->id_irq, status);
- kfree(palmas_usb->edev->name);
return status;
}
}
@@ -229,7 +227,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
palmas_usb->vbus_irq, status);
- kfree(palmas_usb->edev->name);
return status;
}
}
@@ -239,15 +236,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
return 0;
}
-static int palmas_usb_remove(struct platform_device *pdev)
-{
- struct palmas_usb *palmas_usb = platform_get_drvdata(pdev);
-
- kfree(palmas_usb->edev->name);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int palmas_usb_suspend(struct device *dev)
{
@@ -288,7 +276,6 @@ static const struct of_device_id of_palmas_match_tbl[] = {
static struct platform_driver palmas_usb_driver = {
.probe = palmas_usb_probe,
- .remove = palmas_usb_remove,
.driver = {
.name = "palmas-usb",
.of_match_table = of_palmas_match_tbl,
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 76157ab9faf3..43b57b02d050 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -124,25 +124,35 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id
return -EINVAL;
}
-static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
+static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
{
- unsigned int id = EXTCON_NONE;
+ unsigned int id = -EINVAL;
int i = 0;
- if (edev->max_supported == 0)
- return -EINVAL;
-
- /* Find the the number of extcon cable */
+ /* Find the id of extcon cable */
while (extcon_name[i]) {
if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) {
id = i;
break;
}
+ i++;
}
- if (id == EXTCON_NONE)
+ return id;
+}
+
+static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
+{
+ unsigned int id;
+
+ if (edev->max_supported == 0)
return -EINVAL;
+ /* Find the number of extcon cable */
+ id = find_cable_id_by_name(edev, name);
+ if (id < 0)
+ return id;
+
return find_cable_index_by_id(edev, id);
}
@@ -228,9 +238,11 @@ static ssize_t cable_state_show(struct device *dev,
struct extcon_cable *cable = container_of(attr, struct extcon_cable,
attr_state);
+ int i = cable->cable_index;
+
return sprintf(buf, "%d\n",
extcon_get_cable_state_(cable->edev,
- cable->cable_index));
+ cable->edev->supported_cable[i]));
}
/**
@@ -263,20 +275,25 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state)
spin_lock_irqsave(&edev->lock, flags);
if (edev->state != ((edev->state & ~mask) | (state & mask))) {
+ u32 old_state;
+
if (check_mutually_exclusive(edev, (edev->state & ~mask) |
(state & mask))) {
spin_unlock_irqrestore(&edev->lock, flags);
return -EPERM;
}
- for (index = 0; index < edev->max_supported; index++) {
- if (is_extcon_changed(edev->state, state, index, &attached))
- raw_notifier_call_chain(&edev->nh[index], attached, edev);
- }
-
+ old_state = edev->state;
edev->state &= ~mask;
edev->state |= state & mask;
+ for (index = 0; index < edev->max_supported; index++) {
+ if (is_extcon_changed(old_state, edev->state, index,
+ &attached))
+ raw_notifier_call_chain(&edev->nh[index],
+ attached, edev);
+ }
+
/* This could be in interrupt handler */
prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
if (prop_buf) {
@@ -361,8 +378,13 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state_);
*/
int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
{
- return extcon_get_cable_state_(edev, find_cable_index_by_name
- (edev, cable_name));
+ unsigned int id;
+
+ id = find_cable_id_by_name(edev, cable_name);
+ if (id < 0)
+ return id;
+
+ return extcon_get_cable_state_(edev, id);
}
EXPORT_SYMBOL_GPL(extcon_get_cable_state);
@@ -404,8 +426,13 @@ EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
int extcon_set_cable_state(struct extcon_dev *edev,
const char *cable_name, bool cable_state)
{
- return extcon_set_cable_state_(edev, find_cable_index_by_name
- (edev, cable_name), cable_state);
+ unsigned int id;
+
+ id = find_cable_id_by_name(edev, cable_name);
+ if (id < 0)
+ return id;
+
+ return extcon_set_cable_state_(edev, id, cable_state);
}
EXPORT_SYMBOL_GPL(extcon_set_cable_state);
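
The extcon_update_state() change above records old_state, commits the masked update, and only then walks the supported cables, comparing old and new state bit by bit so notifiers observe the state that was actually written. A stand-alone sketch of that order of operations, with a trivial callback standing in for the raw_notifier chain (the names below are placeholders, not the extcon API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_CABLES 4

/* Stand-in for raw_notifier_call_chain(): report one cable's new state. */
static void notify(int index, bool attached)
{
	printf("cable %d -> %s\n", index, attached ? "attached" : "detached");
}

static bool changed(uint32_t old_state, uint32_t new_state, int idx, bool *attached)
{
	if (((old_state ^ new_state) >> idx) & 1) {
		*attached = (new_state >> idx) & 1;
		return true;
	}
	return false;
}

/* Same shape as the patched extcon_update_state(): commit first, notify after. */
static void update_state(uint32_t *state, uint32_t mask, uint32_t new_bits)
{
	uint32_t old_state = *state;
	bool attached;

	*state &= ~mask;
	*state |= new_bits & mask;

	for (int i = 0; i < MAX_CABLES; i++)
		if (changed(old_state, *state, i, &attached))
			notify(i, attached);
}

int main(void)
{
	uint32_t state = 0x1;		/* cable 0 attached */

	update_state(&state, 0x3, 0x2);	/* detach 0, attach 1 */
	return 0;
}
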
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
index 87add3fdce52..e41594510b97 100644
--- a/drivers/firmware/broadcom/bcm47xx_nvram.c
+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
@@ -245,4 +245,4 @@ char *bcm47xx_nvram_get_contents(size_t *nvram_size)
}
EXPORT_SYMBOL(bcm47xx_nvram_get_contents);
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 4fd9961d552e..d42537425438 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -305,10 +305,17 @@ const char *cper_mem_err_unpack(struct trace_seq *p,
return ret;
}
-static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem)
+static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
+ int len)
{
struct cper_mem_err_compact cmem;
+ /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
+ if (len == sizeof(struct cper_sec_mem_err_old) &&
+ (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) {
+ pr_err(FW_WARN "valid bits set for fields beyond structure\n");
+ return;
+ }
if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
if (mem->validation_bits & CPER_MEM_VALID_PA)
@@ -405,8 +412,10 @@ static void cper_estatus_print_section(
} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
printk("%s""section_type: memory error\n", newpfx);
- if (gdata->error_data_length >= sizeof(*mem_err))
- cper_print_mem(newpfx, mem_err);
+ if (gdata->error_data_length >=
+ sizeof(struct cper_sec_mem_err_old))
+ cper_print_mem(newpfx, mem_err,
+ gdata->error_data_length);
else
goto err_section_too_small;
} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
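
The cper_print_mem() change passes the section length down so that a record laid out as the older, shorter cper_sec_mem_err_old cannot claim validity for fields it does not actually contain. A small sketch of the same defensive check, with made-up structure and bit names standing in for the UEFI CPER definitions:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layouts: the "old" record stops before the rank field. */
struct mem_err_old { uint64_t validation_bits; uint64_t phys_addr; };
struct mem_err     { uint64_t validation_bits; uint64_t phys_addr; uint16_t rank; };

#define VALID_PA           0x1ULL
#define VALID_RANK_NUMBER  0x2ULL	/* first bit only the new layout backs */

static void print_mem(const struct mem_err *m, size_t len)
{
	/* Don't trust an old-sized record whose valid bits point past it. */
	if (len == sizeof(struct mem_err_old) &&
	    (m->validation_bits & ~(VALID_RANK_NUMBER - 1))) {
		fprintf(stderr, "valid bits set for fields beyond structure\n");
		return;
	}
	if (m->validation_bits & VALID_PA)
		printf("physical_address: 0x%016llx\n",
		       (unsigned long long)m->phys_addr);
	if (len >= sizeof(struct mem_err) &&
	    (m->validation_bits & VALID_RANK_NUMBER))
		printf("rank: %u\n", m->rank);
}

int main(void)
{
	struct mem_err rec = { .validation_bits = VALID_PA | VALID_RANK_NUMBER,
			       .phys_addr = 0x1000, .rank = 3 };

	print_mem(&rec, sizeof(struct mem_err_old));	/* rejected */
	print_mem(&rec, sizeof(struct mem_err));	/* printed  */
	return 0;
}
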
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 9fa8084a7c8d..d6144e3b97c5 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -58,6 +58,11 @@ bool efi_runtime_disabled(void)
static int __init parse_efi_cmdline(char *str)
{
+ if (!str) {
+ pr_warn("need at least one option\n");
+ return -EINVAL;
+ }
+
if (parse_option_str(str, "noruntime"))
disable_runtime = true;
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 7a3cb1fa0a76..4630a8133ea6 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -87,6 +87,15 @@ static int brcmstb_gpio_remove(struct platform_device *pdev)
struct brcmstb_gpio_bank *bank;
int ret = 0;
+ if (!priv) {
+ dev_err(&pdev->dev, "called %s without drvdata!\n", __func__);
+ return -EFAULT;
+ }
+
+ /*
+ * You can lose return values below, but we report all errors, and it's
+ * more important to actually perform all of the steps.
+ */
list_for_each(pos, &priv->bank_list) {
bank = list_entry(pos, struct brcmstb_gpio_bank, node);
ret = bgpio_remove(&bank->bgc);
@@ -143,6 +152,8 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ platform_set_drvdata(pdev, priv);
+ INIT_LIST_HEAD(&priv->bank_list);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg_base = devm_ioremap_resource(dev, res);
@@ -153,7 +164,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
priv->reg_base = reg_base;
priv->pdev = pdev;
- INIT_LIST_HEAD(&priv->bank_list);
if (brcmstb_gpio_sanity_check_banks(dev, np, res))
return -EINVAL;
@@ -221,8 +231,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n",
priv->num_banks, priv->gpio_base, gpio_base - 1);
- platform_set_drvdata(pdev, priv);
-
return 0;
fail:
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index c5e05c82d67c..c246ac3dda7c 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -578,15 +578,13 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
writel_relaxed(~0, &g->clr_falling);
writel_relaxed(~0, &g->clr_rising);
- /* set up all irqs in this bank */
- irq_set_chained_handler(bank_irq, gpio_irq_handler);
-
/*
* Each chip handles 32 gpios, and each irq bank consists of 16
* gpio irqs. Pass the irq bank's corresponding controller to
* the chained irq handler.
*/
- irq_set_handler_data(bank_irq, &chips[gpio / 32]);
+ irq_set_chained_handler_and_data(bank_irq, gpio_irq_handler,
+ &chips[gpio / 32]);
binten |= BIT(bank);
}
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index aed4ca9338bc..7d3c90e9da71 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -603,6 +603,7 @@ static int max732x_setup_gpio(struct max732x_chip *chip,
gc->base = gpio_start;
gc->ngpio = port;
gc->label = chip->client->name;
+ gc->dev = &chip->client->dev;
gc->owner = THIS_MODULE;
return port;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index b0c57d505be7..61a731ff9a07 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -500,8 +500,10 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
spin_lock_irqsave(&bank->lock, flags);
retval = omap_set_gpio_triggering(bank, offset, type);
- if (retval)
+ if (retval) {
+ spin_unlock_irqrestore(&bank->lock, flags);
goto error;
+ }
omap_gpio_init_irq(bank, offset);
if (!omap_gpio_is_input(bank, offset)) {
spin_unlock_irqrestore(&bank->lock, flags);
@@ -1185,6 +1187,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
bank->irq = res->start;
bank->dev = dev;
bank->chip.dev = dev;
+ bank->chip.owner = THIS_MODULE;
bank->dbck_flag = pdata->dbck_flag;
bank->stride = pdata->bank_stride;
bank->width = pdata->bank_width;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index d233eb3b8132..50caeb1ee350 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -570,6 +570,10 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
"could not connect irqchip to gpiochip\n");
return ret;
}
+
+ gpiochip_set_chained_irqchip(&chip->gpio_chip,
+ &pca953x_irq_chip,
+ client->irq, NULL);
}
return 0;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 77fe5d3cb105..d5284dfe01fe 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -220,9 +220,9 @@ static void xgpio_save_regs(struct of_mm_gpio_chip *mm_gc)
if (!chip->gpio_width[1])
return;
- xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET + XGPIO_TRI_OFFSET,
+ xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET + XGPIO_CHANNEL_OFFSET,
chip->gpio_state[1]);
- xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET + XGPIO_TRI_OFFSET,
+ xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET + XGPIO_CHANNEL_OFFSET,
chip->gpio_dir[1]);
}
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 2e87c4b8da26..a78882389836 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -757,6 +757,7 @@ static int zynq_gpio_remove(struct platform_device *pdev)
gpiochip_remove(&gpio->chip);
clk_disable_unprepare(gpio->clk);
device_set_wakeup_capable(&pdev->dev, 0);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 01657830b470..f7b49d5ce4b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1130,6 +1130,9 @@ struct amdgpu_gfx {
uint32_t me_feature_version;
uint32_t ce_feature_version;
uint32_t pfp_feature_version;
+ uint32_t rlc_feature_version;
+ uint32_t mec_feature_version;
+ uint32_t mec2_feature_version;
struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
unsigned num_gfx_rings;
struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
@@ -1614,6 +1617,9 @@ struct amdgpu_uvd {
#define AMDGPU_MAX_VCE_HANDLES 16
#define AMDGPU_VCE_FIRMWARE_OFFSET 256
+#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
+#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)
+
struct amdgpu_vce {
struct amdgpu_bo *vcpu_bo;
uint64_t gpu_addr;
@@ -1626,6 +1632,7 @@ struct amdgpu_vce {
const struct firmware *fw; /* VCE firmware */
struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
struct amdgpu_irq_src irq;
+ unsigned harvest_config;
};
/*
@@ -1635,6 +1642,7 @@ struct amdgpu_sdma {
/* SDMA firmware */
const struct firmware *fw;
uint32_t fw_version;
+ uint32_t feature_version;
struct amdgpu_ring ring;
};
@@ -1862,6 +1870,12 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
+struct amdgpu_ip_block_status {
+ bool valid;
+ bool sw;
+ bool hw;
+};
+
struct amdgpu_device {
struct device *dev;
struct drm_device *ddev;
@@ -2004,7 +2018,7 @@ struct amdgpu_device {
const struct amdgpu_ip_block_version *ip_blocks;
int num_ip_blocks;
- bool *ip_block_enabled;
+ struct amdgpu_ip_block_status *ip_block_status;
struct mutex mn_lock;
DECLARE_HASHTABLE(mn_hash, 7);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d63135bf29c0..1f040d85ac47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -669,6 +669,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
struct amdgpu_cs_parser *p)
{
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_ib *ib;
int i, j, r;
@@ -694,6 +695,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
for (j = 0; j < num_deps; ++j) {
struct amdgpu_fence *fence;
struct amdgpu_ring *ring;
+ struct amdgpu_ctx *ctx;
r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
deps[j].ip_instance,
@@ -701,14 +703,21 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
if (r)
return r;
+ ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
+ if (ctx == NULL)
+ return -EINVAL;
+
r = amdgpu_fence_recreate(ring, p->filp,
deps[j].handle,
&fence);
- if (r)
+ if (r) {
+ amdgpu_ctx_put(ctx);
return r;
+ }
amdgpu_sync_fence(&ib->sync, fence);
amdgpu_fence_unref(&fence);
+ amdgpu_ctx_put(ctx);
}
}
@@ -808,12 +817,16 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
wait->in.ring, &ring);
- if (r)
+ if (r) {
+ amdgpu_ctx_put(ctx);
return r;
+ }
r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
- if (r)
+ if (r) {
+ amdgpu_ctx_put(ctx);
return r;
+ }
r = fence_wait_timeout(&fence->base, true, timeout);
amdgpu_fence_unref(&fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index ba46be361c9b..99f158e1baff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1191,8 +1191,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
return -EINVAL;
}
- adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
- if (adev->ip_block_enabled == NULL)
+ adev->ip_block_status = kcalloc(adev->num_ip_blocks,
+ sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
+ if (adev->ip_block_status == NULL)
return -ENOMEM;
if (adev->ip_blocks == NULL) {
@@ -1203,14 +1204,19 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_ERROR("disabled ip block: %d\n", i);
- adev->ip_block_enabled[i] = false;
+ adev->ip_block_status[i].valid = false;
} else {
if (adev->ip_blocks[i].funcs->early_init) {
r = adev->ip_blocks[i].funcs->early_init((void *)adev);
- if (r)
+ if (r == -ENOENT)
+ adev->ip_block_status[i].valid = false;
+ else if (r)
return r;
+ else
+ adev->ip_block_status[i].valid = true;
+ } else {
+ adev->ip_block_status[i].valid = true;
}
- adev->ip_block_enabled[i] = true;
}
}
@@ -1222,11 +1228,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].valid)
continue;
r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
if (r)
return r;
+ adev->ip_block_status[i].sw = true;
/* need to do gmc hw init early so we can allocate gpu mem */
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
r = amdgpu_vram_scratch_init(adev);
@@ -1238,11 +1245,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
r = amdgpu_wb_init(adev);
if (r)
return r;
+ adev->ip_block_status[i].hw = true;
}
}
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].sw)
continue;
/* gmc hw init is done early */
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
@@ -1250,6 +1258,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
if (r)
return r;
+ adev->ip_block_status[i].hw = true;
}
return 0;
@@ -1260,7 +1269,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
int i = 0, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].valid)
continue;
/* enable clockgating to save power */
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1282,7 +1291,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
int i, r;
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].hw)
continue;
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_wb_fini(adev);
@@ -1295,14 +1304,16 @@ static int amdgpu_fini(struct amdgpu_device *adev)
return r;
r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
/* XXX handle errors */
+ adev->ip_block_status[i].hw = false;
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].sw)
continue;
r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
/* XXX handle errors */
- adev->ip_block_enabled[i] = false;
+ adev->ip_block_status[i].sw = false;
+ adev->ip_block_status[i].valid = false;
}
return 0;
@@ -1313,7 +1324,7 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
int i, r;
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].valid)
continue;
/* ungate blocks so that suspend can properly shut them down */
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1331,7 +1342,7 @@ static int amdgpu_resume(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].valid)
continue;
r = adev->ip_blocks[i].funcs->resume(adev);
if (r)
@@ -1577,8 +1588,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
- kfree(adev->ip_block_enabled);
- adev->ip_block_enabled = NULL;
+ kfree(adev->ip_block_status);
+ adev->ip_block_status = NULL;
adev->accel_working = false;
/* free i2c buses */
amdgpu_i2c_fini(adev);
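
The bool array adev->ip_block_enabled becomes an array of struct amdgpu_ip_block_status so that sw_init, hw_init and the teardown paths each key off the stage that actually completed rather than a single flag. A compact sketch of that staged bring-up/teardown bookkeeping, with placeholder init/fini functions instead of the amdgpu IP-block callbacks:

#include <stdbool.h>
#include <stdio.h>

#define NUM_BLOCKS 3

struct block_status {
	bool valid;	/* early init accepted the block */
	bool sw;	/* sw stage completed */
	bool hw;	/* hw stage completed */
};

static struct block_status status[NUM_BLOCKS];

static int sw_init(int i)  { printf("sw_init %d\n", i); return 0; }
static int hw_init(int i)  { printf("hw_init %d\n", i); return i == 2 ? -1 : 0; }
static void hw_fini(int i) { printf("hw_fini %d\n", i); }
static void sw_fini(int i) { printf("sw_fini %d\n", i); }

static int bring_up(void)
{
	for (int i = 0; i < NUM_BLOCKS; i++) {
		status[i].valid = true;
		if (sw_init(i))
			return -1;
		status[i].sw = true;
	}
	for (int i = 0; i < NUM_BLOCKS; i++) {
		if (!status[i].sw)
			continue;
		if (hw_init(i))
			return -1;	/* later blocks keep hw == false */
		status[i].hw = true;
	}
	return 0;
}

static void tear_down(void)
{
	/* Reverse order, undoing only the stages that were reached. */
	for (int i = NUM_BLOCKS - 1; i >= 0; i--)
		if (status[i].hw)
			hw_fini(i);
	for (int i = NUM_BLOCKS - 1; i >= 0; i--)
		if (status[i].sw)
			sw_fini(i);
}

int main(void)
{
	if (bring_up())
		printf("bring-up failed part way\n");
	tear_down();
	return 0;
}
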
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index ae43b58c9733..4afc507820c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -449,7 +449,7 @@ out:
* vital here, so they are not reported back to userspace.
*/
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va)
+ struct amdgpu_bo_va *bo_va, uint32_t operation)
{
struct ttm_validate_buffer tv, *entry;
struct amdgpu_bo_list_entry *vm_bos;
@@ -485,7 +485,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (r)
goto error_unlock;
- r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
+
+ if (operation == AMDGPU_VA_OP_MAP)
+ r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
error_unlock:
mutex_unlock(&bo_va->vm->mutex);
@@ -580,7 +582,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
}
if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
- amdgpu_gem_va_update_vm(adev, bo_va);
+ amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
drm_gem_object_unreference_unlocked(gobj);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 52dff75aac6f..bc0fac618a3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -180,16 +180,16 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
if (vm) {
/* do context switch */
amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
- }
- if (vm && ring->funcs->emit_gds_switch)
- amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
- ib->gds_base, ib->gds_size,
- ib->gws_base, ib->gws_size,
- ib->oa_base, ib->oa_size);
+ if (ring->funcs->emit_gds_switch)
+ amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
+ ib->gds_base, ib->gds_size,
+ ib->gws_base, ib->gws_size,
+ ib->oa_base, ib->oa_size);
- if (ring->funcs->emit_hdp_flush)
- amdgpu_ring_emit_hdp_flush(ring);
+ if (ring->funcs->emit_hdp_flush)
+ amdgpu_ring_emit_hdp_flush(ring);
+ }
old_ctx = ring->current_ctx;
for (i = 0; i < num_ibs; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 5533434c7a8f..3bfe67de8349 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -235,7 +235,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
for (i = 0; i < adev->num_ip_blocks; i++) {
if (adev->ip_blocks[i].type == type &&
- adev->ip_block_enabled[i]) {
+ adev->ip_block_status[i].valid) {
ip.hw_ip_version_major = adev->ip_blocks[i].major;
ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
ip.capabilities_flags = 0;
@@ -274,7 +274,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
for (i = 0; i < adev->num_ip_blocks; i++)
if (adev->ip_blocks[i].type == type &&
- adev->ip_block_enabled[i] &&
+ adev->ip_block_status[i].valid &&
count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
count++;
@@ -317,16 +317,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
break;
case AMDGPU_INFO_FW_GFX_RLC:
fw_info.ver = adev->gfx.rlc_fw_version;
- fw_info.feature = 0;
+ fw_info.feature = adev->gfx.rlc_feature_version;
break;
case AMDGPU_INFO_FW_GFX_MEC:
- if (info->query_fw.index == 0)
+ if (info->query_fw.index == 0) {
fw_info.ver = adev->gfx.mec_fw_version;
- else if (info->query_fw.index == 1)
+ fw_info.feature = adev->gfx.mec_feature_version;
+ } else if (info->query_fw.index == 1) {
fw_info.ver = adev->gfx.mec2_fw_version;
- else
+ fw_info.feature = adev->gfx.mec2_feature_version;
+ } else
return -EINVAL;
- fw_info.feature = 0;
break;
case AMDGPU_INFO_FW_SMC:
fw_info.ver = adev->pm.fw_version;
@@ -336,7 +337,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (info->query_fw.index >= 2)
return -EINVAL;
fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
- fw_info.feature = 0;
+ fw_info.feature = adev->sdma[info->query_fw.index].feature_version;
break;
default:
return -EINVAL;
@@ -416,7 +417,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return n ? -EFAULT : 0;
}
case AMDGPU_INFO_DEV_INFO: {
- struct drm_amdgpu_info_device dev_info;
+ struct drm_amdgpu_info_device dev_info = {};
struct amdgpu_cu_info cu_info;
dev_info.device_id = dev->pdev->device;
@@ -459,6 +460,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));
dev_info.vram_type = adev->mc.vram_type;
dev_info.vram_bit_width = adev->mc.vram_width;
+ dev_info.vce_harvest_config = adev->vce.harvest_config;
return copy_to_user(out, &dev_info,
min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2f7a5efa21c2..f5c22556ec2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -374,7 +374,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
unsigned height_in_mb = ALIGN(height / 16, 2);
unsigned fs_in_mb = width_in_mb * height_in_mb;
- unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
+ unsigned image_size, tmp, min_dpb_size, num_dpb_buffer, min_ctx_size;
image_size = width * height;
image_size += image_size / 2;
@@ -466,6 +466,8 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
min_dpb_size = image_size * num_dpb_buffer;
+ min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
+ * 16 * num_dpb_buffer + 52 * 1024;
break;
default:
@@ -486,6 +488,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
buf_sizes[0x1] = dpb_size;
buf_sizes[0x2] = image_size;
+ buf_sizes[0x4] = min_ctx_size;
return 0;
}
@@ -628,6 +631,13 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
return -EINVAL;
}
+ } else if (cmd == 0x206) {
+ if ((end - start) < ctx->buf_sizes[4]) {
+ DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
+ (unsigned)(end - start),
+ ctx->buf_sizes[4]);
+ return -EINVAL;
+ }
} else if ((cmd != 0x100) && (cmd != 0x204)) {
DRM_ERROR("invalid UVD command %X!\n", cmd);
return -EINVAL;
@@ -755,9 +765,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
struct amdgpu_uvd_cs_ctx ctx = {};
unsigned buf_sizes[] = {
[0x00000000] = 2048,
- [0x00000001] = 32 * 1024 * 1024,
- [0x00000002] = 2048 * 1152 * 3,
+ [0x00000001] = 0xFFFFFFFF,
+ [0x00000002] = 0xFFFFFFFF,
[0x00000003] = 2048,
+ [0x00000004] = 0xFFFFFFFF,
};
struct amdgpu_ib *ib = &parser->ibs[ib_idx];
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index ab83cc1ca4cc..15df46c93f0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -500,6 +500,7 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev)
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
fw_data = (const __le32 *)
(adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index f75a31df30bd..ace870afc7d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -494,29 +494,67 @@ static void cz_dpm_fini(struct amdgpu_device *adev)
amdgpu_free_extended_power_table(adev);
}
+#define ixSMUSVI_NB_CURRENTVID 0xD8230044
+#define CURRENT_NB_VID_MASK 0xff000000
+#define CURRENT_NB_VID__SHIFT 24
+#define ixSMUSVI_GFX_CURRENTVID 0xD8230048
+#define CURRENT_GFX_VID_MASK 0xff000000
+#define CURRENT_GFX_VID__SHIFT 24
+
static void
cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
struct seq_file *m)
{
+ struct cz_power_info *pi = cz_get_pi(adev);
struct amdgpu_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- u32 current_index =
- (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
- u32 sclk, tmp;
- u16 vddc;
-
- if (current_index >= NUM_SCLK_LEVELS) {
- seq_printf(m, "invalid dpm profile %d\n", current_index);
+ struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
+ &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+ struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
+ &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+ u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX),
+ TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
+ u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+ TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
+ u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+ TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
+ u32 sclk, vclk, dclk, ecclk, tmp;
+ u16 vddnb, vddgfx;
+
+ if (sclk_index >= NUM_SCLK_LEVELS) {
+ seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index);
} else {
- sclk = table->entries[current_index].clk;
- tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
- SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
- SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
- vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
- seq_printf(m, "power level %d sclk: %u vddc: %u\n",
- current_index, sclk, vddc);
+ sclk = table->entries[sclk_index].clk;
+ seq_printf(m, "%u sclk: %u\n", sclk_index, sclk);
+ }
+
+ tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) &
+ CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
+ vddnb = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
+ tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) &
+ CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
+ vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
+ seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx);
+
+ seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
+ if (!pi->uvd_power_gated) {
+ if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ seq_printf(m, "invalid uvd dpm level %d\n", uvd_index);
+ } else {
+ vclk = uvd_table->entries[uvd_index].vclk;
+ dclk = uvd_table->entries[uvd_index].dclk;
+ seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk);
+ }
+ }
+
+ seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
+ if (!pi->vce_power_gated) {
+ if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ seq_printf(m, "invalid vce dpm level %d\n", vce_index);
+ } else {
+ ecclk = vce_table->entries[vce_index].ecclk;
+ seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk);
+ }
}
}
@@ -1679,25 +1717,31 @@ static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev)
if (ret)
return ret;
- DRM_INFO("DPM unforce state min=%d, max=%d.\n",
- pi->sclk_dpm.soft_min_clk,
- pi->sclk_dpm.soft_max_clk);
+ DRM_DEBUG("DPM unforce state min=%d, max=%d.\n",
+ pi->sclk_dpm.soft_min_clk,
+ pi->sclk_dpm.soft_max_clk);
return 0;
}
static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
- enum amdgpu_dpm_forced_level level)
+ enum amdgpu_dpm_forced_level level)
{
int ret = 0;
switch (level) {
case AMDGPU_DPM_FORCED_LEVEL_HIGH:
+ ret = cz_dpm_unforce_dpm_levels(adev);
+ if (ret)
+ return ret;
ret = cz_dpm_force_highest(adev);
if (ret)
return ret;
break;
case AMDGPU_DPM_FORCED_LEVEL_LOW:
+ ret = cz_dpm_unforce_dpm_levels(adev);
+ if (ret)
+ return ret;
ret = cz_dpm_force_lowest(adev);
if (ret)
return ret;
@@ -1711,6 +1755,8 @@ static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
break;
}
+ adev->pm.dpm.forced_level = level;
+
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 6e77964f1b64..e70a26f587a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2632,6 +2632,7 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ unsigned type;
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -2640,6 +2641,9 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
dce_v10_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
dce_v10_0_vga_enable(crtc, false);
+ /* Make sure VBLANK interrupt is still enabled */
+ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ amdgpu_irq_update(adev, &adev->crtc_irq, type);
drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
dce_v10_0_crtc_load_lut(crtc);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 7f7abb0e0be5..dcb402ee048a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2631,6 +2631,7 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ unsigned type;
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -2639,6 +2640,9 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
dce_v11_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
dce_v11_0_vga_enable(crtc, false);
+ /* Make sure VBLANK interrupt is still enabled */
+ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ amdgpu_irq_update(adev, &adev->crtc_irq, type);
drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
dce_v11_0_crtc_load_lut(crtc);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 08387dfd98a7..cc050a329c49 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2566,6 +2566,7 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ unsigned type;
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -2574,6 +2575,9 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
dce_v8_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
dce_v8_0_vga_enable(crtc, false);
+ /* Make sure VBLANK interrupt is still enabled */
+ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ amdgpu_irq_update(adev, &adev->crtc_irq, type);
drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
dce_v8_0_crtc_load_lut(crtc);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 2c188fb9fd22..0d8bf2cb1956 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2561,7 +2561,7 @@ static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
* sheduling on the ring. This function schedules the IB
* on the gfx ring for execution by the GPU.
*/
-static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -2569,15 +2569,10 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
u32 next_rptr = ring->wptr + 5;
/* drop the CE preamble IB for the same context */
- if ((ring->type == AMDGPU_RING_TYPE_GFX) &&
- (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
- !need_ctx_switch)
+ if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
return;
- if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
- control |= INDIRECT_BUFFER_VALID;
-
- if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
+ if (need_ctx_switch)
next_rptr += 2;
next_rptr += 4;
@@ -2588,7 +2583,7 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
- if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
+ if (need_ctx_switch) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
@@ -2611,6 +2606,35 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, control);
}
+static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib)
+{
+ u32 header, control = 0;
+ u32 next_rptr = ring->wptr + 5;
+
+ control |= INDIRECT_BUFFER_VALID;
+ next_rptr += 4;
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
+ amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+ amdgpu_ring_write(ring, next_rptr);
+
+ header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+ control |= ib->length_dw |
+ (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+
+ amdgpu_ring_write(ring, header);
+ amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (ib->gpu_addr & 0xFFFFFFFC));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+ amdgpu_ring_write(ring, control);
+}
+
/**
* gfx_v7_0_ring_test_ib - basic ring IB test
*
@@ -3056,6 +3080,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
+ adev->gfx.mec_feature_version = le32_to_cpu(
+ mec_hdr->ucode_feature_version);
gfx_v7_0_cp_compute_enable(adev, false);
@@ -3078,6 +3104,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
+ adev->gfx.mec2_feature_version = le32_to_cpu(
+ mec2_hdr->ucode_feature_version);
/* MEC2 */
fw_data = (const __le32 *)
@@ -4042,6 +4070,8 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
amdgpu_ucode_print_rlc_hdr(&hdr->header);
adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->gfx.rlc_feature_version = le32_to_cpu(
+ hdr->ucode_feature_version);
gfx_v7_0_rlc_stop(adev);
@@ -5098,7 +5128,7 @@ static void gfx_v7_0_print_status(void *handle)
dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n",
RREG32(mmCP_HPD_EOP_CONTROL));
- for (queue = 0; queue < 8; i++) {
+ for (queue = 0; queue < 8; queue++) {
cik_srbm_select(adev, me, pipe, queue, 0);
dev_info(adev->dev, " queue: %d\n", queue);
dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n",
@@ -5555,7 +5585,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
.parse_cs = NULL,
- .emit_ib = gfx_v7_0_ring_emit_ib,
+ .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
.emit_semaphore = gfx_v7_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
@@ -5571,7 +5601,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.get_wptr = gfx_v7_0_ring_get_wptr_compute,
.set_wptr = gfx_v7_0_ring_set_wptr_compute,
.parse_cs = NULL,
- .emit_ib = gfx_v7_0_ring_emit_ib,
+ .emit_ib = gfx_v7_0_ring_emit_ib_compute,
.emit_fence = gfx_v7_0_ring_emit_fence_compute,
.emit_semaphore = gfx_v7_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 7b683fb2173c..20e2cfd521d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -587,6 +587,7 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
int err;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
+ const struct gfx_firmware_header_v1_0 *cp_hdr;
DRM_DEBUG("\n");
@@ -611,6 +612,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
if (err)
goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
@@ -619,6 +623,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.me_fw);
if (err)
goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+ adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
@@ -627,12 +634,18 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.ce_fw);
if (err)
goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+ adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
@@ -641,6 +654,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.mec_fw);
if (err)
goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
@@ -648,6 +664,12 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
if (err)
goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec2_fw->data;
+ adev->gfx.mec2_fw_version = le32_to_cpu(
+ cp_hdr->header.ucode_version);
+ adev->gfx.mec2_feature_version = le32_to_cpu(
+ cp_hdr->ucode_feature_version);
} else {
err = 0;
adev->gfx.mec2_fw = NULL;
@@ -1813,10 +1835,7 @@ static u32 gfx_v8_0_get_rb_disabled(struct amdgpu_device *adev,
u32 data, mask;
data = RREG32(mmCC_RB_BACKEND_DISABLE);
- if (data & 1)
- data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
- else
- data = 0;
+ data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
@@ -1986,6 +2005,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
adev->gfx.config.max_shader_engines = 1;
adev->gfx.config.max_tile_pipes = 2;
adev->gfx.config.max_sh_per_se = 1;
+ adev->gfx.config.max_backends_per_se = 2;
switch (adev->pdev->revision) {
case 0xc4:
@@ -1994,7 +2014,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
case 0xcc:
/* B10 */
adev->gfx.config.max_cu_per_sh = 8;
- adev->gfx.config.max_backends_per_se = 2;
break;
case 0xc5:
case 0x81:
@@ -2003,14 +2022,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
case 0xcd:
/* B8 */
adev->gfx.config.max_cu_per_sh = 6;
- adev->gfx.config.max_backends_per_se = 2;
break;
case 0xc6:
case 0xca:
case 0xce:
/* B6 */
adev->gfx.config.max_cu_per_sh = 6;
- adev->gfx.config.max_backends_per_se = 2;
break;
case 0xc7:
case 0x87:
@@ -2018,7 +2035,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
default:
/* B4 */
adev->gfx.config.max_cu_per_sh = 4;
- adev->gfx.config.max_backends_per_se = 1;
break;
}
@@ -2278,7 +2294,6 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
amdgpu_ucode_print_rlc_hdr(&hdr->header);
- adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
@@ -2364,12 +2379,6 @@ static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
- adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
- adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
- adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
- adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
- adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
- adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
gfx_v8_0_cp_gfx_enable(adev, false);
@@ -2625,7 +2634,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
- adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
fw_data = (const __le32 *)
(adev->gfx.mec_fw->data +
@@ -2644,7 +2652,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
- adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
fw_data = (const __le32 *)
(adev->gfx.mec2_fw->data +
@@ -3128,7 +3135,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
AMDGPU_DOORBELL_KIQ << 2);
WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
- 0x7FFFF << 2);
+ AMDGPU_DOORBELL_MEC_RING7 << 2);
}
tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
@@ -3756,7 +3763,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x20); /* poll interval */
}
-static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
+static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -3764,15 +3771,10 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
u32 next_rptr = ring->wptr + 5;
/* drop the CE preamble IB for the same context */
- if ((ring->type == AMDGPU_RING_TYPE_GFX) &&
- (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
- !need_ctx_switch)
+ if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
return;
- if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
- control |= INDIRECT_BUFFER_VALID;
-
- if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
+ if (need_ctx_switch)
next_rptr += 2;
next_rptr += 4;
@@ -3783,7 +3785,7 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
- if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
+ if (need_ctx_switch) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
@@ -3806,6 +3808,36 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, control);
}
+static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib)
+{
+ u32 header, control = 0;
+ u32 next_rptr = ring->wptr + 5;
+
+ control |= INDIRECT_BUFFER_VALID;
+
+ next_rptr += 4;
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
+ amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+ amdgpu_ring_write(ring, next_rptr);
+
+ header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+ control |= ib->length_dw |
+ (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+
+ amdgpu_ring_write(ring, header);
+ amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (ib->gpu_addr & 0xFFFFFFFC));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+ amdgpu_ring_write(ring, control);
+}
+
static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned flags)
{
@@ -4227,7 +4259,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
.parse_cs = NULL,
- .emit_ib = gfx_v8_0_ring_emit_ib,
+ .emit_ib = gfx_v8_0_ring_emit_ib_gfx,
.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
.emit_semaphore = gfx_v8_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
@@ -4243,7 +4275,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.get_wptr = gfx_v8_0_ring_get_wptr_compute,
.set_wptr = gfx_v8_0_ring_set_wptr_compute,
.parse_cs = NULL,
- .emit_ib = gfx_v8_0_ring_emit_ib,
+ .emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
.emit_semaphore = gfx_v8_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
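
Like gfx_v7_0 above, gfx_v8_0 stops branching on ring->type inside a single emit_ib helper and instead wires separate _gfx and _compute implementations into the per-ring function tables. A toy sketch of the same ops-table split, using hypothetical ring/ops types rather than amdgpu_ring_funcs:

#include <stdio.h>

struct ring;

struct ring_ops {
	void (*emit_ib)(struct ring *r, int len);
};

struct ring {
	const char *name;
	const struct ring_ops *ops;
};

/* Graphics rings keep the context-switch preamble handling. */
static void emit_ib_gfx(struct ring *r, int len)
{
	printf("%s: gfx IB, %d dwords (with ctx-switch handling)\n", r->name, len);
}

/* Compute rings always mark the buffer valid and skip the preamble logic. */
static void emit_ib_compute(struct ring *r, int len)
{
	printf("%s: compute IB, %d dwords (INDIRECT_BUFFER_VALID)\n", r->name, len);
}

static const struct ring_ops gfx_ops     = { .emit_ib = emit_ib_gfx };
static const struct ring_ops compute_ops = { .emit_ib = emit_ib_compute };

int main(void)
{
	struct ring gfx     = { "gfx0",  &gfx_ops };
	struct ring compute = { "comp0", &compute_ops };

	gfx.ops->emit_ib(&gfx, 16);
	compute.ops->emit_ib(&compute, 16);
	return 0;
}
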
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index d7895885fe0c..a988dfb1d394 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -121,6 +121,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
int err, i;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
+ const struct sdma_firmware_header_v1_0 *hdr;
DRM_DEBUG("\n");
@@ -142,6 +143,9 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->sdma[i].fw);
if (err)
goto out;
+ hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+ adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
if (adev->firmware.smu_load) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -541,8 +545,6 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
- adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-
fw_data = (const __le32 *)
(adev->sdma[i].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 7bb37b93993f..2b86569b18d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -159,6 +159,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
int err, i;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
+ const struct sdma_firmware_header_v1_0 *hdr;
DRM_DEBUG("\n");
@@ -183,6 +184,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->sdma[i].fw);
if (err)
goto out;
+ hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+ adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
if (adev->firmware.smu_load) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -630,8 +634,6 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
- adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-
fw_data = (const __le32 *)
(adev->sdma[i].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index d62c4002e39c..d1064ca3670e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -35,6 +35,8 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
@@ -112,6 +114,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (idx = 0; idx < 2; ++idx) {
+
+ if (adev->vce.harvest_config & (1 << idx))
+ continue;
+
if(idx == 0)
WREG32_P(mmGRBM_GFX_INDEX, 0,
~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
@@ -190,10 +196,52 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
return 0;
}
+#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS 0xC0014074
+#define VCE_HARVEST_FUSE_MACRO__SHIFT 27
+#define VCE_HARVEST_FUSE_MACRO__MASK 0x18000000
+
+static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
+{
+ u32 tmp;
+ unsigned ret;
+
+ if (adev->flags & AMDGPU_IS_APU)
+ tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
+ VCE_HARVEST_FUSE_MACRO__MASK) >>
+ VCE_HARVEST_FUSE_MACRO__SHIFT;
+ else
+ tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
+ CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
+ CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;
+
+ switch (tmp) {
+ case 1:
+ ret = AMDGPU_VCE_HARVEST_VCE0;
+ break;
+ case 2:
+ ret = AMDGPU_VCE_HARVEST_VCE1;
+ break;
+ case 3:
+ ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
static int vce_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
+
+ if ((adev->vce.harvest_config &
+ (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
+ (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
+ return -ENOENT;
+
vce_v3_0_set_ring_funcs(adev);
vce_v3_0_set_irq_funcs(adev);
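
vce_v3_0_get_harvest_config() reads a two-bit fuse field and turns it into AMDGPU_VCE_HARVEST_VCE0/VCE1 flags; early init then refuses the block with -ENOENT when both instances are fused off, which feeds the ip_block_status.valid handling earlier in this series. A small sketch of that decode, with invented register values in place of the SMC reads:

#include <stdio.h>

#define HARVEST_VCE0 (1 << 0)
#define HARVEST_VCE1 (1 << 1)

/* Decode a 2-bit fuse field into per-instance harvest flags. */
static unsigned int decode_harvest(unsigned int fuse)
{
	switch (fuse & 0x3) {
	case 1:
		return HARVEST_VCE0;
	case 2:
		return HARVEST_VCE1;
	case 3:
		return HARVEST_VCE0 | HARVEST_VCE1;
	default:
		return 0;
	}
}

int main(void)
{
	for (unsigned int fuse = 0; fuse < 4; fuse++) {
		unsigned int cfg = decode_harvest(fuse);

		if ((cfg & (HARVEST_VCE0 | HARVEST_VCE1)) ==
		    (HARVEST_VCE0 | HARVEST_VCE1))
			printf("fuse %u: both instances harvested, skip block\n", fuse);
		else
			printf("fuse %u: harvest mask 0x%x\n", fuse, cfg);
	}
	return 0;
}
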
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index fa5a4448531d..68552da40287 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -122,6 +122,32 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
+/* smu_8_0_d.h */
+#define mmMP0PUB_IND_INDEX 0x180
+#define mmMP0PUB_IND_DATA 0x181
+
+static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ WREG32(mmMP0PUB_IND_INDEX, (reg));
+ r = RREG32(mmMP0PUB_IND_DATA);
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ return r;
+}
+
+static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ WREG32(mmMP0PUB_IND_INDEX, (reg));
+ WREG32(mmMP0PUB_IND_DATA, (v));
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+}
+
static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags;
@@ -1222,8 +1248,13 @@ static int vi_common_early_init(void *handle)
bool smc_enabled = false;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->smc_rreg = &vi_smc_rreg;
- adev->smc_wreg = &vi_smc_wreg;
+ if (adev->flags & AMDGPU_IS_APU) {
+ adev->smc_rreg = &cz_smc_rreg;
+ adev->smc_wreg = &cz_smc_wreg;
+ } else {
+ adev->smc_rreg = &vi_smc_rreg;
+ adev->smc_wreg = &vi_smc_wreg;
+ }
adev->pcie_rreg = &vi_pcie_rreg;
adev->pcie_wreg = &vi_pcie_wreg;
adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 42d2ffa08716..01ffe9bffe38 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -531,8 +531,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
- crtc->mode = *adj;
-
val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
if (val != dcrtc->dumb_ctrl) {
dcrtc->dumb_ctrl = val;
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 580e10acaa3a..60a688ef81c7 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -69,8 +69,9 @@ void armada_gem_free_object(struct drm_gem_object *obj)
if (dobj->obj.import_attach) {
/* We only ever display imported data */
- dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt,
- DMA_TO_DEVICE);
+ if (dobj->sgt)
+ dma_buf_unmap_attachment(dobj->obj.import_attach,
+ dobj->sgt, DMA_TO_DEVICE);
drm_prime_gem_destroy(&dobj->obj, NULL);
}
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index c5b06fdb459c..e939faba7fcc 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -7,6 +7,7 @@
* published by the Free Software Foundation.
*/
#include <drm/drmP.h>
+#include <drm/drm_plane_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
@@ -85,16 +86,8 @@ static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
if (fb)
armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);
-}
-static unsigned armada_limit(int start, unsigned size, unsigned max)
-{
- int end = start + size;
- if (end < 0)
- return 0;
- if (start < 0)
- start = 0;
- return (unsigned)end > max ? max - start : end - start;
+ wake_up(&dplane->vbl.wait);
}
static int
@@ -105,26 +98,39 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
{
struct armada_plane *dplane = drm_to_armada_plane(plane);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct drm_rect src = {
+ .x1 = src_x,
+ .y1 = src_y,
+ .x2 = src_x + src_w,
+ .y2 = src_y + src_h,
+ };
+ struct drm_rect dest = {
+ .x1 = crtc_x,
+ .y1 = crtc_y,
+ .x2 = crtc_x + crtc_w,
+ .y2 = crtc_y + crtc_h,
+ };
+ const struct drm_rect clip = {
+ .x2 = crtc->mode.hdisplay,
+ .y2 = crtc->mode.vdisplay,
+ };
uint32_t val, ctrl0;
unsigned idx = 0;
+ bool visible;
int ret;
- crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay);
- crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay);
+ ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
+ 0, INT_MAX, true, false, &visible);
+ if (ret)
+ return ret;
+
ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;
/* Does the position/size result in nothing to display? */
- if (crtc_w == 0 || crtc_h == 0) {
+ if (!visible)
ctrl0 &= ~CFG_DMA_ENA;
- }
-
- /*
- * FIXME: if the starting point is off screen, we need to
- * adjust src_x, src_y, src_w, src_h appropriately, and
- * according to the scale.
- */
if (!dcrtc->plane) {
dcrtc->plane = plane;
@@ -134,15 +140,19 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
/* FIXME: overlay on an interlaced display */
/* Just updating the position/size? */
if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
- val = (src_h & 0xffff0000) | src_w >> 16;
+ val = (drm_rect_height(&src) & 0xffff0000) |
+ drm_rect_width(&src) >> 16;
dplane->src_hw = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
- val = crtc_h << 16 | crtc_w;
+
+ val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
dplane->dst_hw = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
- val = crtc_y << 16 | crtc_x;
+
+ val = dest.y1 << 16 | dest.x1;
dplane->dst_yx = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
+
return 0;
} else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
/* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
@@ -150,15 +160,14 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
dcrtc->base + LCD_SPU_SRAM_PARA1);
}
- ret = wait_event_timeout(dplane->vbl.wait,
- list_empty(&dplane->vbl.update.node),
- HZ/25);
- if (ret < 0)
- return ret;
+ wait_event_timeout(dplane->vbl.wait,
+ list_empty(&dplane->vbl.update.node),
+ HZ/25);
if (plane->fb != fb) {
struct armada_gem_object *obj = drm_fb_obj(fb);
- uint32_t sy, su, sv;
+ uint32_t addr[3], pixel_format;
+ int i, num_planes, hsub;
/*
* Take a reference on the new framebuffer - we want to
@@ -178,26 +187,39 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
older_fb);
}
- src_y >>= 16;
- src_x >>= 16;
- sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] +
- src_x * fb->bits_per_pixel / 8;
- su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] +
- src_x;
- sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] +
- src_x;
+ src_y = src.y1 >> 16;
+ src_x = src.x1 >> 16;
- armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+ pixel_format = fb->pixel_format;
+ hsub = drm_format_horz_chroma_subsampling(pixel_format);
+ num_planes = drm_format_num_planes(pixel_format);
+
+ /*
+ * Annoyingly, shifting a YUYV-format image by one pixel
+ * causes the U/V planes to toggle. Toggle the UV swap.
+ * (Unfortunately, this causes momentary colour flickering.)
+ */
+ if (src_x & (hsub - 1) && num_planes == 1)
+ ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
+
+ for (i = 0; i < num_planes; i++)
+ addr[i] = obj->dev_addr + fb->offsets[i] +
+ src_y * fb->pitches[i] +
+ src_x * drm_format_plane_cpp(pixel_format, i);
+ for (; i < ARRAY_SIZE(addr); i++)
+ addr[i] = 0;
+
+ armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
LCD_SPU_DMA_START_ADDR_Y0);
- armada_reg_queue_set(dplane->vbl.regs, idx, su,
+ armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
LCD_SPU_DMA_START_ADDR_U0);
- armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+ armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
LCD_SPU_DMA_START_ADDR_V0);
- armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+ armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
LCD_SPU_DMA_START_ADDR_Y1);
- armada_reg_queue_set(dplane->vbl.regs, idx, su,
+ armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
LCD_SPU_DMA_START_ADDR_U1);
- armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+ armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
LCD_SPU_DMA_START_ADDR_V1);
val = fb->pitches[0] << 16 | fb->pitches[0];
@@ -208,24 +230,27 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
LCD_SPU_DMA_PITCH_UV);
}
- val = (src_h & 0xffff0000) | src_w >> 16;
+ val = (drm_rect_height(&src) & 0xffff0000) | drm_rect_width(&src) >> 16;
if (dplane->src_hw != val) {
dplane->src_hw = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_HPXL_VLN);
}
- val = crtc_h << 16 | crtc_w;
+
+ val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
if (dplane->dst_hw != val) {
dplane->dst_hw = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DZM_HPXL_VLN);
}
- val = crtc_y << 16 | crtc_x;
+
+ val = dest.y1 << 16 | dest.x1;
if (dplane->dst_yx != val) {
dplane->dst_yx = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_OVSA_HPXL_VLN);
}
+
if (dplane->ctrl0 != ctrl0) {
dplane->ctrl0 = ctrl0;
armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
@@ -279,7 +304,11 @@ static int armada_plane_disable(struct drm_plane *plane)
static void armada_plane_destroy(struct drm_plane *plane)
{
- kfree(plane);
+ struct armada_plane *dplane = drm_to_armada_plane(plane);
+
+ drm_plane_cleanup(plane);
+
+ kfree(dplane);
}
static int armada_plane_set_property(struct drm_plane *plane,
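The armada_overlay.c rework drops the hand-rolled armada_limit() clamping in favour of drm_plane_helper_check_update(), which clips the source and destination rectangles against the CRTC mode and reports whether anything is left visible; the driver then only clears its DMA-enable bit when nothing is. At its core that check is rectangle intersection; a standalone sketch under that assumption (the struct and helper below are illustrative, not the DRM drm_rect API):

#include <stdbool.h>
#include <stdio.h>

struct rect { int x1, y1, x2, y2; };       /* half-open: [x1,x2) x [y1,y2) */

/* Clip r against clip in place; return false if nothing is left. */
static bool rect_clip(struct rect *r, const struct rect *clip)
{
    if (r->x1 < clip->x1) r->x1 = clip->x1;
    if (r->y1 < clip->y1) r->y1 = clip->y1;
    if (r->x2 > clip->x2) r->x2 = clip->x2;
    if (r->y2 > clip->y2) r->y2 = clip->y2;
    return r->x1 < r->x2 && r->y1 < r->y2;
}

int main(void)
{
    /* Destination rect partly off the left edge of a 1920x1080 mode. */
    struct rect dest = { .x1 = -100, .y1 = 50, .x2 = 540, .y2 = 530 };
    const struct rect clip = { .x1 = 0, .y1 = 0, .x2 = 1920, .y2 = 1080 };

    bool visible = rect_clip(&dest, &clip);

    /* A driver would clear its DMA-enable bit rather than skip the update
     * when nothing is visible, much as the hunk does with CFG_DMA_ENA. */
    printf("visible=%d dest=%dx%d+%d+%d\n", visible,
           dest.x2 - dest.x1, dest.y2 - dest.y1, dest.x1, dest.y1);
    return 0;
}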
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index f69b92535505..5ae5c6923128 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -355,6 +355,7 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
planes->overlays[i]->base.possible_crtcs = 1 << crtc->id;
drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs);
+ drm_crtc_vblank_reset(&crtc->base);
dc->crtc = &crtc->base;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 60b0c13d7ff5..ef6182bc8e5e 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -313,20 +313,20 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
pm_runtime_enable(dev->dev);
- ret = atmel_hlcdc_dc_modeset_init(dev);
+ ret = drm_vblank_init(dev, 1);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize mode setting\n");
+ dev_err(dev->dev, "failed to initialize vblank\n");
goto err_periph_clk_disable;
}
- drm_mode_config_reset(dev);
-
- ret = drm_vblank_init(dev, 1);
+ ret = atmel_hlcdc_dc_modeset_init(dev);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize vblank\n");
+ dev_err(dev->dev, "failed to initialize mode setting\n");
goto err_periph_clk_disable;
}
+ drm_mode_config_reset(dev);
+
pm_runtime_get_sync(dev->dev);
ret = drm_irq_install(dev, dc->hlcdc->irq);
pm_runtime_put_sync(dev->dev);
@@ -559,7 +559,7 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 5b59d5ad7d1c..9dcc7280e572 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -196,7 +196,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
}
funcs = connector->helper_private;
- new_encoder = funcs->best_encoder(connector);
+
+ if (funcs->atomic_best_encoder)
+ new_encoder = funcs->atomic_best_encoder(connector,
+ connector_state);
+ else
+ new_encoder = funcs->best_encoder(connector);
if (!new_encoder) {
DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -229,6 +234,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
}
}
+ if (WARN_ON(!connector_state->crtc))
+ return -EINVAL;
+
connector_state->best_encoder = new_encoder;
idx = drm_crtc_index(connector_state->crtc);
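The drm_atomic_helper.c change calls the connector's ->atomic_best_encoder() hook when a driver provides one and only falls back to the legacy ->best_encoder() otherwise, which is what lets the i915 MST code further down pick an encoder per CRTC from the atomic state. A small function-pointer sketch of that "new hook if present, old hook otherwise" dispatch (the types and hook names below are simplified stand-ins):

#include <stdio.h>
#include <stddef.h>

struct encoder { int id; };
struct connector_state { int crtc; };

struct connector_helper_funcs {
    /* legacy hook: picks an encoder from current state */
    struct encoder *(*best_encoder)(void);
    /* newer hook: picks an encoder from the atomic state being built */
    struct encoder *(*atomic_best_encoder)(const struct connector_state *st);
};

static struct encoder legacy_enc = { .id = 1 };
static struct encoder per_crtc_enc[2] = { { .id = 10 }, { .id = 11 } };

static struct encoder *legacy_pick(void) { return &legacy_enc; }
static struct encoder *atomic_pick(const struct connector_state *st)
{
    return &per_crtc_enc[st->crtc & 1];
}

static struct encoder *route(const struct connector_helper_funcs *funcs,
                             const struct connector_state *st)
{
    if (funcs->atomic_best_encoder)
        return funcs->atomic_best_encoder(st);
    return funcs->best_encoder();
}

int main(void)
{
    struct connector_state st = { .crtc = 1 };
    struct connector_helper_funcs legacy_only = { .best_encoder = legacy_pick };
    struct connector_helper_funcs modern = { .best_encoder = legacy_pick,
                                             .atomic_best_encoder = atomic_pick };

    printf("legacy driver -> encoder %d\n", route(&legacy_only, &st)->id);
    printf("atomic driver -> encoder %d\n", route(&modern, &st)->id);
    return 0;
}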
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index b9ba06176eb1..fed748311b92 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2706,8 +2706,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- /* For some reason crtc x/y offsets are signed internally. */
- if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
+ /*
+ * Universal plane src offsets are only 16.16, prevent havoc for
+ * drivers using universal plane code internally.
+ */
+ if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
return -ERANGE;
drm_modeset_lock_all(dev);
@@ -5395,12 +5398,9 @@ void drm_mode_config_reset(struct drm_device *dev)
if (encoder->funcs->reset)
encoder->funcs->reset(encoder);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- connector->status = connector_status_unknown;
-
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->funcs->reset)
connector->funcs->reset(connector);
- }
}
EXPORT_SYMBOL(drm_mode_config_reset);
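The drm_mode_setcrtc() change rejects x/y offsets with any of the upper 16 bits set, because universal-plane source coordinates are carried as 16.16 fixed point and larger integers cannot be represented there. A short sketch of that representability test and of the integer-to-16.16 conversion it protects (helper names are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* An unsigned integer fits the 16-bit integer part of 16.16 iff the
 * top 16 bits are clear -- the same "x & 0xffff0000" test as the hunk. */
static bool fits_16_16(uint32_t v)
{
    return (v & 0xffff0000u) == 0;
}

static uint32_t to_fixed_16_16(uint32_t v)
{
    return v << 16;                /* integer -> 16.16 */
}

int main(void)
{
    uint32_t ok = 1919, too_big = 70000;

    printf("%u fits: %d (16.16 = 0x%08x)\n", ok, fits_16_16(ok),
           to_fixed_16_16(ok));
    printf("%u fits: %d\n", too_big, fits_16_16(too_big));
    return 0;
}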
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 778bbb6425b8..eb603f1defc2 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -873,9 +873,10 @@ static void drm_dp_destroy_port(struct kref *kref)
from an EDID retrieval */
if (port->connector) {
mutex_lock(&mgr->destroy_connector_lock);
- list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
+ list_add(&port->next, &mgr->destroy_connector_list);
mutex_unlock(&mgr->destroy_connector_lock);
schedule_work(&mgr->destroy_connector_work);
+ return;
}
drm_dp_port_teardown_pdt(port, port->pdt);
@@ -1294,7 +1295,6 @@ retry:
goto retry;
}
DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
- WARN(1, "fail\n");
return -EIO;
}
@@ -2660,7 +2660,7 @@ static void drm_dp_tx_work(struct work_struct *work)
static void drm_dp_destroy_connector_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
- struct drm_connector *connector;
+ struct drm_dp_mst_port *port;
/*
* Not a regular list traverse as we have to drop the destroy
@@ -2669,15 +2669,21 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
*/
for (;;) {
mutex_lock(&mgr->destroy_connector_lock);
- connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
- if (!connector) {
+ port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
+ if (!port) {
mutex_unlock(&mgr->destroy_connector_lock);
break;
}
- list_del(&connector->destroy_list);
+ list_del(&port->next);
mutex_unlock(&mgr->destroy_connector_lock);
- mgr->cbs->destroy_connector(mgr, connector);
+ mgr->cbs->destroy_connector(mgr, port->connector);
+
+ drm_dp_port_teardown_pdt(port, port->pdt);
+
+ if (!port->input && port->vcpi.vcpi > 0)
+ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+ kfree(port);
}
}
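The drm_dp_mst_topology.c change queues the whole port on destroy_connector_list and lets the worker destroy the connector, tear down the PDT and release the VCPI before freeing the port, popping one entry at a time so the list lock is never held across the teardown. A standalone sketch of that drain-one-entry-at-a-time worker pattern (list layout and names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct port {
    int id;
    struct port *next;
};

static struct port *destroy_list;
static pthread_mutex_t destroy_lock = PTHREAD_MUTEX_INITIALIZER;

static void queue_destroy(struct port *p)
{
    pthread_mutex_lock(&destroy_lock);
    p->next = destroy_list;
    destroy_list = p;
    pthread_mutex_unlock(&destroy_lock);
    /* a real driver would schedule_work() here */
}

/* Worker body: pop one entry at a time so the lock is never held
 * across the (potentially sleeping) teardown work. */
static void destroy_work(void)
{
    for (;;) {
        struct port *p;

        pthread_mutex_lock(&destroy_lock);
        p = destroy_list;
        if (p)
            destroy_list = p->next;
        pthread_mutex_unlock(&destroy_lock);
        if (!p)
            break;

        printf("tearing down port %d\n", p->id);  /* connector, PDT, VCPI ... */
        free(p);
    }
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct port *p = malloc(sizeof(*p));
        if (!p)
            break;
        p->id = i;
        queue_destroy(p);
    }
    destroy_work();
    return 0;
}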
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index aa8bbb460c57..9cfcd0aef0df 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -70,6 +70,8 @@
#define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t)
+#define DRM_IOCTL_MODE_ADDFB232 DRM_IOWR(0xb8, drm_mode_fb_cmd232_t)
+
typedef struct drm_version_32 {
int version_major; /**< Major version */
int version_minor; /**< Minor version */
@@ -1016,6 +1018,63 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
return 0;
}
+typedef struct drm_mode_fb_cmd232 {
+ u32 fb_id;
+ u32 width;
+ u32 height;
+ u32 pixel_format;
+ u32 flags;
+ u32 handles[4];
+ u32 pitches[4];
+ u32 offsets[4];
+ u64 modifier[4];
+} __attribute__((packed)) drm_mode_fb_cmd232_t;
+
+static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct drm_mode_fb_cmd232 __user *argp = (void __user *)arg;
+ struct drm_mode_fb_cmd232 req32;
+ struct drm_mode_fb_cmd2 __user *req64;
+ int i;
+ int err;
+
+ if (copy_from_user(&req32, argp, sizeof(req32)))
+ return -EFAULT;
+
+ req64 = compat_alloc_user_space(sizeof(*req64));
+
+ if (!access_ok(VERIFY_WRITE, req64, sizeof(*req64))
+ || __put_user(req32.width, &req64->width)
+ || __put_user(req32.height, &req64->height)
+ || __put_user(req32.pixel_format, &req64->pixel_format)
+ || __put_user(req32.flags, &req64->flags))
+ return -EFAULT;
+
+ for (i = 0; i < 4; i++) {
+ if (__put_user(req32.handles[i], &req64->handles[i]))
+ return -EFAULT;
+ if (__put_user(req32.pitches[i], &req64->pitches[i]))
+ return -EFAULT;
+ if (__put_user(req32.offsets[i], &req64->offsets[i]))
+ return -EFAULT;
+ if (__put_user(req32.modifier[i], &req64->modifier[i]))
+ return -EFAULT;
+ }
+
+ err = drm_ioctl(file, DRM_IOCTL_MODE_ADDFB2, (unsigned long)req64);
+ if (err)
+ return err;
+
+ if (__get_user(req32.fb_id, &req64->fb_id))
+ return -EFAULT;
+
+ if (copy_to_user(argp, &req32, sizeof(req32)))
+ return -EFAULT;
+
+ return 0;
+}
+
static drm_ioctl_compat_t *drm_compat_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
@@ -1048,6 +1107,7 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
#endif
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
+ [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
};
/**
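compat_drm_mode_addfb2() above bridges the 32-bit userspace layout of drm_mode_fb_cmd2 (where the u64 modifier array is only 4-byte aligned) to the native layout by copying every field across before forwarding the ioctl, then copying the resulting fb_id back. A userspace sketch of the same field-by-field widening; the two structs below are simplified stand-ins for the real ABI:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* 32-bit ABI view: packed, so the u64 array is only 4-byte aligned. */
struct fb_cmd2_32 {
    uint32_t fb_id, width, height, pixel_format, flags;
    uint32_t handles[4], pitches[4], offsets[4];
    uint64_t modifier[4];
} __attribute__((packed));

/* Native view: naturally aligned. */
struct fb_cmd2 {
    uint32_t fb_id, width, height, pixel_format, flags;
    uint32_t handles[4], pitches[4], offsets[4];
    uint64_t modifier[4];
};

static void compat_to_native(const struct fb_cmd2_32 *in, struct fb_cmd2 *out)
{
    memset(out, 0, sizeof(*out));
    out->width = in->width;
    out->height = in->height;
    out->pixel_format = in->pixel_format;
    out->flags = in->flags;
    for (int i = 0; i < 4; i++) {
        out->handles[i]  = in->handles[i];
        out->pitches[i]  = in->pitches[i];
        out->offsets[i]  = in->offsets[i];
        out->modifier[i] = in->modifier[i];
    }
}

int main(void)
{
    struct fb_cmd2_32 req32 = { .width = 1920, .height = 1080 };
    struct fb_cmd2 req64;

    compat_to_native(&req32, &req64);
    /* ... forward req64 to the native ioctl, then copy the produced fb_id back ... */
    req32.fb_id = req64.fb_id;

    printf("sizeof packed=%zu native=%zu\n", sizeof(req32), sizeof(req64));
    return 0;
}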
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f9cc68fbd2a3..b50fa0afd907 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -75,7 +75,7 @@ module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600)
module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
static void store_vblank(struct drm_device *dev, int crtc,
- unsigned vblank_count_inc,
+ u32 vblank_count_inc,
struct timeval *t_vblank)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 842d6b8dc3c4..2a652359af64 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1745,7 +1745,6 @@ static int fimc_probe(struct platform_device *pdev)
spin_lock_init(&ctx->lock);
platform_set_drvdata(pdev, ctx);
- pm_runtime_set_active(dev);
pm_runtime_enable(dev);
ret = exynos_drm_ippdrv_register(ippdrv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 8040ed2a831f..f1c6b76c127f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -593,8 +593,7 @@ static int gsc_src_set_transf(struct device *dev,
gsc_write(cfg, GSC_IN_CON);
- ctx->rotation = cfg &
- (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
*swap = ctx->rotation;
return 0;
@@ -857,8 +856,7 @@ static int gsc_dst_set_transf(struct device *dev,
gsc_write(cfg, GSC_IN_CON);
- ctx->rotation = cfg &
- (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
*swap = ctx->rotation;
return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 99e286489031..4a00990e4ae4 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1064,6 +1064,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
{
struct hdmi_context *hdata = ctx_from_connector(connector);
struct edid *edid;
+ int ret;
if (!hdata->ddc_adpt)
return -ENODEV;
@@ -1079,7 +1080,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
- return drm_add_edid_modes(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+
+ kfree(edid);
+
+ return ret;
}
static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index cae98db33062..4706b56902b4 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -718,6 +718,10 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
/* handling VSYNC */
if (val & MXR_INT_STATUS_VSYNC) {
+ /* the vsync interrupt uses different bits for read and clear */
+ val |= MXR_INT_CLEAR_VSYNC;
+ val &= ~MXR_INT_STATUS_VSYNC;
+
/* interlace scan need to check shadow register */
if (ctx->interlace) {
base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
@@ -743,11 +747,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
out:
/* clear interrupts */
- if (~val & MXR_INT_EN_VSYNC) {
- /* vsync interrupt use different bit for read and clear */
- val &= ~MXR_INT_EN_VSYNC;
- val |= MXR_INT_CLEAR_VSYNC;
- }
mixer_reg_write(res, MXR_INT_STATUS, val);
spin_unlock(&res->reg_slock);
@@ -907,8 +906,8 @@ static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
}
/* enable vsync interrupt */
- mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
- MXR_INT_EN_VSYNC);
+ mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
+ mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
return 0;
}
@@ -918,7 +917,13 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
struct mixer_context *mixer_ctx = crtc->ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
+ if (!mixer_ctx->powered) {
+ mixer_ctx->int_en &= MXR_INT_EN_VSYNC;
+ return;
+ }
+
/* disable vsync interrupt */
+ mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
@@ -1047,6 +1052,8 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
+ if (ctx->int_en & MXR_INT_EN_VSYNC)
+ mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
mixer_win_reset(ctx);
}
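The exynos_mixer.c fixes deal with an interrupt block where VSYNC is reported through one status bit but acknowledged through a separate clear bit, so the handler now swaps the clear bit into the value it writes back, and the enable/disable paths ack any stale VSYNC first. A tiny sketch of that read-bit/clear-bit split; the register layout and bit positions below are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define INT_STATUS_VSYNC (1u << 0)   /* set by hardware when vsync fires */
#define INT_CLEAR_VSYNC  (1u << 16)  /* write 1 here to acknowledge it   */

static uint32_t int_status = INT_STATUS_VSYNC;   /* pretend vsync is pending */

static uint32_t reg_read(void)  { return int_status; }
static void reg_write(uint32_t v)
{
    /* Writing the CLEAR bit drops the STATUS bit; writing STATUS does nothing. */
    if (v & INT_CLEAR_VSYNC)
        int_status &= ~INT_STATUS_VSYNC;
}

int main(void)
{
    uint32_t val = reg_read();

    if (val & INT_STATUS_VSYNC) {
        /* Translate the read bit into the clear bit before writing back. */
        val |= INT_CLEAR_VSYNC;
        val &= ~INT_STATUS_VSYNC;
    }
    reg_write(val);

    printf("pending after ack: %d\n", !!(reg_read() & INT_STATUS_VSYNC));
    return 0;
}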
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index fe1599d75f14..424228be79ae 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -606,8 +606,6 @@ static void
tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr,
uint8_t *buf, size_t size)
{
- buf[PB(0)] = tda998x_cksum(buf, size);
-
reg_clear(priv, REG_DIP_IF_FLAGS, bit);
reg_write_range(priv, addr, buf, size);
reg_set(priv, REG_DIP_IF_FLAGS, bit);
@@ -627,6 +625,8 @@ tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p)
buf[PB(4)] = p->audio_frame[4];
buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
+ buf[PB(0)] = tda998x_cksum(buf, sizeof(buf));
+
tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
sizeof(buf));
}
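The tda998x change moves the checksum computation out of tda998x_write_if() and into the audio-infoframe builder so it runs only after every payload byte is in place. Assuming the usual infoframe convention that the checksum byte makes all bytes sum to zero modulo 256, a standalone sketch of computing it last (the buffer layout below is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pick the checksum byte so that the whole buffer sums to 0 (mod 256). */
static uint8_t frame_cksum(const uint8_t *buf, size_t len)
{
    uint8_t sum = 0;
    for (size_t i = 0; i < len; i++)
        sum += buf[i];
    return (uint8_t)(0x100 - sum);
}

int main(void)
{
    uint8_t buf[14];

    memset(buf, 0, sizeof(buf));
    buf[0] = 0x84;           /* header: frame type        */
    buf[1] = 0x01;           /* header: version           */
    buf[2] = 0x0a;           /* header: payload length    */
    /* buf[3] is the checksum slot -- leave it for last    */
    buf[4] = 0x11;           /* payload bytes ...         */
    buf[5] = 0x22;

    /* Compute the checksum only once every byte is in place,
     * mirroring the move of the cksum call into the caller. */
    buf[3] = frame_cksum(buf, sizeof(buf));

    uint8_t verify = 0;
    for (size_t i = 0; i < sizeof(buf); i++)
        verify += buf[i];
    printf("sum mod 256 = %u\n", verify);   /* prints 0 */
    return 0;
}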
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 542fac628b28..fd1de451c8c6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -826,6 +826,7 @@ struct intel_context {
struct kref ref;
int user_handle;
uint8_t remap_slice;
+ struct drm_i915_private *i915;
struct drm_i915_file_private *file_priv;
struct i915_ctx_hang_stats hang_stats;
struct i915_hw_ppgtt *ppgtt;
@@ -2036,8 +2037,6 @@ struct drm_i915_gem_object {
unsigned int cache_level:3;
unsigned int cache_dirty:1;
- unsigned int has_dma_mapping:1;
-
unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
unsigned int pin_display;
@@ -3116,7 +3115,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor);
int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_device *dev);
#else
-static inline int i915_debugfs_connector_add(struct drm_connector *connector) {}
+static inline int i915_debugfs_connector_add(struct drm_connector *connector)
+{ return 0; }
static inline void intel_display_crc_init(struct drm_device *dev) {}
#endif
@@ -3303,15 +3303,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
- u32 upper = I915_READ(upper_reg); \
- u32 lower = I915_READ(lower_reg); \
- u32 tmp = I915_READ(upper_reg); \
- if (upper != tmp) { \
- upper = tmp; \
- lower = I915_READ(lower_reg); \
- WARN_ON(I915_READ(upper_reg) != upper); \
- } \
- (u64)upper << 32 | lower; })
+ u32 upper, lower, tmp; \
+ tmp = I915_READ(upper_reg); \
+ do { \
+ upper = tmp; \
+ lower = I915_READ(lower_reg); \
+ tmp = I915_READ(upper_reg); \
+ } while (upper != tmp); \
+ (u64)upper << 32 | lower; })
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
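The I915_READ64_2x32() rewrite turns the one-shot "read upper, read lower, re-read upper" sequence into a loop that retries until the upper half reads back unchanged, guaranteeing the lower half was sampled within a stable upper period. The same idiom in standalone C against a simulated free-running 64-bit counter (the helper names are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint64_t counter = 0x00000001fffffffeull;   /* about to carry into the upper half */

/* Simulated 32-bit MMIO reads; each read advances the counter to model
 * the value changing between the two halves being sampled. */
static uint32_t read_lower(void) { counter++; return (uint32_t)counter; }
static uint32_t read_upper(void) { counter++; return (uint32_t)(counter >> 32); }

static uint64_t read64_2x32(void)
{
    uint32_t upper, lower, tmp;

    tmp = read_upper();
    do {
        upper = tmp;
        lower = read_lower();
        tmp = read_upper();
    } while (upper != tmp);        /* retry if the upper half moved under us */

    return (uint64_t)upper << 32 | lower;
}

int main(void)
{
    uint64_t v = read64_2x32();
    printf("sampled 0x%016llx\n", (unsigned long long)v);
    return 0;
}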
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 248fd1ac7b3a..52b446b27b4d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -213,7 +213,6 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
sg_dma_len(sg) = obj->base.size;
obj->pages = st;
- obj->has_dma_mapping = true;
return 0;
}
@@ -265,8 +264,6 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
sg_free_table(obj->pages);
kfree(obj->pages);
-
- obj->has_dma_mapping = false;
}
static void
@@ -2139,6 +2136,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
+ i915_gem_gtt_finish_object(obj);
+
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj);
@@ -2199,6 +2198,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct sg_page_iter sg_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
+ int ret;
gfp_t gfp;
/* Assert that the object is not currently in any GPU domain. As it
@@ -2246,8 +2246,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
*/
i915_gem_shrink_all(dev_priv);
page = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(page))
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
goto err_pages;
+ }
}
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@@ -2276,6 +2278,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
sg_mark_end(sg);
obj->pages = st;
+ ret = i915_gem_gtt_prepare_object(obj);
+ if (ret)
+ goto err_pages;
+
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj);
@@ -2300,10 +2306,10 @@ err_pages:
* space and so want to translate the error from shmemfs back to our
* usual understanding of ENOMEM.
*/
- if (PTR_ERR(page) == -ENOSPC)
- return -ENOMEM;
- else
- return PTR_ERR(page);
+ if (ret == -ENOSPC)
+ ret = -ENOMEM;
+
+ return ret;
}
/* Ensure that the associated pages are gathered from the backing storage
@@ -2542,6 +2548,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
}
request->emitted_jiffies = jiffies;
+ ring->last_submitted_seqno = request->seqno;
list_add_tail(&request->list, &ring->request_list);
request->file_priv = NULL;
@@ -3247,10 +3254,8 @@ int i915_vma_unbind(struct i915_vma *vma)
/* Since the unbound list is global, only move to that list if
* no more VMAs exist. */
- if (list_empty(&obj->vma_list)) {
- i915_gem_gtt_finish_object(obj);
+ if (list_empty(&obj->vma_list))
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
- }
/* And finally now the object is completely decoupled from this vma,
* we can drop its hold on the backing storage and allow it to be
@@ -3768,22 +3773,16 @@ search_free:
goto err_remove_node;
}
- ret = i915_gem_gtt_prepare_object(obj);
- if (ret)
- goto err_remove_node;
-
trace_i915_vma_bind(vma, flags);
ret = i915_vma_bind(vma, obj->cache_level, flags);
if (ret)
- goto err_finish_gtt;
+ goto err_remove_node;
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &vm->inactive_list);
return vma;
-err_finish_gtt:
- i915_gem_gtt_finish_object(obj);
err_remove_node:
drm_mm_remove_node(&vma->node);
err_free_vma:
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d65cbe6afb92..48afa777e94a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -135,8 +135,7 @@ static int get_context_size(struct drm_device *dev)
void i915_gem_context_free(struct kref *ctx_ref)
{
- struct intel_context *ctx = container_of(ctx_ref,
- typeof(*ctx), ref);
+ struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
trace_i915_context_free(ctx);
@@ -195,6 +194,7 @@ __create_hw_context(struct drm_device *dev,
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->context_list);
+ ctx->i915 = dev_priv;
if (dev_priv->hw_context_size) {
struct drm_i915_gem_object *obj =
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 7998da27c500..e9c2bfd85b52 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -256,7 +256,6 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
return PTR_ERR(sg);
obj->pages = sg;
- obj->has_dma_mapping = true;
return 0;
}
@@ -264,7 +263,6 @@ static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
dma_buf_unmap_attachment(obj->base.import_attach,
obj->pages, DMA_BIDIRECTIONAL);
- obj->has_dma_mapping = false;
}
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index dcc6a88c560e..31e8269e6e3d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1723,9 +1723,6 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
- if (obj->has_dma_mapping)
- return 0;
-
if (!dma_map_sg(&obj->base.dev->pdev->dev,
obj->pages->sgl, obj->pages->nents,
PCI_DMA_BIDIRECTIONAL))
@@ -1926,6 +1923,17 @@ static int ggtt_bind_vma(struct i915_vma *vma,
vma->vm->insert_entries(vma->vm, pages,
vma->node.start,
cache_level, pte_flags);
+
+ /* Note the inconsistency here is due to absence of the
+ * aliasing ppgtt on gen4 and earlier. Though we always
+ * request PIN_USER for execbuffer (translated to LOCAL_BIND),
+ * without the aliasing ppgtt, we cannot honour that request and so
+ * must substitute it with a global binding. Since we do this
+ * behind the upper layers' back, we need to explicitly set
+ * the bound flag ourselves.
+ */
+ vma->bound |= GLOBAL_BIND;
+
}
if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {
@@ -1972,10 +1980,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
interruptible = do_idling(dev_priv);
- if (!obj->has_dma_mapping)
- dma_unmap_sg(&dev->pdev->dev,
- obj->pages->sgl, obj->pages->nents,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
+ PCI_DMA_BIDIRECTIONAL);
undo_idling(dev_priv, interruptible);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 348ed5abcdbf..8b5b784c62fe 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -416,7 +416,6 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
if (obj->pages == NULL)
goto cleanup;
- obj->has_dma_mapping = true;
i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index d61e74a08f82..d19c9db5e18c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -183,18 +183,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
if (IS_GEN4(dev)) {
uint32_t ddc2 = I915_READ(DCC2);
- if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) {
- /* Since the swizzling may vary within an
- * object, we have no idea what the swizzling
- * is for any page in particular. Thus we
- * cannot migrate tiled pages using the GPU,
- * nor can we tell userspace what the exact
- * swizzling is for any object.
- */
+ if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
- swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
- swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- }
}
if (dcc == 0xffffffff) {
@@ -474,7 +464,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
- args->phys_swizzle_mode = args->swizzle_mode;
+ if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
+ else
+ args->phys_swizzle_mode = args->swizzle_mode;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 1f4e5a32a16e..8fd431bcdfd3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -545,6 +545,26 @@ err:
return ret;
}
+static int
+__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
+ struct page **pvec, int num_pages)
+{
+ int ret;
+
+ ret = st_set_pages(&obj->pages, pvec, num_pages);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_gtt_prepare_object(obj);
+ if (ret) {
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+ obj->pages = NULL;
+ }
+
+ return ret;
+}
+
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
@@ -584,9 +604,12 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
if (obj->userptr.work != &work->work) {
ret = 0;
} else if (pinned == num_pages) {
- ret = st_set_pages(&obj->pages, pvec, num_pages);
+ ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
if (ret == 0) {
list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
+ obj->get_page.sg = obj->pages->sgl;
+ obj->get_page.last = 0;
+
pinned = 0;
}
}
@@ -693,7 +716,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
}
}
} else {
- ret = st_set_pages(&obj->pages, pvec, num_pages);
+ ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
if (ret == 0) {
obj->userptr.work = NULL;
pinned = 0;
@@ -715,6 +738,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
if (obj->madv != I915_MADV_WILLNEED)
obj->dirty = 0;
+ i915_gem_gtt_finish_object(obj);
+
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
struct page *page = sg_page_iter_page(&sg_iter);
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 176de6322e4d..23aa04cded6b 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -204,7 +204,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
drm_ioctl_compat_t *fn = NULL;
int ret;
- if (nr < DRM_COMMAND_BASE)
+ if (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END)
return drm_compat_ioctl(filp, cmd, arg);
if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e6bb72dca3ff..984e2fe6688c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2706,18 +2706,11 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static struct drm_i915_gem_request *
-ring_last_request(struct intel_engine_cs *ring)
-{
- return list_entry(ring->request_list.prev,
- struct drm_i915_gem_request, list);
-}
-
static bool
-ring_idle(struct intel_engine_cs *ring)
+ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
return (list_empty(&ring->request_list) ||
- i915_gem_request_completed(ring_last_request(ring), false));
+ i915_seqno_passed(seqno, ring->last_submitted_seqno));
}
static bool
@@ -2939,7 +2932,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
acthd = intel_ring_get_active_head(ring);
if (ring->hangcheck.seqno == seqno) {
- if (ring_idle(ring)) {
+ if (ring_idle(ring, seqno)) {
ring->hangcheck.action = HANGCHECK_IDLE;
if (waitqueue_active(&ring->irq_queue)) {
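ring_idle() now compares the ring's hardware seqno against the cached last_submitted_seqno instead of walking request_list, so the hangcheck no longer needs the lock protecting that list. Seqno comparisons of this sort are done with wrap-safe signed arithmetic (i915_seqno_passed() boils down to (int32_t)(a - b) >= 0); a standalone sketch of that comparison and the resulting lock-free idle test (the struct fields are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "has seq1 reached seq2?" -- valid while the two values are
 * within 2^31 of each other. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
    return (int32_t)(seq1 - seq2) >= 0;
}

struct ring {
    uint32_t hw_seqno;              /* what the GPU last completed   */
    uint32_t last_submitted_seqno;  /* what the CPU last handed over */
};

/* Idle if everything ever submitted has been completed; no list walk,
 * no lock -- both fields are single words updated independently. */
static bool ring_idle(const struct ring *r)
{
    return seqno_passed(r->hw_seqno, r->last_submitted_seqno);
}

int main(void)
{
    struct ring busy = { .hw_seqno = 0xfffffff0u, .last_submitted_seqno = 0x00000004u };
    struct ring idle = { .hw_seqno = 0x00000005u, .last_submitted_seqno = 0x00000004u };

    /* Even across the 32-bit wrap, "busy" is correctly reported as busy. */
    printf("busy ring idle? %d\n", ring_idle(&busy));
    printf("idle ring idle? %d\n", ring_idle(&idle));
    return 0;
}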
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 497cba5deb1e..849a2590e010 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -727,7 +727,7 @@ DECLARE_EVENT_CLASS(i915_context,
TP_fast_assign(
__entry->ctx = ctx;
__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
- __entry->dev = ctx->file_priv->dev_priv->dev->primary->index;
+ __entry->dev = ctx->i915->dev->primary->index;
),
TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 7ed8033aae60..8e35e0d013df 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -129,8 +129,9 @@ int intel_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool async)
{
- int ret;
- int i;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int ret, i;
if (async) {
DRM_DEBUG_KMS("i915 does not yet support async commit\n");
@@ -142,48 +143,18 @@ int intel_atomic_commit(struct drm_device *dev,
return ret;
/* Point of no return */
-
- /*
- * FIXME: The proper sequence here will eventually be:
- *
- * drm_atomic_helper_swap_state(dev, state)
- * drm_atomic_helper_commit_modeset_disables(dev, state);
- * drm_atomic_helper_commit_planes(dev, state);
- * drm_atomic_helper_commit_modeset_enables(dev, state);
- * drm_atomic_helper_wait_for_vblanks(dev, state);
- * drm_atomic_helper_cleanup_planes(dev, state);
- * drm_atomic_state_free(state);
- *
- * once we have full atomic modeset. For now, just manually update
- * plane states to avoid clobbering good states with dummy states
- * while nuclear pageflipping.
- */
- for (i = 0; i < dev->mode_config.num_total_plane; i++) {
- struct drm_plane *plane = state->planes[i];
-
- if (!plane)
- continue;
-
- plane->state->state = state;
- swap(state->plane_states[i], plane->state);
- plane->state->state = NULL;
- }
+ drm_atomic_helper_swap_state(dev, state);
/* swap crtc_scaler_state */
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
- if (!crtc) {
- continue;
- }
-
- to_intel_crtc(crtc)->config->scaler_state =
- to_intel_crtc_state(state->crtc_states[i])->scaler_state;
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
if (INTEL_INFO(dev)->gen >= 9)
skl_detach_scalers(to_intel_crtc(crtc));
+
+ drm_atomic_helper_commit_planes_on_crtc(crtc_state);
}
- drm_atomic_helper_commit_planes(dev, state);
drm_atomic_helper_wait_for_vblanks(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_state_free(state);
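The intel_atomic.c hunk replaces the open-coded plane-state swap with drm_atomic_helper_swap_state() followed by per-CRTC plane commits, edging toward the full helper sequence the removed FIXME listed. A skeleton of that sequence with the helpers stubbed out so it runs standalone; the real calls are the ones named in the removed comment, everything else here is illustrative:

#include <stdio.h>

struct drm_device { int dummy; };
struct drm_atomic_state { int dummy; };

/* Stubs standing in for the DRM atomic helpers named in the removed FIXME. */
static void swap_state(struct drm_device *d, struct drm_atomic_state *s)             { (void)d; (void)s; puts("swap state"); }
static void commit_modeset_disables(struct drm_device *d, struct drm_atomic_state *s){ (void)d; (void)s; puts("disable outgoing CRTCs/encoders"); }
static void commit_planes(struct drm_device *d, struct drm_atomic_state *s)          { (void)d; (void)s; puts("program planes"); }
static void commit_modeset_enables(struct drm_device *d, struct drm_atomic_state *s) { (void)d; (void)s; puts("enable new CRTCs/encoders"); }
static void wait_for_vblanks(struct drm_device *d, struct drm_atomic_state *s)       { (void)d; (void)s; puts("wait for vblanks"); }
static void cleanup_planes(struct drm_device *d, struct drm_atomic_state *s)         { (void)d; (void)s; puts("clean up old framebuffers"); }
static void state_free(struct drm_atomic_state *s)                                   { (void)s; puts("free state"); }

/* Commit skeleton in the order the removed FIXME lists: swap the new state in,
 * tear down what is going away, flush plane updates, bring up what is new,
 * then wait and release the old resources. */
static void commit(struct drm_device *dev, struct drm_atomic_state *state)
{
    swap_state(dev, state);
    commit_modeset_disables(dev, state);
    commit_planes(dev, state);
    commit_modeset_enables(dev, state);
    wait_for_vblanks(dev, state);
    cleanup_planes(dev, state);
    state_free(state);
}

int main(void)
{
    struct drm_device dev;
    struct drm_atomic_state state;

    commit(&dev, &state);
    return 0;
}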
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 647b1404c441..87476ff181dd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6315,9 +6315,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- /* crtc should still be enabled when we disable it. */
- WARN_ON(!crtc->state->enable);
-
intel_crtc_disable_planes(crtc);
dev_priv->display.crtc_disable(crtc);
dev_priv->display.off(crtc);
@@ -11829,7 +11826,9 @@ encoder_retry:
goto encoder_retry;
}
- pipe_config->dither = pipe_config->pipe_bpp != base_bpp;
+ /* Dithering seems not to pass bits through correctly when it should, so
+ * only enable it on 6bpc panels. */
+ pipe_config->dither = pipe_config->pipe_bpp == 6*3;
DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
@@ -12591,7 +12590,8 @@ static int __intel_set_mode(struct drm_crtc *modeset_crtc,
continue;
if (!crtc_state->enable) {
- intel_crtc_disable(crtc);
+ if (crtc->state->enable)
+ intel_crtc_disable(crtc);
} else if (crtc->state->enable) {
intel_crtc_disable_planes(crtc);
dev_priv->display.crtc_disable(crtc);
@@ -12626,17 +12626,17 @@ static int __intel_set_mode(struct drm_crtc *modeset_crtc,
modeset_update_crtc_power_domains(state);
- drm_atomic_helper_commit_planes(dev, state);
-
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (!needs_modeset(crtc->state) || !crtc->state->enable)
+ if (!needs_modeset(crtc->state) || !crtc->state->enable) {
+ drm_atomic_helper_commit_planes_on_crtc(crtc_state);
continue;
+ }
update_scanline_offset(to_intel_crtc(crtc));
dev_priv->display.crtc_enable(crtc);
- intel_crtc_enable_planes(crtc);
+ drm_atomic_helper_commit_planes_on_crtc(crtc_state);
}
/* FIXME: add subpixel order */
@@ -12893,20 +12893,11 @@ intel_modeset_stage_output_state(struct drm_device *dev,
return 0;
}
-static bool primary_plane_visible(struct drm_crtc *crtc)
-{
- struct intel_plane_state *plane_state =
- to_intel_plane_state(crtc->primary->state);
-
- return plane_state->visible;
-}
-
static int intel_crtc_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
struct drm_atomic_state *state = NULL;
struct intel_crtc_state *pipe_config;
- bool primary_plane_was_visible;
int ret;
BUG_ON(!set);
@@ -12945,38 +12936,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
intel_update_pipe_size(to_intel_crtc(set->crtc));
- primary_plane_was_visible = primary_plane_visible(set->crtc);
-
ret = intel_set_mode_with_config(set->crtc, pipe_config, true);
- if (ret == 0 &&
- pipe_config->base.enable &&
- pipe_config->base.planes_changed &&
- !needs_modeset(&pipe_config->base)) {
- struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
-
- /*
- * We need to make sure the primary plane is re-enabled if it
- * has previously been turned off.
- */
- if (ret == 0 && !primary_plane_was_visible &&
- primary_plane_visible(set->crtc)) {
- WARN_ON(!intel_crtc->active);
- intel_post_enable_primary(set->crtc);
- }
-
- /*
- * In the fastboot case this may be our only check of the
- * state after boot. It would be better to only do it on
- * the first update, but we don't have a nice way of doing that
- * (and really, set_config isn't used much for high freq page
- * flipping, so increasing its cost here shouldn't be a big
- * deal).
- */
- if (i915.fastboot && ret == 0)
- intel_modeset_check_state(set->crtc->dev);
- }
-
if (ret) {
DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
set->crtc->base.id, ret);
@@ -13276,7 +13237,7 @@ intel_check_primary_plane(struct drm_plane *plane,
if (ret)
return ret;
- if (intel_crtc->active) {
+ if (crtc_state ? crtc_state->base.active : intel_crtc->active) {
struct intel_plane_state *old_state =
to_intel_plane_state(plane->state);
@@ -13307,6 +13268,9 @@ intel_check_primary_plane(struct drm_plane *plane,
*/
if (IS_BROADWELL(dev))
intel_crtc->atomic.wait_vblank = true;
+
+ if (crtc_state)
+ intel_crtc->atomic.post_enable_primary = true;
}
/*
@@ -13319,6 +13283,10 @@ intel_check_primary_plane(struct drm_plane *plane,
if (!state->visible || !fb)
intel_crtc->atomic.disable_ips = true;
+ if (!state->visible && old_state->visible &&
+ crtc_state && !needs_modeset(&crtc_state->base))
+ intel_crtc->atomic.pre_disable_primary = true;
+
intel_crtc->atomic.fb_bits |=
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
@@ -15036,6 +15004,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_plane_state *plane_state;
memset(crtc->config, 0, sizeof(*crtc->config));
+ crtc->config->base.crtc = &crtc->base;
crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 6e8faa253792..1df0e1fe235f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -93,9 +93,6 @@ static const struct dp_link_dpll chv_dpll[] = {
static const int skl_rates[] = { 162000, 216000, 270000,
324000, 432000, 540000 };
-static const int chv_rates[] = { 162000, 202500, 210000, 216000,
- 243000, 270000, 324000, 405000,
- 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
/**
@@ -1169,24 +1166,31 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
+static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
+{
+ /* WaDisableHBR2:skl */
+ if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+ return false;
+
+ if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
+ (INTEL_INFO(dev)->gen >= 9))
+ return true;
+ else
+ return false;
+}
+
static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
if (IS_SKYLAKE(dev)) {
*source_rates = skl_rates;
return ARRAY_SIZE(skl_rates);
- } else if (IS_CHERRYVIEW(dev)) {
- *source_rates = chv_rates;
- return ARRAY_SIZE(chv_rates);
}
*source_rates = default_rates;
- if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
- /* WaDisableHBR2:skl */
- return (DP_LINK_BW_2_7 >> 3) + 1;
- else if (INTEL_INFO(dev)->gen >= 8 ||
- (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
+ /* This depends on the fact that 5.4 is the last value in the array */
+ if (intel_dp_source_supports_hbr2(dev))
return (DP_LINK_BW_5_4 >> 3) + 1;
else
return (DP_LINK_BW_2_7 >> 3) + 1;
@@ -3941,10 +3945,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
}
}
- /* Training Pattern 3 support, both source and sink */
+ /* Training Pattern 3 support: only Intel platforms that support HBR2
+ * also support TP3, hence that check is used along with the DPCD check
+ * to ensure TP3 can be enabled.
+ * SKL < B0 is the only exception: due to its WaDisableHBR2 workaround,
+ * TP3 is supported but still not enabled.
+ */
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
- (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
+ intel_dp_source_supports_hbr2(dev)) {
intel_dp->use_tps3 = true;
DRM_DEBUG_KMS("Displayport TPS3 supported\n");
} else
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 6e4cc5334f47..600afdbef8c9 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -357,6 +357,16 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
+static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+ struct intel_crtc *crtc = to_intel_crtc(state->crtc);
+
+ return &intel_dp->mst_encoders[crtc->pipe]->base.base;
+}
+
static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -367,6 +377,7 @@ static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connecto
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
.get_modes = intel_dp_mst_get_modes,
.mode_valid = intel_dp_mst_mode_valid,
+ .atomic_best_encoder = intel_mst_atomic_best_encoder,
.best_encoder = intel_mst_best_encoder,
};
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9b74ffae5f5a..7f2161a1ff5d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1012,6 +1012,8 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
if (ret)
goto unpin_ctx_obj;
+
+ ctx_obj->dirty = true;
}
return ret;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index e539314ae87e..4be66f60504d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -275,6 +275,13 @@ struct intel_engine_cs {
* Do we have some not yet emitted requests outstanding?
*/
struct drm_i915_gem_request *outstanding_lazy_request;
+ /**
+ * Seqno of request most recently submitted to request_list.
+ * Used exclusively by hang checker to avoid grabbing lock while
+ * inspecting request list.
+ */
+ u32 last_submitted_seqno;
+
bool gpu_caches_dirty;
wait_queue_head_t irq_queue;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index a6d8a3ee7750..260389acfb77 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1274,10 +1274,12 @@ int i915_reg_read_ioctl(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_reg_read *reg = data;
struct register_whitelist const *entry = whitelist;
+ unsigned size;
+ u64 offset;
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
- if (entry->offset == reg->offset &&
+ if (entry->offset == (reg->offset & -entry->size) &&
(1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
break;
}
@@ -1285,23 +1287,33 @@ int i915_reg_read_ioctl(struct drm_device *dev,
if (i == ARRAY_SIZE(whitelist))
return -EINVAL;
+ /* We use the low bits to encode extra flags as the register should
+ * be naturally aligned (and those that are not so aligned merely
+ * limit the available flags for that register).
+ */
+ offset = entry->offset;
+ size = entry->size;
+ size |= reg->offset ^ offset;
+
intel_runtime_pm_get(dev_priv);
- switch (entry->size) {
+ switch (size) {
+ case 8 | 1:
+ reg->val = I915_READ64_2x32(offset, offset+4);
+ break;
case 8:
- reg->val = I915_READ64(reg->offset);
+ reg->val = I915_READ64(offset);
break;
case 4:
- reg->val = I915_READ(reg->offset);
+ reg->val = I915_READ(offset);
break;
case 2:
- reg->val = I915_READ16(reg->offset);
+ reg->val = I915_READ16(offset);
break;
case 1:
- reg->val = I915_READ8(reg->offset);
+ reg->val = I915_READ8(offset);
break;
default:
- MISSING_CASE(entry->size);
ret = -EINVAL;
goto out;
}
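i915_reg_read_ioctl() above leans on natural alignment: a whitelisted register's base offset has its low bits clear, so reg->offset & -entry->size recovers the base and reg->offset ^ offset yields whatever flag bits the caller packed into the low bits (here, "split the 64-bit read into two 32-bit halves"). A standalone sketch of that encode/decode trick; the flag value and switch labels are illustrative:

#include <stdint.h>
#include <stdio.h>

#define FLAG_2x32 0x1          /* low-bit flag: read a 64-bit reg as two 32-bit halves */

struct entry { uint64_t offset; unsigned size; };

int main(void)
{
    /* A naturally aligned 8-byte register at 0x2358 (low 3 bits clear). */
    const struct entry whitelisted = { .offset = 0x2358, .size = 8 };

    /* Caller asks for the same register, with a flag packed into bit 0. */
    uint64_t request = whitelisted.offset | FLAG_2x32;

    /* Mask with -size clears the low log2(size) bits, recovering the base;
     * XOR against the base recovers whatever the caller put in those bits. */
    uint64_t base = request & -(uint64_t)whitelisted.size;
    unsigned size = whitelisted.size | (unsigned)(request ^ base);

    printf("base=0x%llx size|flags=%u\n", (unsigned long long)base, size);

    switch (size) {
    case 8 | FLAG_2x32:
        puts("read as two 32-bit halves");
        break;
    case 8:
        puts("single 64-bit read");
        break;
    }
    return 0;
}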
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 214eceefc981..e671ad369416 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -301,7 +301,7 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
switch (tve->mode) {
case TVE_MODE_VGA:
- imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_YUV8_1X24,
+ imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24,
tve->hsync_pin, tve->vsync_pin);
break;
case TVE_MODE_TVOUT:
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 74a9ce40ddc4..b4deb9cf9d71 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -21,6 +21,7 @@
#include <drm/drm_panel.h>
#include <linux/videodev2.h>
#include <video/of_display_timing.h>
+#include <linux/of_graph.h>
#include "imx-drm.h"
@@ -208,7 +209,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = data;
struct device_node *np = dev->of_node;
- struct device_node *panel_node;
+ struct device_node *port;
const u8 *edidp;
struct imx_parallel_display *imxpd;
int ret;
@@ -234,11 +235,19 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
}
- panel_node = of_parse_phandle(np, "fsl,panel", 0);
- if (panel_node) {
- imxpd->panel = of_drm_find_panel(panel_node);
- if (!imxpd->panel)
- return -EPROBE_DEFER;
+ /* port@1 is the output port */
+ port = of_graph_get_port_by_id(np, 1);
+ if (port) {
+ struct device_node *endpoint, *remote;
+
+ endpoint = of_get_child_by_name(port, "endpoint");
+ if (endpoint) {
+ remote = of_graph_get_remote_port_parent(endpoint);
+ if (remote)
+ imxpd->panel = of_drm_find_panel(remote);
+ if (!imxpd->panel)
+ return -EPROBE_DEFER;
+ }
}
imxpd->dev = dev;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 0d1dbb737933..247a424445f7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -220,13 +220,15 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
uint32_t op_mode = 0;
uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
- enum mdp4_frame_format frame_type = mdp4_get_frame_format(fb);
+ enum mdp4_frame_format frame_type;
if (!(crtc && fb)) {
DBG("%s: disabled!", mdp4_plane->name);
return 0;
}
+ frame_type = mdp4_get_frame_format(fb);
+
/* src values are in Q16 fixed point, convert to integer: */
src_x = src_x >> 16;
src_y = src_y >> 16;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 206f758f7d64..e253db5de5aa 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -76,7 +76,20 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
+ int i;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ int nplanes = mdp5_kms->dev->mode_config.num_total_plane;
+
+ for (i = 0; i < nplanes; i++) {
+ struct drm_plane *plane = state->planes[i];
+ struct drm_plane_state *plane_state = state->plane_states[i];
+
+ if (!plane)
+ continue;
+
+ mdp5_plane_complete_commit(plane, plane_state);
+ }
+
mdp5_disable(mdp5_kms);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index e0eb24587c84..e79ac09b7216 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -227,6 +227,8 @@ void mdp5_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
void mdp5_plane_complete_flip(struct drm_plane *plane);
+void mdp5_plane_complete_commit(struct drm_plane *plane,
+ struct drm_plane_state *state);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 57b8f56ae9d0..22275568ab8b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -31,8 +31,6 @@ struct mdp5_plane {
uint32_t nformats;
uint32_t formats[32];
-
- bool enabled;
};
#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
@@ -56,22 +54,6 @@ static bool plane_enabled(struct drm_plane_state *state)
return state->fb && state->crtc;
}
-static int mdp5_plane_disable(struct drm_plane *plane)
-{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- struct mdp5_kms *mdp5_kms = get_kms(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
-
- DBG("%s: disable", mdp5_plane->name);
-
- if (mdp5_kms) {
- /* Release the memory we requested earlier from the SMP: */
- mdp5_smp_release(mdp5_kms->smp, pipe);
- }
-
- return 0;
-}
-
static void mdp5_plane_destroy(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
@@ -224,7 +206,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
if (!plane_enabled(state)) {
to_mdp5_plane_state(state)->pending = true;
- mdp5_plane_disable(plane);
} else if (to_mdp5_plane_state(state)->mode_changed) {
int ret;
to_mdp5_plane_state(state)->pending = true;
@@ -602,6 +583,20 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
return mdp5_plane->flush_mask;
}
+/* called after vsync in thread context */
+void mdp5_plane_complete_commit(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ enum mdp5_pipe pipe = mdp5_plane->pipe;
+
+ if (!plane_enabled(plane->state)) {
+ DBG("%s: free SMP", mdp5_plane->name);
+ mdp5_smp_release(mdp5_kms->smp, pipe);
+ }
+}
+
/* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 16702aecf0df..64a27d86f2f5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -34,22 +34,44 @@
* and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
*
* For each block that can be dynamically allocated, it can be either
- * free, or pending/in-use by a client. The updates happen in three steps:
+ * free:
+ * The block is free.
+ *
+ * pending:
+ * The block is allocated to some client and not free.
+ *
+ * configured:
+ * The block is allocated to some client, and assigned to that
+ * client in MDP5_MDP_SMP_ALLOC registers.
+ *
+ * inuse:
+ * The block is being actively used by a client.
+ *
+ * The updates happen in the following steps:
*
* 1) mdp5_smp_request():
* When plane scanout is setup, calculate required number of
- * blocks needed per client, and request. Blocks not inuse or
- * pending by any other client are added to client's pending
- * set.
+ * blocks needed per client, and request. Blocks neither inuse nor
+ * configured nor pending by any other client are added to client's
+ * pending set.
+ * For shrinking, blocks in pending but not in configured can be freed
+ * directly, but those already in configured will be freed later by
+ * mdp5_smp_commit.
*
* 2) mdp5_smp_configure():
* As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
* are configured for the union(pending, inuse)
+ * Current pending is copied to configured.
+ * It is assumed that mdp5_smp_request and mdp5_smp_configure do not run
+ * concurrently for the same pipe.
*
* 3) mdp5_smp_commit():
- * After next vblank, copy pending -> inuse. Optionally update
+ * After next vblank, copy configured -> inuse. Optionally update
* MDP5_SMP_ALLOC registers if there are newly unused blocks
*
+ * 4) mdp5_smp_release():
+ * Must be called after the pipe is disabled and no longer uses any SMB
+ *
* On the next vblank after changes have been committed to hw, the
* client's pending blocks become it's in-use blocks (and no-longer
* in-use blocks become available to other clients).
@@ -77,6 +99,9 @@ struct mdp5_smp {
struct mdp5_client_smp_state client_state[MAX_CLIENTS];
};
+static void update_smp_state(struct mdp5_smp *smp,
+ u32 cid, mdp5_smp_state_t *assigned);
+
static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{
@@ -149,7 +174,12 @@ static int smp_request_block(struct mdp5_smp *smp,
for (i = cur_nblks; i > nblks; i--) {
int blk = find_first_bit(ps->pending, cnt);
clear_bit(blk, ps->pending);
- /* don't clear in global smp_state until _commit() */
+
+ /* clear in global smp_state if the block is not in configured;
+ * otherwise it is cleared at _commit()
+ */
+ if (!test_bit(blk, ps->configured))
+ clear_bit(blk, smp->state);
}
}
@@ -223,10 +253,33 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid
/* Release SMP blocks for all clients of the pipe */
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
- int i, nblks;
+ int i;
+ unsigned long flags;
+ int cnt = smp->blk_cnt;
+
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ mdp5_smp_state_t assigned;
+ u32 cid = pipe2client(pipe, i);
+ struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+
+ spin_lock_irqsave(&smp->state_lock, flags);
+
+ /* clear hw assignment */
+ bitmap_or(assigned, ps->inuse, ps->configured, cnt);
+ update_smp_state(smp, CID_UNUSED, &assigned);
+
+ /* free to global pool */
+ bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
+ bitmap_andnot(smp->state, smp->state, assigned, cnt);
+
+ /* clear client's info */
+ bitmap_zero(ps->pending, cnt);
+ bitmap_zero(ps->configured, cnt);
+ bitmap_zero(ps->inuse, cnt);
+
+ spin_unlock_irqrestore(&smp->state_lock, flags);
+ }
- for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
- smp_request_block(smp, pipe2client(pipe, i), 0);
set_fifo_thresholds(smp, pipe, 0);
}
@@ -274,12 +327,20 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
u32 cid = pipe2client(pipe, i);
struct mdp5_client_smp_state *ps = &smp->client_state[cid];
- bitmap_or(assigned, ps->inuse, ps->pending, cnt);
+ /*
+ * if vblank has not happened since last smp_configure
+ * skip the configure for now
+ */
+ if (!bitmap_equal(ps->inuse, ps->configured, cnt))
+ continue;
+
+ bitmap_copy(ps->configured, ps->pending, cnt);
+ bitmap_or(assigned, ps->inuse, ps->configured, cnt);
update_smp_state(smp, cid, &assigned);
}
}
-/* step #3: after vblank, copy pending -> inuse: */
+/* step #3: after vblank, copy configured -> inuse: */
void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
int cnt = smp->blk_cnt;
@@ -295,7 +356,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
* using, which can be released and made available to other
* clients:
*/
- if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
+ if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
unsigned long flags;
spin_lock_irqsave(&smp->state_lock, flags);
@@ -306,7 +367,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
update_smp_state(smp, CID_UNUSED, &released);
}
- bitmap_copy(ps->inuse, ps->pending, cnt);
+ bitmap_copy(ps->inuse, ps->configured, cnt);
}
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index e47179f63585..5b6c2363f592 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -23,6 +23,7 @@
struct mdp5_client_smp_state {
mdp5_smp_state_t inuse;
+ mdp5_smp_state_t configured;
mdp5_smp_state_t pending;
};
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 1b22d8bfe142..1ceb4f22dd89 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -283,12 +283,8 @@ int msm_atomic_commit(struct drm_device *dev,
timeout = ktime_add_ms(ktime_get(), 1000);
- ret = msm_wait_fence_interruptable(dev, c->fence, &timeout);
- if (ret) {
- WARN_ON(ret); // TODO unswap state back? or??
- commit_destroy(c);
- return ret;
- }
+ /* uninterruptible wait */
+ msm_wait_fence(dev, c->fence, &timeout, false);
complete_commit(c);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b7ef56ed8d1c..d3467b115e04 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -637,8 +637,8 @@ static void msm_debugfs_cleanup(struct drm_minor *minor)
* Fences:
*/
-int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
- ktime_t *timeout)
+int msm_wait_fence(struct drm_device *dev, uint32_t fence,
+ ktime_t *timeout, bool interruptible)
{
struct msm_drm_private *priv = dev->dev_private;
int ret;
@@ -667,7 +667,12 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
remaining_jiffies = timespec_to_jiffies(&ts);
}
- ret = wait_event_interruptible_timeout(priv->fence_event,
+ if (interruptible)
+ ret = wait_event_interruptible_timeout(priv->fence_event,
+ fence_completed(dev, fence),
+ remaining_jiffies);
+ else
+ ret = wait_event_timeout(priv->fence_event,
fence_completed(dev, fence),
remaining_jiffies);
@@ -853,7 +858,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
return -EINVAL;
}
- return msm_wait_fence_interruptable(dev, args->fence, &timeout);
+ return msm_wait_fence(dev, args->fence, &timeout, true);
}
static const struct drm_ioctl_desc msm_ioctls[] = {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index e7c5ea125d45..4ff0ec9c994b 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -164,8 +164,8 @@ int msm_atomic_commit(struct drm_device *dev,
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
- ktime_t *timeout);
+int msm_wait_fence(struct drm_device *dev, uint32_t fence,
+ ktime_t *timeout, bool interruptible);
int msm_queue_fence_cb(struct drm_device *dev,
struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct drm_device *dev, uint32_t fence);
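The net effect of the rename is a single wait helper whose callers choose the wait mode. A minimal sketch of the two kinds of call sites, assuming only the msm_wait_fence() prototype above; dev, fence and the surrounding context are placeholders, not code from the patch:

    ktime_t timeout = ktime_add_ms(ktime_get(), 1000);

    /* atomic commit path: the swapped-in state must not be abandoned on a signal */
    msm_wait_fence(dev, fence, &timeout, false);

    /* MSM_WAIT_FENCE ioctl and cpu_prep path: keep the old interruptible behaviour */
    int ret = msm_wait_fence(dev, fence, &timeout, true);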
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f211b80e3a1e..c76cc853b08a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -460,7 +460,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
if (op & MSM_PREP_NOSYNC)
timeout = NULL;
- ret = msm_wait_fence_interruptable(dev, fence, timeout);
+ ret = msm_wait_fence(dev, fence, timeout, true);
}
/* TODO cache maintenance */
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index dd7a7ab603e2..831461bc98a5 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -23,8 +23,12 @@
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- BUG_ON(!msm_obj->sgt); /* should have already pinned! */
- return msm_obj->sgt;
+ int npages = obj->size >> PAGE_SHIFT;
+
+ if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
+ return NULL;
+
+ return drm_prime_pages_to_sg(msm_obj->pages, npages);
}
void *msm_gem_prime_vmap(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 649024d4daf1..477cbb12809b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -128,6 +128,7 @@ nouveau_cli_destroy(struct nouveau_cli *cli)
nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
nvif_client_fini(&cli->base);
usif_client_fini(cli);
+ kfree(cli);
}
static void
@@ -865,8 +866,10 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
pm_runtime_get_sync(dev->dev);
+ mutex_lock(&cli->mutex);
if (cli->abi16)
nouveau_abi16_fini(cli->abi16);
+ mutex_unlock(&cli->mutex);
mutex_lock(&drm->client.mutex);
list_del(&cli->head);
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 775277f1edb0..dcfbbfaf1739 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -92,6 +92,8 @@ static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
return 0;
}
+#if IS_ENABLED(CONFIG_IOMMU_API)
+
static void nouveau_platform_probe_iommu(struct device *dev,
struct nouveau_platform_gpu *gpu)
{
@@ -158,6 +160,20 @@ static void nouveau_platform_remove_iommu(struct device *dev,
}
}
+#else
+
+static void nouveau_platform_probe_iommu(struct device *dev,
+ struct nouveau_platform_gpu *gpu)
+{
+}
+
+static void nouveau_platform_remove_iommu(struct device *dev,
+ struct nouveau_platform_gpu *gpu)
+{
+}
+
+#endif
+
static int nouveau_platform_probe(struct platform_device *pdev)
{
struct nouveau_platform_gpu *gpu;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 18f449715788..7464aef34674 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -175,15 +175,24 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
node->page_shift = 12;
switch (drm->device.info.family) {
+ case NV_DEVICE_INFO_V0_TNT:
+ case NV_DEVICE_INFO_V0_CELSIUS:
+ case NV_DEVICE_INFO_V0_KELVIN:
+ case NV_DEVICE_INFO_V0_RANKINE:
+ case NV_DEVICE_INFO_V0_CURIE:
+ break;
case NV_DEVICE_INFO_V0_TESLA:
if (drm->device.info.chipset != 0x50)
node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
break;
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
+ case NV_DEVICE_INFO_V0_MAXWELL:
node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
break;
default:
+ NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
+ drm->device.info.family);
break;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 4ef602c5469d..495c57644ced 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -203,7 +203,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
if (ret)
return ret;
- if (RING_SPACE(chan, 49)) {
+ if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 4 : 0))) {
nouveau_fbcon_gpu_lockup(info);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7da7958556a3..981342d142ff 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -979,7 +979,7 @@ nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
{
struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- if (show && nv_crtc->cursor.nvbo)
+ if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
nv50_crtc_cursor_show(nv_crtc);
else
nv50_crtc_cursor_hide(nv_crtc);
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 394c89abcc97..901130b06072 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -188,7 +188,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
if (ret)
return ret;
- ret = RING_SPACE(chan, 59);
+ ret = RING_SPACE(chan, 58);
if (ret) {
nouveau_fbcon_gpu_lockup(info);
return ret;
@@ -252,6 +252,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->var.yres_virtual);
OUT_RING(chan, upper_32_bits(fb->vma.offset));
OUT_RING(chan, lower_32_bits(fb->vma.offset));
+ FIRE_RING(chan);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 61246677e8dc..fcd2e5f27bb9 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -188,7 +188,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
- ret = RING_SPACE(chan, 60);
+ ret = RING_SPACE(chan, 58);
if (ret) {
WARN_ON(1);
nouveau_fbcon_gpu_lockup(info);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
index 9ef6728c528d..7f2f05f78cc8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
@@ -809,7 +809,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
default:
nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
- return 0x0000;
+ return NULL;
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 5606c25e5d02..ca11ddb6ed46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -663,6 +663,37 @@ gf100_gr_zbc_init(struct gf100_gr_priv *priv)
gf100_gr_zbc_clear_depth(priv, index);
}
+/**
+ * Wait until GR goes idle. GR is considered idle if it is disabled by the
+ * MC (0x200) register, or GR is not busy and a context switch is not in
+ * progress.
+ */
+int
+gf100_gr_wait_idle(struct gf100_gr_priv *priv)
+{
+ unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);
+ bool gr_enabled, ctxsw_active, gr_busy;
+
+ do {
+ /*
+ * required to make sure FIFO_ENGINE_STATUS (0x2640) is
+ * up-to-date
+ */
+ nv_rd32(priv, 0x400700);
+
+ gr_enabled = nv_rd32(priv, 0x200) & 0x1000;
+ ctxsw_active = nv_rd32(priv, 0x2640) & 0x8000;
+ gr_busy = nv_rd32(priv, 0x40060c) & 0x1;
+
+ if (!gr_enabled || (!gr_busy && !ctxsw_active))
+ return 0;
+ } while (time_before(jiffies, end_jiffies));
+
+ nv_error(priv, "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
+ gr_enabled, ctxsw_active, gr_busy);
+ return -EAGAIN;
+}
+
void
gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
{
@@ -699,7 +730,13 @@ gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
while (addr < next) {
nv_wr32(priv, 0x400200, addr);
- nv_wait(priv, 0x400700, 0x00000002, 0x00000000);
+ /**
+ * Wait for GR to go idle after submitting a
+ * GO_IDLE bundle
+ */
+ if ((addr & 0xffff) == 0xe100)
+ gf100_gr_wait_idle(priv);
+ nv_wait(priv, 0x400700, 0x00000004, 0x00000000);
addr += init->pitch;
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 8af1a89eda84..c9533fdac4fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -181,6 +181,7 @@ struct gf100_gr_oclass {
int ppc_nr;
};
+int gf100_gr_wait_idle(struct gf100_gr_priv *);
void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *);
void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 2006c445938d..4cf36a3aa814 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -332,9 +332,12 @@ static void
nvkm_perfctx_dtor(struct nvkm_object *object)
{
struct nvkm_pm *ppm = (void *)object->engine;
+ struct nvkm_perfctx *ctx = (void *)object;
+
mutex_lock(&nv_subdev(ppm)->mutex);
- nvkm_engctx_destroy(&ppm->context->base);
- ppm->context = NULL;
+ nvkm_engctx_destroy(&ctx->base);
+ if (ppm->context == ctx)
+ ppm->context = NULL;
mutex_unlock(&nv_subdev(ppm)->mutex);
}
@@ -355,12 +358,11 @@ nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
mutex_lock(&nv_subdev(ppm)->mutex);
if (ppm->context == NULL)
ppm->context = ctx;
- mutex_unlock(&nv_subdev(ppm)->mutex);
-
if (ctx != ppm->context)
- return -EBUSY;
+ ret = -EBUSY;
+ mutex_unlock(&nv_subdev(ppm)->mutex);
- return 0;
+ return ret;
}
struct nvkm_oclass
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index f67cdae1e90a..f4611e3f0971 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -1285,6 +1285,44 @@ init_zm_reg_sequence(struct nvbios_init *init)
}
/**
+ * INIT_PLL_INDIRECT - opcode 0x59
+ *
+ */
+static void
+init_pll_indirect(struct nvbios_init *init)
+{
+ struct nvkm_bios *bios = init->bios;
+ u32 reg = nv_ro32(bios, init->offset + 1);
+ u16 addr = nv_ro16(bios, init->offset + 5);
+ u32 freq = (u32)nv_ro16(bios, addr) * 1000;
+
+ trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n",
+ reg, addr, freq);
+ init->offset += 7;
+
+ init_prog_pll(init, reg, freq);
+}
+
+/**
+ * INIT_ZM_REG_INDIRECT - opcode 0x5a
+ *
+ */
+static void
+init_zm_reg_indirect(struct nvbios_init *init)
+{
+ struct nvkm_bios *bios = init->bios;
+ u32 reg = nv_ro32(bios, init->offset + 1);
+ u16 addr = nv_ro16(bios, init->offset + 5);
+ u32 data = nv_ro32(bios, addr);
+
+ trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n",
+ reg, addr, data);
+ init->offset += 7;
+
+ init_wr32(init, reg, data);
+}
+
+/**
* INIT_SUB_DIRECT - opcode 0x5b
*
*/
@@ -2145,6 +2183,8 @@ static struct nvbios_init_opcode {
[0x56] = { init_condition_time },
[0x57] = { init_ltime },
[0x58] = { init_zm_reg_sequence },
+ [0x59] = { init_pll_indirect },
+ [0x5a] = { init_zm_reg_indirect },
[0x5b] = { init_sub_direct },
[0x5c] = { init_jump },
[0x5e] = { init_i2c_if },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 822d32a28d6e..065e9f5c8db9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -180,7 +180,8 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
struct gt215_clk_info *info)
{
struct gt215_clk_priv *priv = (void *)clock;
- u32 oclk, sclk, sdiv, diff;
+ u32 oclk, sclk, sdiv;
+ s32 diff;
info->clk = 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index c0fdb89e74ac..24dcdfb58a8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -38,6 +38,14 @@ gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv)
nv_wr32(priv, 0x12004c, 0x4);
nv_wr32(priv, 0x122204, 0x2);
nv_rd32(priv, 0x122204);
+
+ /*
+ * Bug: increase clock timeout to avoid operation failure at high
+ * gpcclk rate.
+ */
+ nv_wr32(priv, 0x122354, 0x800);
+ nv_wr32(priv, 0x128328, 0x800);
+ nv_wr32(priv, 0x124320, 0x800);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 80614f1b2074..282143f49d72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -50,7 +50,12 @@ nv04_instobj_dtor(struct nvkm_object *object)
{
struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
struct nv04_instobj_priv *node = (void *)object;
+ struct nvkm_subdev *subdev = (void *)priv;
+
+ mutex_lock(&subdev->mutex);
nvkm_mm_free(&priv->heap, &node->mem);
+ mutex_unlock(&subdev->mutex);
+
nvkm_instobj_destroy(&node->base);
}
@@ -62,6 +67,7 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
struct nv04_instobj_priv *node;
struct nvkm_instobj_args *args = data;
+ struct nvkm_subdev *subdev = (void *)priv;
int ret;
if (!args->align)
@@ -72,8 +78,10 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
+ mutex_lock(&subdev->mutex);
ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
args->align, &node->mem);
+ mutex_unlock(&subdev->mutex);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index dd39f434b4a7..c3872598b85a 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2299,8 +2299,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
encoder_mode = atombios_get_encoder_mode(encoder);
if (connector && (radeon_audio != 0) &&
((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
- (ENCODER_MODE_IS_DP(encoder_mode) &&
- drm_detect_monitor_audio(radeon_connector_edid(connector)))))
+ ENCODER_MODE_IS_DP(encoder_mode)))
radeon_audio_mode_set(encoder, adjusted_mode);
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 8730562323a8..4a09947be244 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5818,7 +5818,7 @@ int ci_dpm_init(struct radeon_device *rdev)
tmp |= DPM_ENABLED;
break;
default:
- DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
+ DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
break;
}
WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 68fd9fc677e3..44480c1b9738 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 offset;
- if (!dig || !dig->afmt || !dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->pin)
return;
- offset = dig->afmt->offset;
-
- WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
- AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
+ WREG32(AFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
+ AFMT_AUDIO_SRC_SELECT(dig->pin->id));
}
void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
- struct drm_connector *connector, struct drm_display_mode *mode)
+ struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 tmp = 0, offset;
+ u32 tmp = 0;
- if (!dig || !dig->afmt || !dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->pin)
return;
- offset = dig->afmt->pin->offset;
-
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (connector->latency_present[1])
tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
@@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
else
tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
}
- WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+ WREG32_ENDPOINT(dig->pin->offset,
+ AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}
void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
- u8 *sadb, int sad_count)
+ u8 *sadb, int sad_count)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 offset, tmp;
+ u32 tmp;
- if (!dig || !dig->afmt || !dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->pin)
return;
- offset = dig->afmt->pin->offset;
-
/* program the speaker allocation */
- tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+ tmp = RREG32_ENDPOINT(dig->pin->offset,
+ AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
/* set HDMI mode */
tmp |= HDMI_CONNECTION;
@@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
tmp |= SPEAKER_ALLOCATION(sadb[0]);
else
tmp |= SPEAKER_ALLOCATION(5); /* stereo */
- WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+ WREG32_ENDPOINT(dig->pin->offset,
+ AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
}
void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
- u8 *sadb, int sad_count)
+ u8 *sadb, int sad_count)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 offset, tmp;
+ u32 tmp;
- if (!dig || !dig->afmt || !dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->pin)
return;
- offset = dig->afmt->pin->offset;
-
/* program the speaker allocation */
- tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+ tmp = RREG32_ENDPOINT(dig->pin->offset,
+ AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
/* set DP mode */
tmp |= DP_CONNECTION;
@@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
tmp |= SPEAKER_ALLOCATION(sadb[0]);
else
tmp |= SPEAKER_ALLOCATION(5); /* stereo */
- WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+ WREG32_ENDPOINT(dig->pin->offset,
+ AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
}
void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
- struct cea_sad *sads, int sad_count)
+ struct cea_sad *sads, int sad_count)
{
- u32 offset;
int i;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
- if (!dig || !dig->afmt || !dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->pin)
return;
- offset = dig->afmt->pin->offset;
-
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 value = 0;
u8 stereo_freqs = 0;
@@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
- WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
+ WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
}
}
@@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev,
}
void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
- struct radeon_crtc *crtc, unsigned int clock)
+ struct radeon_crtc *crtc, unsigned int clock)
{
/* Two dtos; generally use dto0 for HDMI */
u32 value = 0;
@@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
}
void dce6_dp_audio_set_dto(struct radeon_device *rdev,
- struct radeon_crtc *crtc, unsigned int clock)
+ struct radeon_crtc *crtc, unsigned int clock)
{
/* Two dtos; generally use dto1 for DP */
u32 value = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index fa719c53449b..fbc8d88d6e5d 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -245,6 +245,28 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
static void radeon_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin, u8 enable_mask)
{
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+ struct radeon_encoder_atom_dig *dig;
+ int pin_count = 0;
+
+ if (!pin)
+ return;
+
+ if (rdev->mode_info.mode_config_initialized) {
+ list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+ if (radeon_encoder_is_digital(encoder)) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ dig = radeon_encoder->enc_priv;
+ if (dig->pin == pin)
+ pin_count++;
+ }
+ }
+
+ if ((pin_count > 1) && (enable_mask == 0))
+ return;
+ }
+
if (rdev->audio.funcs->enable)
rdev->audio.funcs->enable(rdev, pin, enable_mask);
}
@@ -336,24 +358,13 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset,
static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct radeon_encoder *radeon_encoder;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct cea_sad *sads;
int sad_count;
- list_for_each_entry(connector,
- &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
+ if (!connector)
return;
- }
sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
if (sad_count <= 0) {
@@ -362,8 +373,6 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
}
BUG_ON(!sads);
- radeon_encoder = to_radeon_encoder(encoder);
-
if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count);
@@ -372,27 +381,16 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
u8 *sadb = NULL;
int sad_count;
- list_for_each_entry(connector,
- &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
+ if (!connector)
return;
- }
- sad_count = drm_edid_to_speaker_allocation(
- radeon_connector_edid(connector), &sadb);
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector),
+ &sadb);
if (sad_count < 0) {
DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n",
sad_count);
@@ -406,26 +404,13 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
}
static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
- struct radeon_encoder *radeon_encoder;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = 0;
-
- list_for_each_entry(connector,
- &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
+ if (!connector)
return;
- }
-
- radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields)
radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
@@ -451,29 +436,23 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder)
}
void radeon_audio_detect(struct drm_connector *connector,
+ struct drm_encoder *encoder,
enum drm_connector_status status)
{
- struct radeon_device *rdev;
- struct radeon_encoder *radeon_encoder;
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
- if (!connector || !connector->encoder)
+ if (!radeon_audio_chipset_supported(rdev))
return;
- rdev = connector->encoder->dev->dev_private;
-
- if (!radeon_audio_chipset_supported(rdev))
+ if (!radeon_encoder_is_digital(encoder))
return;
- radeon_encoder = to_radeon_encoder(connector->encoder);
dig = radeon_encoder->enc_priv;
if (status == connector_status_connected) {
- if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
- radeon_encoder->audio = NULL;
- return;
- }
-
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -486,11 +465,17 @@ void radeon_audio_detect(struct drm_connector *connector,
radeon_encoder->audio = rdev->audio.hdmi_funcs;
}
- dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
- radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+ if (!dig->pin)
+ dig->pin = radeon_audio_get_pin(encoder);
+ radeon_audio_enable(rdev, dig->pin, 0xf);
+ } else {
+ radeon_audio_enable(rdev, dig->pin, 0);
+ dig->pin = NULL;
+ }
} else {
- radeon_audio_enable(rdev, dig->afmt->pin, 0);
- dig->afmt->pin = NULL;
+ radeon_audio_enable(rdev, dig->pin, 0);
+ dig->pin = NULL;
}
}
@@ -518,29 +503,18 @@ static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock
}
static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
struct hdmi_avi_infoframe frame;
int err;
- list_for_each_entry(connector,
- &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return -ENOENT;
- }
+ if (!connector)
+ return -EINVAL;
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
if (err < 0) {
@@ -563,8 +537,8 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
return err;
}
- if (dig && dig->afmt &&
- radeon_encoder->audio && radeon_encoder->audio->set_avi_packet)
+ if (dig && dig->afmt && radeon_encoder->audio &&
+ radeon_encoder->audio->set_avi_packet)
radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset,
buffer, sizeof(buffer));
@@ -722,30 +696,41 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
if (!dig || !dig->afmt)
return;
- radeon_audio_set_mute(encoder, true);
+ if (!connector)
+ return;
- radeon_audio_write_speaker_allocation(encoder);
- radeon_audio_write_sad_regs(encoder);
- radeon_audio_write_latency_fields(encoder, mode);
- radeon_audio_set_dto(encoder, mode->clock);
- radeon_audio_set_vbi_packet(encoder);
- radeon_hdmi_set_color_depth(encoder);
- radeon_audio_update_acr(encoder, mode->clock);
- radeon_audio_set_audio_packet(encoder);
- radeon_audio_select_pin(encoder);
+ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+ radeon_audio_set_mute(encoder, true);
- if (radeon_audio_set_avi_packet(encoder, mode) < 0)
- return;
+ radeon_audio_write_speaker_allocation(encoder);
+ radeon_audio_write_sad_regs(encoder);
+ radeon_audio_write_latency_fields(encoder, mode);
+ radeon_audio_set_dto(encoder, mode->clock);
+ radeon_audio_set_vbi_packet(encoder);
+ radeon_hdmi_set_color_depth(encoder);
+ radeon_audio_update_acr(encoder, mode->clock);
+ radeon_audio_set_audio_packet(encoder);
+ radeon_audio_select_pin(encoder);
+
+ if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+ return;
- radeon_audio_set_mute(encoder, false);
+ radeon_audio_set_mute(encoder, false);
+ } else {
+ radeon_hdmi_set_color_depth(encoder);
+
+ if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+ return;
+ }
}
static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -759,22 +744,27 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
if (!dig || !dig->afmt)
return;
- radeon_audio_write_speaker_allocation(encoder);
- radeon_audio_write_sad_regs(encoder);
- radeon_audio_write_latency_fields(encoder, mode);
- if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
- radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
- else
- radeon_audio_set_dto(encoder, dig_connector->dp_clock);
- radeon_audio_set_audio_packet(encoder);
- radeon_audio_select_pin(encoder);
-
- if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+ if (!connector)
return;
+
+ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+ radeon_audio_write_speaker_allocation(encoder);
+ radeon_audio_write_sad_regs(encoder);
+ radeon_audio_write_latency_fields(encoder, mode);
+ if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
+ radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
+ else
+ radeon_audio_set_dto(encoder, dig_connector->dp_clock);
+ radeon_audio_set_audio_packet(encoder);
+ radeon_audio_select_pin(encoder);
+
+ if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+ return;
+ }
}
void radeon_audio_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
index 8438304f7139..059cc3012062 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.h
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -68,7 +68,8 @@ struct radeon_audio_funcs
int radeon_audio_init(struct radeon_device *rdev);
void radeon_audio_detect(struct drm_connector *connector,
- enum drm_connector_status status);
+ struct drm_encoder *encoder,
+ enum drm_connector_status status);
u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
u32 offset, u32 reg);
void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 3e5f6b71f3ad..c097d3a82bda 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
(RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+ u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+
+ if (hss > lvds->native_mode.hdisplay)
+ hss = (10 - 1) * 8;
+
lvds->native_mode.htotal = lvds->native_mode.hdisplay +
(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
- (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+ hss;
lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
(RBIOS8(tmp + 23) * 8);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cebb65e07e1d..94b21ae70ef7 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1379,8 +1379,16 @@ out:
/* updated in get modes as well since we need to know if it's analog or digital */
radeon_connector_update_scratch_regs(connector, ret);
- if (radeon_audio != 0)
- radeon_audio_detect(connector, ret);
+ if ((radeon_audio != 0) && radeon_connector->use_digital) {
+ const struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+
+ encoder = connector_funcs->best_encoder(connector);
+ if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) {
+ radeon_connector_get_edid(connector);
+ radeon_audio_detect(connector, encoder, ret);
+ }
+ }
exit:
pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1717,8 +1725,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
radeon_connector_update_scratch_regs(connector, ret);
- if (radeon_audio != 0)
- radeon_audio_detect(connector, ret);
+ if ((radeon_audio != 0) && encoder) {
+ radeon_connector_get_edid(connector);
+ radeon_audio_detect(connector, encoder, ret);
+ }
out:
pm_runtime_mark_last_busy(connector->dev->dev);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 5450fa95a47e..c4777c8d0312 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -260,8 +260,10 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
}
}
}
- mb();
- radeon_gart_tlb_flush(rdev);
+ if (rdev->gart.ptr) {
+ mb();
+ radeon_gart_tlb_flush(rdev);
+ }
}
/**
@@ -306,8 +308,10 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
page_base += RADEON_GPU_PAGE_SIZE;
}
}
- mb();
- radeon_gart_tlb_flush(rdev);
+ if (rdev->gart.ptr) {
+ mb();
+ radeon_gart_tlb_flush(rdev);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 013ec7106e55..3dcc5733ff69 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -36,6 +36,7 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
if (robj) {
if (robj->gem_base.import_attach)
drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
+ radeon_mn_unregister(robj);
radeon_bo_unref(&robj);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 1162bfa464f3..171d3e43c30c 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -79,6 +79,11 @@ static void radeon_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
+ /* we can race here at startup, some boards seem to trigger
+ * hotplug irqs when they shouldn't. */
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
mutex_lock(&mode_config->mutex);
if (mode_config->num_connector) {
list_for_each_entry(connector, &mode_config->connector_list, head)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 07909d817381..aecc3e3dec0c 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -237,7 +237,6 @@ struct radeon_afmt {
int offset;
bool last_buffer_filled_status;
int id;
- struct r600_audio_pin *pin;
};
struct radeon_mode_info {
@@ -439,6 +438,7 @@ struct radeon_encoder_atom_dig {
uint8_t backlight_level;
int panel_mode;
struct radeon_afmt *afmt;
+ struct r600_audio_pin *pin;
int active_mst_links;
};
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 318165d4855c..676362769b8d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -75,7 +75,6 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
bo = container_of(tbo, struct radeon_bo, tbo);
radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
- radeon_mn_unregister(bo);
mutex_lock(&bo->rdev->gem.mutex);
list_del_init(&bo->list);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 1dbdf3230dae..787cd8fd897f 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2926,6 +2926,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
{ 0, 0, 0, 0 },
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 01b558fe3695..9a0c2911272a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -555,7 +555,6 @@ static struct platform_driver rockchip_drm_platform_driver = {
.probe = rockchip_drm_platform_probe,
.remove = rockchip_drm_platform_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "rockchip-drm",
.of_match_table = rockchip_drm_dt_ids,
.pm = &rockchip_drm_pm_ops,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 77d52893d40f..002645bb5bbf 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -162,7 +162,8 @@ static void rockchip_drm_output_poll_changed(struct drm_device *dev)
struct rockchip_drm_private *private = dev->dev_private;
struct drm_fb_helper *fb_helper = &private->fbdev_helper;
- drm_fb_helper_hotplug_event(fb_helper);
+ if (fb_helper)
+ drm_fb_helper_hotplug_event(fb_helper);
}
static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index eb2282cc4a56..eba5f8a52fbd 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -54,55 +54,56 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
&rk_obj->dma_attrs);
}
-int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
+static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+
{
+ int ret;
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
struct drm_device *drm = obj->dev;
- unsigned long vm_size;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
- vm_size = vma->vm_end - vma->vm_start;
-
- if (vm_size > obj->size)
- return -EINVAL;
+ /*
+ * dma_alloc_attrs() allocated a struct page table for rk_obj, so clear
+ * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
+ */
+ vma->vm_flags &= ~VM_PFNMAP;
- return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
+ ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
obj->size, &rk_obj->dma_attrs);
+ if (ret)
+ drm_gem_vm_close(vma);
+
+ return ret;
}
-/* drm driver mmap file operations */
-int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_gem_object *obj;
- struct drm_vma_offset_node *node;
+ struct drm_device *drm = obj->dev;
int ret;
- if (drm_device_is_unplugged(dev))
- return -ENODEV;
+ mutex_lock(&drm->struct_mutex);
+ ret = drm_gem_mmap_obj(obj, obj->size, vma);
+ mutex_unlock(&drm->struct_mutex);
+ if (ret)
+ return ret;
- mutex_lock(&dev->struct_mutex);
+ return rockchip_drm_gem_object_mmap(obj, vma);
+}
- node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
- vma->vm_pgoff,
- vma_pages(vma));
- if (!node) {
- mutex_unlock(&dev->struct_mutex);
- DRM_ERROR("failed to find vma node.\n");
- return -EINVAL;
- } else if (!drm_vma_node_is_allowed(node, filp)) {
- mutex_unlock(&dev->struct_mutex);
- return -EACCES;
- }
+/* drm driver mmap file operations */
+int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj;
+ int ret;
- obj = container_of(node, struct drm_gem_object, vma_node);
- ret = rockchip_gem_mmap_buf(obj, vma);
+ ret = drm_gem_mmap(filp, vma);
+ if (ret)
+ return ret;
- mutex_unlock(&dev->struct_mutex);
+ obj = vma->vm_private_data;
- return ret;
+ return rockchip_drm_gem_object_mmap(obj, vma);
}
struct rockchip_gem_object *
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index dc65161d7cad..34b78e736532 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -170,6 +170,7 @@ struct vop_win_phy {
struct vop_reg enable;
struct vop_reg format;
+ struct vop_reg rb_swap;
struct vop_reg act_info;
struct vop_reg dsp_info;
struct vop_reg dsp_st;
@@ -199,8 +200,12 @@ struct vop_data {
static const uint32_t formats_01[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
DRM_FORMAT_NV12,
DRM_FORMAT_NV16,
DRM_FORMAT_NV24,
@@ -209,8 +214,12 @@ static const uint32_t formats_01[] = {
static const uint32_t formats_234[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
};
static const struct vop_win_phy win01_data = {
@@ -218,6 +227,7 @@ static const struct vop_win_phy win01_data = {
.nformats = ARRAY_SIZE(formats_01),
.enable = VOP_REG(WIN0_CTRL0, 0x1, 0),
.format = VOP_REG(WIN0_CTRL0, 0x7, 1),
+ .rb_swap = VOP_REG(WIN0_CTRL0, 0x1, 12),
.act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0),
.dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0),
.dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0),
@@ -234,6 +244,7 @@ static const struct vop_win_phy win23_data = {
.nformats = ARRAY_SIZE(formats_234),
.enable = VOP_REG(WIN2_CTRL0, 0x1, 0),
.format = VOP_REG(WIN2_CTRL0, 0x7, 1),
+ .rb_swap = VOP_REG(WIN2_CTRL0, 0x1, 12),
.dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0),
.dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0),
.yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0),
@@ -242,15 +253,6 @@ static const struct vop_win_phy win23_data = {
.dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0),
};
-static const struct vop_win_phy cursor_data = {
- .data_formats = formats_234,
- .nformats = ARRAY_SIZE(formats_234),
- .enable = VOP_REG(HWC_CTRL0, 0x1, 0),
- .format = VOP_REG(HWC_CTRL0, 0x7, 1),
- .dsp_st = VOP_REG(HWC_DSP_ST, 0x1fff1fff, 0),
- .yrgb_mst = VOP_REG(HWC_MST, 0xffffffff, 0),
-};
-
static const struct vop_ctrl ctrl_data = {
.standby = VOP_REG(SYS_CTRL, 0x1, 22),
.gate_en = VOP_REG(SYS_CTRL, 0x1, 23),
@@ -282,14 +284,14 @@ static const struct vop_reg_data vop_init_reg_table[] = {
/*
* Note: rk3288 has a dedicated 'cursor' window, however, that window requires
* special support to get alpha blending working. For now, just use overlay
- * window 1 for the drm cursor.
+ * window 3 for the drm cursor.
+ *
*/
static const struct vop_win_data rk3288_vop_win_data[] = {
{ .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY },
- { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_CURSOR },
+ { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_OVERLAY },
{ .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
- { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
- { .base = 0x00, .phy = &cursor_data, .type = DRM_PLANE_TYPE_OVERLAY },
+ { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_CURSOR },
};
static const struct vop_data rk3288_vop = {
@@ -352,15 +354,32 @@ static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
}
}
+static bool has_rb_swapped(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_BGR565:
+ return true;
+ default:
+ return false;
+ }
+}
+
static enum vop_data_format vop_convert_format(uint32_t format)
{
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
return VOP_FMT_ARGB8888;
case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
return VOP_FMT_RGB888;
case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
return VOP_FMT_RGB565;
case DRM_FORMAT_NV12:
return VOP_FMT_YUV420SP;
@@ -378,6 +397,7 @@ static bool is_alpha_support(uint32_t format)
{
switch (format) {
case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
return true;
default:
return false;
@@ -588,6 +608,7 @@ static int vop_update_plane_event(struct drm_plane *plane,
enum vop_data_format format;
uint32_t val;
bool is_alpha;
+ bool rb_swap;
bool visible;
int ret;
struct drm_rect dest = {
@@ -621,6 +642,7 @@ static int vop_update_plane_event(struct drm_plane *plane,
return 0;
is_alpha = is_alpha_support(fb->pixel_format);
+ rb_swap = has_rb_swapped(fb->pixel_format);
format = vop_convert_format(fb->pixel_format);
if (format < 0)
return format;
@@ -689,6 +711,7 @@ static int vop_update_plane_event(struct drm_plane *plane,
val = (dsp_sty - 1) << 16;
val |= (dsp_stx - 1) & 0xffff;
VOP_WIN_SET(vop, win, dsp_st, val);
+ VOP_WIN_SET(vop, win, rb_swap, rb_swap);
if (is_alpha) {
VOP_WIN_SET(vop, win, dst_alpha_ctl,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 882cccdad272..ac6fe40b99f7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -490,7 +490,8 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
else if (boot_cpu_data.x86 > 3)
tmp = pgprot_noncached(tmp);
#endif
-#if defined(__ia64__) || defined(__arm__) || defined(__powerpc__)
+#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
+ defined(__powerpc__)
if (caching_flags & TTM_PL_FLAG_WC)
tmp = pgprot_writecombine(tmp);
else
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 3077f1554099..624d941aaad1 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -963,14 +963,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
} else {
pool->npages_free += count;
list_splice(&ttm_dma->pages_list, &pool->free_list);
- npages = count;
- if (pool->npages_free > _manager->options.max_size) {
+ /*
+ * Wait to have at least NUM_PAGES_TO_ALLOC number of pages
+ * to free in order to minimize calls to set_memory_wb().
+ */
+ if (pool->npages_free >= (_manager->options.max_size +
+ NUM_PAGES_TO_ALLOC))
npages = pool->npages_free - _manager->options.max_size;
- /* free at least NUM_PAGES_TO_ALLOC number of pages
- * to reduce calls to set_memory_wb */
- if (npages < NUM_PAGES_TO_ALLOC)
- npages = NUM_PAGES_TO_ALLOC;
- }
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
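To make the new trim threshold concrete, here is a small standalone sketch of the policy with made-up numbers; pages_to_trim() and its arguments are illustrative only and do not exist in the driver:

    /* returns how many pages the pool would free in one trim pass */
    static unsigned long pages_to_trim(unsigned long npages_free,
                                       unsigned long max_size,
                                       unsigned long batch /* NUM_PAGES_TO_ALLOC */)
    {
            /* wait until at least one full batch sits above the limit... */
            if (npages_free < max_size + batch)
                    return 0;
            /* ...then shrink back down to max_size in a single pass */
            return npages_free - max_size;
    }

    /* e.g. max_size = 1000, batch = 512: 1400 free -> trim 0, 1600 free -> trim 600 */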
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 654c8daeb5ab..97ad3bcb99a7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2492,7 +2492,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
true, NULL);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_err_nores;
ret = vmw_validate_buffers(dev_priv, sw_context);
if (unlikely(ret != 0))
@@ -2536,6 +2536,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_fifo_commit(dev_priv, command_size);
+ mutex_unlock(&dev_priv->binding_mutex);
vmw_query_bo_switch_commit(dev_priv, sw_context);
ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +2552,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
DRM_ERROR("Fence submission error. Syncing.\n");
vmw_resource_list_unreserve(&sw_context->resource_list, false);
- mutex_unlock(&dev_priv->binding_mutex);
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
(void *) fence);
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 6d2f39d36e44..00f2058944e5 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1107,6 +1107,9 @@ static int ipu_irq_init(struct ipu_soc *ipu)
return ret;
}
+ for (i = 0; i < IPU_NUM_IRQS; i += 32)
+ ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32));
+
for (i = 0; i < IPU_NUM_IRQS; i += 32) {
gc = irq_get_domain_generic_chip(ipu->domain, i);
gc->reg_base = ipu->cm_reg;
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index f822fd2a1ada..884d82f9190e 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -546,6 +546,12 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 157c62775053..e6fce23b121a 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1782,6 +1782,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -2463,6 +2466,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ }
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 3318de690e00..a2dbbbe0d8d7 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -356,6 +356,8 @@ static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size)
struct cp2112_force_read_report report;
int ret;
+ if (size > sizeof(dev->read_data))
+ size = sizeof(dev->read_data);
report.report = CP2112_DATA_READ_FORCE_SEND;
report.length = cpu_to_be16(size);
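
The hunk above clamps the requested read length to the size of the device's receive buffer before the force-read report is built. A minimal userspace sketch of the same defensive clamping pattern (the buffer size and names below are illustrative stand-ins, not taken from the driver):

#include <stdio.h>
#include <string.h>

#define READ_BUF_SIZE 61   /* stand-in; the driver clamps to sizeof(dev->read_data) */

/* Copy at most READ_BUF_SIZE bytes, silently clamping oversized requests. */
static size_t bounded_read(unsigned char *dst, const unsigned char *src,
			   size_t requested)
{
	if (requested > READ_BUF_SIZE)
		requested = READ_BUF_SIZE;
	memcpy(dst, src, requested);
	return requested;
}

int main(void)
{
	unsigned char src[128] = { 0 }, dst[READ_BUF_SIZE];

	printf("copied %zu bytes\n", bounded_read(dst, src, sizeof(src)));
	return 0;
}
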
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b04b0820d816..b3b225b75d0a 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -142,6 +142,9 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI 0x0272
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274
#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 3511bbaba505..e3c63640df73 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -462,12 +462,15 @@ out:
static void hidinput_cleanup_battery(struct hid_device *dev)
{
+ const struct power_supply_desc *psy_desc;
+
if (!dev->battery)
return;
+ psy_desc = dev->battery->desc;
power_supply_unregister(dev->battery);
- kfree(dev->battery->desc->name);
- kfree(dev->battery->desc);
+ kfree(psy_desc->name);
+ kfree(psy_desc);
dev->battery = NULL;
}
#else /* !CONFIG_HID_BATTERY_STRENGTH */
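
The rewrite above saves the power-supply descriptor pointer before unregistering, because the unregister call can free the battery object that the descriptor was reached through. A hedged sketch of the general "snapshot the pointer before teardown" pattern, using plain malloc/free (the structure names are illustrative):

#include <stdlib.h>
#include <string.h>

struct desc { char *name; };
struct battery { struct desc *desc; };

/* Freeing 'bat' first would make bat->desc a dangling access,
 * so take a local copy of the pointer before tearing down. */
static void cleanup_battery(struct battery *bat)
{
	struct desc *d;

	if (!bat)
		return;

	d = bat->desc;
	free(bat);        /* analogous to power_supply_unregister() */
	free(d->name);
	free(d);
}

int main(void)
{
	struct battery *bat = malloc(sizeof(*bat));

	bat->desc = malloc(sizeof(*bat->desc));
	bat->desc->name = strdup("example");
	cleanup_battery(bat);
	return 0;
}
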
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 6a9b05b328a9..7c811252c1ce 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -778,9 +778,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
/*
* some egalax touchscreens have "application == HID_DG_TOUCHSCREEN"
* for the stylus.
+ * The check for mt_report_id ensures we don't process
+ * HID_DG_CONTACTCOUNT from the pen report as it is outside the physical
+ * collection, but within the report ID.
*/
if (field->physical == HID_DG_STYLUS)
return 0;
+ else if ((field->physical == 0) &&
+ (field->report->id != td->mt_report_id) &&
+ (td->mt_report_id != -1))
+ return 0;
if (field->application == HID_DG_TOUCHSCREEN ||
field->application == HID_DG_TOUCHPAD)
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
index 94167310e15a..b905d501e752 100644
--- a/drivers/hid/hid-uclogic.c
+++ b/drivers/hid/hid-uclogic.c
@@ -858,7 +858,7 @@ static int uclogic_tablet_enable(struct hid_device *hdev)
for (p = drvdata->rdesc;
p <= drvdata->rdesc + drvdata->rsize - 4;) {
if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
- p[3] < sizeof(params)) {
+ p[3] < ARRAY_SIZE(params)) {
v = params[p[3]];
put_unaligned(cpu_to_le32(v), (s32 *)p);
p += 4;
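
The one-line fix above matters because params is an array of multi-byte elements: sizeof() counts bytes while ARRAY_SIZE() counts elements, so the old bound allowed an out-of-range index. A standalone illustration (the array contents are made up):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	static const int params[] = { 10, 20, 30, 40 };

	/* 16 bytes vs 4 elements on a typical 32-bit-int platform */
	printf("sizeof(params)     = %zu\n", sizeof(params));
	printf("ARRAY_SIZE(params) = %zu\n", ARRAY_SIZE(params));

	/* A bounds check must compare an index against the element count. */
	for (size_t i = 0; i < ARRAY_SIZE(params); i++)
		printf("params[%zu] = %d\n", i, params[i]);
	return 0;
}
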
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 53e7de7cb9e2..20f9a653444c 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -87,6 +87,9 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 4c0ffca97bef..01b937e63cf3 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1271,17 +1271,52 @@ fail_leds:
pad_input_dev = NULL;
wacom_wac->pad_registered = false;
fail_register_pad_input:
- input_unregister_device(touch_input_dev);
+ if (touch_input_dev)
+ input_unregister_device(touch_input_dev);
wacom_wac->touch_input = NULL;
wacom_wac->touch_registered = false;
fail_register_touch_input:
- input_unregister_device(pen_input_dev);
+ if (pen_input_dev)
+ input_unregister_device(pen_input_dev);
wacom_wac->pen_input = NULL;
wacom_wac->pen_registered = false;
fail_register_pen_input:
return error;
}
+/*
+ * Not all devices report physical dimensions from HID.
+ * Compute the default from hardcoded logical dimension
+ * and resolution before driver overwrites them.
+ */
+static void wacom_set_default_phy(struct wacom_features *features)
+{
+ if (features->x_resolution) {
+ features->x_phy = (features->x_max * 100) /
+ features->x_resolution;
+ features->y_phy = (features->y_max * 100) /
+ features->y_resolution;
+ }
+}
+
+static void wacom_calculate_res(struct wacom_features *features)
+{
+ /* set unit to "100th of a mm" for devices not reported by HID */
+ if (!features->unit) {
+ features->unit = 0x11;
+ features->unitExpo = -3;
+ }
+
+ features->x_resolution = wacom_calc_hid_res(features->x_max,
+ features->x_phy,
+ features->unit,
+ features->unitExpo);
+ features->y_resolution = wacom_calc_hid_res(features->y_max,
+ features->y_phy,
+ features->unit,
+ features->unitExpo);
+}
+
static void wacom_wireless_work(struct work_struct *work)
{
struct wacom *wacom = container_of(work, struct wacom, work);
@@ -1339,6 +1374,8 @@ static void wacom_wireless_work(struct work_struct *work)
if (wacom_wac1->features.type != INTUOSHT &&
wacom_wac1->features.type != BAMBOO_PT)
wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD;
+ wacom_set_default_phy(&wacom_wac1->features);
+ wacom_calculate_res(&wacom_wac1->features);
snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen",
wacom_wac1->features.name);
snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad",
@@ -1357,7 +1394,9 @@ static void wacom_wireless_work(struct work_struct *work)
wacom_wac2->features =
*((struct wacom_features *)id->driver_data);
wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
+ wacom_set_default_phy(&wacom_wac2->features);
wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
+ wacom_calculate_res(&wacom_wac2->features);
snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX,
"%s (WL) Finger",wacom_wac2->features.name);
snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX,
@@ -1405,39 +1444,6 @@ void wacom_battery_work(struct work_struct *work)
}
}
-/*
- * Not all devices report physical dimensions from HID.
- * Compute the default from hardcoded logical dimension
- * and resolution before driver overwrites them.
- */
-static void wacom_set_default_phy(struct wacom_features *features)
-{
- if (features->x_resolution) {
- features->x_phy = (features->x_max * 100) /
- features->x_resolution;
- features->y_phy = (features->y_max * 100) /
- features->y_resolution;
- }
-}
-
-static void wacom_calculate_res(struct wacom_features *features)
-{
- /* set unit to "100th of a mm" for devices not reported by HID */
- if (!features->unit) {
- features->unit = 0x11;
- features->unitExpo = -3;
- }
-
- features->x_resolution = wacom_calc_hid_res(features->x_max,
- features->x_phy,
- features->unit,
- features->unitExpo);
- features->y_resolution = wacom_calc_hid_res(features->y_max,
- features->y_phy,
- features->unit,
- features->unitExpo);
-}
-
static size_t wacom_compute_pktlen(struct hid_device *hdev)
{
struct hid_report_enum *report_enum;
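
The two helpers moved above encode a simple relationship: assuming the resolution is in counts per millimetre, the physical extent in hundredths of a millimetre is max * 100 / resolution, and the resolution can be recomputed from the physical extent when only the latter is known. A rough userspace sketch of that arithmetic (values and unit handling are illustrative; the real driver also folds in the HID unit exponent via wacom_calc_hid_res()):

#include <stdio.h>

/* logical max in counts, resolution in counts per mm */
static int default_phy(int max, int resolution)
{
	return resolution ? (max * 100) / resolution : 0;  /* 1/100 mm */
}

/* inverse: counts per mm from logical max and physical size in 1/100 mm */
static int calc_res(int max, int phy)
{
	return phy ? (max * 100) / phy : 0;
}

int main(void)
{
	int x_max = 21648, x_res = 100;            /* example tablet values */
	int x_phy = default_phy(x_max, x_res);

	printf("x_phy = %d (1/100 mm), back-computed res = %d/mm\n",
	       x_phy, calc_res(x_max, x_phy));
	return 0;
}
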
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 232da89f4e88..0d244239e55d 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2213,6 +2213,9 @@ void wacom_setup_device_quirks(struct wacom *wacom)
features->x_max = 4096;
features->y_max = 4096;
}
+ else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ }
}
/*
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 37c16afe007a..c8487894b312 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -929,6 +929,21 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
+static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
+ {
+ /*
+ * CPU fan speed going up and down on Dell Studio XPS 8100
+ * for unknown reasons.
+ */
+ .ident = "Dell Studio XPS 8100",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
+ },
+ },
+ { }
+};
+
/*
* Probe for the presence of a supported laptop.
*/
@@ -940,7 +955,8 @@ static int __init i8k_probe(void)
/*
* Get DMI information
*/
- if (!dmi_check_system(i8k_dmi_table)) {
+ if (!dmi_check_system(i8k_dmi_table) ||
+ dmi_check_system(i8k_blacklist_dmi_table)) {
if (!ignore_dmi && !force)
return -ENODEV;
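
The probe change above combines a whitelist and a blacklist: the driver bails out when the system is not in the supported DMI table, or when it is explicitly blacklisted. A compact sketch of that decision with stand-in predicates (the dmi_check_system() calls in the diff are the real kernel API; the booleans below are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for dmi_check_system() on the support and blacklist tables. */
static bool matches_support_table(void)   { return true;  }
static bool matches_blacklist_table(void) { return false; }

static int probe(bool force, bool ignore_dmi)
{
	if (!matches_support_table() || matches_blacklist_table()) {
		if (!ignore_dmi && !force)
			return -19;   /* -ENODEV */
	}
	return 0;
}

int main(void)
{
	printf("probe() = %d\n", probe(false, false));
	return 0;
}
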
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index 9b55e673b67c..85d106fe3ce8 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -582,6 +582,7 @@ static const struct of_device_id g762_dt_match[] = {
{ .compatible = "gmt,g763" },
{ },
};
+MODULE_DEVICE_TABLE(of, g762_dt_match);
/*
* Grab clock (a required property), enable it, get (fixed) clock frequency
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 28fcb2e246d5..fbfc02bb2cfa 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -195,7 +195,7 @@ abort:
}
static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index,
- unsigned int voltage)
+ unsigned long voltage)
{
int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr];
int err;
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index b77b82f24480..08ff89d222e5 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -412,8 +412,9 @@ static ssize_t show_pwm(struct device *dev,
return sprintf(buf, "%d\n", val);
}
-static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t store_enable(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
int index = to_sensor_dev_attr(devattr)->index;
struct nct7904_data *data = dev_get_drvdata(dev);
@@ -422,18 +423,18 @@ static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
- if (val > 1 || (val && !data->fan_mode[index]))
+ if (val < 1 || val > 2 || (val == 2 && !data->fan_mode[index]))
return -EINVAL;
ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + index,
- val ? data->fan_mode[index] : 0);
+ val == 2 ? data->fan_mode[index] : 0);
return ret ? ret : count;
}
-/* Return 0 for manual mode or 1 for SmartFan mode */
-static ssize_t show_mode(struct device *dev,
- struct device_attribute *devattr, char *buf)
+/* Return 1 for manual mode or 2 for SmartFan mode */
+static ssize_t show_enable(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
int index = to_sensor_dev_attr(devattr)->index;
struct nct7904_data *data = dev_get_drvdata(dev);
@@ -443,36 +444,36 @@ static ssize_t show_mode(struct device *dev,
if (val < 0)
return val;
- return sprintf(buf, "%d\n", val ? 1 : 0);
+ return sprintf(buf, "%d\n", val ? 2 : 1);
}
/* 2 attributes per channel: pwm and mode */
-static SENSOR_DEVICE_ATTR(fan1_pwm, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
show_pwm, store_pwm, 0);
-static SENSOR_DEVICE_ATTR(fan1_mode, S_IRUGO | S_IWUSR,
- show_mode, store_mode, 0);
-static SENSOR_DEVICE_ATTR(fan2_pwm, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
+ show_enable, store_enable, 0);
+static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR,
show_pwm, store_pwm, 1);
-static SENSOR_DEVICE_ATTR(fan2_mode, S_IRUGO | S_IWUSR,
- show_mode, store_mode, 1);
-static SENSOR_DEVICE_ATTR(fan3_pwm, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
+ show_enable, store_enable, 1);
+static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR,
show_pwm, store_pwm, 2);
-static SENSOR_DEVICE_ATTR(fan3_mode, S_IRUGO | S_IWUSR,
- show_mode, store_mode, 2);
-static SENSOR_DEVICE_ATTR(fan4_pwm, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR,
+ show_enable, store_enable, 2);
+static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR,
show_pwm, store_pwm, 3);
-static SENSOR_DEVICE_ATTR(fan4_mode, S_IRUGO | S_IWUSR,
- show_mode, store_mode, 3);
+static SENSOR_DEVICE_ATTR(pwm4_enable, S_IRUGO | S_IWUSR,
+ show_enable, store_enable, 3);
static struct attribute *nct7904_fanctl_attrs[] = {
- &sensor_dev_attr_fan1_pwm.dev_attr.attr,
- &sensor_dev_attr_fan1_mode.dev_attr.attr,
- &sensor_dev_attr_fan2_pwm.dev_attr.attr,
- &sensor_dev_attr_fan2_mode.dev_attr.attr,
- &sensor_dev_attr_fan3_pwm.dev_attr.attr,
- &sensor_dev_attr_fan3_mode.dev_attr.attr,
- &sensor_dev_attr_fan4_pwm.dev_attr.attr,
- &sensor_dev_attr_fan4_mode.dev_attr.attr,
+ &sensor_dev_attr_pwm1.dev_attr.attr,
+ &sensor_dev_attr_pwm1_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm2.dev_attr.attr,
+ &sensor_dev_attr_pwm2_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm3.dev_attr.attr,
+ &sensor_dev_attr_pwm3_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm4.dev_attr.attr,
+ &sensor_dev_attr_pwm4_enable.dev_attr.attr,
NULL
};
@@ -574,6 +575,7 @@ static const struct i2c_device_id nct7904_id[] = {
{"nct7904", 0},
{}
};
+MODULE_DEVICE_TABLE(i2c, nct7904_id);
static struct i2c_driver nct7904_driver = {
.class = I2C_CLASS_HWMON,
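
The sysfs rework above follows the standard hwmon convention: the attributes are named pwmN and pwmN_enable, and pwmN_enable takes 1 for manual fan control and 2 for the chip's automatic (SmartFan) mode. A small sketch of the value mapping used by store_enable()/show_enable(), with the register access stubbed out (names are illustrative):

#include <stdio.h>

/* Map a sysfs pwmN_enable value to the fan-control mode register contents.
 * Returns the register value, or -1 for invalid input.  'saved_mode' is the
 * SmartFan configuration remembered from probe time. */
static int enable_to_reg(unsigned long val, unsigned char saved_mode)
{
	if (val < 1 || val > 2 || (val == 2 && !saved_mode))
		return -1;
	return val == 2 ? saved_mode : 0;
}

/* Reverse mapping for show_enable(): a non-zero register means SmartFan. */
static int reg_to_enable(unsigned char reg)
{
	return reg ? 2 : 1;
}

int main(void)
{
	printf("enable 1 -> reg %d\n", enable_to_reg(1, 0x55));
	printf("enable 2 -> reg %d\n", enable_to_reg(2, 0x55));
	printf("reg 0x55 -> enable %d\n", reg_to_enable(0x55));
	return 0;
}
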
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index af162b4c7a6d..025686d41640 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -692,7 +692,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, iface);
- dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Contoller, "
+ dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Controller, "
"regs_base@%p\n", iface->regs_base);
return 0;
@@ -735,6 +735,6 @@ subsys_initcall(i2c_bfin_twi_init);
module_exit(i2c_bfin_twi_exit);
MODULE_AUTHOR("Bryan Wu, Sonic Zhang");
-MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Contoller Driver");
+MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:i2c-bfin-twi");
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index d1c22e3fdd14..fc9bf7f30e35 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1247,7 +1247,14 @@ static void omap_i2c_prepare_recovery(struct i2c_adapter *adap)
u32 reg;
reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
+ /* enable test mode */
reg |= OMAP_I2C_SYSTEST_ST_EN;
+ /* select SDA/SCL IO mode */
+ reg |= 3 << OMAP_I2C_SYSTEST_TMODE_SHIFT;
+ /* set SCL to high-impedance state (reset value is 0) */
+ reg |= OMAP_I2C_SYSTEST_SCL_O;
+ /* set SDA to high-impedance state (reset value is 0) */
+ reg |= OMAP_I2C_SYSTEST_SDA_O;
omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg);
}
@@ -1257,7 +1264,11 @@ static void omap_i2c_unprepare_recovery(struct i2c_adapter *adap)
u32 reg;
reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
+ /* restore reset values */
reg &= ~OMAP_I2C_SYSTEST_ST_EN;
+ reg &= ~OMAP_I2C_SYSTEST_TMODE_MASK;
+ reg &= ~OMAP_I2C_SYSTEST_SCL_O;
+ reg &= ~OMAP_I2C_SYSTEST_SDA_O;
omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg);
}
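
Both hunks above are plain read-modify-write sequences on the SYSTEST register: recovery preparation sets the test-enable, SDA/SCL I/O-mode and high-impedance bits, and the unprepare path clears exactly the same bits to restore the reset state. A generic sketch of that pattern with made-up bit positions (not the real OMAP layout):

#include <stdint.h>
#include <stdio.h>

#define ST_EN       (1u << 15)   /* illustrative bit layout only */
#define TMODE_MASK  (3u << 12)
#define TMODE_IO    (3u << 12)
#define SCL_O       (1u << 1)
#define SDA_O       (1u << 0)

static uint32_t prepare(uint32_t reg)
{
	reg |= ST_EN;                /* enable test mode */
	reg |= TMODE_IO;             /* select SDA/SCL I/O mode */
	reg |= SCL_O | SDA_O;        /* release both lines (high impedance) */
	return reg;
}

static uint32_t unprepare(uint32_t reg)
{
	reg &= ~(ST_EN | TMODE_MASK | SCL_O | SDA_O);   /* restore reset values */
	return reg;
}

int main(void)
{
	uint32_t reg = 0;

	reg = prepare(reg);
	printf("prepared:   0x%08x\n", reg);
	reg = unprepare(reg);
	printf("unprepared: 0x%08x\n", reg);
	return 0;
}
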
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index e6d4935161e4..c83e4d13cfc5 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -567,6 +567,9 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
if (bri->prepare_recovery)
bri->prepare_recovery(adap);
+ bri->set_scl(adap, val);
+ ndelay(RECOVERY_NDELAY);
+
/*
* By this time SCL is high, as we need to give 9 falling-rising edges
*/
@@ -597,7 +600,6 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
int i2c_generic_scl_recovery(struct i2c_adapter *adap)
{
- adap->bus_recovery_info->set_scl(adap, 1);
return i2c_generic_recovery(adap);
}
EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
@@ -1338,13 +1340,17 @@ static int of_dev_node_match(struct device *dev, void *data)
struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
{
struct device *dev;
+ struct i2c_client *client;
- dev = bus_find_device(&i2c_bus_type, NULL, node,
- of_dev_node_match);
+ dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
if (!dev)
return NULL;
- return i2c_verify_client(dev);
+ client = i2c_verify_client(dev);
+ if (!client)
+ put_device(dev);
+
+ return client;
}
EXPORT_SYMBOL(of_find_i2c_device_by_node);
@@ -1352,13 +1358,17 @@ EXPORT_SYMBOL(of_find_i2c_device_by_node);
struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
{
struct device *dev;
+ struct i2c_adapter *adapter;
- dev = bus_find_device(&i2c_bus_type, NULL, node,
- of_dev_node_match);
+ dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
if (!dev)
return NULL;
- return i2c_verify_adapter(dev);
+ adapter = i2c_verify_adapter(dev);
+ if (!adapter)
+ put_device(dev);
+
+ return adapter;
}
EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
#else
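
The of_find_* changes above fix a reference-count leak: bus_find_device() returns a device with an elevated refcount, so if the verify step decides it is not the kind of object the caller asked for, that reference has to be dropped before returning NULL. A hedged sketch of the lookup/verify/put shape with a toy refcount (put_device() is the real kernel call named in the diff; everything else here is illustrative):

#include <stdio.h>

struct device { int refcount; int is_client; };

static struct device *find_device(struct device *d) { d->refcount++; return d; }
static void put_device(struct device *d)            { d->refcount--; }

/* Return the device only if it passes verification; otherwise drop the
 * reference taken by find_device() and return NULL. */
static struct device *lookup_client(struct device *candidate)
{
	struct device *dev = find_device(candidate);

	if (!dev->is_client) {
		put_device(dev);
		return NULL;
	}
	return dev;
}

int main(void)
{
	struct device adapter = { .refcount = 1, .is_client = 0 };

	if (!lookup_client(&adapter))
		printf("not a client, refcount back to %d\n", adapter.refcount);
	return 0;
}
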
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 822374654609..1da449614779 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -80,9 +80,6 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
struct eeprom_data *eeprom;
unsigned long flags;
- if (off + count > attr->size)
- return -EFBIG;
-
eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
spin_lock_irqsave(&eeprom->buffer_lock, flags);
@@ -98,9 +95,6 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob
struct eeprom_data *eeprom;
unsigned long flags;
- if (off + count > attr->size)
- return -EFBIG;
-
eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
spin_lock_irqsave(&eeprom->buffer_lock, flags);
diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c
index 4e70f51c2370..cc5a35750b50 100644
--- a/drivers/iio/accel/bmc150-accel.c
+++ b/drivers/iio/accel/bmc150-accel.c
@@ -1464,7 +1464,7 @@ static void bmc150_accel_unregister_triggers(struct bmc150_accel_data *data,
{
int i;
- for (i = from; i >= 0; i++) {
+ for (i = from; i >= 0; i--) {
if (data->triggers[i].indio_trig) {
iio_trigger_unregister(data->triggers[i].indio_trig);
data->triggers[i].indio_trig = NULL;
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index e8e2077c7244..13ea1ea23328 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -557,21 +557,21 @@ static void mma8452_transient_interrupt(struct iio_dev *indio_dev)
if (src & MMA8452_TRANSIENT_SRC_XTRANSE)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X,
- IIO_EV_TYPE_THRESH,
+ IIO_EV_TYPE_MAG,
IIO_EV_DIR_RISING),
ts);
if (src & MMA8452_TRANSIENT_SRC_YTRANSE)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y,
- IIO_EV_TYPE_THRESH,
+ IIO_EV_TYPE_MAG,
IIO_EV_DIR_RISING),
ts);
if (src & MMA8452_TRANSIENT_SRC_ZTRANSE)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z,
- IIO_EV_TYPE_THRESH,
+ IIO_EV_TYPE_MAG,
IIO_EV_DIR_RISING),
ts);
}
@@ -644,7 +644,7 @@ static int mma8452_reg_access_dbg(struct iio_dev *indio_dev,
static const struct iio_event_spec mma8452_transient_event[] = {
{
- .type = IIO_EV_TYPE_THRESH,
+ .type = IIO_EV_TYPE_MAG,
.dir = IIO_EV_DIR_RISING,
.mask_separate = BIT(IIO_EV_INFO_ENABLE),
.mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 7c5565891cb8..eb0cd897714a 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -153,8 +153,7 @@ config DA9150_GPADC
config CC10001_ADC
tristate "Cosmic Circuits 10001 ADC driver"
- depends on HAVE_CLK || REGULATOR
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && HAVE_CLK && REGULATOR
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 8a0eb4a04fb5..7b40925dd4ff 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -182,7 +182,7 @@ struct at91_adc_caps {
u8 ts_pen_detect_sensitivity;
/* startup time calculate function */
- u32 (*calc_startup_ticks)(u8 startup_time, u32 adc_clk_khz);
+ u32 (*calc_startup_ticks)(u32 startup_time, u32 adc_clk_khz);
u8 num_channels;
struct at91_adc_reg_desc registers;
@@ -201,7 +201,7 @@ struct at91_adc_state {
u8 num_channels;
void __iomem *reg_base;
struct at91_adc_reg_desc *registers;
- u8 startup_time;
+ u32 startup_time;
u8 sample_hold_time;
bool sleep_mode;
struct iio_trigger **trig;
@@ -779,7 +779,7 @@ ret:
return ret;
}
-static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz)
+static u32 calc_startup_ticks_9260(u32 startup_time, u32 adc_clk_khz)
{
/*
* Number of ticks needed to cover the startup time of the ADC
@@ -790,7 +790,7 @@ static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz)
return round_up((startup_time * adc_clk_khz / 1000) - 1, 8) / 8;
}
-static u32 calc_startup_ticks_9x5(u8 startup_time, u32 adc_clk_khz)
+static u32 calc_startup_ticks_9x5(u32 startup_time, u32 adc_clk_khz)
{
/*
* For sama5d3x and at91sam9x5, the formula changes to:
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 8d9c9b9215dd..d819823f7257 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -299,6 +299,8 @@ static int mcp320x_probe(struct spi_device *spi)
indio_dev->channels = chip_info->channels;
indio_dev->num_channels = chip_info->num_channels;
+ adc->chip_info = chip_info;
+
adc->transfer[0].tx_buf = &adc->tx_buf;
adc->transfer[0].len = sizeof(adc->tx_buf);
adc->transfer[1].rx_buf = adc->rx_buf;
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index 8d4e019ea4ca..9c311c1e1ac7 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -349,3 +349,7 @@ static struct platform_driver rockchip_saradc_driver = {
};
module_platform_driver(rockchip_saradc_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("Rockchip SARADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index 06f4792240f0..ebe415f10640 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -833,7 +833,8 @@ static int twl4030_madc_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
twl4030_madc_threaded_irq_handler,
- IRQF_TRIGGER_RISING, "twl4030_madc", madc);
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "twl4030_madc", madc);
if (ret) {
dev_err(&pdev->dev, "could not request irq\n");
goto err_i2c;
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 480f335a0f9f..819632bf1fda 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -635,7 +635,7 @@ static int vf610_adc_reg_access(struct iio_dev *indio_dev,
struct vf610_adc *info = iio_priv(indio_dev);
if ((readval == NULL) ||
- (!(reg % 4) || (reg > VF610_REG_ADC_PCTL)))
+ ((reg % 4) || (reg > VF610_REG_ADC_PCTL)))
return -EINVAL;
*readval = readl(info->regs + reg);
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 610fc98f88ef..595511022795 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -36,6 +36,8 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
s32 poll_value = 0;
if (state) {
+ if (!atomic_read(&st->user_requested_state))
+ return 0;
if (sensor_hub_device_open(st->hsdev))
return -EIO;
@@ -52,8 +54,12 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
poll_value = hid_sensor_read_poll_value(st);
} else {
- if (!atomic_dec_and_test(&st->data_ready))
+ int val;
+
+ val = atomic_dec_if_positive(&st->data_ready);
+ if (val < 0)
return 0;
+
sensor_hub_device_close(st->hsdev);
state_val = hid_sensor_get_usage_index(st->hsdev,
st->power_state.report_id,
@@ -92,9 +98,11 @@ EXPORT_SYMBOL(hid_sensor_power_state);
int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
{
+
#ifdef CONFIG_PM
int ret;
+ atomic_set(&st->user_requested_state, state);
if (state)
ret = pm_runtime_get_sync(&st->pdev->dev);
else {
@@ -109,6 +117,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
return 0;
#else
+ atomic_set(&st->user_requested_state, state);
return _hid_sensor_power_state(st, state);
#endif
}
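
The switch from atomic_dec_and_test() to atomic_dec_if_positive() above keeps the data_ready counter from going negative when power-off requests outnumber power-on requests, so the device is never closed twice. A minimal single-threaded model of that behaviour with a plain counter (no atomics needed just to show the arithmetic):

#include <stdio.h>

/* Models atomic_dec_if_positive(): decrement only if the result stays >= 0,
 * and return the would-be new value (a negative return means "did nothing"). */
static int dec_if_positive(int *v)
{
	int next = *v - 1;

	if (next >= 0)
		*v = next;
	return next;
}

int main(void)
{
	int data_ready = 1;
	int ret;

	/* two "power off" requests against a single "power on" */
	ret = dec_if_positive(&data_ready);
	printf("first dec -> %d, counter %d\n", ret, data_ready);
	ret = dec_if_positive(&data_ready);
	printf("second dec -> %d, counter %d (no second close)\n", ret, data_ready);
	return 0;
}
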
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index 61bb9d4239ea..e98428df0d44 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -22,7 +22,7 @@
#include "ad5624r.h"
static int ad5624r_spi_write(struct spi_device *spi,
- u8 cmd, u8 addr, u16 val, u8 len)
+ u8 cmd, u8 addr, u16 val, u8 shift)
{
u32 data;
u8 msg[3];
@@ -35,7 +35,7 @@ static int ad5624r_spi_write(struct spi_device *spi,
* 14-, 12-bit input code followed by 0, 2, or 4 don't care bits,
* for the AD5664R, AD5644R, and AD5624R, respectively.
*/
- data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << (16 - len));
+ data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift);
msg[0] = data >> 16;
msg[1] = data >> 8;
msg[2] = data;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 17d4bb15be4d..65ce86837177 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -431,6 +431,23 @@ static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
return -EINVAL;
}
+static int inv_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ default:
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ return -EINVAL;
+}
static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
{
int result, i;
@@ -696,6 +713,7 @@ static const struct iio_info mpu_info = {
.driver_module = THIS_MODULE,
.read_raw = &inv_mpu6050_read_raw,
.write_raw = &inv_mpu6050_write_raw,
+ .write_raw_get_fmt = &inv_write_raw_get_fmt,
.attrs = &inv_attribute_group,
.validate_trigger = inv_mpu6050_validate_trigger,
};
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index e6198b7c9cbf..a5c59251ec0e 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -188,6 +188,7 @@ config SENSORS_LM3533
config LTR501
tristate "LTR-501ALS-01 light sensor"
depends on I2C
+ select REGMAP_I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
@@ -201,6 +202,7 @@ config LTR501
config STK3310
tristate "STK3310 ALS and proximity sensor"
depends on I2C
+ select REGMAP_I2C
help
Say yes here to get support for the Sensortek STK3310 ambient light
and proximity sensor. The STK3311 model is also supported by this
diff --git a/drivers/iio/light/cm3323.c b/drivers/iio/light/cm3323.c
index 869033e48a1f..a1d4905cc9d2 100644
--- a/drivers/iio/light/cm3323.c
+++ b/drivers/iio/light/cm3323.c
@@ -123,7 +123,7 @@ static int cm3323_set_it_bits(struct cm3323_data *data, int val, int val2)
for (i = 0; i < ARRAY_SIZE(cm3323_int_time); i++) {
if (val == cm3323_int_time[i].val &&
val2 == cm3323_int_time[i].val2) {
- reg_conf = data->reg_conf;
+ reg_conf = data->reg_conf & ~CM3323_CONF_IT_MASK;
reg_conf |= i << CM3323_CONF_IT_SHIFT;
ret = i2c_smbus_write_word_data(data->client,
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 1ef7d3773ab9..b5a0e66b5f28 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1302,7 +1302,7 @@ static int ltr501_init(struct ltr501_data *data)
if (ret < 0)
return ret;
- data->als_contr = ret | data->chip_info->als_mode_active;
+ data->als_contr = status | data->chip_info->als_mode_active;
ret = regmap_read(data->regmap, LTR501_PS_CONTR, &status);
if (ret < 0)
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index fee4297d7c8f..11a027adc204 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -43,7 +43,6 @@
#define STK3311_CHIP_ID_VAL 0x1D
#define STK3310_PSINT_EN 0x01
#define STK3310_PS_MAX_VAL 0xFFFF
-#define STK3310_THRESH_MAX 0xFFFF
#define STK3310_DRIVER_NAME "stk3310"
#define STK3310_REGMAP_NAME "stk3310_regmap"
@@ -84,15 +83,13 @@ static const struct reg_field stk3310_reg_field_flag_psint =
REG_FIELD(STK3310_REG_FLAG, 4, 4);
static const struct reg_field stk3310_reg_field_flag_nf =
REG_FIELD(STK3310_REG_FLAG, 0, 0);
-/*
- * Maximum PS values with regard to scale. Used to export the 'inverse'
- * PS value (high values for far objects, low values for near objects).
- */
+
+/* Estimate maximum proximity values with regard to measurement scale. */
static const int stk3310_ps_max[4] = {
- STK3310_PS_MAX_VAL / 64,
- STK3310_PS_MAX_VAL / 16,
- STK3310_PS_MAX_VAL / 4,
- STK3310_PS_MAX_VAL,
+ STK3310_PS_MAX_VAL / 640,
+ STK3310_PS_MAX_VAL / 160,
+ STK3310_PS_MAX_VAL / 40,
+ STK3310_PS_MAX_VAL / 10
};
static const int stk3310_scale_table[][2] = {
@@ -128,14 +125,14 @@ static const struct iio_event_spec stk3310_events[] = {
/* Proximity event */
{
.type = IIO_EV_TYPE_THRESH,
- .dir = IIO_EV_DIR_FALLING,
+ .dir = IIO_EV_DIR_RISING,
.mask_separate = BIT(IIO_EV_INFO_VALUE) |
BIT(IIO_EV_INFO_ENABLE),
},
/* Out-of-proximity event */
{
.type = IIO_EV_TYPE_THRESH,
- .dir = IIO_EV_DIR_RISING,
+ .dir = IIO_EV_DIR_FALLING,
.mask_separate = BIT(IIO_EV_INFO_VALUE) |
BIT(IIO_EV_INFO_ENABLE),
},
@@ -203,25 +200,18 @@ static int stk3310_read_event(struct iio_dev *indio_dev,
int *val, int *val2)
{
u8 reg;
- u16 buf;
+ __be16 buf;
int ret;
- unsigned int index;
struct stk3310_data *data = iio_priv(indio_dev);
if (info != IIO_EV_INFO_VALUE)
return -EINVAL;
- /*
- * Only proximity interrupts are implemented at the moment.
- * Since we're inverting proximity values, the sensor's 'high'
- * threshold will become our 'low' threshold, associated with
- * 'near' events. Similarly, the sensor's 'low' threshold will
- * be our 'high' threshold, associated with 'far' events.
- */
+ /* Only proximity interrupts are implemented at the moment. */
if (dir == IIO_EV_DIR_RISING)
- reg = STK3310_REG_THDL_PS;
- else if (dir == IIO_EV_DIR_FALLING)
reg = STK3310_REG_THDH_PS;
+ else if (dir == IIO_EV_DIR_FALLING)
+ reg = STK3310_REG_THDL_PS;
else
return -EINVAL;
@@ -232,8 +222,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev,
dev_err(&data->client->dev, "register read failed\n");
return ret;
}
- regmap_field_read(data->reg_ps_gain, &index);
- *val = swab16(stk3310_ps_max[index] - buf);
+ *val = be16_to_cpu(buf);
return IIO_VAL_INT;
}
@@ -246,7 +235,7 @@ static int stk3310_write_event(struct iio_dev *indio_dev,
int val, int val2)
{
u8 reg;
- u16 buf;
+ __be16 buf;
int ret;
unsigned int index;
struct stk3310_data *data = iio_priv(indio_dev);
@@ -257,13 +246,13 @@ static int stk3310_write_event(struct iio_dev *indio_dev,
return -EINVAL;
if (dir == IIO_EV_DIR_RISING)
- reg = STK3310_REG_THDL_PS;
- else if (dir == IIO_EV_DIR_FALLING)
reg = STK3310_REG_THDH_PS;
+ else if (dir == IIO_EV_DIR_FALLING)
+ reg = STK3310_REG_THDL_PS;
else
return -EINVAL;
- buf = swab16(stk3310_ps_max[index] - val);
+ buf = cpu_to_be16(val);
ret = regmap_bulk_write(data->regmap, reg, &buf, 2);
if (ret < 0)
dev_err(&client->dev, "failed to set PS threshold!\n");
@@ -312,7 +301,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
u8 reg;
- u16 buf;
+ __be16 buf;
int ret;
unsigned int index;
struct stk3310_data *data = iio_priv(indio_dev);
@@ -333,15 +322,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->lock);
return ret;
}
- *val = swab16(buf);
- if (chan->type == IIO_PROXIMITY) {
- /*
- * Invert the proximity data so we return low values
- * for close objects and high values for far ones.
- */
- regmap_field_read(data->reg_ps_gain, &index);
- *val = stk3310_ps_max[index] - *val;
- }
+ *val = be16_to_cpu(buf);
mutex_unlock(&data->lock);
return IIO_VAL_INT;
case IIO_CHAN_INFO_INT_TIME:
@@ -581,8 +562,8 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
}
event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1,
IIO_EV_TYPE_THRESH,
- (dir ? IIO_EV_DIR_RISING :
- IIO_EV_DIR_FALLING));
+ (dir ? IIO_EV_DIR_FALLING :
+ IIO_EV_DIR_RISING));
iio_push_event(indio_dev, event, data->timestamp);
/* Reset the interrupt flag */
@@ -627,13 +608,7 @@ static int stk3310_probe(struct i2c_client *client,
if (ret < 0)
return ret;
- ret = iio_device_register(indio_dev);
- if (ret < 0) {
- dev_err(&client->dev, "device_register failed\n");
- stk3310_set_state(data, STK3310_STATE_STANDBY);
- }
-
- if (client->irq <= 0)
+ if (client->irq < 0)
client->irq = stk3310_gpio_probe(client);
if (client->irq >= 0) {
@@ -648,6 +623,12 @@ static int stk3310_probe(struct i2c_client *client,
client->irq);
}
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "device_register failed\n");
+ stk3310_set_state(data, STK3310_STATE_STANDBY);
+ }
+
return ret;
}
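
The stk3310 changes replace ad-hoc swab16() calls with the sparse-checked __be16 type and be16_to_cpu()/cpu_to_be16() conversions, and drop the value inversion that used to be layered on top. A userspace illustration of the same big-endian register handling, using explicit byte shifts instead of the kernel helpers:

#include <stdint.h>
#include <stdio.h>

/* A 16-bit register read back from the chip, most significant byte first. */
static uint16_t be16_to_host(const uint8_t buf[2])
{
	return (uint16_t)((buf[0] << 8) | buf[1]);
}

static void host_to_be16(uint16_t val, uint8_t buf[2])
{
	buf[0] = val >> 8;
	buf[1] = val & 0xff;
}

int main(void)
{
	uint8_t raw[2] = { 0x12, 0x34 };   /* as returned by a bulk register read */
	uint8_t out[2];

	printf("proximity raw = 0x%04x\n", be16_to_host(raw));
	host_to_be16(0x0400, out);         /* threshold written back */
	printf("threshold bytes = %02x %02x\n", out[0], out[1]);
	return 0;
}
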
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index 71c2bde275aa..f8b1df018abe 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -185,7 +185,7 @@ static int tcs3414_write_raw(struct iio_dev *indio_dev,
if (val != 0)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(tcs3414_times); i++) {
- if (val == tcs3414_times[i] * 1000) {
+ if (val2 == tcs3414_times[i] * 1000) {
data->timing &= ~TCS3414_INTEG_MASK;
data->timing |= i;
return i2c_smbus_write_byte_data(
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index dcadfc4f0661..efb9350b0d76 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -90,6 +90,7 @@ config IIO_ST_MAGN_SPI_3AXIS
config BMC150_MAGN
tristate "Bosch BMC150 Magnetometer Driver"
depends on I2C
+ select REGMAP_I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index d4c178869991..1347a1f2e46f 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -706,11 +706,11 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
goto err_poweroff;
}
if (chip_id != BMC150_MAGN_CHIP_ID_VAL) {
- dev_err(&data->client->dev, "Invalid chip id 0x%x\n", ret);
+ dev_err(&data->client->dev, "Invalid chip id 0x%x\n", chip_id);
ret = -ENODEV;
goto err_poweroff;
}
- dev_dbg(&data->client->dev, "Chip id %x\n", ret);
+ dev_dbg(&data->client->dev, "Chip id %x\n", chip_id);
preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET];
ret = bmc150_magn_set_odr(data, preset.odr);
diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c
index 7a2ea71c659a..706ebfd6297f 100644
--- a/drivers/iio/magnetometer/mmc35240.c
+++ b/drivers/iio/magnetometer/mmc35240.c
@@ -84,10 +84,10 @@
#define MMC35240_OTP_START_ADDR 0x1B
enum mmc35240_resolution {
- MMC35240_16_BITS_SLOW = 0, /* 100 Hz */
- MMC35240_16_BITS_FAST, /* 200 Hz */
- MMC35240_14_BITS, /* 333 Hz */
- MMC35240_12_BITS, /* 666 Hz */
+ MMC35240_16_BITS_SLOW = 0, /* 7.92 ms */
+ MMC35240_16_BITS_FAST, /* 4.08 ms */
+ MMC35240_14_BITS, /* 2.16 ms */
+ MMC35240_12_BITS, /* 1.20 ms */
};
enum mmc35240_axis {
@@ -100,22 +100,22 @@ static const struct {
int sens[3]; /* sensitivity per X, Y, Z axis */
int nfo; /* null field output */
} mmc35240_props_table[] = {
- /* 16 bits, 100Hz ODR */
+ /* 16 bits, 125Hz ODR */
{
{1024, 1024, 1024},
32768,
},
- /* 16 bits, 200Hz ODR */
+ /* 16 bits, 250Hz ODR */
{
{1024, 1024, 770},
32768,
},
- /* 14 bits, 333Hz ODR */
+ /* 14 bits, 450Hz ODR */
{
{256, 256, 193},
8192,
},
- /* 12 bits, 666Hz ODR */
+ /* 12 bits, 800Hz ODR */
{
{64, 64, 48},
2048,
@@ -133,9 +133,15 @@ struct mmc35240_data {
int axis_scale[3];
};
-static const int mmc35240_samp_freq[] = {100, 200, 333, 666};
+static const struct {
+ int val;
+ int val2;
+} mmc35240_samp_freq[] = { {1, 500000},
+ {13, 0},
+ {25, 0},
+ {50, 0} };
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("100 200 333 666");
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1.5 13 25 50");
#define MMC35240_CHANNEL(_axis) { \
.type = IIO_MAGN, \
@@ -168,7 +174,8 @@ static int mmc35240_get_samp_freq_index(struct mmc35240_data *data,
int i;
for (i = 0; i < ARRAY_SIZE(mmc35240_samp_freq); i++)
- if (mmc35240_samp_freq[i] == val)
+ if (mmc35240_samp_freq[i].val == val &&
+ mmc35240_samp_freq[i].val2 == val2)
return i;
return -EINVAL;
}
@@ -195,8 +202,8 @@ static int mmc35240_hw_set(struct mmc35240_data *data, bool set)
coil_bit = MMC35240_CTRL0_RESET_BIT;
return regmap_update_bits(data->regmap, MMC35240_REG_CTRL0,
- MMC35240_CTRL0_REFILL_BIT,
- coil_bit);
+ coil_bit, coil_bit);
+
}
static int mmc35240_init(struct mmc35240_data *data)
@@ -215,14 +222,15 @@ static int mmc35240_init(struct mmc35240_data *data)
/*
* make sure we restore sensor characteristics, by doing
- * a RESET/SET sequence
+ * a SET/RESET sequence, the axis polarity being naturally
+ * aligned after RESET
*/
- ret = mmc35240_hw_set(data, false);
+ ret = mmc35240_hw_set(data, true);
if (ret < 0)
return ret;
usleep_range(MMC53240_WAIT_SET_RESET, MMC53240_WAIT_SET_RESET + 1);
- ret = mmc35240_hw_set(data, true);
+ ret = mmc35240_hw_set(data, false);
if (ret < 0)
return ret;
@@ -378,9 +386,9 @@ static int mmc35240_read_raw(struct iio_dev *indio_dev,
if (i < 0 || i >= ARRAY_SIZE(mmc35240_samp_freq))
return -EINVAL;
- *val = mmc35240_samp_freq[i];
- *val2 = 0;
- return IIO_VAL_INT;
+ *val = mmc35240_samp_freq[i].val;
+ *val2 = mmc35240_samp_freq[i].val2;
+ return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
@@ -496,6 +504,7 @@ static int mmc35240_probe(struct i2c_client *client,
}
data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
data->client = client;
data->regmap = regmap;
data->res = MMC35240_16_BITS_SLOW;
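
The mmc35240 change stores each sampling frequency as an integer/micro pair so that fractional rates such as 1.5 Hz can be reported via IIO_VAL_INT_PLUS_MICRO instead of an integer-only value. A small sketch of looking up such a table (the frequencies are copied from the hunk; the struct name is illustrative):

#include <stdio.h>

struct freq { int val; int val2; };   /* whole Hz plus microhertz fraction */

static const struct freq samp_freq[] = {
	{ 1, 500000 }, { 13, 0 }, { 25, 0 }, { 50, 0 },
};

static int freq_index(int val, int val2)
{
	for (unsigned int i = 0; i < sizeof(samp_freq) / sizeof(samp_freq[0]); i++)
		if (samp_freq[i].val == val && samp_freq[i].val2 == val2)
			return (int)i;
	return -1;
}

int main(void)
{
	printf("1.5 Hz -> index %d\n", freq_index(1, 500000));
	printf("100 Hz -> index %d (not supported)\n", freq_index(100, 0));
	return 0;
}
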
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 2042e375f835..3d756bd8c703 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -80,6 +80,7 @@
#define SX9500_COMPSTAT_MASK GENMASK(3, 0)
#define SX9500_NUM_CHANNELS 4
+#define SX9500_CHAN_MASK GENMASK(SX9500_NUM_CHANNELS - 1, 0)
struct sx9500_data {
struct mutex mutex;
@@ -281,7 +282,7 @@ static int sx9500_read_prox_data(struct sx9500_data *data,
if (ret < 0)
return ret;
- *val = 32767 - (s16)be16_to_cpu(regval);
+ *val = be16_to_cpu(regval);
return IIO_VAL_INT;
}
@@ -329,27 +330,29 @@ static int sx9500_read_proximity(struct sx9500_data *data,
else
ret = sx9500_wait_for_sample(data);
- if (ret < 0)
- return ret;
-
mutex_lock(&data->mutex);
- ret = sx9500_read_prox_data(data, chan, val);
if (ret < 0)
- goto out;
+ goto out_dec_data_rdy;
- ret = sx9500_dec_chan_users(data, chan->channel);
+ ret = sx9500_read_prox_data(data, chan, val);
if (ret < 0)
- goto out;
+ goto out_dec_data_rdy;
ret = sx9500_dec_data_rdy_users(data);
if (ret < 0)
+ goto out_dec_chan;
+
+ ret = sx9500_dec_chan_users(data, chan->channel);
+ if (ret < 0)
goto out;
ret = IIO_VAL_INT;
goto out;
+out_dec_data_rdy:
+ sx9500_dec_data_rdy_users(data);
out_dec_chan:
sx9500_dec_chan_users(data, chan->channel);
out:
@@ -679,7 +682,7 @@ out:
static int sx9500_buffer_preenable(struct iio_dev *indio_dev)
{
struct sx9500_data *data = iio_priv(indio_dev);
- int ret, i;
+ int ret = 0, i;
mutex_lock(&data->mutex);
@@ -703,7 +706,7 @@ static int sx9500_buffer_preenable(struct iio_dev *indio_dev)
static int sx9500_buffer_predisable(struct iio_dev *indio_dev)
{
struct sx9500_data *data = iio_priv(indio_dev);
- int ret, i;
+ int ret = 0, i;
iio_triggered_buffer_predisable(indio_dev);
@@ -800,8 +803,7 @@ static int sx9500_init_compensation(struct iio_dev *indio_dev)
unsigned int val;
ret = regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0,
- GENMASK(SX9500_NUM_CHANNELS, 0),
- GENMASK(SX9500_NUM_CHANNELS, 0));
+ SX9500_CHAN_MASK, SX9500_CHAN_MASK);
if (ret < 0)
return ret;
@@ -821,7 +823,7 @@ static int sx9500_init_compensation(struct iio_dev *indio_dev)
out:
regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0,
- GENMASK(SX9500_NUM_CHANNELS, 0), 0);
+ SX9500_CHAN_MASK, 0);
return ret;
}
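
Defining SX9500_CHAN_MASK as GENMASK(SX9500_NUM_CHANNELS - 1, 0) fixes an off-by-one: GENMASK(h, l) spans bits h down to l inclusive, so GENMASK(4, 0) is five bits wide while the hardware has four channels. A quick check of the arithmetic (the macro below mirrors the kernel definition for 32-bit values):

#include <stdio.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))
#define NUM_CHANNELS  4

int main(void)
{
	printf("GENMASK(%d, 0)     = 0x%02x (one bit too many)\n",
	       NUM_CHANNELS, GENMASK(NUM_CHANNELS, 0));
	printf("GENMASK(%d - 1, 0) = 0x%02x (exactly %d channel bits)\n",
	       NUM_CHANNELS, GENMASK(NUM_CHANNELS - 1, 0), NUM_CHANNELS);
	return 0;
}
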
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index cb2e8ad8bfdc..7a2b639eaa96 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -204,7 +204,7 @@ static int mlx90614_read_raw(struct iio_dev *indio_dev,
*val = ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
- *val = 13657;
+ *val = -13657;
*val2 = 500000;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SCALE:
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index fcc49f89b946..8f21f32f9739 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -132,6 +132,9 @@ static int tmp006_write_raw(struct iio_dev *indio_dev,
struct tmp006_data *data = iio_priv(indio_dev);
int i;
+ if (mask != IIO_CHAN_INFO_SAMP_FREQ)
+ return -EINVAL;
+
for (i = 0; i < ARRAY_SIZE(tmp006_freqs); i++)
if ((val == tmp006_freqs[i][0]) &&
(val2 == tmp006_freqs[i][1])) {
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index c7dcfe4ca5f1..0429040304fd 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -88,7 +88,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
struct ib_ah *ah;
struct ib_mad_send_wr_private *mad_send_wr;
- if (device->node_type == RDMA_NODE_IB_SWITCH)
+ if (rdma_cap_ib_switch(device))
port_priv = ib_get_agent_port(device, 0);
else
port_priv = ib_get_agent_port(device, port_num);
@@ -122,7 +122,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
memcpy(send_buf->mad, mad_hdr, resp_mad_len);
send_buf->ah = ah;
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
+ if (rdma_cap_ib_switch(device)) {
mad_send_wr = container_of(send_buf,
struct ib_mad_send_wr_private,
send_buf);
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index dbddddd6fb5d..3a972ebf3c0d 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -169,6 +169,7 @@ struct cm_device {
struct ib_device *ib_device;
struct device *device;
u8 ack_delay;
+ int going_down;
struct cm_port *port[0];
};
@@ -805,6 +806,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
int wait_time;
unsigned long flags;
+ struct cm_device *cm_dev;
+
+ cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
+ if (!cm_dev)
+ return;
spin_lock_irqsave(&cm.lock, flags);
cm_cleanup_timewait(cm_id_priv->timewait_info);
@@ -818,8 +824,14 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
*/
cm_id_priv->id.state = IB_CM_TIMEWAIT;
wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
- queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
- msecs_to_jiffies(wait_time));
+
+ /* Check if the device started its remove_one */
+ spin_lock_irq(&cm.lock);
+ if (!cm_dev->going_down)
+ queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
+ msecs_to_jiffies(wait_time));
+ spin_unlock_irq(&cm.lock);
+
cm_id_priv->timewait_info = NULL;
}
@@ -3305,6 +3317,11 @@ static int cm_establish(struct ib_cm_id *cm_id)
struct cm_work *work;
unsigned long flags;
int ret = 0;
+ struct cm_device *cm_dev;
+
+ cm_dev = ib_get_client_data(cm_id->device, &cm_client);
+ if (!cm_dev)
+ return -ENODEV;
work = kmalloc(sizeof *work, GFP_ATOMIC);
if (!work)
@@ -3343,7 +3360,17 @@ static int cm_establish(struct ib_cm_id *cm_id)
work->remote_id = cm_id->remote_id;
work->mad_recv_wc = NULL;
work->cm_event.event = IB_CM_USER_ESTABLISHED;
- queue_delayed_work(cm.wq, &work->work, 0);
+
+ /* Check if the device started its remove_one */
+ spin_lock_irq(&cm.lock);
+ if (!cm_dev->going_down) {
+ queue_delayed_work(cm.wq, &work->work, 0);
+ } else {
+ kfree(work);
+ ret = -ENODEV;
+ }
+ spin_unlock_irq(&cm.lock);
+
out:
return ret;
}
@@ -3394,6 +3421,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
enum ib_cm_event_type event;
u16 attr_id;
int paths = 0;
+ int going_down = 0;
switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
case CM_REQ_ATTR_ID:
@@ -3452,7 +3480,19 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
work->cm_event.event = event;
work->mad_recv_wc = mad_recv_wc;
work->port = port;
- queue_delayed_work(cm.wq, &work->work, 0);
+
+ /* Check if the device started its remove_one */
+ spin_lock_irq(&cm.lock);
+ if (!port->cm_dev->going_down)
+ queue_delayed_work(cm.wq, &work->work, 0);
+ else
+ going_down = 1;
+ spin_unlock_irq(&cm.lock);
+
+ if (going_down) {
+ kfree(work);
+ ib_free_recv_mad(mad_recv_wc);
+ }
}
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
@@ -3771,7 +3811,7 @@ static void cm_add_one(struct ib_device *ib_device)
cm_dev->ib_device = ib_device;
cm_get_ack_delay(cm_dev);
-
+ cm_dev->going_down = 0;
cm_dev->device = device_create(&cm_class, &ib_device->dev,
MKDEV(0, 0), NULL,
"%s", ib_device->name);
@@ -3864,14 +3904,23 @@ static void cm_remove_one(struct ib_device *ib_device)
list_del(&cm_dev->list);
write_unlock_irqrestore(&cm.device_lock, flags);
+ spin_lock_irq(&cm.lock);
+ cm_dev->going_down = 1;
+ spin_unlock_irq(&cm.lock);
+
for (i = 1; i <= ib_device->phys_port_cnt; i++) {
if (!rdma_cap_ib_cm(ib_device, i))
continue;
port = cm_dev->port[i-1];
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
- ib_unregister_mad_agent(port->mad_agent);
+		/*
+		 * We flush the queue here after setting going_down; this
+		 * ensures that no new work will be queued in the recv handler,
+		 * after which we can call unregister_mad_agent.
+		 */
flush_workqueue(cm.wq);
+ ib_unregister_mad_agent(port->mad_agent);
cm_remove_port_fs(port);
}
device_unregister(cm_dev->device);
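
The cm.c changes all follow one pattern: remove_one sets a going_down flag under cm.lock, and every path that would queue deferred work re-checks that flag under the same lock before calling queue_delayed_work(), freeing the work item instead if teardown has started. A hedged, userspace-flavoured sketch of that guard (a pthread mutex stands in for spin_lock_irq on cm.lock; the names and error codes are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int going_down;

struct work { int payload; };

static void queue_work_item(struct work *w) { printf("queued %d\n", w->payload); }

/* Only queue new work while the device is not being removed;
 * on failure the work item is freed here and an error returned. */
static int queue_if_alive(struct work *w)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (!going_down)
		queue_work_item(w);
	else
		ret = -19;            /* -ENODEV */
	pthread_mutex_unlock(&lock);

	if (ret)
		free(w);
	return ret;
}

int main(void)
{
	struct work *w = malloc(sizeof(*w));

	w->payload = 1;
	queue_if_alive(w);

	pthread_mutex_lock(&lock);
	going_down = 1;               /* what remove_one does */
	pthread_mutex_unlock(&lock);

	w = malloc(sizeof(*w));
	w->payload = 2;
	printf("after going_down: %d\n", queue_if_alive(w));
	return 0;
}
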
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index e6ffa2e66c1a..22a3abee2a54 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -67,7 +67,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
err_str = "Invalid port mapper client";
goto pid_query_error;
}
- if (iwpm_registered_client(nl_client))
+ if (iwpm_check_registration(nl_client, IWPM_REG_VALID) ||
+ iwpm_user_pid == IWPM_PID_UNAVAILABLE)
return 0;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REG_PID, &nlh, nl_client);
if (!skb) {
@@ -106,7 +107,6 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */
- iwpm_set_registered(nl_client, 1);
iwpm_user_pid = IWPM_PID_UNAVAILABLE;
err_str = "Unable to send a nlmsg";
goto pid_query_error;
@@ -144,12 +144,12 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
err_str = "Invalid port mapper client";
goto add_mapping_error;
}
- if (!iwpm_registered_client(nl_client)) {
+ if (!iwpm_valid_pid())
+ return 0;
+ if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
err_str = "Unregistered port mapper client";
goto add_mapping_error;
}
- if (!iwpm_valid_pid())
- return 0;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_ADD_MAPPING, &nlh, nl_client);
if (!skb) {
err_str = "Unable to create a nlmsg";
@@ -214,12 +214,12 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
err_str = "Invalid port mapper client";
goto query_mapping_error;
}
- if (!iwpm_registered_client(nl_client)) {
+ if (!iwpm_valid_pid())
+ return 0;
+ if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
err_str = "Unregistered port mapper client";
goto query_mapping_error;
}
- if (!iwpm_valid_pid())
- return 0;
ret = -ENOMEM;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_QUERY_MAPPING, &nlh, nl_client);
if (!skb) {
@@ -288,12 +288,12 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
err_str = "Invalid port mapper client";
goto remove_mapping_error;
}
- if (!iwpm_registered_client(nl_client)) {
+ if (!iwpm_valid_pid())
+ return 0;
+ if (iwpm_check_registration(nl_client, IWPM_REG_UNDEF)) {
err_str = "Unregistered port mapper client";
goto remove_mapping_error;
}
- if (!iwpm_valid_pid())
- return 0;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REMOVE_MAPPING, &nlh, nl_client);
if (!skb) {
ret = -ENOMEM;
@@ -388,7 +388,7 @@ int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
__func__, iwpm_user_pid);
if (iwpm_valid_client(nl_client))
- iwpm_set_registered(nl_client, 1);
+ iwpm_set_registration(nl_client, IWPM_REG_VALID);
register_pid_response_exit:
nlmsg_request->request_done = 1;
/* always for found nlmsg_request */
@@ -644,7 +644,6 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nlattr *nltb[IWPM_NLA_MAPINFO_REQ_MAX];
const char *msg_type = "Mapping Info response";
- int iwpm_pid;
u8 nl_client;
char *iwpm_name;
u16 iwpm_version;
@@ -669,14 +668,14 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
__func__, nl_client);
return ret;
}
- iwpm_set_registered(nl_client, 0);
+ iwpm_set_registration(nl_client, IWPM_REG_INCOMPL);
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+ iwpm_user_pid = cb->nlh->nlmsg_pid;
if (!iwpm_mapinfo_available())
return 0;
- iwpm_pid = cb->nlh->nlmsg_pid;
pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
- __func__, iwpm_pid);
- ret = iwpm_send_mapinfo(nl_client, iwpm_pid);
+ __func__, iwpm_user_pid);
+ ret = iwpm_send_mapinfo(nl_client, iwpm_user_pid);
return ret;
}
EXPORT_SYMBOL(iwpm_mapping_info_cb);
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index a626795bf9c7..5fb089e91353 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -78,6 +78,7 @@ init_exit:
mutex_unlock(&iwpm_admin_lock);
if (!ret) {
iwpm_set_valid(nl_client, 1);
+ iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
pr_debug("%s: Mapinfo and reminfo tables are created\n",
__func__);
}
@@ -106,6 +107,7 @@ int iwpm_exit(u8 nl_client)
}
mutex_unlock(&iwpm_admin_lock);
iwpm_set_valid(nl_client, 0);
+ iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
return 0;
}
EXPORT_SYMBOL(iwpm_exit);
@@ -397,17 +399,23 @@ void iwpm_set_valid(u8 nl_client, int valid)
}
/* valid client */
-int iwpm_registered_client(u8 nl_client)
+u32 iwpm_get_registration(u8 nl_client)
{
return iwpm_admin.reg_list[nl_client];
}
/* valid client */
-void iwpm_set_registered(u8 nl_client, int reg)
+void iwpm_set_registration(u8 nl_client, u32 reg)
{
iwpm_admin.reg_list[nl_client] = reg;
}
+/* valid client */
+u32 iwpm_check_registration(u8 nl_client, u32 reg)
+{
+ return (iwpm_get_registration(nl_client) & reg);
+}
+
int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
struct sockaddr_storage *b_sockaddr)
{
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index ee2d9ff095be..b7b9e194ce81 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -58,6 +58,10 @@
#define IWPM_PID_UNDEFINED -1
#define IWPM_PID_UNAVAILABLE -2
+#define IWPM_REG_UNDEF 0x01
+#define IWPM_REG_VALID 0x02
+#define IWPM_REG_INCOMPL 0x04
+
struct iwpm_nlmsg_request {
struct list_head inprocess_list;
__u32 nlmsg_seq;
@@ -88,7 +92,7 @@ struct iwpm_admin_data {
atomic_t refcount;
atomic_t nlmsg_seq;
int client_list[RDMA_NL_NUM_CLIENTS];
- int reg_list[RDMA_NL_NUM_CLIENTS];
+ u32 reg_list[RDMA_NL_NUM_CLIENTS];
};
/**
@@ -159,19 +163,31 @@ int iwpm_valid_client(u8 nl_client);
void iwpm_set_valid(u8 nl_client, int valid);
/**
- * iwpm_registered_client - Check if the port mapper client is registered
+ * iwpm_check_registration - Check if the client registration
+ * matches the given one
* @nl_client: The index of the netlink client
+ * @reg: The given registration type to compare with
*
* Call iwpm_register_pid() to register a client
+ * Returns true if the client registration matches reg,
+ * otherwise returns false
+ */
+u32 iwpm_check_registration(u8 nl_client, u32 reg);
+
+/**
+ * iwpm_set_registration - Set the client registration
+ * @nl_client: The index of the netlink client
+ * @reg: Registration type to set
*/
-int iwpm_registered_client(u8 nl_client);
+void iwpm_set_registration(u8 nl_client, u32 reg);
/**
- * iwpm_set_registered - Set the port mapper client to registered or not
+ * iwpm_get_registration
* @nl_client: The index of the netlink client
- * @reg: 1 if registered or 0 if not
+ *
+ * Returns the client registration type
*/
-void iwpm_set_registered(u8 nl_client, int reg);
+u32 iwpm_get_registration(u8 nl_client);
/**
* iwpm_send_mapinfo - Send local and mapped IPv4/IPv6 address info of
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index a4b1466c1bf6..786fc51bf04b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -769,7 +769,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
mad_agent_priv->qp_info->port_priv->port_num);
- if (device->node_type == RDMA_NODE_IB_SWITCH &&
+ if (rdma_cap_ib_switch(device) &&
smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
port_num = send_wr->wr.ud.port_num;
else
@@ -787,14 +787,15 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
if ((opa_get_smp_direction(opa_smp)
? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
OPA_LID_PERMISSIVE &&
- opa_smi_handle_dr_smp_send(opa_smp, device->node_type,
+ opa_smi_handle_dr_smp_send(opa_smp,
+ rdma_cap_ib_switch(device),
port_num) == IB_SMI_DISCARD) {
ret = -EINVAL;
dev_err(&device->dev, "OPA Invalid directed route\n");
goto out;
}
opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
- if (opa_drslid != OPA_LID_PERMISSIVE &&
+ if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
opa_drslid & 0xffff0000) {
ret = -EINVAL;
dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
@@ -810,7 +811,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
} else {
if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
IB_LID_PERMISSIVE &&
- smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
+ smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
IB_SMI_DISCARD) {
ret = -EINVAL;
dev_err(&device->dev, "Invalid directed route\n");
@@ -2030,7 +2031,7 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
struct ib_smp *smp = (struct ib_smp *)recv->mad;
if (smi_handle_dr_smp_recv(smp,
- port_priv->device->node_type,
+ rdma_cap_ib_switch(port_priv->device),
port_num,
port_priv->device->phys_port_cnt) ==
IB_SMI_DISCARD)
@@ -2042,13 +2043,13 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
if (retsmi == IB_SMI_SEND) { /* don't forward */
if (smi_handle_dr_smp_send(smp,
- port_priv->device->node_type,
+ rdma_cap_ib_switch(port_priv->device),
port_num) == IB_SMI_DISCARD)
return IB_SMI_DISCARD;
if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
return IB_SMI_DISCARD;
- } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
+ } else if (rdma_cap_ib_switch(port_priv->device)) {
/* forward case for switches */
memcpy(response, recv, mad_priv_size(response));
response->header.recv_wc.wc = &response->header.wc;
@@ -2115,7 +2116,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
struct opa_smp *smp = (struct opa_smp *)recv->mad;
if (opa_smi_handle_dr_smp_recv(smp,
- port_priv->device->node_type,
+ rdma_cap_ib_switch(port_priv->device),
port_num,
port_priv->device->phys_port_cnt) ==
IB_SMI_DISCARD)
@@ -2127,7 +2128,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
if (retsmi == IB_SMI_SEND) { /* don't forward */
if (opa_smi_handle_dr_smp_send(smp,
- port_priv->device->node_type,
+ rdma_cap_ib_switch(port_priv->device),
port_num) == IB_SMI_DISCARD)
return IB_SMI_DISCARD;
@@ -2135,7 +2136,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
IB_SMI_DISCARD)
return IB_SMI_DISCARD;
- } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
+ } else if (rdma_cap_ib_switch(port_priv->device)) {
/* forward case for switches */
memcpy(response, recv, mad_priv_size(response));
response->header.recv_wc.wc = &response->header.wc;
@@ -2235,7 +2236,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
goto out;
}
- if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
+ if (rdma_cap_ib_switch(port_priv->device))
port_num = wc->port_num;
else
port_num = port_priv->port_num;
@@ -3297,17 +3298,11 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
static void ib_mad_init_device(struct ib_device *device)
{
- int start, end, i;
+ int start, i;
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
- start = 0;
- end = 0;
- } else {
- start = 1;
- end = device->phys_port_cnt;
- }
+ start = rdma_start_port(device);
- for (i = start; i <= end; i++) {
+ for (i = start; i <= rdma_end_port(device); i++) {
if (!rdma_cap_ib_mad(device, i))
continue;
@@ -3342,17 +3337,9 @@ error:
static void ib_mad_remove_device(struct ib_device *device)
{
- int start, end, i;
-
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
- start = 0;
- end = 0;
- } else {
- start = 1;
- end = device->phys_port_cnt;
- }
+ int i;
- for (i = start; i <= end; i++) {
+ for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
if (!rdma_cap_ib_mad(device, i))
continue;
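
The mad.c hunks above converge on one idiom: resolve the switch capability via rdma_cap_ib_switch() and walk ports with rdma_start_port()/rdma_end_port() instead of open-coding the "switches use port 0, HCAs use 1..phys_port_cnt" special case. A minimal sketch of that port-walk pattern, built only from helpers visible in the hunks (the function name walk_mad_ports is hypothetical, for illustration, not part of the patch):

	static void walk_mad_ports(struct ib_device *device)
	{
		int i;

		/* covers both switches (port 0 only) and HCAs (1..phys_port_cnt) */
		for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
			if (!rdma_cap_ib_mad(device, i))
				continue;
			/* per-port MAD setup or teardown would go here */
		}
	}

The same pattern is applied below in multicast.c and sa_query.c, where the per-device start/end ports are simply taken from the same two helpers.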
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 1244f02a5c6d..2cb865c7ce7a 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -812,12 +812,8 @@ static void mcast_add_one(struct ib_device *device)
if (!dev)
return;
- if (device->node_type == RDMA_NODE_IB_SWITCH)
- dev->start_port = dev->end_port = 0;
- else {
- dev->start_port = 1;
- dev->end_port = device->phys_port_cnt;
- }
+ dev->start_port = rdma_start_port(device);
+ dev->end_port = rdma_end_port(device);
for (i = 0; i <= dev->end_port - dev->start_port; i++) {
if (!rdma_cap_ib_mcast(device, dev->start_port + i))
diff --git a/drivers/infiniband/core/opa_smi.h b/drivers/infiniband/core/opa_smi.h
index 62d91bfa4cb7..3bfab3505a29 100644
--- a/drivers/infiniband/core/opa_smi.h
+++ b/drivers/infiniband/core/opa_smi.h
@@ -39,12 +39,12 @@
#include "smi.h"
-enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type,
+enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
int port_num, int phys_port_cnt);
int opa_smi_get_fwd_port(struct opa_smp *smp);
extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp);
extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
- u8 node_type, int port_num);
+ bool is_switch, int port_num);
/*
* Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 0fae85062a65..ca919f429666 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1156,12 +1156,8 @@ static void ib_sa_add_one(struct ib_device *device)
int s, e, i;
int count = 0;
- if (device->node_type == RDMA_NODE_IB_SWITCH)
- s = e = 0;
- else {
- s = 1;
- e = device->phys_port_cnt;
- }
+ s = rdma_start_port(device);
+ e = rdma_end_port(device);
sa_dev = kzalloc(sizeof *sa_dev +
(e - s + 1) * sizeof (struct ib_sa_port),
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index 368a561d1a5d..f19b23817c2b 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -41,7 +41,7 @@
#include "smi.h"
#include "opa_smi.h"
-static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
+static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num,
u8 *hop_ptr, u8 hop_cnt,
const u8 *initial_path,
const u8 *return_path,
@@ -64,7 +64,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
/* C14-9:2 */
if (*hop_ptr && *hop_ptr < hop_cnt) {
- if (node_type != RDMA_NODE_IB_SWITCH)
+ if (!is_switch)
return IB_SMI_DISCARD;
/* return_path set when received */
@@ -77,7 +77,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
if (*hop_ptr == hop_cnt) {
/* return_path set when received */
(*hop_ptr)++;
- return (node_type == RDMA_NODE_IB_SWITCH ||
+ return (is_switch ||
dr_dlid_is_permissive ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
}
@@ -96,7 +96,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
/* C14-13:2 */
if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
- if (node_type != RDMA_NODE_IB_SWITCH)
+ if (!is_switch)
return IB_SMI_DISCARD;
(*hop_ptr)--;
@@ -108,7 +108,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
if (*hop_ptr == 1) {
(*hop_ptr)--;
/* C14-13:3 -- SMPs destined for SM shouldn't be here */
- return (node_type == RDMA_NODE_IB_SWITCH ||
+ return (is_switch ||
dr_slid_is_permissive ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
}
@@ -127,9 +127,9 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
* Return IB_SMI_DISCARD if the SMP should be discarded
*/
enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
- u8 node_type, int port_num)
+ bool is_switch, int port_num)
{
- return __smi_handle_dr_smp_send(node_type, port_num,
+ return __smi_handle_dr_smp_send(is_switch, port_num,
&smp->hop_ptr, smp->hop_cnt,
smp->initial_path,
smp->return_path,
@@ -139,9 +139,9 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
}
enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
- u8 node_type, int port_num)
+ bool is_switch, int port_num)
{
- return __smi_handle_dr_smp_send(node_type, port_num,
+ return __smi_handle_dr_smp_send(is_switch, port_num,
&smp->hop_ptr, smp->hop_cnt,
smp->route.dr.initial_path,
smp->route.dr.return_path,
@@ -152,7 +152,7 @@ enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
OPA_LID_PERMISSIVE);
}
-static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
+static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num,
int phys_port_cnt,
u8 *hop_ptr, u8 hop_cnt,
const u8 *initial_path,
@@ -173,7 +173,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
/* C14-9:2 -- intermediate hop */
if (*hop_ptr && *hop_ptr < hop_cnt) {
- if (node_type != RDMA_NODE_IB_SWITCH)
+ if (!is_switch)
return IB_SMI_DISCARD;
return_path[*hop_ptr] = port_num;
@@ -188,7 +188,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
return_path[*hop_ptr] = port_num;
/* hop_ptr updated when sending */
- return (node_type == RDMA_NODE_IB_SWITCH ||
+ return (is_switch ||
dr_dlid_is_permissive ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
}
@@ -208,7 +208,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
/* C14-13:2 */
if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
- if (node_type != RDMA_NODE_IB_SWITCH)
+ if (!is_switch)
return IB_SMI_DISCARD;
/* hop_ptr updated when sending */
@@ -224,8 +224,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
return IB_SMI_HANDLE;
}
/* hop_ptr updated when sending */
- return (node_type == RDMA_NODE_IB_SWITCH ?
- IB_SMI_HANDLE : IB_SMI_DISCARD);
+ return (is_switch ? IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-13:4 -- hop_ptr = 0 -> give to SM */
@@ -238,10 +237,10 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
* Adjust information for a received SMP
* Return IB_SMI_DISCARD if the SMP should be dropped
*/
-enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
+enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
int port_num, int phys_port_cnt)
{
- return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt,
+ return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
&smp->hop_ptr, smp->hop_cnt,
smp->initial_path,
smp->return_path,
@@ -254,10 +253,10 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
* Adjust information for a received SMP
* Return IB_SMI_DISCARD if the SMP should be dropped
*/
-enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type,
+enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
int port_num, int phys_port_cnt)
{
- return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt,
+ return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
&smp->hop_ptr, smp->hop_cnt,
smp->route.dr.initial_path,
smp->route.dr.return_path,
diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h
index aff96bac49b4..33c91c8a16e9 100644
--- a/drivers/infiniband/core/smi.h
+++ b/drivers/infiniband/core/smi.h
@@ -51,12 +51,12 @@ enum smi_forward_action {
IB_SMI_FORWARD /* SMP should be forwarded (for switches only) */
};
-enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
+enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
int port_num, int phys_port_cnt);
int smi_get_fwd_port(struct ib_smp *smp);
extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp);
extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
- u8 node_type, int port_num);
+ bool is_switch, int port_num);
/*
* Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
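
With the prototypes above taking a plain bool, callers resolve the switch capability once and pass it through. An illustrative call-site sketch (not part of the patch), mirroring the mad.c changes earlier in the series:

	if (smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device),
				   port_num) == IB_SMI_DISCARD)
		return IB_SMI_DISCARD;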
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index ed6b6c85c334..0b84a9cdfe5b 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -870,7 +870,7 @@ int ib_device_register_sysfs(struct ib_device *device,
goto err_put;
}
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
+ if (rdma_cap_ib_switch(device)) {
ret = add_port(device, 0, port_callback);
if (ret)
goto err_put;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 62c24b1452b8..009481073644 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1193,6 +1193,7 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
return 0;
}
+static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
static void ib_ucm_release_dev(struct device *dev)
{
struct ib_ucm_device *ucm_dev;
@@ -1202,7 +1203,7 @@ static void ib_ucm_release_dev(struct device *dev)
if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
clear_bit(ucm_dev->devnum, dev_map);
else
- clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, dev_map);
+ clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, overflow_map);
kfree(ucm_dev);
}
@@ -1226,7 +1227,6 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
static dev_t overflow_maj;
-static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
static int find_overflow_devnum(void)
{
int ret;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ad45469f7582..29b21213ea75 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1354,10 +1354,10 @@ static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
/* Acquire mutex's based on pointer comparison to prevent deadlock. */
if (file1 < file2) {
mutex_lock(&file1->mut);
- mutex_lock(&file2->mut);
+ mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&file2->mut);
- mutex_lock(&file1->mut);
+ mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
}
}
@@ -1616,6 +1616,7 @@ static void __exit ucma_cleanup(void)
device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
misc_deregister(&ucma_misc);
idr_destroy(&ctx_idr);
+ idr_destroy(&multicast_idr);
}
module_init(ucma_init);
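
The ucma_lock_files() hunk keeps the pointer-ordered acquisition but annotates the second mutex with mutex_lock_nested(), so lockdep does not flag the second lock of the same class as a recursive deadlock. A minimal standalone sketch of the pattern (lock_pair is a hypothetical name used here only for illustration):

	static void lock_pair(struct mutex *a, struct mutex *b)
	{
		/* consistent pointer order prevents ABBA deadlock between callers */
		if (a < b) {
			mutex_lock(a);
			mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
		} else {
			mutex_lock(b);
			mutex_lock_nested(a, SINGLE_DEPTH_NESTING);
		}
	}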
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index b1b73232f217..bbbe0184e592 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -736,6 +736,10 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
/*
* T3 only supports 32 bits of size.
*/
+ if (sizeof(phys_addr_t) > 4) {
+ pr_warn_once(MOD "Cannot support dma_mrs on this platform.\n");
+ return ERR_PTR(-ENOTSUPP);
+ }
bl.size = 0xffffffff;
bl.addr = 0;
kva = 0;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index c7aab48f07cd..92d518382a9f 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -814,7 +814,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
printk(KERN_ERR MOD
"Unexpected cqe_status 0x%x for QPID=0x%0x\n",
CQE_STATUS(&cqe), CQE_QPID(&cqe));
- ret = -EINVAL;
+ wc->status = IB_WC_FATAL_ERR;
}
}
out:
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 12b5bc23832b..376b031c2c7f 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -226,8 +226,9 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
return IB_MAD_RESULT_FAILURE;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 2d7e503d13cb..871dbe56216a 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -31,6 +31,8 @@
* SOFTWARE.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
@@ -399,8 +401,8 @@ static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
u32 bar0 = 0, bar1 = 0;
#ifdef CONFIG_X86_64
- if (WARN(pat_enabled(),
- "ipath needs PAT disabled, boot with nopat kernel parameter\n")) {
+ if (pat_enabled()) {
+ pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n");
ret = -ENODEV;
goto bail;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 948188e37f95..ad3a926ab3c5 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -1499,8 +1499,9 @@ int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 48253b839a6f..30ba49c4a98c 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -2044,9 +2044,9 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
spin_lock_init(&idev->qp_table.lock);
spin_lock_init(&idev->lk_table.lock);
- idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
+ idev->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
/* Set the prefix to the default value (see ch. 4.1.1) */
- idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL);
+ idev->gid_prefix = cpu_to_be64(0xfe80000000000000ULL);
ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
if (ret)
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 85a50df2f203..68b3dfa922bf 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -860,21 +860,31 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct mlx4_ib_dev *dev = to_mdev(ibdev);
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
+ enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
- switch (rdma_port_get_link_layer(ibdev, port_num)) {
- case IB_LINK_LAYER_INFINIBAND:
- if (!mlx4_is_slave(dev->dev))
- return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
- case IB_LINK_LAYER_ETHERNET:
- return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
- default:
- return -EINVAL;
+ /* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA
+ * queries, should be called only by VFs and for that specific purpose
+ */
+ if (link == IB_LINK_LAYER_INFINIBAND) {
+ if (mlx4_is_slave(dev->dev) &&
+ in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+ in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS)
+ return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+ in_grh, in_mad, out_mad);
+
+ return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
+ in_grh, in_mad, out_mad);
}
+
+ if (link == IB_LINK_LAYER_ETHERNET)
+ return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+ in_grh, in_mad, out_mad);
+
+ return -EINVAL;
}
static void send_handler(struct ib_mad_agent *agent,
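
The reworked dispatch above keeps IB-link MADs on ib_process_mad() as before, with one carve-out: on a VF, PMA PortCounters queries go through the flow-counter based iboe_process_mad() path, and Ethernet (RoCE) ports always use iboe_process_mad(). A hedged sketch of that carve-out as a predicate (use_vf_counter_path is a hypothetical helper name, shown only to restate the condition from the hunk):

	static bool use_vf_counter_path(struct mlx4_ib_dev *dev,
					const struct ib_mad *mad)
	{
		/* VFs answer IB PMA PortCounters via the HCA flow counters */
		return mlx4_is_slave(dev->dev) &&
		       mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
		       mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS;
	}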
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 067a691ecbed..8be6db816460 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -253,14 +253,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
props->timestamp_mask = 0xFFFFFFFFFFFFULL;
- err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
- if (err)
- goto out;
+ if (!mlx4_is_slave(dev->dev))
+ err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
- resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
resp.response_length += sizeof(resp.hca_core_clock_offset);
- resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
+ if (!err && !mlx4_is_slave(dev->dev)) {
+ resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
+ resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
+ }
}
if (uhw->outlen) {
@@ -2669,31 +2670,33 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
if (!dm) {
pr_err("failed to allocate memory for tunneling qp update\n");
- goto out;
+ return;
}
for (i = 0; i < ports; i++) {
dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
if (!dm[i]) {
pr_err("failed to allocate memory for tunneling qp update work struct\n");
- for (i = 0; i < dev->caps.num_ports; i++) {
- if (dm[i])
- kfree(dm[i]);
- }
+ while (--i >= 0)
+ kfree(dm[i]);
goto out;
}
- }
- /* initialize or tear down tunnel QPs for the slave */
- for (i = 0; i < ports; i++) {
INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
dm[i]->port = first_port + i + 1;
dm[i]->slave = slave;
dm[i]->do_init = do_init;
dm[i]->dev = ibdev;
- spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
- if (!ibdev->sriov.is_going_down)
+ }
+ /* initialize or tear down tunnel QPs for the slave */
+ spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
+ if (!ibdev->sriov.is_going_down) {
+ for (i = 0; i < ports; i++)
queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+ } else {
+ spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+ for (i = 0; i < ports; i++)
+ kfree(dm[i]);
}
out:
kfree(dm);
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 01fc97db45d6..b84d13a487cc 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -68,8 +68,9 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 6b2418b74c99..7c3f2fb44ba5 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -209,8 +209,9 @@ int mthca_process_mad(struct ib_device *ibdev,
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
/* Forward locally generated traps to the SM */
if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 9047af429906..8a3ad170d790 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1520,8 +1520,9 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
int rc = arpindex;
struct net_device *netdev;
struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
+ __be32 dst_ipaddr = htonl(dst_ip);
- rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0);
+ rt = ip_route_output(&init_net, dst_ipaddr, nesvnic->local_ipaddr, 0, 0);
if (IS_ERR(rt)) {
printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
__func__, dst_ip);
@@ -1533,7 +1534,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
else
netdev = nesvnic->netdev;
- neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
+ neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
rcu_read_lock();
if (neigh) {
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 02120d340d50..4713dd7ed764 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -3861,7 +3861,7 @@ void nes_manage_arp_cache(struct net_device *netdev, unsigned char *mac_addr,
(((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) |
(((u32)mac_addr[4]) << 8) | (u32)mac_addr[5]);
cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32(
- (((u32)mac_addr[0]) << 16) | (u32)mac_addr[1]);
+ (((u32)mac_addr[0]) << 8) | (u32)mac_addr[1]);
} else {
cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = 0;
cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = 0;
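
The fix above corrects how the top two octets of the MAC are packed: the high word only carries a 16-bit value, so mac_addr[0] belongs at bit 8, not bit 16. An illustrative sketch of the split into the two WQE words (plain local variables, not part of the patch):

	/* 6-byte MAC split into a 32-bit low word and a 16-bit high word */
	u32 mac_low  = ((u32)mac_addr[2] << 24) | ((u32)mac_addr[3] << 16) |
		       ((u32)mac_addr[4] << 8)  |  (u32)mac_addr[5];
	u32 mac_high = ((u32)mac_addr[0] << 8)  |  (u32)mac_addr[1];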
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index b396344fae16..6a36338593cd 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) adapters. *
- * Copyright (C) 2008-2012 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#ifndef __OCRDMA_H__
#define __OCRDMA_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
index 1554cca5712a..430b1350fe96 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) adapters. *
- * Copyright (C) 2008-2012 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#ifndef __OCRDMA_ABI_H__
#define __OCRDMA_ABI_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 4bafa15708d0..44766fee1f4e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) adapters. *
- * Copyright (C) 2008-2012 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#include <net/neighbour.h>
#include <net/netevent.h>
@@ -215,8 +230,9 @@ int ocrdma_process_mad(struct ib_device *ibdev,
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_PERF_MGMT:
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index cf366fe03cb8..04a30ae67473 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) adapters. *
- * Copyright (C) 2008-2012 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#ifndef __OCRDMA_AH_H__
#define __OCRDMA_AH_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 47615ff33bc6..aab391a15db4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) CNA Adapters. *
- * Copyright (C) 2008-2012 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#include <linux/sched.h>
#include <linux/interrupt.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index e905972fceb7..7ed885c1851e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) CNA Adapters. *
- * Copyright (C) 2008-2012 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#ifndef __OCRDMA_HW_H__
#define __OCRDMA_HW_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 8a1398b253a2..b119a3413a15 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) adapters. *
- * Copyright (C) 2008-2012 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#include <linux/module.h>
#include <linux/idr.h>
@@ -46,7 +61,7 @@
MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
MODULE_AUTHOR("Emulex Corporation");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
static LIST_HEAD(ocrdma_dev_list);
static DEFINE_SPINLOCK(ocrdma_devlist_lock);
@@ -696,6 +711,7 @@ static void __exit ocrdma_exit_module(void)
ocrdma_unregister_inet6addr_notifier();
ocrdma_unregister_inetaddr_notifier();
ocrdma_rem_debugfs();
+ idr_destroy(&ocrdma_dev_id);
}
module_init(ocrdma_init_module);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 02ad0aee99af..80006b24aa11 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) adapters. *
- * Copyright (C) 2008-2012 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#ifndef __OCRDMA_SLI_H__
#define __OCRDMA_SLI_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 48d7ef51aa0c..69334e214571 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) adapters. *
- * Copyright (C) 2008-2014 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#include <rdma/ib_addr.h>
#include <rdma/ib_pma.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index 091edd68a8a3..c9e58d04c7b8 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) adapters. *
- * Copyright (C) 2008-2014 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#ifndef __OCRDMA_STATS_H__
#define __OCRDMA_STATS_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 5bb61eb58f2c..bc84cd462ecf 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) adapters. *
- * Copyright (C) 2008-2012 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index b15c608efa7b..eaccb2d3cb9f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) adapters. *
- * Copyright (C) 2008-2012 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#ifndef __OCRDMA_VERBS_H__
#define __OCRDMA_VERBS_H__
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 05e3242d8442..9625e7c438e5 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -2412,8 +2412,9 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
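The qib hunk above replaces a fatal BUG_ON() with a WARN_ON_ONCE() that fails the MAD request instead of taking the whole machine down. A minimal user-space sketch of that defensive pattern, warn once and return an error rather than abort; warn_once() and process() are illustrative names, not driver code:

    #include <stdbool.h>
    #include <stdio.h>

    /* Print the diagnostic only the first time the condition trips. */
    static bool warn_once(bool cond, const char *msg)
    {
        static bool warned;

        if (cond && !warned) {
            warned = true;
            fprintf(stderr, "warning: %s\n", msg);
        }
        return cond;
    }

    static int process(size_t in_size, size_t expected)
    {
        /* The old style would assert(in_size == expected) and abort. */
        if (warn_once(in_size != expected, "unexpected MAD size"))
            return -1;    /* report failure to the caller instead */

        return 0;
    }

    int main(void)
    {
        printf("%d\n", process(256, 256));  /* 0             */
        printf("%d\n", process(128, 256));  /* -1, warns once */
        printf("%d\n", process(128, 256));  /* -1, silent     */
        return 0;
    }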
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index bd94b0a6e9e5..79859c4d43c9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -239,7 +239,7 @@ struct ipoib_cm_tx {
struct net_device *dev;
struct ipoib_neigh *neigh;
struct ipoib_path *path;
- struct ipoib_cm_tx_buf *tx_ring;
+ struct ipoib_tx_buf *tx_ring;
unsigned tx_head;
unsigned tx_tail;
unsigned long flags;
@@ -504,6 +504,33 @@ int ipoib_mcast_stop_thread(struct net_device *dev);
void ipoib_mcast_dev_down(struct net_device *dev);
void ipoib_mcast_dev_flush(struct net_device *dev);
+int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
+void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
+ struct ipoib_tx_buf *tx_req);
+
+static inline void ipoib_build_sge(struct ipoib_dev_priv *priv,
+ struct ipoib_tx_buf *tx_req)
+{
+ int i, off;
+ struct sk_buff *skb = tx_req->skb;
+ skb_frag_t *frags = skb_shinfo(skb)->frags;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ u64 *mapping = tx_req->mapping;
+
+ if (skb_headlen(skb)) {
+ priv->tx_sge[0].addr = mapping[0];
+ priv->tx_sge[0].length = skb_headlen(skb);
+ off = 1;
+ } else
+ off = 0;
+
+ for (i = 0; i < nr_frags; ++i) {
+ priv->tx_sge[i + off].addr = mapping[i + off];
+ priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
+ }
+ priv->tx_wr.num_sge = nr_frags + off;
+}
+
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev);
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
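The ipoib_build_sge() helper added above fills one gather entry for the linear skb head (when present) and one per page fragment, so the datagram and connected-mode send paths can share the same scatter/gather setup. A rough user-space analogue of that gather-list construction; struct packet and its fields are invented for the sketch:

    #include <stdio.h>

    #define MAX_FRAGS 4

    struct sge {                     /* one gather element: address + length */
        unsigned long addr;
        unsigned int len;
    };

    struct packet {
        unsigned long head_addr;     /* mapped linear part, length 0 if none */
        unsigned int head_len;
        int nr_frags;
        unsigned long frag_addr[MAX_FRAGS];
        unsigned int frag_len[MAX_FRAGS];
    };

    /* Returns the number of gather entries written to sge[]. */
    static int build_sge(const struct packet *p, struct sge *sge)
    {
        int i, off = 0;

        if (p->head_len) {           /* linear head goes first */
            sge[0].addr = p->head_addr;
            sge[0].len = p->head_len;
            off = 1;
        }
        for (i = 0; i < p->nr_frags; i++) {
            sge[i + off].addr = p->frag_addr[i];
            sge[i + off].len = p->frag_len[i];
        }
        return p->nr_frags + off;
    }

    int main(void)
    {
        struct packet p = { 0x1000, 64, 2, { 0x2000, 0x3000 }, { 512, 256 } };
        struct sge sge[MAX_FRAGS + 1];
        int n = build_sge(&p, sge), i;

        for (i = 0; i < n; i++)
            printf("sge[%d]: addr=0x%lx len=%u\n", i, sge[i].addr, sge[i].len);
        return 0;
    }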
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index cf32a778e7d0..ee39be6ccfb0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -694,14 +694,12 @@ repost:
static inline int post_send(struct ipoib_dev_priv *priv,
struct ipoib_cm_tx *tx,
unsigned int wr_id,
- u64 addr, int len)
+ struct ipoib_tx_buf *tx_req)
{
struct ib_send_wr *bad_wr;
- priv->tx_sge[0].addr = addr;
- priv->tx_sge[0].length = len;
+ ipoib_build_sge(priv, tx_req);
- priv->tx_wr.num_sge = 1;
priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM;
return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
@@ -710,8 +708,7 @@ static inline int post_send(struct ipoib_dev_priv *priv,
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct ipoib_cm_tx_buf *tx_req;
- u64 addr;
+ struct ipoib_tx_buf *tx_req;
int rc;
if (unlikely(skb->len > tx->mtu)) {
@@ -735,24 +732,21 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
*/
tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
tx_req->skb = skb;
- addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
- if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
+
+ if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
++dev->stats.tx_errors;
dev_kfree_skb_any(skb);
return;
}
- tx_req->mapping = addr;
-
skb_orphan(skb);
skb_dst_drop(skb);
- rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
- addr, skb->len);
+ rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
if (unlikely(rc)) {
ipoib_warn(priv, "post_send failed, error %d\n", rc);
++dev->stats.tx_errors;
- ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
+ ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(skb);
} else {
dev->trans_start = jiffies;
@@ -777,7 +771,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_tx *tx = wc->qp->qp_context;
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
- struct ipoib_cm_tx_buf *tx_req;
+ struct ipoib_tx_buf *tx_req;
unsigned long flags;
ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
@@ -791,7 +785,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
tx_req = &tx->tx_ring[wr_id];
- ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
+ ipoib_dma_unmap_tx(priv, tx_req);
/* FIXME: is this right? Shouldn't we only increment on success? */
++dev->stats.tx_packets;
@@ -1036,6 +1030,9 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
struct ib_qp *tx_qp;
+ if (dev->features & NETIF_F_SG)
+ attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+
tx_qp = ib_create_qp(priv->pd, &attr);
if (PTR_ERR(tx_qp) == -EINVAL) {
ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
@@ -1170,7 +1167,7 @@ err_tx:
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
struct ipoib_dev_priv *priv = netdev_priv(p->dev);
- struct ipoib_cm_tx_buf *tx_req;
+ struct ipoib_tx_buf *tx_req;
unsigned long begin;
ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
@@ -1197,8 +1194,7 @@ timeout:
while ((int) p->tx_tail - (int) p->tx_head < 0) {
tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
- ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
- DMA_TO_DEVICE);
+ ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(tx_req->skb);
++p->tx_tail;
netif_tx_lock_bh(p->dev);
@@ -1455,7 +1451,6 @@ static void ipoib_cm_stale_task(struct work_struct *work)
spin_unlock_irq(&priv->lock);
}
-
static ssize_t show_mode(struct device *d, struct device_attribute *attr,
char *buf)
{
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 63b92cbb29ad..d266667ca9b8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -263,8 +263,7 @@ repost:
"for buf %d\n", wr_id);
}
-static int ipoib_dma_map_tx(struct ib_device *ca,
- struct ipoib_tx_buf *tx_req)
+int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
struct sk_buff *skb = tx_req->skb;
u64 *mapping = tx_req->mapping;
@@ -305,8 +304,8 @@ partial_error:
return -EIO;
}
-static void ipoib_dma_unmap_tx(struct ib_device *ca,
- struct ipoib_tx_buf *tx_req)
+void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
+ struct ipoib_tx_buf *tx_req)
{
struct sk_buff *skb = tx_req->skb;
u64 *mapping = tx_req->mapping;
@@ -314,7 +313,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
int off;
if (skb_headlen(skb)) {
- ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+ ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
+ DMA_TO_DEVICE);
off = 1;
} else
off = 0;
@@ -322,8 +322,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag),
- DMA_TO_DEVICE);
+ ib_dma_unmap_page(priv->ca, mapping[i + off],
+ skb_frag_size(frag), DMA_TO_DEVICE);
}
}
@@ -389,7 +389,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
tx_req = &priv->tx_ring[wr_id];
- ipoib_dma_unmap_tx(priv->ca, tx_req);
+ ipoib_dma_unmap_tx(priv, tx_req);
++dev->stats.tx_packets;
dev->stats.tx_bytes += tx_req->skb->len;
@@ -514,24 +514,10 @@ static inline int post_send(struct ipoib_dev_priv *priv,
void *head, int hlen)
{
struct ib_send_wr *bad_wr;
- int i, off;
struct sk_buff *skb = tx_req->skb;
- skb_frag_t *frags = skb_shinfo(skb)->frags;
- int nr_frags = skb_shinfo(skb)->nr_frags;
- u64 *mapping = tx_req->mapping;
- if (skb_headlen(skb)) {
- priv->tx_sge[0].addr = mapping[0];
- priv->tx_sge[0].length = skb_headlen(skb);
- off = 1;
- } else
- off = 0;
+ ipoib_build_sge(priv, tx_req);
- for (i = 0; i < nr_frags; ++i) {
- priv->tx_sge[i + off].addr = mapping[i + off];
- priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
- }
- priv->tx_wr.num_sge = nr_frags + off;
priv->tx_wr.wr_id = wr_id;
priv->tx_wr.wr.ud.remote_qpn = qpn;
priv->tx_wr.wr.ud.ah = address;
@@ -617,7 +603,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
ipoib_warn(priv, "post_send failed, error %d\n", rc);
++dev->stats.tx_errors;
--priv->tx_outstanding;
- ipoib_dma_unmap_tx(priv->ca, tx_req);
+ ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(skb);
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
@@ -868,7 +854,7 @@ int ipoib_ib_dev_stop(struct net_device *dev)
while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
tx_req = &priv->tx_ring[priv->tx_tail &
(ipoib_sendq_size - 1)];
- ipoib_dma_unmap_tx(priv->ca, tx_req);
+ ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
--priv->tx_outstanding;
@@ -985,20 +971,21 @@ static inline int update_child_pkey(struct ipoib_dev_priv *priv)
}
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
- enum ipoib_flush_level level)
+ enum ipoib_flush_level level,
+ int nesting)
{
struct ipoib_dev_priv *cpriv;
struct net_device *dev = priv->dev;
int result;
- down_read(&priv->vlan_rwsem);
+ down_read_nested(&priv->vlan_rwsem, nesting);
/*
* Flush any child interfaces too -- they might be up even if
* the parent is down.
*/
list_for_each_entry(cpriv, &priv->child_intfs, list)
- __ipoib_ib_dev_flush(cpriv, level);
+ __ipoib_ib_dev_flush(cpriv, level, nesting + 1);
up_read(&priv->vlan_rwsem);
@@ -1076,7 +1063,7 @@ void ipoib_ib_dev_flush_light(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_light);
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
}
void ipoib_ib_dev_flush_normal(struct work_struct *work)
@@ -1084,7 +1071,7 @@ void ipoib_ib_dev_flush_normal(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_normal);
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
}
void ipoib_ib_dev_flush_heavy(struct work_struct *work)
@@ -1092,7 +1079,7 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_heavy);
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
}
void ipoib_ib_dev_cleanup(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index da149c278cb8..b2943c84a5dd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -190,7 +190,7 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu
struct ipoib_dev_priv *priv = netdev_priv(dev);
if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
- features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+ features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
return features;
}
@@ -232,6 +232,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
ipoib_warn(priv, "enabling connected mode "
"will cause multicast packet drops\n");
netdev_update_features(dev);
+ dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
rtnl_unlock();
priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
@@ -1577,7 +1578,8 @@ static struct net_device *ipoib_add_port(const char *format,
SET_NETDEV_DEV(priv->dev, hca->dma_device);
priv->dev->dev_id = port - 1;
- if (!ib_query_port(hca, port, &attr))
+ result = ib_query_port(hca, port, &attr);
+ if (!result)
priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
else {
printk(KERN_WARNING "%s: ib_query_port %d failed\n",
@@ -1598,7 +1600,8 @@ static struct net_device *ipoib_add_port(const char *format,
goto device_init_failed;
}
- if (ipoib_set_dev_features(priv, hca))
+ result = ipoib_set_dev_features(priv, hca);
+ if (result)
goto device_init_failed;
/*
@@ -1684,7 +1687,7 @@ static void ipoib_add_one(struct ib_device *device)
struct list_head *dev_list;
struct net_device *dev;
struct ipoib_dev_priv *priv;
- int s, e, p;
+ int p;
int count = 0;
dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
@@ -1693,15 +1696,7 @@ static void ipoib_add_one(struct ib_device *device)
INIT_LIST_HEAD(dev_list);
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
- s = 0;
- e = 0;
- } else {
- s = 1;
- e = device->phys_port_cnt;
- }
-
- for (p = s; p <= e; ++p) {
+ for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
if (!rdma_protocol_ib(device, p))
continue;
dev = ipoib_add_port("ib%d", device, p);
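ipoib_add_one() above (and srp_add_one() later in this diff) drops the open-coded switch-versus-CA port bounds in favour of rdma_start_port()/rdma_end_port(). A small user-space sketch of what those bounds amount to, switches expose only port 0 while channel adapters number ports 1..phys_port_cnt; the struct and enum are illustrative:

    #include <stdio.h>

    enum node_type { NODE_CA, NODE_SWITCH };

    struct device {
        enum node_type node_type;
        int phys_port_cnt;
    };

    /* Switch management traffic uses port 0; CAs number ports from 1. */
    static int start_port(const struct device *dev)
    {
        return dev->node_type == NODE_SWITCH ? 0 : 1;
    }

    static int end_port(const struct device *dev)
    {
        return dev->node_type == NODE_SWITCH ? 0 : dev->phys_port_cnt;
    }

    int main(void)
    {
        struct device ca = { NODE_CA, 2 }, sw = { NODE_SWITCH, 36 };
        int p;

        for (p = start_port(&ca); p <= end_port(&ca); p++)
            printf("CA port %d\n", p);       /* 1, 2 */
        for (p = start_port(&sw); p <= end_port(&sw); p++)
            printf("switch port %d\n", p);   /* 0 */
        return 0;
    }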
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 9e6ee82a8fd7..851c8219d501 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -177,7 +177,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
else
size += ipoib_recvq_size * ipoib_max_conn_qp;
} else
- goto out_free_wq;
+ if (ret != -ENOSYS)
+ goto out_free_wq;
cq_attr.cqe = size;
priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL,
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 771700963127..d851e1828d6f 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -775,6 +775,17 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
ret = isert_rdma_post_recvl(isert_conn);
if (ret)
goto out_conn_dev;
+ /*
+ * Obtain the second reference now before isert_rdma_accept() to
+ * ensure that any initiator generated REJECT CM event that occurs
+ * asynchronously won't drop the last reference until the error path
+	 * in iscsi_target_login_sess_out() does its ->iscsit_free_conn() ->
+ * isert_free_conn() -> isert_put_conn() -> kref_put().
+ */
+ if (!kref_get_unless_zero(&isert_conn->kref)) {
+ isert_warn("conn %p connect_release is running\n", isert_conn);
+ goto out_conn_dev;
+ }
ret = isert_rdma_accept(isert_conn);
if (ret)
@@ -836,11 +847,6 @@ isert_connected_handler(struct rdma_cm_id *cma_id)
isert_info("conn %p\n", isert_conn);
- if (!kref_get_unless_zero(&isert_conn->kref)) {
- isert_warn("conn %p connect_release is running\n", isert_conn);
- return;
- }
-
mutex_lock(&isert_conn->mutex);
if (isert_conn->state != ISER_CONN_FULL_FEATURE)
isert_conn->state = ISER_CONN_UP;
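The isert change above grabs the second connection reference with kref_get_unless_zero() before calling isert_rdma_accept(), so an asynchronous REJECT that drops the last reference can no longer race the accept path. A stand-alone sketch of the get-unless-zero idiom using C11 atomics; obj, get_unless_zero() and put() are illustrative, not the kernel kref API:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct obj {
        atomic_int refcount;
    };

    /*
     * Take a reference only if the object is still live (refcount > 0).
     * A plain increment could resurrect an object already being freed.
     */
    static bool get_unless_zero(struct obj *o)
    {
        int old = atomic_load(&o->refcount);

        do {
            if (old == 0)
                return false;    /* teardown already in progress */
        } while (!atomic_compare_exchange_weak(&o->refcount, &old, old + 1));

        return true;
    }

    static void put(struct obj *o)
    {
        if (atomic_fetch_sub(&o->refcount, 1) == 1)
            printf("last reference dropped, freeing\n");
    }

    int main(void)
    {
        struct obj o = { 1 };

        printf("got: %d\n", get_unless_zero(&o));  /* 1, refcount now 2 */
        put(&o);
        put(&o);                                   /* frees             */
        printf("got: %d\n", get_unless_zero(&o));  /* 0                 */
        return 0;
    }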
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 267dc4f75502..31a20b462266 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -161,13 +161,10 @@ static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
int tmo, res;
- if (strncmp(val, "off", 3) != 0) {
- res = kstrtoint(val, 0, &tmo);
- if (res)
- goto out;
- } else {
- tmo = -1;
- }
+ res = srp_parse_tmo(&tmo, val);
+ if (res)
+ goto out;
+
if (kp->arg == &srp_reconnect_delay)
res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
srp_dev_loss_tmo);
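The srp_tmo_set() hunk above factors the "off" handling into a srp_parse_tmo() helper whose body is not part of this hunk. Assuming it keeps the old semantics, "off" maps to -1 and anything else is parsed as an integer, a user-space reconstruction might look like the sketch below; only the function name comes from the diff, the body is guesswork (the kernel would use kstrtoint rather than strtol):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical: "off" disables the timeout (-1), otherwise parse seconds. */
    static int srp_parse_tmo(int *tmo, const char *val)
    {
        char *end;
        long v;

        if (strncmp(val, "off", 3) == 0) {
            *tmo = -1;
            return 0;
        }

        errno = 0;
        v = strtol(val, &end, 0);
        if (errno || end == val)
            return -EINVAL;

        *tmo = (int)v;
        return 0;
    }

    int main(void)
    {
        int tmo;

        if (!srp_parse_tmo(&tmo, "off"))
            printf("off -> %d\n", tmo);    /* -1 */
        if (!srp_parse_tmo(&tmo, "30"))
            printf("30  -> %d\n", tmo);    /* 30 */
        printf("junk -> %d\n", srp_parse_tmo(&tmo, "junk"));  /* -EINVAL */
        return 0;
    }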
@@ -3379,7 +3376,7 @@ static void srp_add_one(struct ib_device *device)
struct srp_device *srp_dev;
struct ib_device_attr *dev_attr;
struct srp_host *host;
- int mr_page_shift, s, e, p;
+ int mr_page_shift, p;
u64 max_pages_per_mr;
dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
@@ -3443,15 +3440,7 @@ static void srp_add_one(struct ib_device *device)
if (IS_ERR(srp_dev->mr))
goto err_pd;
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
- s = 0;
- e = 0;
- } else {
- s = 1;
- e = device->phys_port_cnt;
- }
-
- for (p = s; p <= e; ++p) {
+ for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
host = srp_add_port(srp_dev, p);
if (host)
list_add_tail(&host->list, &srp_dev->dev_list);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 82897ca17f32..60ff0a2390e5 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -302,7 +302,7 @@ static void srpt_get_iou(struct ib_dm_mad *mad)
int i;
ioui = (struct ib_dm_iou_info *)mad->data;
- ioui->change_id = __constant_cpu_to_be16(1);
+ ioui->change_id = cpu_to_be16(1);
ioui->max_controllers = 16;
/* set present for slot 1 and empty for the rest */
@@ -330,13 +330,13 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
if (!slot || slot > 16) {
mad->mad_hdr.status
- = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+ = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
return;
}
if (slot > 2) {
mad->mad_hdr.status
- = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+ = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
return;
}
@@ -348,10 +348,10 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
iocp->subsys_device_id = 0x0;
- iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
- iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS);
- iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL);
- iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION);
+ iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
+ iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
+ iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
+ iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
iocp->rdma_read_depth = 4;
iocp->send_size = cpu_to_be32(srp_max_req_size);
@@ -379,13 +379,13 @@ static void srpt_get_svc_entries(u64 ioc_guid,
if (!slot || slot > 16) {
mad->mad_hdr.status
- = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+ = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
return;
}
if (slot > 2 || lo > hi || hi > 1) {
mad->mad_hdr.status
- = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+ = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
return;
}
@@ -436,7 +436,7 @@ static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
break;
default:
rsp_mad->mad_hdr.status =
- __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+ cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
break;
}
}
@@ -493,11 +493,11 @@ static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
break;
case IB_MGMT_METHOD_SET:
dm_mad->mad_hdr.status =
- __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+ cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
break;
default:
dm_mad->mad_hdr.status =
- __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
+ cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
break;
}
@@ -1535,7 +1535,7 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
memset(srp_rsp, 0, sizeof *srp_rsp);
srp_rsp->opcode = SRP_RSP;
srp_rsp->req_lim_delta =
- __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
+ cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
srp_rsp->tag = tag;
srp_rsp->status = status;
@@ -1585,8 +1585,8 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
memset(srp_rsp, 0, sizeof *srp_rsp);
srp_rsp->opcode = SRP_RSP;
- srp_rsp->req_lim_delta = __constant_cpu_to_be32(1
- + atomic_xchg(&ch->req_lim_delta, 0));
+ srp_rsp->req_lim_delta =
+ cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
srp_rsp->tag = tag;
srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
@@ -1630,7 +1630,7 @@ static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
switch (len) {
case 8:
if ((*((__be64 *)lun) &
- __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
+ cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
goto out_err;
break;
case 4:
@@ -2449,8 +2449,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
}
if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
ret = -EINVAL;
pr_err("rejected SRP_LOGIN_REQ because its"
" length (%d bytes) is out of range (%d .. %d)\n",
@@ -2459,8 +2459,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
}
if (!sport->enabled) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
ret = -EINVAL;
pr_err("rejected SRP_LOGIN_REQ because the target port"
" has not yet been enabled\n");
@@ -2505,8 +2505,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
|| *(__be64 *)(req->target_port_id + 8) !=
cpu_to_be64(srpt_service_guid)) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
ret = -ENOMEM;
pr_err("rejected SRP_LOGIN_REQ because it"
" has an invalid target port identifier.\n");
@@ -2515,8 +2515,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
ch = kzalloc(sizeof *ch, GFP_KERNEL);
if (!ch) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_err("rejected SRP_LOGIN_REQ because no memory.\n");
ret = -ENOMEM;
goto reject;
@@ -2552,8 +2552,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
ret = srpt_create_ch_ib(ch);
if (ret) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_err("rejected SRP_LOGIN_REQ because creating"
" a new RDMA channel failed.\n");
goto free_ring;
@@ -2561,8 +2561,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
ret = srpt_ch_qp_rtr(ch, ch->qp);
if (ret) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_err("rejected SRP_LOGIN_REQ because enabling"
" RTR failed (error code = %d)\n", ret);
goto destroy_ib;
@@ -2580,15 +2579,15 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
if (!nacl) {
pr_info("Rejected login because no ACL has been"
" configured yet for initiator %s.\n", ch->sess_name);
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
goto destroy_ib;
}
ch->sess = transport_init_session(TARGET_PROT_NORMAL);
if (IS_ERR(ch->sess)) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_debug("Failed to create session\n");
goto deregister_session;
}
@@ -2604,8 +2603,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
rsp->max_it_iu_len = req->req_it_iu_len;
rsp->max_ti_iu_len = req->req_it_iu_len;
ch->max_ti_iu_len = it_iu_len;
- rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
- | SRP_BUF_FORMAT_INDIRECT);
+ rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+ | SRP_BUF_FORMAT_INDIRECT);
rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
atomic_set(&ch->req_lim, ch->rq_size);
atomic_set(&ch->req_lim_delta, 0);
@@ -2655,8 +2654,8 @@ free_ch:
reject:
rej->opcode = SRP_LOGIN_REJ;
rej->tag = req->tag;
- rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
- | SRP_BUF_FORMAT_INDIRECT);
+ rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+ | SRP_BUF_FORMAT_INDIRECT);
ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
(void *)rej, sizeof *rej);
diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c
index 074a65ed17bb..766bf2660116 100644
--- a/drivers/input/input-leds.c
+++ b/drivers/input/input-leds.c
@@ -71,6 +71,18 @@ static void input_leds_event(struct input_handle *handle, unsigned int type,
{
}
+static int input_leds_get_count(struct input_dev *dev)
+{
+ unsigned int led_code;
+ int count = 0;
+
+ for_each_set_bit(led_code, dev->ledbit, LED_CNT)
+ if (input_led_info[led_code].name)
+ count++;
+
+ return count;
+}
+
static int input_leds_connect(struct input_handler *handler,
struct input_dev *dev,
const struct input_device_id *id)
@@ -81,7 +93,7 @@ static int input_leds_connect(struct input_handler *handler,
int led_no;
int error;
- num_leds = bitmap_weight(dev->ledbit, LED_CNT);
+ num_leds = input_leds_get_count(dev);
if (!num_leds)
return -ENXIO;
@@ -112,7 +124,7 @@ static int input_leds_connect(struct input_handler *handler,
led->handle = &leds->handle;
led->code = led_code;
- if (WARN_ON(!input_led_info[led_code].name))
+ if (!input_led_info[led_code].name)
continue;
led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
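input_leds_get_count() above counts only the LED bits that have an entry in input_led_info[], instead of taking the raw bitmap weight, which is why the later WARN_ON on a nameless LED can simply become a quiet continue. A user-space sketch of the same count-only-named-bits idea; the table and bit width are simplified stand-ins:

    #include <stdio.h>

    #define LED_CNT 8

    /* NULL entries are capability bits we have no name (and no LED device) for. */
    static const char *led_names[LED_CNT] = {
        "numlock", "capslock", "scrolllock", NULL, NULL, "mute", NULL, NULL,
    };

    static int count_named_leds(unsigned int ledbits)
    {
        int code, count = 0;

        for (code = 0; code < LED_CNT; code++)
            if ((ledbits & (1u << code)) && led_names[code])
                count++;
        return count;
    }

    int main(void)
    {
        unsigned int ledbits = 0x2f;    /* bits 0,1,2,3,5 set */

        /* bit 3 has no name, so only 4 LEDs would be registered */
        printf("named LEDs: %d\n", count_named_leds(ledbits));
        return 0;
    }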
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index 27b6a3ce18ca..891797ad76bc 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -196,7 +196,7 @@ static struct tgfx __init *tgfx_probe(int parport, int *n_buttons, int n_devs)
if (n_buttons[i] < 1)
continue;
- if (n_buttons[i] > 6) {
+ if (n_buttons[i] > ARRAY_SIZE(tgfx_buttons)) {
printk(KERN_ERR "turbografx.c: Invalid number of buttons %d\n", n_buttons[i]);
err = -EINVAL;
goto err_unreg_devs;
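The turbografx fix above checks the requested button count against ARRAY_SIZE(tgfx_buttons) rather than a hard-coded 6, so the bound can never drift out of sync with the table it guards. A minimal illustration of the idiom (the kernel macro additionally rejects non-array arguments, which this plain version does not):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const int buttons[] = { 1, 2, 3, 4, 5, 6, 7 };

    static int validate(int n)
    {
        if (n < 1 || n > (int)ARRAY_SIZE(buttons)) {
            fprintf(stderr, "invalid number of buttons %d\n", n);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", validate(7), validate(8));  /* 0 -1 */
        return 0;
    }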
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 097d7216d98e..c6dc644aa580 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -246,7 +246,7 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
* convert it to descriptor.
*/
if (!button->gpiod && gpio_is_valid(button->gpio)) {
- unsigned flags = 0;
+ unsigned flags = GPIOF_IN;
if (button->active_low)
flags |= GPIOF_ACTIVE_LOW;
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index 10e140af5aac..1ac898db303a 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -292,3 +292,4 @@ module_platform_driver(axp20x_pek_driver);
MODULE_DESCRIPTION("axp20x Power Button");
MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:axp20x-pek");
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index fc17b9592f54..10c4e3d462f1 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -183,7 +183,8 @@ static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
if (pdata && pdata->coexist)
return true;
- if (of_find_node_by_name(node, "codec")) {
+ node = of_find_node_by_name(node, "codec");
+ if (node) {
of_node_put(node);
return true;
}
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 113d6f1516a5..4d246861d692 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -20,6 +20,7 @@
#include <linux/input/mt.h>
#include <linux/serio.h>
#include <linux/libps2.h>
+#include <linux/dmi.h>
#include "psmouse.h"
#include "alps.h"
@@ -99,6 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */
#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with
6-byte ALPS packet */
+#define ALPS_DELL 0x100 /* device is a Dell laptop */
#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */
static const struct alps_model_info alps_model_data[] = {
@@ -251,9 +253,9 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
return;
}
- /* Non interleaved V2 dualpoint has separate stick button bits */
+ /* Dell non interleaved V2 dualpoint has separate stick button bits */
if (priv->proto_version == ALPS_PROTO_V2 &&
- priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) {
+ priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) {
left |= packet[0] & 1;
right |= packet[0] & 2;
middle |= packet[0] & 4;
@@ -2550,6 +2552,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
priv->byte0 = protocol->byte0;
priv->mask0 = protocol->mask0;
priv->flags = protocol->flags;
+ if (dmi_name_in_vendors("Dell"))
+ priv->flags |= ALPS_DELL;
priv->x_max = 2000;
priv->y_max = 1400;
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index b10709f04615..30e3442518f8 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -2,6 +2,7 @@
* Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver
*
* Copyright (C) 2008 Henrik Rydberg (rydberg@euromail.se)
+ * Copyright (C) 2015 John Horan (knasher@gmail.com)
*
* The USB initialization and package decoding was made by
* Scott Shawcroft as part of the touchd user-space driver project:
@@ -91,6 +92,10 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
+/* MacbookPro12,1 (2015) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI 0x0272
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274
#define BCM5974_DEVICE(prod) { \
.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
@@ -152,6 +157,10 @@ static const struct usb_device_id bcm5974_table[] = {
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ISO),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
+ /* MacbookPro12,1 */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
/* Terminating entry */
{}
};
@@ -180,21 +189,47 @@ struct bt_data {
enum tp_type {
TYPE1, /* plain trackpad */
TYPE2, /* button integrated in trackpad */
- TYPE3 /* additional header fields since June 2013 */
+ TYPE3, /* additional header fields since June 2013 */
+ TYPE4 /* additional header field for pressure data */
};
/* trackpad finger data offsets, le16-aligned */
-#define FINGER_TYPE1 (13 * sizeof(__le16))
-#define FINGER_TYPE2 (15 * sizeof(__le16))
-#define FINGER_TYPE3 (19 * sizeof(__le16))
+#define HEADER_TYPE1 (13 * sizeof(__le16))
+#define HEADER_TYPE2 (15 * sizeof(__le16))
+#define HEADER_TYPE3 (19 * sizeof(__le16))
+#define HEADER_TYPE4 (23 * sizeof(__le16))
/* trackpad button data offsets */
+#define BUTTON_TYPE1 0
#define BUTTON_TYPE2 15
#define BUTTON_TYPE3 23
+#define BUTTON_TYPE4 31
/* list of device capability bits */
#define HAS_INTEGRATED_BUTTON 1
+/* trackpad finger data block size */
+#define FSIZE_TYPE1 (14 * sizeof(__le16))
+#define FSIZE_TYPE2 (14 * sizeof(__le16))
+#define FSIZE_TYPE3 (14 * sizeof(__le16))
+#define FSIZE_TYPE4 (15 * sizeof(__le16))
+
+/* offset from header to finger struct */
+#define DELTA_TYPE1 (0 * sizeof(__le16))
+#define DELTA_TYPE2 (0 * sizeof(__le16))
+#define DELTA_TYPE3 (0 * sizeof(__le16))
+#define DELTA_TYPE4 (1 * sizeof(__le16))
+
+/* usb control message mode switch data */
+#define USBMSG_TYPE1 8, 0x300, 0, 0, 0x1, 0x8
+#define USBMSG_TYPE2 8, 0x300, 0, 0, 0x1, 0x8
+#define USBMSG_TYPE3 8, 0x300, 0, 0, 0x1, 0x8
+#define USBMSG_TYPE4 2, 0x302, 2, 1, 0x1, 0x0
+
+/* Wellspring initialization constants */
+#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1
+#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9
+
/* trackpad finger structure, le16-aligned */
struct tp_finger {
__le16 origin; /* zero when switching track finger */
@@ -207,14 +242,13 @@ struct tp_finger {
__le16 orientation; /* 16384 when point, else 15 bit angle */
__le16 touch_major; /* touch area, major axis */
__le16 touch_minor; /* touch area, minor axis */
- __le16 unused[3]; /* zeros */
+ __le16 unused[2]; /* zeros */
+ __le16 pressure; /* pressure on forcetouch touchpad */
__le16 multi; /* one finger: varies, more fingers: constant */
} __attribute__((packed,aligned(2)));
/* trackpad finger data size, empirically at least ten fingers */
#define MAX_FINGERS 16
-#define SIZEOF_FINGER sizeof(struct tp_finger)
-#define SIZEOF_ALL_FINGERS (MAX_FINGERS * SIZEOF_FINGER)
#define MAX_FINGER_ORIENTATION 16384
/* device-specific parameters */
@@ -232,8 +266,17 @@ struct bcm5974_config {
int bt_datalen; /* data length of the button interface */
int tp_ep; /* the endpoint of the trackpad interface */
enum tp_type tp_type; /* type of trackpad interface */
- int tp_offset; /* offset to trackpad finger data */
+ int tp_header; /* bytes in header block */
int tp_datalen; /* data length of the trackpad interface */
+ int tp_button; /* offset to button data */
+ int tp_fsize; /* bytes in single finger block */
+ int tp_delta; /* offset from header to finger struct */
+ int um_size; /* usb control message length */
+ int um_req_val; /* usb control message value */
+ int um_req_idx; /* usb control message index */
+ int um_switch_idx; /* usb control message mode switch index */
+ int um_switch_on; /* usb control message mode switch on */
+ int um_switch_off; /* usb control message mode switch off */
struct bcm5974_param p; /* finger pressure limits */
struct bcm5974_param w; /* finger width limits */
struct bcm5974_param x; /* horizontal limits */
@@ -259,6 +302,24 @@ struct bcm5974 {
int slots[MAX_FINGERS]; /* slot assignments */
};
+/* trackpad finger block data, le16-aligned */
+static const struct tp_finger *get_tp_finger(const struct bcm5974 *dev, int i)
+{
+ const struct bcm5974_config *c = &dev->cfg;
+ u8 *f_base = dev->tp_data + c->tp_header + c->tp_delta;
+
+ return (const struct tp_finger *)(f_base + i * c->tp_fsize);
+}
+
+#define DATAFORMAT(type) \
+ type, \
+ HEADER_##type, \
+ HEADER_##type + (MAX_FINGERS) * (FSIZE_##type), \
+ BUTTON_##type, \
+ FSIZE_##type, \
+ DELTA_##type, \
+ USBMSG_##type
+
/* logical signal quality */
#define SN_PRESSURE 45 /* pressure signal-to-noise ratio */
#define SN_WIDTH 25 /* width signal-to-noise ratio */
@@ -273,7 +334,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING_JIS,
0,
0x84, sizeof(struct bt_data),
- 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE1),
{ SN_PRESSURE, 0, 256 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4824, 5342 },
@@ -286,7 +347,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING2_JIS,
0,
0x84, sizeof(struct bt_data),
- 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE1),
{ SN_PRESSURE, 0, 256 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4824, 4824 },
@@ -299,7 +360,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING3_JIS,
HAS_INTEGRATED_BUTTON,
0x84, sizeof(struct bt_data),
- 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE2),
{ SN_PRESSURE, 0, 300 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4460, 5166 },
@@ -312,7 +373,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING4_JIS,
HAS_INTEGRATED_BUTTON,
0x84, sizeof(struct bt_data),
- 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE2),
{ SN_PRESSURE, 0, 300 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4620, 5140 },
@@ -325,7 +386,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS,
HAS_INTEGRATED_BUTTON,
0x84, sizeof(struct bt_data),
- 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE2),
{ SN_PRESSURE, 0, 300 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4616, 5112 },
@@ -338,7 +399,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING5_JIS,
HAS_INTEGRATED_BUTTON,
0x84, sizeof(struct bt_data),
- 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE2),
{ SN_PRESSURE, 0, 300 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4415, 5050 },
@@ -351,7 +412,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING6_JIS,
HAS_INTEGRATED_BUTTON,
0x84, sizeof(struct bt_data),
- 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE2),
{ SN_PRESSURE, 0, 300 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4620, 5140 },
@@ -364,7 +425,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS,
HAS_INTEGRATED_BUTTON,
0x84, sizeof(struct bt_data),
- 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE2),
{ SN_PRESSURE, 0, 300 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4750, 5280 },
@@ -377,7 +438,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS,
HAS_INTEGRATED_BUTTON,
0x84, sizeof(struct bt_data),
- 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE2),
{ SN_PRESSURE, 0, 300 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4620, 5140 },
@@ -390,7 +451,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
HAS_INTEGRATED_BUTTON,
0x84, sizeof(struct bt_data),
- 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE2),
{ SN_PRESSURE, 0, 300 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4750, 5280 },
@@ -403,7 +464,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS,
HAS_INTEGRATED_BUTTON,
0x84, sizeof(struct bt_data),
- 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE2),
{ SN_PRESSURE, 0, 300 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4750, 5280 },
@@ -416,13 +477,26 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING8_JIS,
HAS_INTEGRATED_BUTTON,
0, sizeof(struct bt_data),
- 0x83, TYPE3, FINGER_TYPE3, FINGER_TYPE3 + SIZEOF_ALL_FINGERS,
+ 0x83, DATAFORMAT(TYPE3),
{ SN_PRESSURE, 0, 300 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4620, 5140 },
{ SN_COORD, -150, 6600 },
{ SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
},
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING9_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING9_JIS,
+ HAS_INTEGRATED_BUTTON,
+ 0, sizeof(struct bt_data),
+ 0x83, DATAFORMAT(TYPE4),
+ { SN_PRESSURE, 0, 300 },
+ { SN_WIDTH, 0, 2048 },
+ { SN_COORD, -4828, 5345 },
+ { SN_COORD, -203, 6803 },
+ { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
+ },
{}
};
@@ -549,19 +623,18 @@ static int report_tp_state(struct bcm5974 *dev, int size)
struct input_dev *input = dev->input;
int raw_n, i, n = 0;
- if (size < c->tp_offset || (size - c->tp_offset) % SIZEOF_FINGER != 0)
+ if (size < c->tp_header || (size - c->tp_header) % c->tp_fsize != 0)
return -EIO;
- /* finger data, le16-aligned */
- f = (const struct tp_finger *)(dev->tp_data + c->tp_offset);
- raw_n = (size - c->tp_offset) / SIZEOF_FINGER;
+ raw_n = (size - c->tp_header) / c->tp_fsize;
for (i = 0; i < raw_n; i++) {
- if (raw2int(f[i].touch_major) == 0)
+ f = get_tp_finger(dev, i);
+ if (raw2int(f->touch_major) == 0)
continue;
- dev->pos[n].x = raw2int(f[i].abs_x);
- dev->pos[n].y = c->y.min + c->y.max - raw2int(f[i].abs_y);
- dev->index[n++] = &f[i];
+ dev->pos[n].x = raw2int(f->abs_x);
+ dev->pos[n].y = c->y.min + c->y.max - raw2int(f->abs_y);
+ dev->index[n++] = f;
}
input_mt_assign_slots(input, dev->slots, dev->pos, n, 0);
@@ -572,32 +645,22 @@ static int report_tp_state(struct bcm5974 *dev, int size)
input_mt_sync_frame(input);
- report_synaptics_data(input, c, f, raw_n);
+ report_synaptics_data(input, c, get_tp_finger(dev, 0), raw_n);
- /* type 2 reports button events via ibt only */
- if (c->tp_type == TYPE2) {
- int ibt = raw2int(dev->tp_data[BUTTON_TYPE2]);
+ /* later types report button events via integrated button only */
+ if (c->caps & HAS_INTEGRATED_BUTTON) {
+ int ibt = raw2int(dev->tp_data[c->tp_button]);
input_report_key(input, BTN_LEFT, ibt);
}
- if (c->tp_type == TYPE3)
- input_report_key(input, BTN_LEFT, dev->tp_data[BUTTON_TYPE3]);
-
input_sync(input);
return 0;
}
-/* Wellspring initialization constants */
-#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1
-#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9
-#define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300
-#define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0
-#define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01
-#define BCM5974_WELLSPRING_MODE_NORMAL_VALUE 0x08
-
static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
{
+ const struct bcm5974_config *c = &dev->cfg;
int retval = 0, size;
char *data;
@@ -605,7 +668,7 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
if (dev->cfg.tp_type == TYPE3)
return 0;
- data = kmalloc(8, GFP_KERNEL);
+ data = kmalloc(c->um_size, GFP_KERNEL);
if (!data) {
dev_err(&dev->intf->dev, "out of memory\n");
retval = -ENOMEM;
@@ -616,28 +679,24 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
BCM5974_WELLSPRING_MODE_READ_REQUEST_ID,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- BCM5974_WELLSPRING_MODE_REQUEST_VALUE,
- BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
+ c->um_req_val, c->um_req_idx, data, c->um_size, 5000);
- if (size != 8) {
+ if (size != c->um_size) {
dev_err(&dev->intf->dev, "could not read from device\n");
retval = -EIO;
goto out;
}
/* apply the mode switch */
- data[0] = on ?
- BCM5974_WELLSPRING_MODE_VENDOR_VALUE :
- BCM5974_WELLSPRING_MODE_NORMAL_VALUE;
+ data[c->um_switch_idx] = on ? c->um_switch_on : c->um_switch_off;
/* write configuration */
size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- BCM5974_WELLSPRING_MODE_REQUEST_VALUE,
- BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
+ c->um_req_val, c->um_req_idx, data, c->um_size, 5000);
- if (size != 8) {
+ if (size != c->um_size) {
dev_err(&dev->intf->dev, "could not write to device\n");
retval = -EIO;
goto out;
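With the TYPE4 (Force Touch) pads the finger records carry an extra pressure word and start one word after the header, so the driver above stops indexing a plain struct array and instead computes each record as base + header + delta + i * record size, which is what get_tp_finger() does. A toy illustration of that indexing; the sizes here are invented, not the real report layout:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct cfg {
        size_t header;  /* bytes before the finger area        */
        size_t delta;   /* gap between header and first record */
        size_t fsize;   /* bytes per finger record             */
    };

    /* Pointer to the i-th finger record inside a raw report. */
    static uint8_t *get_finger(uint8_t *report, const struct cfg *c, int i)
    {
        return report + c->header + c->delta + (size_t)i * c->fsize;
    }

    int main(void)
    {
        /* pretend layout: 8-byte header, 2-byte delta, 6-byte records */
        struct cfg c = { 8, 2, 6 };
        uint8_t report[8 + 2 + 3 * 6];
        int i;

        for (i = 0; i < 3; i++)
            memset(get_finger(report, &c, i), i + 1, c.fsize);

        for (i = 0; i < 3; i++)
            printf("finger %d first byte: %d\n", i, get_finger(report, &c, i)[0]);
        return 0;
    }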
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index ce3d40004458..2955f1d0ca6c 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -783,19 +783,26 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
struct elantech_data *etd = psmouse->private;
unsigned char *packet = psmouse->packet;
unsigned char packet_type = packet[3] & 0x03;
+ unsigned int ic_version;
bool sanity_check;
if (etd->tp_dev && (packet[3] & 0x0f) == 0x06)
return PACKET_TRACKPOINT;
+ /* This represents the version of IC body. */
+ ic_version = (etd->fw_version & 0x0f0000) >> 16;
+
/*
* Sanity check based on the constant bits of a packet.
* The constant bits change depending on the value of
- * the hardware flag 'crc_enabled' but are the same for
- * every packet, regardless of the type.
+ * the hardware flag 'crc_enabled' and the version of
+ * the IC body, but are the same for every packet,
+ * regardless of the type.
*/
if (etd->crc_enabled)
sanity_check = ((packet[3] & 0x08) == 0x00);
+ else if (ic_version == 7 && etd->samples[1] == 0x2A)
+ sanity_check = ((packet[3] & 0x1c) == 0x10);
else
sanity_check = ((packet[0] & 0x0c) == 0x04 &&
(packet[3] & 0x1c) == 0x10);
@@ -1116,6 +1123,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* Avatar AVIU-145A2 0x361f00 ? clickpad
* Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
+ * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
* Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
* Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
* Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
@@ -1167,7 +1175,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
struct input_dev *dev = psmouse->dev;
struct elantech_data *etd = psmouse->private;
unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0;
- unsigned int x_res = 0, y_res = 0;
+ unsigned int x_res = 31, y_res = 31;
if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width))
return -1;
@@ -1232,8 +1240,6 @@ static int elantech_set_input_params(struct psmouse *psmouse)
/* For X to recognize me as touchpad. */
input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
- input_abs_set_res(dev, ABS_X, x_res);
- input_abs_set_res(dev, ABS_Y, y_res);
/*
* range of pressure and width is the same as v2,
* report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility.
@@ -1246,8 +1252,6 @@ static int elantech_set_input_params(struct psmouse *psmouse)
input_mt_init_slots(dev, ETP_MAX_FINGERS, 0);
input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
- input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
- input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2,
ETP_PMAX_V2, 0, 0);
/*
@@ -1259,6 +1263,13 @@ static int elantech_set_input_params(struct psmouse *psmouse)
break;
}
+ input_abs_set_res(dev, ABS_X, x_res);
+ input_abs_set_res(dev, ABS_Y, y_res);
+ if (etd->hw_version > 1) {
+ input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
+ input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
+ }
+
etd->y_max = y_max;
etd->width = width;
@@ -1648,6 +1659,16 @@ int elantech_init(struct psmouse *psmouse)
etd->capabilities[0], etd->capabilities[1],
etd->capabilities[2]);
+ if (etd->hw_version != 1) {
+ if (etd->send_cmd(psmouse, ETP_SAMPLE_QUERY, etd->samples)) {
+ psmouse_err(psmouse, "failed to query sample data\n");
+ goto init_fail;
+ }
+ psmouse_info(psmouse,
+ "Elan sample query result %02x, %02x, %02x\n",
+ etd->samples[0], etd->samples[1], etd->samples[2]);
+ }
+
if (elantech_set_absolute_mode(psmouse)) {
psmouse_err(psmouse,
"failed to put touchpad into absolute mode.\n");
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index f965d1569cc3..e1cbf409d9c8 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -129,6 +129,7 @@ struct elantech_data {
unsigned char reg_26;
unsigned char debug;
unsigned char capabilities[3];
+ unsigned char samples[3];
bool paritycheck;
bool jumpy_cursor;
bool reports_pressure;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 3a32caf06bf1..6025eb430c0a 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1484,12 +1484,12 @@ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? SYN_NEWABS : SYN_OLDABS;
psmouse_info(psmouse,
- "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n",
+ "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n",
SYN_ID_MODEL(priv->identity),
SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity),
priv->model_id,
priv->capabilities, priv->ext_cap, priv->ext_cap_0c,
- priv->board_id, priv->firmware_id);
+ priv->ext_cap_10, priv->board_id, priv->firmware_id);
set_input_params(psmouse, priv);
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index b4d12e29abff..e36162b28c2a 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -15,6 +15,7 @@
*/
#include <linux/kernel.h>
+#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/input/mt.h>
@@ -34,6 +35,7 @@ struct goodix_ts_data {
int abs_y_max;
unsigned int max_touch_num;
unsigned int int_trigger_type;
+ bool rotated_screen;
};
#define GOODIX_MAX_HEIGHT 4096
@@ -60,6 +62,30 @@ static const unsigned long goodix_irq_flags[] = {
IRQ_TYPE_LEVEL_HIGH,
};
+/*
+ * Those tablets have their coordinate origin at the bottom right
+ * of the tablet, as if rotated 180 degrees
+ */
+static const struct dmi_system_id rotated_screen[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+ {
+ .ident = "WinBook TW100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
+ }
+ },
+ {
+ .ident = "WinBook TW700",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
+ },
+ },
+#endif
+ {}
+};
+
/**
* goodix_i2c_read - read data from a register of the i2c slave device.
*
@@ -129,6 +155,11 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
int input_y = get_unaligned_le16(&coor_data[3]);
int input_w = get_unaligned_le16(&coor_data[5]);
+ if (ts->rotated_screen) {
+ input_x = ts->abs_x_max - input_x;
+ input_y = ts->abs_y_max - input_y;
+ }
+
input_mt_slot(ts->input_dev, id);
input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true);
input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x);
@@ -223,6 +254,11 @@ static void goodix_read_config(struct goodix_ts_data *ts)
ts->abs_y_max = GOODIX_MAX_HEIGHT;
ts->max_touch_num = GOODIX_MAX_CONTACTS;
}
+
+ ts->rotated_screen = dmi_check_system(rotated_screen);
+ if (ts->rotated_screen)
+ dev_dbg(&ts->client->dev,
+ "Applying '180 degrees rotated screen' quirk\n");
}
/**
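The goodix quirk above handles panels wired with their origin at the bottom-right corner by mirroring every reported point across both axis maxima whenever the DMI table matches. The transformation is just two subtractions; a small stand-alone illustration (the coordinate ranges are made up):

    #include <stdio.h>

    struct point { int x, y; };

    /* 180-degree rotation: reflect across both axes of the touch matrix. */
    static struct point rotate_180(struct point p, int x_max, int y_max)
    {
        struct point r = { x_max - p.x, y_max - p.y };
        return r;
    }

    int main(void)
    {
        struct point p = { 100, 50 };
        struct point r = rotate_180(p, 4095, 767);

        printf("(%d,%d) -> (%d,%d)\n", p.x, p.y, r.x, r.y);  /* (3995,717) */
        return 0;
    }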
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index f2c6c352c55a..2c41107240de 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -627,6 +627,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
goto err_out;
}
+ /* TSC-25 data sheet specifies a delay after the RESET command */
+ msleep(150);
+
/* set coordinate output rate */
buf[0] = buf[1] = 0xFF;
ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a57e9b749895..658ee39e6569 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -76,8 +76,6 @@ LIST_HEAD(hpet_map);
* Domain for untranslated devices - only allocated
* if iommu=pt passed on kernel cmd line.
*/
-static struct protection_domain *pt_domain;
-
static const struct iommu_ops amd_iommu_ops;
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
@@ -96,7 +94,7 @@ struct iommu_dev_data {
struct protection_domain *domain; /* Domain the device is bound to */
u16 devid; /* PCI Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
- bool passthrough; /* Default for device is pt_domain */
+ bool passthrough; /* Device is identity mapped */
struct {
bool enabled;
int qdep;
@@ -116,7 +114,6 @@ struct iommu_cmd {
struct kmem_cache *amd_iommu_irq_cache;
static void update_domain(struct protection_domain *domain);
-static int alloc_passthrough_domain(void);
static int protection_domain_init(struct protection_domain *domain);
/****************************************************************************
@@ -2167,15 +2164,17 @@ static int attach_device(struct device *dev,
dev_data = get_dev_data(dev);
if (domain->flags & PD_IOMMUV2_MASK) {
- if (!dev_data->iommu_v2 || !dev_data->passthrough)
+ if (!dev_data->passthrough)
return -EINVAL;
- if (pdev_iommuv2_enable(pdev) != 0)
- return -EINVAL;
+ if (dev_data->iommu_v2) {
+ if (pdev_iommuv2_enable(pdev) != 0)
+ return -EINVAL;
- dev_data->ats.enabled = true;
- dev_data->ats.qdep = pci_ats_queue_depth(pdev);
- dev_data->pri_tlp = pci_pri_tlp_required(pdev);
+ dev_data->ats.enabled = true;
+ dev_data->ats.qdep = pci_ats_queue_depth(pdev);
+ dev_data->pri_tlp = pci_pri_tlp_required(pdev);
+ }
} else if (amd_iommu_iotlb_sup &&
pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
dev_data->ats.enabled = true;
@@ -2221,15 +2220,6 @@ static void __detach_device(struct iommu_dev_data *dev_data)
do_detach(head);
spin_unlock_irqrestore(&domain->lock, flags);
-
- /*
- * If we run in passthrough mode the device must be assigned to the
- * passthrough domain if it is detached from any other domain.
- * Make sure we can deassign from the pt_domain itself.
- */
- if (dev_data->passthrough &&
- (dev_data->domain == NULL && domain != pt_domain))
- __attach_device(dev_data, pt_domain);
}
/*
@@ -2249,7 +2239,7 @@ static void detach_device(struct device *dev)
__detach_device(dev_data);
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
- if (domain->flags & PD_IOMMUV2_MASK)
+ if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
pdev_iommuv2_disable(to_pci_dev(dev));
else if (dev_data->ats.enabled)
pci_disable_ats(to_pci_dev(dev));
@@ -2287,17 +2277,15 @@ static int amd_iommu_add_device(struct device *dev)
BUG_ON(!dev_data);
- if (dev_data->iommu_v2)
+ if (iommu_pass_through || dev_data->iommu_v2)
iommu_request_dm_for_dev(dev);
/* Domains are initialized for this device - have a look what we ended up with */
domain = iommu_get_domain_for_dev(dev);
- if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+ if (domain->type == IOMMU_DOMAIN_IDENTITY)
dev_data->passthrough = true;
- dev->archdata.dma_ops = &nommu_dma_ops;
- } else {
+ else
dev->archdata.dma_ops = &amd_iommu_dma_ops;
- }
out:
iommu_completion_wait(iommu);
@@ -2862,8 +2850,17 @@ int __init amd_iommu_init_api(void)
int __init amd_iommu_init_dma_ops(void)
{
+ swiotlb = iommu_pass_through ? 1 : 0;
iommu_detected = 1;
- swiotlb = 0;
+
+ /*
+ * In case we don't initialize SWIOTLB (actually the common case
+ * when AMD IOMMU is enabled), make sure there are global
+ * dma_ops set as a fall-back for devices not handled by this
+ * driver (for example non-PCI devices).
+ */
+ if (!swiotlb)
+ dma_ops = &nommu_dma_ops;
amd_iommu_stats_init();
@@ -2947,21 +2944,6 @@ out_err:
return NULL;
}
-static int alloc_passthrough_domain(void)
-{
- if (pt_domain != NULL)
- return 0;
-
- /* allocate passthrough domain */
- pt_domain = protection_domain_alloc();
- if (!pt_domain)
- return -ENOMEM;
-
- pt_domain->mode = PAGE_MODE_NONE;
-
- return 0;
-}
-
static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
struct protection_domain *pdomain;
@@ -3222,33 +3204,6 @@ static const struct iommu_ops amd_iommu_ops = {
*
*****************************************************************************/
-int __init amd_iommu_init_passthrough(void)
-{
- struct iommu_dev_data *dev_data;
- struct pci_dev *dev = NULL;
- int ret;
-
- ret = alloc_passthrough_domain();
- if (ret)
- return ret;
-
- for_each_pci_dev(dev) {
- if (!check_device(&dev->dev))
- continue;
-
- dev_data = get_dev_data(&dev->dev);
- dev_data->passthrough = true;
-
- attach_device(&dev->dev, pt_domain);
- }
-
- amd_iommu_stats_init();
-
- pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
-
- return 0;
-}
-
/* IOMMUv2 specific functions */
int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
{
@@ -3363,7 +3318,12 @@ static int __flush_pasid(struct protection_domain *domain, int pasid,
struct amd_iommu *iommu;
int qdep;
- BUG_ON(!dev_data->ats.enabled);
+ /*
+ * There might be non-IOMMUv2 capable devices in an IOMMUv2
+ * domain.
+ */
+ if (!dev_data->ats.enabled)
+ continue;
qdep = dev_data->ats.qdep;
iommu = amd_iommu_rlookup_table[dev_data->devid];
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index dbda9ae68c5d..a24495eb4e26 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -2026,14 +2026,6 @@ static bool detect_ivrs(void)
return true;
}
-static int amd_iommu_init_dma(void)
-{
- if (iommu_pass_through)
- return amd_iommu_init_passthrough();
- else
- return amd_iommu_init_dma_ops();
-}
-
/****************************************************************************
*
* AMD IOMMU Initialization State Machine
@@ -2073,7 +2065,7 @@ static int __init state_next(void)
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
break;
case IOMMU_INTERRUPTS_EN:
- ret = amd_iommu_init_dma();
+ ret = amd_iommu_init_dma_ops();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
break;
case IOMMU_DMA_OPS:
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 3465faf1809e..f7b875bb70d4 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -132,11 +132,19 @@ static struct device_state *get_device_state(u16 devid)
static void free_device_state(struct device_state *dev_state)
{
+ struct iommu_group *group;
+
/*
* First detach device from domain - No more PRI requests will arrive
* from that device after it is unbound from the IOMMUv2 domain.
*/
- iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);
+ group = iommu_group_get(&dev_state->pdev->dev);
+ if (WARN_ON(!group))
+ return;
+
+ iommu_detach_group(dev_state->domain, group);
+
+ iommu_group_put(group);
/* Everything is down now, free the IOMMUv2 domain */
iommu_domain_free(dev_state->domain);
@@ -731,6 +739,7 @@ EXPORT_SYMBOL(amd_iommu_unbind_pasid);
int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
struct device_state *dev_state;
+ struct iommu_group *group;
unsigned long flags;
int ret, tmp;
u16 devid;
@@ -776,10 +785,16 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
if (ret)
goto out_free_domain;
- ret = iommu_attach_device(dev_state->domain, &pdev->dev);
- if (ret != 0)
+ group = iommu_group_get(&pdev->dev);
+ if (!group)
goto out_free_domain;
+ ret = iommu_attach_group(dev_state->domain, group);
+ if (ret != 0)
+ goto out_drop_group;
+
+ iommu_group_put(group);
+
spin_lock_irqsave(&state_lock, flags);
if (__get_device_state(devid) != NULL) {
@@ -794,6 +809,9 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
return 0;
+out_drop_group:
+ iommu_group_put(group);
+
out_free_domain:
iommu_domain_free(dev_state->domain);
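Aside: the amd_iommu_v2 change above switches from per-device attach/detach to group-based attach, bracketed by a reference on the iommu_group. A hedged sketch of that pattern; the helper name example_attach_domain() is made up for illustration.

    #include <linux/iommu.h>

    static int example_attach_domain(struct iommu_domain *dom, struct device *dev)
    {
            struct iommu_group *group;
            int ret;

            group = iommu_group_get(dev);   /* takes a reference on the group */
            if (!group)
                    return -ENODEV;

            ret = iommu_attach_group(dom, group);

            iommu_group_put(group);         /* drop the reference either way */
            return ret;
    }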
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 8e9ec81ce4bb..da902baaa794 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -199,9 +199,10 @@
* Stream table.
*
* Linear: Enough to cover 1 << IDR1.SIDSIZE entries
- * 2lvl: 8k L1 entries, 256 lazy entries per table (each table covers a PCI bus)
+ * 2lvl: 128k L1 entries,
+ * 256 lazy entries per table (each table covers a PCI bus)
*/
-#define STRTAB_L1_SZ_SHIFT 16
+#define STRTAB_L1_SZ_SHIFT 20
#define STRTAB_SPLIT 8
#define STRTAB_L1_DESC_DWORDS 1
@@ -269,10 +270,10 @@
#define ARM64_TCR_TG0_SHIFT 14
#define ARM64_TCR_TG0_MASK 0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8
-#define ARM64_TCR_IRGN0_SHIFT 24
+#define ARM64_TCR_IRGN0_SHIFT 8
#define ARM64_TCR_IRGN0_MASK 0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10
-#define ARM64_TCR_ORGN0_SHIFT 26
+#define ARM64_TCR_ORGN0_SHIFT 10
#define ARM64_TCR_ORGN0_MASK 0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT 12
#define ARM64_TCR_SH0_SHIFT 12
@@ -542,6 +543,9 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_HYP (1 << 12)
u32 features;
+#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
+ u32 options;
+
struct arm_smmu_cmdq cmdq;
struct arm_smmu_evtq evtq;
struct arm_smmu_priq priq;
@@ -602,11 +606,35 @@ struct arm_smmu_domain {
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);
+struct arm_smmu_option_prop {
+ u32 opt;
+ const char *prop;
+};
+
+static struct arm_smmu_option_prop arm_smmu_options[] = {
+ { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
+ { 0, NULL},
+};
+
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
return container_of(dom, struct arm_smmu_domain, domain);
}
+static void parse_driver_options(struct arm_smmu_device *smmu)
+{
+ int i = 0;
+
+ do {
+ if (of_property_read_bool(smmu->dev->of_node,
+ arm_smmu_options[i].prop)) {
+ smmu->options |= arm_smmu_options[i].opt;
+ dev_notice(smmu->dev, "option %s\n",
+ arm_smmu_options[i].prop);
+ }
+ } while (arm_smmu_options[++i].opt);
+}
+
/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
@@ -1036,7 +1064,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
arm_smmu_sync_ste_for_sid(smmu, sid);
/* It's likely that we'll want to use the new STE soon */
- arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+ if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
+ arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
@@ -1064,7 +1093,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return 0;
size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
- strtab = &cfg->strtab[sid >> STRTAB_SPLIT << STRTAB_L1_DESC_DWORDS];
+ strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];
desc->span = STRTAB_SPLIT + 1;
desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
@@ -2020,21 +2049,23 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
void *strtab;
u64 reg;
- u32 size;
+ u32 size, l1size;
int ret;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
/* Calculate the L1 size, capped to the SIDSIZE */
size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
size = min(size, smmu->sid_bits - STRTAB_SPLIT);
- if (size + STRTAB_SPLIT < smmu->sid_bits)
+ cfg->num_l1_ents = 1 << size;
+
+ size += STRTAB_SPLIT;
+ if (size < smmu->sid_bits)
dev_warn(smmu->dev,
"2-level strtab only covers %u/%u bits of SID\n",
- size + STRTAB_SPLIT, smmu->sid_bits);
+ size, smmu->sid_bits);
- cfg->num_l1_ents = 1 << size;
- size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
- strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma,
+ l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
+ strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
GFP_KERNEL);
if (!strtab) {
dev_err(smmu->dev,
@@ -2055,8 +2086,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
ret = arm_smmu_init_l1_strtab(smmu);
if (ret)
dma_free_coherent(smmu->dev,
- cfg->num_l1_ents *
- (STRTAB_L1_DESC_DWORDS << 3),
+ l1size,
strtab,
cfg->strtab_dma);
return ret;
@@ -2573,6 +2603,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
if (irq > 0)
smmu->gerr_irq = irq;
+ parse_driver_options(smmu);
+
/* Probe the h/w */
ret = arm_smmu_device_probe(smmu);
if (ret)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a98a7b27aca1..0649b94f5958 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1830,8 +1830,9 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
static void domain_exit(struct dmar_domain *domain)
{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
struct page *freelist = NULL;
- int i;
/* Domain 0 is reserved, so dont process it */
if (!domain)
@@ -1851,8 +1852,10 @@ static void domain_exit(struct dmar_domain *domain)
/* clear attached or cached domains */
rcu_read_lock();
- for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
- iommu_detach_domain(domain, g_iommus[i]);
+ for_each_active_iommu(iommu, drhd)
+ if (domain_type_is_vm(domain) ||
+ test_bit(iommu->seq_id, domain->iommu_bmp))
+ iommu_detach_domain(domain, iommu);
rcu_read_unlock();
dma_free_pagelist(freelist);
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 692fe2bc8197..c12bb93334ff 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -68,7 +68,9 @@ static struct irq_chip crossbar_chip = {
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_retrigger = irq_chip_retrigger_hierarchy,
- .irq_set_wake = irq_chip_set_wake_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SKIP_SET_WAKE,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
#endif
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 1b7e155869f6..c00e2db351ba 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -75,6 +75,13 @@ struct its_node {
#define ITS_ITT_ALIGN SZ_256
+struct event_lpi_map {
+ unsigned long *lpi_map;
+ u16 *col_map;
+ irq_hw_number_t lpi_base;
+ int nr_lpis;
+};
+
/*
* The ITS view of a device - belongs to an ITS, a collection, owns an
* interrupt translation table, and a list of interrupts.
@@ -82,11 +89,8 @@ struct its_node {
struct its_device {
struct list_head entry;
struct its_node *its;
- struct its_collection *collection;
+ struct event_lpi_map event_map;
void *itt;
- unsigned long *lpi_map;
- irq_hw_number_t lpi_base;
- int nr_lpis;
u32 nr_ites;
u32 device_id;
};
@@ -99,6 +103,14 @@ static struct rdists *gic_rdists;
#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
+static struct its_collection *dev_event_to_col(struct its_device *its_dev,
+ u32 event)
+{
+ struct its_node *its = its_dev->its;
+
+ return its->collections + its_dev->event_map.col_map[event];
+}
+
/*
* ITS command descriptors - parameters to be encoded in a command
* block.
@@ -134,7 +146,7 @@ struct its_cmd_desc {
struct {
struct its_device *dev;
struct its_collection *col;
- u32 id;
+ u32 event_id;
} its_movi_cmd;
struct {
@@ -241,7 +253,7 @@ static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
its_fixup_cmd(cmd);
- return desc->its_mapd_cmd.dev->collection;
+ return NULL;
}
static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
@@ -260,52 +272,72 @@ static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
+ struct its_collection *col;
+
+ col = dev_event_to_col(desc->its_mapvi_cmd.dev,
+ desc->its_mapvi_cmd.event_id);
+
its_encode_cmd(cmd, GITS_CMD_MAPVI);
its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id);
its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id);
its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id);
- its_encode_collection(cmd, desc->its_mapvi_cmd.dev->collection->col_id);
+ its_encode_collection(cmd, col->col_id);
its_fixup_cmd(cmd);
- return desc->its_mapvi_cmd.dev->collection;
+ return col;
}
static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
+ struct its_collection *col;
+
+ col = dev_event_to_col(desc->its_movi_cmd.dev,
+ desc->its_movi_cmd.event_id);
+
its_encode_cmd(cmd, GITS_CMD_MOVI);
its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
- its_encode_event_id(cmd, desc->its_movi_cmd.id);
+ its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
its_fixup_cmd(cmd);
- return desc->its_movi_cmd.dev->collection;
+ return col;
}
static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
+ struct its_collection *col;
+
+ col = dev_event_to_col(desc->its_discard_cmd.dev,
+ desc->its_discard_cmd.event_id);
+
its_encode_cmd(cmd, GITS_CMD_DISCARD);
its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
its_fixup_cmd(cmd);
- return desc->its_discard_cmd.dev->collection;
+ return col;
}
static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
+ struct its_collection *col;
+
+ col = dev_event_to_col(desc->its_inv_cmd.dev,
+ desc->its_inv_cmd.event_id);
+
its_encode_cmd(cmd, GITS_CMD_INV);
its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
its_fixup_cmd(cmd);
- return desc->its_inv_cmd.dev->collection;
+ return col;
}
static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
@@ -497,7 +529,7 @@ static void its_send_movi(struct its_device *dev,
desc.its_movi_cmd.dev = dev;
desc.its_movi_cmd.col = col;
- desc.its_movi_cmd.id = id;
+ desc.its_movi_cmd.event_id = id;
its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}
@@ -528,7 +560,7 @@ static void its_send_invall(struct its_node *its, struct its_collection *col)
static inline u32 its_get_event_id(struct irq_data *d)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- return d->hwirq - its_dev->lpi_base;
+ return d->hwirq - its_dev->event_map.lpi_base;
}
static void lpi_set_config(struct irq_data *d, bool enable)
@@ -583,7 +615,7 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
target_col = &its_dev->its->collections[cpu];
its_send_movi(its_dev, target_col, id);
- its_dev->collection = target_col;
+ its_dev->event_map.col_map[id] = cpu;
return IRQ_SET_MASK_OK_DONE;
}
@@ -713,8 +745,10 @@ out:
return bitmap;
}
-static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids)
+static void its_lpi_free(struct event_lpi_map *map)
{
+ int base = map->lpi_base;
+ int nr_ids = map->nr_lpis;
int lpi;
spin_lock(&lpi_lock);
@@ -731,7 +765,8 @@ static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids)
spin_unlock(&lpi_lock);
- kfree(bitmap);
+ kfree(map->lpi_map);
+ kfree(map->col_map);
}
/*
@@ -1099,11 +1134,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
struct its_device *dev;
unsigned long *lpi_map;
unsigned long flags;
+ u16 *col_map = NULL;
void *itt;
int lpi_base;
int nr_lpis;
int nr_ites;
- int cpu;
int sz;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1117,20 +1152,24 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kzalloc(sz, GFP_KERNEL);
lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
+ if (lpi_map)
+ col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);
- if (!dev || !itt || !lpi_map) {
+ if (!dev || !itt || !lpi_map || !col_map) {
kfree(dev);
kfree(itt);
kfree(lpi_map);
+ kfree(col_map);
return NULL;
}
dev->its = its;
dev->itt = itt;
dev->nr_ites = nr_ites;
- dev->lpi_map = lpi_map;
- dev->lpi_base = lpi_base;
- dev->nr_lpis = nr_lpis;
+ dev->event_map.lpi_map = lpi_map;
+ dev->event_map.col_map = col_map;
+ dev->event_map.lpi_base = lpi_base;
+ dev->event_map.nr_lpis = nr_lpis;
dev->device_id = dev_id;
INIT_LIST_HEAD(&dev->entry);
@@ -1138,10 +1177,6 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
list_add(&dev->entry, &its->its_device_list);
raw_spin_unlock_irqrestore(&its->lock, flags);
- /* Bind the device to the first possible CPU */
- cpu = cpumask_first(cpu_online_mask);
- dev->collection = &its->collections[cpu];
-
/* Map device to its ITT */
its_send_mapd(dev, 1);
@@ -1163,12 +1198,13 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
{
int idx;
- idx = find_first_zero_bit(dev->lpi_map, dev->nr_lpis);
- if (idx == dev->nr_lpis)
+ idx = find_first_zero_bit(dev->event_map.lpi_map,
+ dev->event_map.nr_lpis);
+ if (idx == dev->event_map.nr_lpis)
return -ENOSPC;
- *hwirq = dev->lpi_base + idx;
- set_bit(idx, dev->lpi_map);
+ *hwirq = dev->event_map.lpi_base + idx;
+ set_bit(idx, dev->event_map.lpi_map);
return 0;
}
@@ -1288,7 +1324,8 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
irq_domain_set_hwirq_and_chip(domain, virq + i,
hwirq, &its_irq_chip, its_dev);
dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n",
- (int)(hwirq - its_dev->lpi_base), (int)hwirq, virq + i);
+ (int)(hwirq - its_dev->event_map.lpi_base),
+ (int)hwirq, virq + i);
}
return 0;
@@ -1300,6 +1337,9 @@ static void its_irq_domain_activate(struct irq_domain *domain,
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
+ /* Bind the LPI to the first possible CPU */
+ its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
+
/* Map the GIC IRQ and event to the device */
its_send_mapvi(its_dev, d->hwirq, event);
}
@@ -1327,17 +1367,16 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
u32 event = its_get_event_id(data);
/* Mark interrupt index as unused */
- clear_bit(event, its_dev->lpi_map);
+ clear_bit(event, its_dev->event_map.lpi_map);
/* Nuke the entry in the domain */
irq_domain_reset_irq_data(data);
}
/* If all interrupts have been freed, start mopping the floor */
- if (bitmap_empty(its_dev->lpi_map, its_dev->nr_lpis)) {
- its_lpi_free(its_dev->lpi_map,
- its_dev->lpi_base,
- its_dev->nr_lpis);
+ if (bitmap_empty(its_dev->event_map.lpi_map,
+ its_dev->event_map.nr_lpis)) {
+ its_lpi_free(&its_dev->event_map);
/* Unmap device/itt */
its_send_mapd(its_dev, 0);
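Aside: the ITS rework above replaces the single per-device collection pointer with a per-event map, so each LPI can target its own collection. A stripped-down sketch of that data structure (all names hypothetical, mirroring dev_event_to_col() in the hunk):

    #include <linux/types.h>

    struct example_event_map {
            unsigned long   *lpi_map;       /* which events are allocated */
            u16             *col_map;       /* event index -> collection id */
            int             nr_lpis;
    };

    static u16 example_event_to_col(struct example_event_map *map, u32 event)
    {
            return map->col_map[event];
    }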
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index b7d54d428b5e..ff4be0515a0d 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -538,7 +538,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index a45121546caf..acb721b31bcf 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -2,7 +2,7 @@
* SPEAr platform shared irq layer source file
*
* Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* Copyright (C) 2012 ST Microelectronics
* Shiraz Hashim <shiraz.linux.kernel@gmail.com>
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 8c91fd5eb6fd..375be509e95f 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty)
cs->hw.ser->tty = tty;
atomic_set(&cs->hw.ser->refcnt, 1);
init_completion(&cs->hw.ser->dead_cmp);
-
tty->disc_data = cs;
+ /* Set the amount of data we're willing to receive per call
+ * from the hardware driver to half of the input buffer size
+ * to leave some reserve.
+ * Note: We don't do flow control towards the hardware driver.
+ * If more data is received than will fit into the input buffer,
+ * it will be dropped and an error will be logged. This should
+ * never happen as the device is slow and the buffer size ample.
+ */
+ tty->receive_room = RBUFSIZE/2;
+
/* OK.. Initialization of the datastructures and the HW is done.. Now
* startup system and notify the LL that we are ready to run
*/
@@ -598,28 +607,6 @@ static int gigaset_tty_hangup(struct tty_struct *tty)
}
/*
- * Read on the tty.
- * Unused, received data goes only to the Gigaset driver.
- */
-static ssize_t
-gigaset_tty_read(struct tty_struct *tty, struct file *file,
- unsigned char __user *buf, size_t count)
-{
- return -EAGAIN;
-}
-
-/*
- * Write on the tty.
- * Unused, transmit data comes only from the Gigaset driver.
- */
-static ssize_t
-gigaset_tty_write(struct tty_struct *tty, struct file *file,
- const unsigned char *buf, size_t count)
-{
- return -EAGAIN;
-}
-
-/*
* Ioctl on the tty.
* Called in process context only.
* May be re-entered by multiple ioctl calling threads.
@@ -752,8 +739,6 @@ static struct tty_ldisc_ops gigaset_ldisc = {
.open = gigaset_tty_open,
.close = gigaset_tty_close,
.hangup = gigaset_tty_hangup,
- .read = gigaset_tty_read,
- .write = gigaset_tty_write,
.ioctl = gigaset_tty_ioctl,
.receive_buf = gigaset_tty_receive,
.write_wakeup = gigaset_tty_wakeup,
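Aside: the ser-gigaset change relies on the line-discipline contract that tty->receive_room bounds how much data the driver may pass to receive_buf() per call. A minimal sketch of an open() hook that uses it; EXAMPLE_RBUFSIZE and the function name are hypothetical.

    #include <linux/tty.h>

    #define EXAMPLE_RBUFSIZE 4096

    static int example_ldisc_open(struct tty_struct *tty)
    {
            /* Accept at most half the input buffer per receive_buf() call,
             * leaving some reserve; no further flow control is done. */
            tty->receive_room = EXAMPLE_RBUFSIZE / 2;
            return 0;
    }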
diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c
index 1a57e88a38f7..cd35079c8c98 100644
--- a/drivers/macintosh/ans-lcd.c
+++ b/drivers/macintosh/ans-lcd.c
@@ -7,7 +7,7 @@
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/fcntl.h>
-#include <linux/init.h>
+#include <linux/module.h>
#include <linux/delay.h>
#include <linux/fs.h>
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index b59727309072..bfec3bdfe598 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -259,7 +259,7 @@ config DM_CRYPT
the ciphers you're going to use in the cryptoapi configuration.
For further information on dm-crypt and userspace tools see:
- <http://code.google.com/p/cryptsetup/wiki/DMCrypt>
+ <https://gitlab.com/cryptsetup/cryptsetup/wikis/DMCrypt>
To compile this code as a module, choose M here: the module will
be called dm-crypt.
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index a08e3eeac3c5..79a6d63e8ed3 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -320,7 +320,6 @@ static inline void closure_wake_up(struct closure_waitlist *list)
do { \
set_closure_fn(_cl, _fn, _wq); \
closure_sub(_cl, CLOSURE_RUNNING + 1); \
- return; \
} while (0)
/**
@@ -349,7 +348,6 @@ do { \
do { \
set_closure_fn(_cl, _fn, _wq); \
closure_queue(_cl); \
- return; \
} while (0)
/**
@@ -365,7 +363,6 @@ do { \
do { \
set_closure_fn(_cl, _destructor, NULL); \
closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \
- return; \
} while (0)
/**
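Aside: with the implicit return dropped from these macros, every caller must now return explicitly after continue_at()/closure_return(), which is exactly what the bcache hunks below add. A hedged sketch of the resulting calling convention (assumes bcache's closure.h; the condition and function name are placeholders):

    static void example_closure_fn(struct closure *cl)
    {
            bool more_work = false;         /* placeholder condition */

            if (more_work) {
                    continue_at(cl, example_closure_fn, system_wq);
                    return;                 /* explicit return is now required */
            }

            closure_return(cl);
    }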
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index cb64e64a4789..bf6a9ca18403 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -105,6 +105,7 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
} while (n != bio);
continue_at(&s->cl, bch_bio_submit_split_done, NULL);
+ return;
submit:
generic_make_request(bio);
}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ce64fc851251..418607a6ba33 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -592,12 +592,14 @@ static void journal_write_unlocked(struct closure *cl)
if (!w->need_write) {
closure_return_with_destructor(cl, journal_write_unlock);
+ return;
} else if (journal_full(&c->journal)) {
journal_reclaim(c);
spin_unlock(&c->journal.lock);
btree_flush_write(c);
continue_at(cl, journal_write, system_wq);
+ return;
}
c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 4afb2d26b148..f292790997d7 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -88,8 +88,10 @@ static void bch_data_insert_keys(struct closure *cl)
if (journal_ref)
atomic_dec_bug(journal_ref);
- if (!op->insert_data_done)
+ if (!op->insert_data_done) {
continue_at(cl, bch_data_insert_start, op->wq);
+ return;
+ }
bch_keylist_free(&op->insert_keys);
closure_return(cl);
@@ -216,8 +218,10 @@ static void bch_data_insert_start(struct closure *cl)
/* 1 for the device pointer and 1 for the chksum */
if (bch_keylist_realloc(&op->insert_keys,
3 + (op->csum ? 1 : 0),
- op->c))
+ op->c)) {
continue_at(cl, bch_data_insert_keys, op->wq);
+ return;
+ }
k = op->insert_keys.top;
bkey_init(k);
@@ -255,6 +259,7 @@ static void bch_data_insert_start(struct closure *cl)
op->insert_data_done = true;
continue_at(cl, bch_data_insert_keys, op->wq);
+ return;
err:
/* bch_alloc_sectors() blocks if s->writeback = true */
BUG_ON(op->writeback);
@@ -576,8 +581,10 @@ static void cache_lookup(struct closure *cl)
ret = bch_btree_map_keys(&s->op, s->iop.c,
&KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
cache_lookup_fn, MAP_END_KEY);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN) {
continue_at(cl, cache_lookup, bcache_wq);
+ return;
+ }
closure_return(cl);
}
@@ -1085,6 +1092,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
continue_at_nobarrier(&s->cl,
flash_dev_nodata,
bcache_wq);
+ return;
} else if (rw) {
bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
&KEY(d->id, bio->bi_iter.bi_sector, 0),
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index ed2346ddf4c9..e51de52eeb94 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -494,7 +494,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
bitmap_super_t *sb;
unsigned long chunksize, daemon_sleep, write_behind;
- bitmap->storage.sb_page = alloc_page(GFP_KERNEL);
+ bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (bitmap->storage.sb_page == NULL)
return -ENOMEM;
bitmap->storage.sb_page->index = 0;
@@ -541,6 +541,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
sb->state = cpu_to_le32(bitmap->flags);
bitmap->events_cleared = bitmap->mddev->events;
sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
+ bitmap->mddev->bitmap_info.nodes = 0;
kunmap_atomic(sb);
@@ -558,6 +559,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
unsigned long sectors_reserved = 0;
int err = -EINVAL;
struct page *sb_page;
+ loff_t offset = bitmap->mddev->bitmap_info.offset;
if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
chunksize = 128 * 1024 * 1024;
@@ -584,9 +586,9 @@ re_read:
bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
/* to 4k blocks */
bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
- bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3);
+ offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
- bitmap->cluster_slot, (unsigned long long)bitmap->mddev->bitmap_info.offset);
+ bitmap->cluster_slot, offset);
}
if (bitmap->storage.file) {
@@ -597,7 +599,7 @@ re_read:
bitmap, bytes, sb_page);
} else {
err = read_sb_page(bitmap->mddev,
- bitmap->mddev->bitmap_info.offset,
+ offset,
sb_page,
0, sizeof(bitmap_super_t));
}
@@ -611,8 +613,16 @@ re_read:
daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
write_behind = le32_to_cpu(sb->write_behind);
sectors_reserved = le32_to_cpu(sb->sectors_reserved);
- nodes = le32_to_cpu(sb->nodes);
- strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
+ /* XXX: This is a hack to ensure that we don't use clustering
+ * in case:
+ * - dm-raid is in use and
+ * - the nodes written in bitmap_sb are erroneous.
+ */
+ if (!bitmap->mddev->sync_super) {
+ nodes = le32_to_cpu(sb->nodes);
+ strlcpy(bitmap->mddev->bitmap_info.cluster_name,
+ sb->cluster_name, 64);
+ }
/* verify that the bitmap-specific fields are valid */
if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
@@ -671,7 +681,7 @@ out:
kunmap_atomic(sb);
/* Assigning chunksize is required for "re_read" */
bitmap->mddev->bitmap_info.chunksize = chunksize;
- if (nodes && (bitmap->cluster_slot < 0)) {
+ if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
err = md_setup_cluster(bitmap->mddev, nodes);
if (err) {
pr_err("%s: Could not setup cluster service (%d)\n",
@@ -1866,10 +1876,6 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
if (IS_ERR(bitmap))
return PTR_ERR(bitmap);
- rv = bitmap_read_sb(bitmap);
- if (rv)
- goto err;
-
rv = bitmap_init_from_disk(bitmap, 0);
if (rv)
goto err;
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 32814371b8d3..aa1b41ca40f7 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -1471,5 +1471,3 @@ module_exit(mq_exit);
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mq cache policy");
-
-MODULE_ALIAS("dm-cache-default");
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index b6f22651dd35..200366c62231 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1686,7 +1686,7 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
if (from_cblock(cache_size)) {
mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
- if (!mq->cache_hit_bits && mq->cache_hit_bits) {
+ if (!mq->cache_hit_bits) {
DMERR("couldn't allocate cache hit bitset");
goto bad_cache_hit_bits;
}
@@ -1789,3 +1789,5 @@ module_exit(smq_exit);
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("smq cache policy");
+
+MODULE_ALIAS("dm-cache-default");
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1b4e1756b169..1fe93cfea7d3 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1947,6 +1947,7 @@ static int commit_if_needed(struct cache *cache)
static void process_deferred_bios(struct cache *cache)
{
+ bool prealloc_used = false;
unsigned long flags;
struct bio_list bios;
struct bio *bio;
@@ -1966,6 +1967,7 @@ static void process_deferred_bios(struct cache *cache)
* this bio might require one, we pause until there are some
* prepared mappings to process.
*/
+ prealloc_used = true;
if (prealloc_data_structs(cache, &structs)) {
spin_lock_irqsave(&cache->lock, flags);
bio_list_merge(&cache->deferred_bios, &bios);
@@ -1983,11 +1985,13 @@ static void process_deferred_bios(struct cache *cache)
process_bio(cache, &structs, bio);
}
- prealloc_free_structs(cache, &structs);
+ if (prealloc_used)
+ prealloc_free_structs(cache, &structs);
}
static void process_deferred_cells(struct cache *cache)
{
+ bool prealloc_used = false;
unsigned long flags;
struct dm_bio_prison_cell *cell, *tmp;
struct list_head cells;
@@ -2007,6 +2011,7 @@ static void process_deferred_cells(struct cache *cache)
* this bio might require one, we pause until there are some
* prepared mappings to process.
*/
+ prealloc_used = true;
if (prealloc_data_structs(cache, &structs)) {
spin_lock_irqsave(&cache->lock, flags);
list_splice(&cells, &cache->deferred_cells);
@@ -2017,7 +2022,8 @@ static void process_deferred_cells(struct cache *cache)
process_cell(cache, &structs, cell);
}
- prealloc_free_structs(cache, &structs);
+ if (prealloc_used)
+ prealloc_free_structs(cache, &structs);
}
static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
@@ -2062,7 +2068,7 @@ static void process_deferred_writethrough_bios(struct cache *cache)
static void writeback_some_dirty_blocks(struct cache *cache)
{
- int r = 0;
+ bool prealloc_used = false;
dm_oblock_t oblock;
dm_cblock_t cblock;
struct prealloc structs;
@@ -2072,15 +2078,12 @@ static void writeback_some_dirty_blocks(struct cache *cache)
memset(&structs, 0, sizeof(structs));
while (spare_migration_bandwidth(cache)) {
- if (prealloc_data_structs(cache, &structs))
- break;
-
- r = policy_writeback_work(cache->policy, &oblock, &cblock, busy);
- if (r)
- break;
+ if (policy_writeback_work(cache->policy, &oblock, &cblock, busy))
+ break; /* no work to do */
- r = get_cell(cache, oblock, &structs, &old_ocell);
- if (r) {
+ prealloc_used = true;
+ if (prealloc_data_structs(cache, &structs) ||
+ get_cell(cache, oblock, &structs, &old_ocell)) {
policy_set_dirty(cache->policy, oblock);
break;
}
@@ -2088,7 +2091,8 @@ static void writeback_some_dirty_blocks(struct cache *cache)
writeback(cache, &structs, oblock, cblock, old_ocell);
}
- prealloc_free_structs(cache, &structs);
+ if (prealloc_used)
+ prealloc_free_structs(cache, &structs);
}
/*----------------------------------------------------------------
@@ -3496,7 +3500,7 @@ static void cache_resume(struct dm_target *ti)
* <#demotions> <#promotions> <#dirty>
* <#features> <features>*
* <#core args> <core args>
- * <policy name> <#policy args> <policy args>* <cache metadata mode>
+ * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
*/
static void cache_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
@@ -3582,6 +3586,11 @@ static void cache_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("rw ");
+ if (dm_cache_metadata_needs_check(cache->cmd))
+ DMEMIT("needs_check ");
+ else
+ DMEMIT("- ");
+
break;
case STATUSTYPE_TABLE:
@@ -3820,7 +3829,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type cache_target = {
.name = "cache",
- .version = {1, 7, 0},
+ .version = {1, 8, 0},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 48dfe3c4d6aa..6ba47cfb1443 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1293,8 +1293,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
return r;
disk_super = dm_block_data(copy);
- dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
- dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
+ dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
+ dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
dm_sm_dec_block(pmd->metadata_sm, held_root);
return dm_tm_unlock(pmd->tm, copy);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index c33f61a4cc28..d2bbe8cc1e97 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
@@ -268,7 +269,7 @@ struct pool {
process_mapping_fn process_prepared_mapping;
process_mapping_fn process_prepared_discard;
- struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE];
+ struct dm_bio_prison_cell **cell_sort_array;
};
static enum pool_mode get_pool_mode(struct pool *pool);
@@ -665,16 +666,21 @@ static void requeue_io(struct thin_c *tc)
requeue_deferred_cells(tc);
}
-static void error_retry_list(struct pool *pool)
+static void error_retry_list_with_code(struct pool *pool, int error)
{
struct thin_c *tc;
rcu_read_lock();
list_for_each_entry_rcu(tc, &pool->active_thins, list)
- error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO);
+ error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
rcu_read_unlock();
}
+static void error_retry_list(struct pool *pool)
+{
+ return error_retry_list_with_code(pool, -EIO);
+}
+
/*
* This section of code contains the logic for processing a thin device's IO.
* Much of the code depends on pool object resources (lists, workqueues, etc)
@@ -2281,18 +2287,23 @@ static void do_waker(struct work_struct *ws)
queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}
+static void notify_of_pool_mode_change_to_oods(struct pool *pool);
+
/*
* We're holding onto IO to allow userland time to react. After the
* timeout either the pool will have been resized (and thus back in
- * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
+ * PM_WRITE mode), or we degrade to PM_OUT_OF_DATA_SPACE w/ error_if_no_space.
*/
static void do_no_space_timeout(struct work_struct *ws)
{
struct pool *pool = container_of(to_delayed_work(ws), struct pool,
no_space_timeout);
- if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
- set_pool_mode(pool, PM_READ_ONLY);
+ if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
+ pool->pf.error_if_no_space = true;
+ notify_of_pool_mode_change_to_oods(pool);
+ error_retry_list_with_code(pool, -ENOSPC);
+ }
}
/*----------------------------------------------------------------*/
@@ -2370,6 +2381,14 @@ static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
dm_device_name(pool->pool_md), new_mode);
}
+static void notify_of_pool_mode_change_to_oods(struct pool *pool)
+{
+ if (!pool->pf.error_if_no_space)
+ notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
+ else
+ notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
+}
+
static bool passdown_enabled(struct pool_c *pt)
{
return pt->adjusted_pf.discard_passdown;
@@ -2454,7 +2473,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
* frequently seeing this mode.
*/
if (old_mode != new_mode)
- notify_of_pool_mode_change(pool, "out-of-data-space");
+ notify_of_pool_mode_change_to_oods(pool);
pool->process_bio = process_bio_read_only;
pool->process_discard = process_discard_bio;
pool->process_cell = process_cell_read_only;
@@ -2777,6 +2796,7 @@ static void __pool_destroy(struct pool *pool)
{
__pool_table_remove(pool);
+ vfree(pool->cell_sort_array);
if (dm_pool_metadata_close(pool->pmd) < 0)
DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
@@ -2889,6 +2909,13 @@ static struct pool *pool_create(struct mapped_device *pool_md,
goto bad_mapping_pool;
}
+ pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
+ if (!pool->cell_sort_array) {
+ *error = "Error allocating cell sort array";
+ err_p = ERR_PTR(-ENOMEM);
+ goto bad_sort_array;
+ }
+
pool->ref_count = 1;
pool->last_commit_jiffies = jiffies;
pool->pool_md = pool_md;
@@ -2897,6 +2924,8 @@ static struct pool *pool_create(struct mapped_device *pool_md,
return pool;
+bad_sort_array:
+ mempool_destroy(pool->mapping_pool);
bad_mapping_pool:
dm_deferred_set_destroy(pool->all_io_ds);
bad_all_io_ds:
@@ -3714,6 +3743,7 @@ static void emit_flags(struct pool_features *pf, char *result,
* Status line is:
* <transaction id> <used metadata sectors>/<total metadata sectors>
* <used data sectors>/<total data sectors> <held metadata root>
+ * <pool mode> <discard config> <no space config> <needs_check>
*/
static void pool_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
@@ -3815,6 +3845,11 @@ static void pool_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("queue_if_no_space ");
+ if (dm_pool_metadata_needs_check(pool->pmd))
+ DMEMIT("needs_check ");
+ else
+ DMEMIT("- ");
+
break;
case STATUSTYPE_TABLE:
@@ -3918,7 +3953,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 15, 0},
+ .version = {1, 16, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -4305,7 +4340,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 15, 0},
+ .version = {1, 16, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
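Aside: the pool change above moves the large cell_sort_array out of struct pool into a vmalloc() allocation, freed again in __pool_destroy(). A minimal sketch of that pattern; the names and size are hypothetical.

    #include <linux/vmalloc.h>

    #define EXAMPLE_NR_CELLS 8192

    static void **example_alloc_cell_array(void)
    {
            /* Too large to embed in the parent structure; virtually
             * contiguous memory is fine here, so use vmalloc(). */
            return vmalloc(sizeof(void *) * EXAMPLE_NR_CELLS);
    }

    static void example_free_cell_array(void **array)
    {
            vfree(array);   /* vfree(NULL) is a no-op */
    }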
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f331d888e7f5..0d7ab20c58df 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1067,13 +1067,10 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
*/
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
- int nr_requests_pending;
-
atomic_dec(&md->pending[rw]);
/* nudge anyone waiting on suspend queue */
- nr_requests_pending = md_in_flight(md);
- if (!nr_requests_pending)
+ if (!md_in_flight(md))
wake_up(&md->wait);
/*
@@ -1085,8 +1082,7 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
if (run_queue) {
if (md->queue->mq_ops)
blk_mq_run_hw_queues(md->queue, true);
- else if (!nr_requests_pending ||
- (nr_requests_pending >= md->queue->nr_congestion_on))
+ else
blk_run_queue_async(md->queue);
}
@@ -1733,7 +1729,8 @@ static int dm_merge_bvec(struct request_queue *q,
struct mapped_device *md = q->queuedata;
struct dm_table *map = dm_get_live_table_fast(md);
struct dm_target *ti;
- sector_t max_sectors, max_size = 0;
+ sector_t max_sectors;
+ int max_size = 0;
if (unlikely(!map))
goto out;
@@ -1746,18 +1743,10 @@ static int dm_merge_bvec(struct request_queue *q,
* Find maximum amount of I/O that won't need splitting
*/
max_sectors = min(max_io_len(bvm->bi_sector, ti),
- (sector_t) queue_max_sectors(q));
+ (sector_t) BIO_MAX_SECTORS);
max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
-
- /*
- * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
- * to the targets' merge function since it holds sectors not bytes).
- * Just doing this as an interim fix for stable@ because the more
- * comprehensive cleanup of switching to sector_t will impact every
- * DM target that implements a ->merge hook.
- */
- if (max_size > INT_MAX)
- max_size = INT_MAX;
+ if (max_size < 0)
+ max_size = 0;
/*
* merge_bvec_fn() returns number of bytes
@@ -1765,13 +1754,13 @@ static int dm_merge_bvec(struct request_queue *q,
* max is precomputed maximal io size
*/
if (max_size && ti->type->merge)
- max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
+ max_size = ti->type->merge(ti, bvm, biovec, max_size);
/*
* If the target doesn't support merge method and some of the devices
- * provided their merge_bvec method (we know this by looking for the
- * max_hw_sectors that dm_set_device_limits may set), then we can't
- * allow bios with multiple vector entries. So always set max_size
- * to 0, and the code below allows just one page.
+ * provided their merge_bvec method (we know this by looking at
+ * queue_max_hw_sectors), then we can't allow bios with multiple vector
+ * entries. So always set max_size to 0, and the code below allows
+ * just one page.
*/
else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
max_size = 0;
@@ -2281,8 +2270,6 @@ static void dm_init_old_md_queue(struct mapped_device *md)
static void cleanup_mapped_device(struct mapped_device *md)
{
- cleanup_srcu_struct(&md->io_barrier);
-
if (md->wq)
destroy_workqueue(md->wq);
if (md->kworker_task)
@@ -2294,6 +2281,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
if (md->bs)
bioset_free(md->bs);
+ cleanup_srcu_struct(&md->io_barrier);
+
if (md->disk) {
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index fcfc4b9b2672..0072190515e0 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -44,6 +44,7 @@ struct resync_info {
/* md_cluster_info flags */
#define MD_CLUSTER_WAITING_FOR_NEWDISK 1
+#define MD_CLUSTER_SUSPEND_READ_BALANCING 2
struct md_cluster_info {
@@ -275,6 +276,9 @@ clear_bit:
static void recover_prep(void *arg)
{
+ struct mddev *mddev = arg;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
}
static void recover_slot(void *arg, struct dlm_slot *slot)
@@ -307,6 +311,7 @@ static void recover_done(void *arg, struct dlm_slot *slots,
cinfo->slot_number = our_slot;
complete(&cinfo->completion);
+ clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
}
static const struct dlm_lockspace_ops md_ls_ops = {
@@ -816,12 +821,17 @@ static void resync_finish(struct mddev *mddev)
resync_send(mddev, RESYNCING, 0, 0);
}
-static int area_resyncing(struct mddev *mddev, sector_t lo, sector_t hi)
+static int area_resyncing(struct mddev *mddev, int direction,
+ sector_t lo, sector_t hi)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
int ret = 0;
struct suspend_info *s;
+ if ((direction == READ) &&
+ test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state))
+ return 1;
+
spin_lock_irq(&cinfo->suspend_lock);
if (list_empty(&cinfo->suspend_list))
goto out;
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h
index 6817ee00e053..00defe2badbc 100644
--- a/drivers/md/md-cluster.h
+++ b/drivers/md/md-cluster.h
@@ -18,7 +18,7 @@ struct md_cluster_operations {
int (*metadata_update_start)(struct mddev *mddev);
int (*metadata_update_finish)(struct mddev *mddev);
int (*metadata_update_cancel)(struct mddev *mddev);
- int (*area_resyncing)(struct mddev *mddev, sector_t lo, sector_t hi);
+ int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi);
int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev);
int (*add_new_disk_finish)(struct mddev *mddev);
int (*new_disk_ack)(struct mddev *mddev, bool ack);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d429c30cd514..e25f00f0138a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5382,6 +5382,8 @@ static void __md_stop(struct mddev *mddev)
{
struct md_personality *pers = mddev->pers;
mddev_detach(mddev);
+ /* Ensure ->event_work is done */
+ flush_workqueue(md_misc_wq);
spin_lock(&mddev->lock);
mddev->ready = 0;
mddev->pers = NULL;
@@ -5757,7 +5759,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
char *ptr;
int err;
- file = kmalloc(sizeof(*file), GFP_NOIO);
+ file = kzalloc(sizeof(*file), GFP_NOIO);
if (!file)
return -ENOMEM;
@@ -7437,7 +7439,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
err = request_module("md-cluster");
if (err) {
pr_err("md-cluster module not found.\n");
- return err;
+ return -ENOENT;
}
spin_lock(&pers_lock);
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index bf2b80d5c470..8731b6ea026b 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key);
extern struct dm_block_validator btree_node_validator;
+/*
+ * Value type for upper levels of multi-level btrees.
+ */
+extern void init_le64_type(struct dm_transaction_manager *tm,
+ struct dm_btree_value_type *vt);
+
#endif /* DM_BTREE_INTERNAL_H */
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index e04cfd2d60ef..4222f774cf36 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -309,8 +309,8 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
if (s < 0 && nr_center < -s) {
/* not enough in central node */
- shift(left, center, nr_center);
- s = nr_center - target;
+ shift(left, center, -nr_center);
+ s += nr_center;
shift(left, right, s);
nr_right += s;
} else
@@ -323,7 +323,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
if (s > 0 && nr_center < s) {
/* not enough in central node */
shift(center, right, nr_center);
- s = target - nr_center;
+ s -= nr_center;
shift(left, right, s);
nr_left -= s;
} else
@@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
return r;
}
-static struct dm_btree_value_type le64_type = {
- .context = NULL,
- .size = sizeof(__le64),
- .inc = NULL,
- .dec = NULL,
- .equal = NULL
-};
-
int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, dm_block_t *new_root)
{
@@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
int index = 0, r = 0;
struct shadow_spine spine;
struct btree_node *n;
+ struct dm_btree_value_type le64_vt;
+ init_le64_type(info->tm, &le64_vt);
init_shadow_spine(&spine, info);
for (level = 0; level < info->levels; level++) {
r = remove_raw(&spine, info,
(level == last_level ?
- &info->value_type : &le64_type),
+ &info->value_type : &le64_vt),
root, keys[level], (unsigned *)&index);
if (r < 0)
break;
@@ -654,11 +648,13 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
int index = 0, r = 0;
struct shadow_spine spine;
struct btree_node *n;
+ struct dm_btree_value_type le64_vt;
uint64_t k;
+ init_le64_type(info->tm, &le64_vt);
init_shadow_spine(&spine, info);
for (level = 0; level < last_level; level++) {
- r = remove_raw(&spine, info, &le64_type,
+ r = remove_raw(&spine, info, &le64_vt,
root, keys[level], (unsigned *) &index);
if (r < 0)
goto out;
@@ -689,6 +685,7 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
value_ptr(n, index));
delete_at(n, index);
+ keys[last_level] = k + 1ull;
} else
r = -ENODATA;
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index 1b5e13ec7f96..0dee514ba4c5 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s)
{
return s->root;
}
+
+static void le64_inc(void *context, const void *value_le)
+{
+ struct dm_transaction_manager *tm = context;
+ __le64 v_le;
+
+ memcpy(&v_le, value_le, sizeof(v_le));
+ dm_tm_inc(tm, le64_to_cpu(v_le));
+}
+
+static void le64_dec(void *context, const void *value_le)
+{
+ struct dm_transaction_manager *tm = context;
+ __le64 v_le;
+
+ memcpy(&v_le, value_le, sizeof(v_le));
+ dm_tm_dec(tm, le64_to_cpu(v_le));
+}
+
+static int le64_equal(void *context, const void *value1_le, const void *value2_le)
+{
+ __le64 v1_le, v2_le;
+
+ memcpy(&v1_le, value1_le, sizeof(v1_le));
+ memcpy(&v2_le, value2_le, sizeof(v2_le));
+ return v1_le == v2_le;
+}
+
+void init_le64_type(struct dm_transaction_manager *tm,
+ struct dm_btree_value_type *vt)
+{
+ vt->context = tm;
+ vt->size = sizeof(__le64);
+ vt->inc = le64_inc;
+ vt->dec = le64_dec;
+ vt->equal = le64_equal;
+}
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 200ac12a1d40..c7726cebc495 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -255,7 +255,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
int r;
struct del_stack *s;
- s = kmalloc(sizeof(*s), GFP_KERNEL);
+ s = kmalloc(sizeof(*s), GFP_NOIO);
if (!s)
return -ENOMEM;
s->info = info;
@@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
struct btree_node *n;
struct dm_btree_value_type le64_type;
- le64_type.context = NULL;
- le64_type.size = sizeof(__le64);
- le64_type.inc = NULL;
- le64_type.dec = NULL;
- le64_type.equal = NULL;
-
+ init_le64_type(info->tm, &le64_type);
init_shadow_spine(&spine, info);
for (level = 0; level < (info->levels - 1); level++) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f80f1af61ce7..967a4ed73929 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -336,7 +336,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
spin_lock_irqsave(&conf->device_lock, flags);
if (r1_bio->mddev->degraded == conf->raid_disks ||
(r1_bio->mddev->degraded == conf->raid_disks-1 &&
- !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
+ test_bit(In_sync, &conf->mirrors[mirror].rdev->flags)))
uptodate = 1;
spin_unlock_irqrestore(&conf->device_lock, flags);
}
@@ -541,7 +541,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
if ((conf->mddev->recovery_cp < this_sector + sectors) ||
(mddev_is_clustered(conf->mddev) &&
- md_cluster_ops->area_resyncing(conf->mddev, this_sector,
+ md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
this_sector + sectors)))
choose_first = 1;
else
@@ -1111,7 +1111,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
((bio_end_sector(bio) > mddev->suspend_lo &&
bio->bi_iter.bi_sector < mddev->suspend_hi) ||
(mddev_is_clustered(mddev) &&
- md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
+ md_cluster_ops->area_resyncing(mddev, WRITE,
+ bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
/* As the suspend_* range is controlled by
* userspace, we want an interruptible
* wait.
@@ -1124,7 +1125,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
if (bio_end_sector(bio) <= mddev->suspend_lo ||
bio->bi_iter.bi_sector >= mddev->suspend_hi ||
(mddev_is_clustered(mddev) &&
- !md_cluster_ops->area_resyncing(mddev,
+ !md_cluster_ops->area_resyncing(mddev, WRITE,
bio->bi_iter.bi_sector, bio_end_sector(bio))))
break;
schedule();
@@ -1475,6 +1476,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
struct r1conf *conf = mddev->private;
+ unsigned long flags;
/*
* If it is not operational, then we have already marked it as dead
@@ -1494,14 +1496,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
return;
}
set_bit(Blocked, &rdev->flags);
+ spin_lock_irqsave(&conf->device_lock, flags);
if (test_and_clear_bit(In_sync, &rdev->flags)) {
- unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
mddev->degraded++;
set_bit(Faulty, &rdev->flags);
- spin_unlock_irqrestore(&conf->device_lock, flags);
} else
set_bit(Faulty, &rdev->flags);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
/*
* if recovery is running, make sure it aborts.
*/
@@ -1567,7 +1568,10 @@ static int raid1_spare_active(struct mddev *mddev)
* Find all failed disks within the RAID1 configuration
* and mark them readable.
* Called under mddev lock, so rcu protection not needed.
+ * device_lock used to avoid races with raid1_end_read_request
+ * which expects 'In_sync' flags and ->degraded to be consistent.
*/
+ spin_lock_irqsave(&conf->device_lock, flags);
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = conf->mirrors[i].rdev;
struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
@@ -1598,7 +1602,6 @@ static int raid1_spare_active(struct mddev *mddev)
sysfs_notify_dirent_safe(rdev->sysfs_state);
}
}
- spin_lock_irqsave(&conf->device_lock, flags);
mddev->degraded -= count;
spin_unlock_irqrestore(&conf->device_lock, flags);
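
The raid1.c hunks widen conf->device_lock in error() (and take it earlier in raid1_spare_active()) so that the per-device In_sync flag and mddev->degraded are always updated together, matching what raid1_end_read_request() now checks under the same lock. Below is a minimal userspace analogue of that "one lock covers both fields" pattern, built with -pthread; the names only stand in for the md ones.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool in_sync = true;    /* stands in for the In_sync rdev flag */
static int degraded;           /* stands in for mddev->degraded */

static void mark_faulty(void)
{
    pthread_mutex_lock(&lock);    /* the lock covers the test *and* the counter update */
    if (in_sync) {
        in_sync = false;
        degraded++;
    }
    pthread_mutex_unlock(&lock);
}

static bool array_still_readable(int raid_disks)
{
    bool ok;

    pthread_mutex_lock(&lock);    /* readers take the same lock, so they see both fields consistently */
    ok = degraded < raid_disks || in_sync;
    pthread_mutex_unlock(&lock);
    return ok;
}

int main(void)
{
    mark_faulty();
    printf("readable: %d\n", array_still_readable(2));
    return 0;
}
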
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 940f2f365461..38c58e19cfce 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3556,6 +3556,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
/* far_copies must be 1 */
conf->prev.stride = conf->dev_sectors;
}
+ conf->reshape_safe = conf->reshape_progress;
spin_lock_init(&conf->device_lock);
INIT_LIST_HEAD(&conf->retry_list);
@@ -3760,7 +3761,6 @@ static int run(struct mddev *mddev)
}
conf->offset_diff = min_offset_diff;
- conf->reshape_safe = conf->reshape_progress;
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -4103,6 +4103,7 @@ static int raid10_start_reshape(struct mddev *mddev)
conf->reshape_progress = size;
} else
conf->reshape_progress = 0;
+ conf->reshape_safe = conf->reshape_progress;
spin_unlock_irq(&conf->device_lock);
if (mddev->delta_disks && mddev->bitmap) {
@@ -4170,6 +4171,7 @@ abort:
rdev->new_data_offset = rdev->data_offset;
smp_wmb();
conf->reshape_progress = MaxSector;
+ conf->reshape_safe = MaxSector;
mddev->reshape_position = MaxSector;
spin_unlock_irq(&conf->device_lock);
return ret;
@@ -4524,6 +4526,7 @@ static void end_reshape(struct r10conf *conf)
md_finish_reshape(conf->mddev);
smp_wmb();
conf->reshape_progress = MaxSector;
+ conf->reshape_safe = MaxSector;
spin_unlock_irq(&conf->device_lock);
/* read-ahead size must cover two whole stripes, which is
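
The raid10 hunks add `conf->reshape_safe = conf->reshape_progress` (or MaxSector) at every place the progress marker is initialised or reset, so the "safe" checkpoint can never be left pointing behind a value that was just rewound. The patch does this with explicit assignments at each site; the sketch below keeps the same invariant through one hypothetical helper, purely as an illustration.

#include <stdio.h>

struct reshape_state {
    unsigned long long progress;    /* how far the reshape has advanced */
    unsigned long long safe;        /* last point known to be on stable storage */
};

static void reset_reshape(struct reshape_state *rs, unsigned long long pos)
{
    rs->progress = pos;
    rs->safe = rs->progress;    /* one helper keeps both fields in step */
}

int main(void)
{
    struct reshape_state rs;

    reset_reshape(&rs, 0);         /* start of a new reshape */
    reset_reshape(&rs, ~0ULL);     /* abort/finish: both fields go to "MaxSector" together */
    printf("%llu %llu\n", rs.progress, rs.safe);
    return 0;
}
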
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 59e44e99eef3..f757023fc458 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2162,6 +2162,9 @@ static int resize_stripes(struct r5conf *conf, int newsize)
if (!sc)
return -ENOMEM;
+ /* Need to ensure auto-resizing doesn't interfere */
+ mutex_lock(&conf->cache_size_mutex);
+
for (i = conf->max_nr_stripes; i; i--) {
nsh = alloc_stripe(sc, GFP_KERNEL);
if (!nsh)
@@ -2178,6 +2181,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
kmem_cache_free(sc, nsh);
}
kmem_cache_destroy(sc);
+ mutex_unlock(&conf->cache_size_mutex);
return -ENOMEM;
}
/* Step 2 - Must use GFP_NOIO now.
@@ -2224,6 +2228,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
} else
err = -ENOMEM;
+ mutex_unlock(&conf->cache_size_mutex);
/* Step 4, return new stripes to service */
while(!list_empty(&newstripes)) {
nsh = list_entry(newstripes.next, struct stripe_head, lru);
@@ -2251,7 +2256,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
static int drop_one_stripe(struct r5conf *conf)
{
struct stripe_head *sh;
- int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
+ int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
spin_lock_irq(conf->hash_locks + hash);
sh = get_free_stripe(conf, hash);
@@ -4061,8 +4066,10 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
&first_bad, &bad_sectors))
set_bit(R5_ReadRepl, &dev->flags);
else {
- if (rdev)
+ if (rdev && !test_bit(Faulty, &rdev->flags))
set_bit(R5_NeedReplace, &dev->flags);
+ else
+ clear_bit(R5_NeedReplace, &dev->flags);
rdev = rcu_dereference(conf->disks[i].rdev);
clear_bit(R5_ReadRepl, &dev->flags);
}
@@ -5857,12 +5864,14 @@ static void raid5d(struct md_thread *thread)
pr_debug("%d stripes handled\n", handled);
spin_unlock_irq(&conf->device_lock);
- if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) {
+ if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
+ mutex_trylock(&conf->cache_size_mutex)) {
grow_one_stripe(conf, __GFP_NOWARN);
/* Set flag even if allocation failed. This helps
* slow down allocation requests when mem is short
*/
set_bit(R5_DID_ALLOC, &conf->cache_state);
+ mutex_unlock(&conf->cache_size_mutex);
}
async_tx_issue_pending_all();
@@ -5894,18 +5903,22 @@ raid5_set_cache_size(struct mddev *mddev, int size)
return -EINVAL;
conf->min_nr_stripes = size;
+ mutex_lock(&conf->cache_size_mutex);
while (size < conf->max_nr_stripes &&
drop_one_stripe(conf))
;
+ mutex_unlock(&conf->cache_size_mutex);
err = md_allow_write(mddev);
if (err)
return err;
+ mutex_lock(&conf->cache_size_mutex);
while (size > conf->max_nr_stripes)
if (!grow_one_stripe(conf, GFP_KERNEL))
break;
+ mutex_unlock(&conf->cache_size_mutex);
return 0;
}
@@ -6371,11 +6384,19 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
- int ret = 0;
- while (ret < sc->nr_to_scan) {
- if (drop_one_stripe(conf) == 0)
- return SHRINK_STOP;
- ret++;
+ unsigned long ret = SHRINK_STOP;
+
+ if (mutex_trylock(&conf->cache_size_mutex)) {
+ ret = 0;
+ while (ret < sc->nr_to_scan &&
+ conf->max_nr_stripes > conf->min_nr_stripes) {
+ if (drop_one_stripe(conf) == 0) {
+ ret = SHRINK_STOP;
+ break;
+ }
+ ret++;
+ }
+ mutex_unlock(&conf->cache_size_mutex);
}
return ret;
}
@@ -6444,6 +6465,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
goto abort;
spin_lock_init(&conf->device_lock);
seqcount_init(&conf->gen_lock);
+ mutex_init(&conf->cache_size_mutex);
init_waitqueue_head(&conf->wait_for_quiescent);
for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
init_waitqueue_head(&conf->wait_for_stripe[i]);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 02c3bf8fbfe7..d05144278690 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -482,7 +482,8 @@ struct r5conf {
*/
int active_name;
char cache_name[2][32];
- struct kmem_cache *slab_cache; /* for allocating stripes */
+ struct kmem_cache *slab_cache; /* for allocating stripes */
+ struct mutex cache_size_mutex; /* Protect changes to cache size */
int seq_flush, seq_write;
int quiesce;
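
The raid5 changes introduce cache_size_mutex (declared in raid5.h above): explicit resizes in resize_stripes() and raid5_set_cache_size() take it outright, while raid5d() and the shrinker only mutex_trylock(), so neither the daemon nor memory reclaim blocks on a resize in progress; the shrinker additionally never shrinks below min_nr_stripes and reports SHRINK_STOP when it cannot make progress. A compilable userspace analogue of that locking split (build with -pthread; SHRINK_STOP is modelled as -1 and the pool is just a counter):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cache_size_mutex = PTHREAD_MUTEX_INITIALIZER;
static int max_nr = 256;    /* current pool size */
static int min_nr = 64;     /* floor set by the administrator */

static int drop_one(void)
{
    max_nr--;               /* stand-in for freeing one stripe */
    return 1;
}

static void set_cache_size(int size)
{
    pthread_mutex_lock(&cache_size_mutex);    /* explicit resize may block */
    while (size < max_nr && drop_one())
        ;
    pthread_mutex_unlock(&cache_size_mutex);
}

static long shrinker_scan(long nr_to_scan)
{
    long freed = -1;    /* -1: tell the caller to stop, like SHRINK_STOP */

    if (pthread_mutex_trylock(&cache_size_mutex) == 0) {
        freed = 0;
        while (freed < nr_to_scan && max_nr > min_nr && drop_one())
            freed++;    /* reclaim backs off instead of waiting for a resize */
        pthread_mutex_unlock(&cache_size_mutex);
    }
    return freed;
}

int main(void)
{
    long freed;

    set_cache_size(128);
    freed = shrinker_scan(16);
    printf("size now %d, shrinker freed %ld\n", max_nr, freed);
    return 0;
}
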
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 0d35f5850ff1..5ab90f36a6a6 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -240,7 +240,7 @@ config DVB_SI21XX
config DVB_TS2020
tristate "Montage Tehnology TS2020 based tuners"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
diff --git a/drivers/media/pci/cobalt/Kconfig b/drivers/media/pci/cobalt/Kconfig
index 3be1b2c3c386..6a1c0089bb62 100644
--- a/drivers/media/pci/cobalt/Kconfig
+++ b/drivers/media/pci/cobalt/Kconfig
@@ -2,6 +2,7 @@ config VIDEO_COBALT
tristate "Cisco Cobalt support"
depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
depends on PCI_MSI && MTD_COMPLEX_MAPPINGS && GPIOLIB
+ depends on SND
select I2C_ALGOBIT
select VIDEO_ADV7604
select VIDEO_ADV7511
diff --git a/drivers/media/pci/cobalt/cobalt-irq.c b/drivers/media/pci/cobalt/cobalt-irq.c
index dd4bff9cf339..d1f5898d11ba 100644
--- a/drivers/media/pci/cobalt/cobalt-irq.c
+++ b/drivers/media/pci/cobalt/cobalt-irq.c
@@ -139,7 +139,7 @@ done:
also know about dropped frames. */
cb->vb.v4l2_buf.sequence = s->sequence++;
vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ?
- VB2_BUF_STATE_QUEUED : VB2_BUF_STATE_DONE);
+ VB2_BUF_STATE_REQUEUEING : VB2_BUF_STATE_DONE);
}
irqreturn_t cobalt_irq_handler(int irq, void *dev_id)
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 4cb365d4ffdc..8b95eefb610b 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -38,6 +38,8 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fb.h>
@@ -1171,6 +1173,13 @@ static int ivtvfb_init_card(struct ivtv *itv)
{
int rc;
+#ifdef CONFIG_X86_64
+ if (pat_enabled()) {
+ pr_warn("ivtvfb needs PAT disabled, boot with nopat kernel parameter\n");
+ return -ENODEV;
+ }
+#endif
+
if (itv->osd_info) {
IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id);
return -EBUSY;
@@ -1265,12 +1274,6 @@ static int __init ivtvfb_init(void)
int registered = 0;
int err;
-#ifdef CONFIG_X86_64
- if (WARN(pat_enabled(),
- "ivtvfb needs PAT disabled, boot with nopat kernel parameter\n")) {
- return -ENODEV;
- }
-#endif
if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) {
printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n",
diff --git a/drivers/media/pci/mantis/mantis_dma.c b/drivers/media/pci/mantis/mantis_dma.c
index 1d59c7e039f7..87990ece5848 100644
--- a/drivers/media/pci/mantis/mantis_dma.c
+++ b/drivers/media/pci/mantis/mantis_dma.c
@@ -130,10 +130,11 @@ err:
int mantis_dma_init(struct mantis_pci *mantis)
{
- int err = 0;
+ int err;
dprintk(MANTIS_DEBUG, 1, "Mantis DMA init");
- if (mantis_alloc_buffers(mantis) < 0) {
+ err = mantis_alloc_buffers(mantis);
+ if (err < 0) {
dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer");
/* Stop RISC Engine */
diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
index 8939ebd74391..84fa6e9b59a1 100644
--- a/drivers/media/rc/ir-rc5-decoder.c
+++ b/drivers/media/rc/ir-rc5-decoder.c
@@ -184,125 +184,9 @@ out:
return -EINVAL;
}
-static struct ir_raw_timings_manchester ir_rc5_timings = {
- .leader = RC5_UNIT,
- .pulse_space_start = 0,
- .clock = RC5_UNIT,
- .trailer_space = RC5_UNIT * 10,
-};
-
-static struct ir_raw_timings_manchester ir_rc5x_timings[2] = {
- {
- .leader = RC5_UNIT,
- .pulse_space_start = 0,
- .clock = RC5_UNIT,
- .trailer_space = RC5X_SPACE,
- },
- {
- .clock = RC5_UNIT,
- .trailer_space = RC5_UNIT * 10,
- },
-};
-
-static struct ir_raw_timings_manchester ir_rc5_sz_timings = {
- .leader = RC5_UNIT,
- .pulse_space_start = 0,
- .clock = RC5_UNIT,
- .trailer_space = RC5_UNIT * 10,
-};
-
-static int ir_rc5_validate_filter(const struct rc_scancode_filter *scancode,
- unsigned int important_bits)
-{
- /* all important bits of scancode should be set in mask */
- if (~scancode->mask & important_bits)
- return -EINVAL;
- /* extra bits in mask should be zero in data */
- if (scancode->mask & scancode->data & ~important_bits)
- return -EINVAL;
- return 0;
-}
-
-/**
- * ir_rc5_encode() - Encode a scancode as a stream of raw events
- *
- * @protocols: allowed protocols
- * @scancode: scancode filter describing scancode (helps distinguish between
- * protocol subtypes when scancode is ambiguous)
- * @events: array of raw ir events to write into
- * @max: maximum size of @events
- *
- * Returns: The number of events written.
- * -ENOBUFS if there isn't enough space in the array to fit the
- * encoding. In this case all @max events will have been written.
- * -EINVAL if the scancode is ambiguous or invalid.
- */
-static int ir_rc5_encode(u64 protocols,
- const struct rc_scancode_filter *scancode,
- struct ir_raw_event *events, unsigned int max)
-{
- int ret;
- struct ir_raw_event *e = events;
- unsigned int data, xdata, command, commandx, system;
-
- /* Detect protocol and convert scancode to raw data */
- if (protocols & RC_BIT_RC5 &&
- !ir_rc5_validate_filter(scancode, 0x1f7f)) {
- /* decode scancode */
- command = (scancode->data & 0x003f) >> 0;
- commandx = (scancode->data & 0x0040) >> 6;
- system = (scancode->data & 0x1f00) >> 8;
- /* encode data */
- data = !commandx << 12 | system << 6 | command;
-
- /* Modulate the data */
- ret = ir_raw_gen_manchester(&e, max, &ir_rc5_timings, RC5_NBITS,
- data);
- if (ret < 0)
- return ret;
- } else if (protocols & RC_BIT_RC5X &&
- !ir_rc5_validate_filter(scancode, 0x1f7f3f)) {
- /* decode scancode */
- xdata = (scancode->data & 0x00003f) >> 0;
- command = (scancode->data & 0x003f00) >> 8;
- commandx = (scancode->data & 0x004000) >> 14;
- system = (scancode->data & 0x1f0000) >> 16;
- /* commandx and system overlap, bits must match when encoded */
- if (commandx == (system & 0x1))
- return -EINVAL;
- /* encode data */
- data = 1 << 18 | system << 12 | command << 6 | xdata;
-
- /* Modulate the data */
- ret = ir_raw_gen_manchester(&e, max, &ir_rc5x_timings[0],
- CHECK_RC5X_NBITS,
- data >> (RC5X_NBITS-CHECK_RC5X_NBITS));
- if (ret < 0)
- return ret;
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc5x_timings[1],
- RC5X_NBITS - CHECK_RC5X_NBITS,
- data);
- if (ret < 0)
- return ret;
- } else if (protocols & RC_BIT_RC5_SZ &&
- !ir_rc5_validate_filter(scancode, 0x2fff)) {
- /* RC5-SZ scancode is raw enough for Manchester as it is */
- ret = ir_raw_gen_manchester(&e, max, &ir_rc5_sz_timings,
- RC5_SZ_NBITS, scancode->data & 0x2fff);
- if (ret < 0)
- return ret;
- } else {
- return -EINVAL;
- }
-
- return e - events;
-}
-
static struct ir_raw_handler rc5_handler = {
.protocols = RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ,
.decode = ir_rc5_decode,
- .encode = ir_rc5_encode,
};
static int __init ir_rc5_decode_init(void)
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index f9c70baf6e0c..d16bc67af732 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -291,133 +291,11 @@ out:
return -EINVAL;
}
-static struct ir_raw_timings_manchester ir_rc6_timings[4] = {
- {
- .leader = RC6_PREFIX_PULSE,
- .pulse_space_start = 0,
- .clock = RC6_UNIT,
- .invert = 1,
- .trailer_space = RC6_PREFIX_SPACE,
- },
- {
- .clock = RC6_UNIT,
- .invert = 1,
- },
- {
- .clock = RC6_UNIT * 2,
- .invert = 1,
- },
- {
- .clock = RC6_UNIT,
- .invert = 1,
- .trailer_space = RC6_SUFFIX_SPACE,
- },
-};
-
-static int ir_rc6_validate_filter(const struct rc_scancode_filter *scancode,
- unsigned int important_bits)
-{
- /* all important bits of scancode should be set in mask */
- if (~scancode->mask & important_bits)
- return -EINVAL;
- /* extra bits in mask should be zero in data */
- if (scancode->mask & scancode->data & ~important_bits)
- return -EINVAL;
- return 0;
-}
-
-/**
- * ir_rc6_encode() - Encode a scancode as a stream of raw events
- *
- * @protocols: allowed protocols
- * @scancode: scancode filter describing scancode (helps distinguish between
- * protocol subtypes when scancode is ambiguous)
- * @events: array of raw ir events to write into
- * @max: maximum size of @events
- *
- * Returns: The number of events written.
- * -ENOBUFS if there isn't enough space in the array to fit the
- * encoding. In this case all @max events will have been written.
- * -EINVAL if the scancode is ambiguous or invalid.
- */
-static int ir_rc6_encode(u64 protocols,
- const struct rc_scancode_filter *scancode,
- struct ir_raw_event *events, unsigned int max)
-{
- int ret;
- struct ir_raw_event *e = events;
-
- if (protocols & RC_BIT_RC6_0 &&
- !ir_rc6_validate_filter(scancode, 0xffff)) {
-
- /* Modulate the preamble */
- ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
- if (ret < 0)
- return ret;
-
- /* Modulate the header (Start Bit & Mode-0) */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[1],
- RC6_HEADER_NBITS, (1 << 3));
- if (ret < 0)
- return ret;
-
- /* Modulate Trailer Bit */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[2], 1, 0);
- if (ret < 0)
- return ret;
-
- /* Modulate rest of the data */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[3], RC6_0_NBITS,
- scancode->data);
- if (ret < 0)
- return ret;
-
- } else if (protocols & (RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
- RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE) &&
- !ir_rc6_validate_filter(scancode, 0x8fffffff)) {
-
- /* Modulate the preamble */
- ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
- if (ret < 0)
- return ret;
-
- /* Modulate the header (Start Bit & Header-version 6 */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[1],
- RC6_HEADER_NBITS, (1 << 3 | 6));
- if (ret < 0)
- return ret;
-
- /* Modulate Trailer Bit */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[2], 1, 0);
- if (ret < 0)
- return ret;
-
- /* Modulate rest of the data */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[3],
- fls(scancode->mask),
- scancode->data);
- if (ret < 0)
- return ret;
-
- } else {
- return -EINVAL;
- }
-
- return e - events;
-}
-
static struct ir_raw_handler rc6_handler = {
.protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
RC_BIT_RC6_MCE,
.decode = ir_rc6_decode,
- .encode = ir_rc6_encode,
};
static int __init ir_rc6_decode_init(void)
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index baeb5971fd52..85af7a869167 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -526,130 +526,6 @@ static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
return 0;
}
-static int nvt_write_wakeup_codes(struct rc_dev *dev,
- const u8 *wakeup_sample_buf, int count)
-{
- int i = 0;
- u8 reg, reg_learn_mode;
- unsigned long flags;
- struct nvt_dev *nvt = dev->priv;
-
- nvt_dbg_wake("writing wakeup samples");
-
- reg = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
- reg_learn_mode = reg & ~CIR_WAKE_IRCON_MODE0;
- reg_learn_mode |= CIR_WAKE_IRCON_MODE1;
-
- /* Lock the learn area to prevent racing with wake-isr */
- spin_lock_irqsave(&nvt->nvt_lock, flags);
-
- /* Enable fifo writes */
- nvt_cir_wake_reg_write(nvt, reg_learn_mode, CIR_WAKE_IRCON);
-
- /* Clear cir wake rx fifo */
- nvt_clear_cir_wake_fifo(nvt);
-
- if (count > WAKE_FIFO_LEN) {
- nvt_dbg_wake("HW FIFO too small for all wake samples");
- count = WAKE_FIFO_LEN;
- }
-
- if (count)
- pr_info("Wake samples (%d) =", count);
- else
- pr_info("Wake sample fifo cleared");
-
- /* Write wake samples to fifo */
- for (i = 0; i < count; i++) {
- pr_cont(" %02x", wakeup_sample_buf[i]);
- nvt_cir_wake_reg_write(nvt, wakeup_sample_buf[i],
- CIR_WAKE_WR_FIFO_DATA);
- }
- pr_cont("\n");
-
- /* Switch cir to wakeup mode and disable fifo writing */
- nvt_cir_wake_reg_write(nvt, reg, CIR_WAKE_IRCON);
-
- /* Set number of bytes needed for wake */
- nvt_cir_wake_reg_write(nvt, count ? count :
- CIR_WAKE_FIFO_CMP_BYTES,
- CIR_WAKE_FIFO_CMP_DEEP);
-
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
-
- return 0;
-}
-
-static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
- struct rc_scancode_filter *sc_filter)
-{
- u8 *reg_buf;
- u8 buf_val;
- int i, ret, count;
- unsigned int val;
- struct ir_raw_event *raw;
- bool complete;
-
- /* Require both mask and data to be set before actually committing */
- if (!sc_filter->mask || !sc_filter->data)
- return 0;
-
- raw = kmalloc_array(WAKE_FIFO_LEN, sizeof(*raw), GFP_KERNEL);
- if (!raw)
- return -ENOMEM;
-
- ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
- raw, WAKE_FIFO_LEN);
- complete = (ret != -ENOBUFS);
- if (!complete)
- ret = WAKE_FIFO_LEN;
- else if (ret < 0)
- goto out_raw;
-
- reg_buf = kmalloc_array(WAKE_FIFO_LEN, sizeof(*reg_buf), GFP_KERNEL);
- if (!reg_buf) {
- ret = -ENOMEM;
- goto out_raw;
- }
-
- /* Inspect the ir samples */
- for (i = 0, count = 0; i < ret && count < WAKE_FIFO_LEN; ++i) {
- val = NS_TO_US((raw[i]).duration) / SAMPLE_PERIOD;
-
- /* Split too large values into several smaller ones */
- while (val > 0 && count < WAKE_FIFO_LEN) {
-
- /* Skip last value for better comparison tolerance */
- if (complete && i == ret - 1 && val < BUF_LEN_MASK)
- break;
-
- /* Clamp values to BUF_LEN_MASK at most */
- buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;
-
- reg_buf[count] = buf_val;
- val -= buf_val;
- if ((raw[i]).pulse)
- reg_buf[count] |= BUF_PULSE_BIT;
- count++;
- }
- }
-
- ret = nvt_write_wakeup_codes(dev, reg_buf, count);
-
- kfree(reg_buf);
-out_raw:
- kfree(raw);
-
- return ret;
-}
-
-/* Dummy implementation. nuvoton is agnostic to the protocol used */
-static int nvt_ir_raw_change_wakeup_protocol(struct rc_dev *dev,
- u64 *rc_type)
-{
- return 0;
-}
-
/*
* nvt_tx_ir
*
@@ -1167,14 +1043,11 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
/* Set up the rc device */
rdev->priv = nvt;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->encode_wakeup = true;
rdev->allowed_protocols = RC_BIT_ALL;
rdev->open = nvt_open;
rdev->close = nvt_close;
rdev->tx_ir = nvt_tx_ir;
rdev->s_tx_carrier = nvt_set_tx_carrier;
- rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
- rdev->change_wakeup_protocol = nvt_ir_raw_change_wakeup_protocol;
rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
rdev->input_phys = "nuvoton/cir0";
rdev->input_id.bustype = BUS_HOST;
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 9d0e161c2a88..e1cf23c3875b 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -63,7 +63,6 @@ static int debug;
*/
#define TX_BUF_LEN 256
#define RX_BUF_LEN 32
-#define WAKE_FIFO_LEN 67
struct nvt_dev {
struct pnp_dev *pdev;
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 4b994aa2f2a7..b68d4f762734 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -25,8 +25,6 @@ struct ir_raw_handler {
u64 protocols; /* which are handled by this handler */
int (*decode)(struct rc_dev *dev, struct ir_raw_event event);
- int (*encode)(u64 protocols, const struct rc_scancode_filter *scancode,
- struct ir_raw_event *events, unsigned int max);
/* These two should only be used by the lirc decoder */
int (*raw_register)(struct rc_dev *dev);
@@ -152,44 +150,10 @@ static inline bool is_timing_event(struct ir_raw_event ev)
#define TO_US(duration) DIV_ROUND_CLOSEST((duration), 1000)
#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space")
-/* functions for IR encoders */
-
-static inline void init_ir_raw_event_duration(struct ir_raw_event *ev,
- unsigned int pulse,
- u32 duration)
-{
- init_ir_raw_event(ev);
- ev->duration = duration;
- ev->pulse = pulse;
-}
-
-/**
- * struct ir_raw_timings_manchester - Manchester coding timings
- * @leader: duration of leader pulse (if any) 0 if continuing
- * existing signal (see @pulse_space_start)
- * @pulse_space_start: 1 for starting with pulse (0 for starting with space)
- * @clock: duration of each pulse/space in ns
- * @invert: if set clock logic is inverted
- * (0 = space + pulse, 1 = pulse + space)
- * @trailer_space: duration of trailer space in ns
- */
-struct ir_raw_timings_manchester {
- unsigned int leader;
- unsigned int pulse_space_start:1;
- unsigned int clock;
- unsigned int invert:1;
- unsigned int trailer_space;
-};
-
-int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
- const struct ir_raw_timings_manchester *timings,
- unsigned int n, unsigned int data);
-
/*
* Routines from rc-raw.c to be used internally and by decoders
*/
u64 ir_raw_get_allowed_protocols(void);
-u64 ir_raw_get_encode_protocols(void);
int ir_raw_event_register(struct rc_dev *dev);
void ir_raw_event_unregister(struct rc_dev *dev);
int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler);
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index b9e4645c731c..b732ac6a26d8 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -30,7 +30,6 @@ static LIST_HEAD(ir_raw_client_list);
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static u64 available_protocols;
-static u64 encode_protocols;
static int ir_raw_event_thread(void *data)
{
@@ -241,146 +240,12 @@ ir_raw_get_allowed_protocols(void)
return protocols;
}
-/* used internally by the sysfs interface */
-u64
-ir_raw_get_encode_protocols(void)
-{
- u64 protocols;
-
- mutex_lock(&ir_raw_handler_lock);
- protocols = encode_protocols;
- mutex_unlock(&ir_raw_handler_lock);
- return protocols;
-}
-
static int change_protocol(struct rc_dev *dev, u64 *rc_type)
{
/* the caller will update dev->enabled_protocols */
return 0;
}
-/**
- * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
- * @ev: Pointer to pointer to next free event. *@ev is incremented for
- * each raw event filled.
- * @max: Maximum number of raw events to fill.
- * @timings: Manchester modulation timings.
- * @n: Number of bits of data.
- * @data: Data bits to encode.
- *
- * Encodes the @n least significant bits of @data using Manchester (bi-phase)
- * modulation with the timing characteristics described by @timings, writing up
- * to @max raw IR events using the *@ev pointer.
- *
- * Returns: 0 on success.
- * -ENOBUFS if there isn't enough space in the array to fit the
- * full encoded data. In this case all @max events will have been
- * written.
- */
-int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
- const struct ir_raw_timings_manchester *timings,
- unsigned int n, unsigned int data)
-{
- bool need_pulse;
- unsigned int i;
- int ret = -ENOBUFS;
-
- i = 1 << (n - 1);
-
- if (timings->leader) {
- if (!max--)
- return ret;
- if (timings->pulse_space_start) {
- init_ir_raw_event_duration((*ev)++, 1, timings->leader);
-
- if (!max--)
- return ret;
- init_ir_raw_event_duration((*ev), 0, timings->leader);
- } else {
- init_ir_raw_event_duration((*ev), 1, timings->leader);
- }
- i >>= 1;
- } else {
- /* continue existing signal */
- --(*ev);
- }
- /* from here on *ev will point to the last event rather than the next */
-
- while (n && i > 0) {
- need_pulse = !(data & i);
- if (timings->invert)
- need_pulse = !need_pulse;
- if (need_pulse == !!(*ev)->pulse) {
- (*ev)->duration += timings->clock;
- } else {
- if (!max--)
- goto nobufs;
- init_ir_raw_event_duration(++(*ev), need_pulse,
- timings->clock);
- }
-
- if (!max--)
- goto nobufs;
- init_ir_raw_event_duration(++(*ev), !need_pulse,
- timings->clock);
- i >>= 1;
- }
-
- if (timings->trailer_space) {
- if (!(*ev)->pulse)
- (*ev)->duration += timings->trailer_space;
- else if (!max--)
- goto nobufs;
- else
- init_ir_raw_event_duration(++(*ev), 0,
- timings->trailer_space);
- }
-
- ret = 0;
-nobufs:
- /* point to the next event rather than last event before returning */
- ++(*ev);
- return ret;
-}
-EXPORT_SYMBOL(ir_raw_gen_manchester);
-
-/**
- * ir_raw_encode_scancode() - Encode a scancode as raw events
- *
- * @protocols: permitted protocols
- * @scancode: scancode filter describing a single scancode
- * @events: array of raw events to write into
- * @max: max number of raw events
- *
- * Attempts to encode the scancode as raw events.
- *
- * Returns: The number of events written.
- * -ENOBUFS if there isn't enough space in the array to fit the
- * encoding. In this case all @max events will have been written.
- * -EINVAL if the scancode is ambiguous or invalid, or if no
- * compatible encoder was found.
- */
-int ir_raw_encode_scancode(u64 protocols,
- const struct rc_scancode_filter *scancode,
- struct ir_raw_event *events, unsigned int max)
-{
- struct ir_raw_handler *handler;
- int ret = -EINVAL;
-
- mutex_lock(&ir_raw_handler_lock);
- list_for_each_entry(handler, &ir_raw_handler_list, list) {
- if (handler->protocols & protocols && handler->encode) {
- ret = handler->encode(protocols, scancode, events, max);
- if (ret >= 0 || ret == -ENOBUFS)
- break;
- }
- }
- mutex_unlock(&ir_raw_handler_lock);
-
- return ret;
-}
-EXPORT_SYMBOL(ir_raw_encode_scancode);
-
/*
* Used to (un)register raw event clients
*/
@@ -463,8 +328,6 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
list_for_each_entry(raw, &ir_raw_client_list, list)
ir_raw_handler->raw_register(raw->dev);
available_protocols |= ir_raw_handler->protocols;
- if (ir_raw_handler->encode)
- encode_protocols |= ir_raw_handler->protocols;
mutex_unlock(&ir_raw_handler_lock);
return 0;
@@ -481,8 +344,6 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
list_for_each_entry(raw, &ir_raw_client_list, list)
ir_raw_handler->raw_unregister(raw->dev);
available_protocols &= ~ir_raw_handler->protocols;
- if (ir_raw_handler->encode)
- encode_protocols &= ~ir_raw_handler->protocols;
mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);
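
The code removed above included ir_raw_gen_manchester(), which turned the n least significant bits of a scancode into pulse/space events at a fixed half-bit clock and merged adjacent halves of equal polarity into one longer event. The standalone encoder below reproduces that idea so the removed logic is easier to follow; it fixes one polarity convention (bit 1 = space then pulse), whereas the real code chose polarity per protocol via the timings' invert flag, and it omits the leader and trailer handling.

#include <stdio.h>

struct half { int pulse; unsigned int duration_ns; };

static int manchester_encode(unsigned int data, unsigned int nbits,
                             unsigned int clock_ns,
                             struct half *out, unsigned int max)
{
    unsigned int count = 0;

    if (!nbits || nbits > 32)
        return -1;

    for (unsigned int i = 1u << (nbits - 1); i; i >>= 1) {
        int bit = !!(data & i);
        int halves[2] = { !bit, bit };    /* bit 1 -> space then pulse */

        for (int h = 0; h < 2; h++) {
            if (count && out[count - 1].pulse == halves[h]) {
                out[count - 1].duration_ns += clock_ns;    /* merge equal neighbours */
            } else {
                if (count == max)
                    return -1;    /* caller's buffer too small */
                out[count].pulse = halves[h];
                out[count].duration_ns = clock_ns;
                count++;
            }
        }
    }
    return (int)count;
}

int main(void)
{
    struct half buf[32];
    int n = manchester_encode(0x5, 4, 889000, buf, 32);    /* ~889 us, close to the RC-5 unit */

    for (int i = 0; i < n; i++)
        printf("%s %u ns\n", buf[i].pulse ? "pulse" : "space", buf[i].duration_ns);
    return 0;
}
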
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index d8bdf63ce985..63dace8198b0 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -26,7 +26,6 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/sched.h>
-#include <linux/slab.h>
#include <media/rc-core.h>
#define DRIVER_NAME "rc-loopback"
@@ -177,39 +176,6 @@ static int loop_set_carrier_report(struct rc_dev *dev, int enable)
return 0;
}
-static int loop_set_wakeup_filter(struct rc_dev *dev,
- struct rc_scancode_filter *sc_filter)
-{
- static const unsigned int max = 512;
- struct ir_raw_event *raw;
- int ret;
- int i;
-
- /* fine to disable filter */
- if (!sc_filter->mask)
- return 0;
-
- /* encode the specified filter and loop it back */
- raw = kmalloc_array(max, sizeof(*raw), GFP_KERNEL);
- ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
- raw, max);
- /* still loop back the partial raw IR even if it's incomplete */
- if (ret == -ENOBUFS)
- ret = max;
- if (ret >= 0) {
- /* do the loopback */
- for (i = 0; i < ret; ++i)
- ir_raw_event_store(dev, &raw[i]);
- ir_raw_event_handle(dev);
-
- ret = 0;
- }
-
- kfree(raw);
-
- return ret;
-}
-
static int __init loop_init(void)
{
struct rc_dev *rc;
@@ -229,7 +195,6 @@ static int __init loop_init(void)
rc->map_name = RC_MAP_EMPTY;
rc->priv = &loopdev;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->encode_wakeup = true;
rc->allowed_protocols = RC_BIT_ALL;
rc->timeout = 100 * 1000 * 1000; /* 100 ms */
rc->min_timeout = 1;
@@ -244,7 +209,6 @@ static int __init loop_init(void)
rc->s_idle = loop_set_idle;
rc->s_learning_mode = loop_set_learning_mode;
rc->s_carrier_report = loop_set_carrier_report;
- rc->s_wakeup_filter = loop_set_wakeup_filter;
loopdev.txmask = RXMASK_REGULAR;
loopdev.txcarrier = 36000;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 9d015db65280..0ff388a16168 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -865,8 +865,6 @@ static ssize_t show_protocols(struct device *device,
} else {
enabled = dev->enabled_wakeup_protocols;
allowed = dev->allowed_wakeup_protocols;
- if (dev->encode_wakeup && !allowed)
- allowed = ir_raw_get_encode_protocols();
}
mutex_unlock(&dev->lock);
@@ -1408,16 +1406,13 @@ int rc_register_device(struct rc_dev *dev)
path ? path : "N/A");
kfree(path);
- if (dev->driver_type == RC_DRIVER_IR_RAW || dev->encode_wakeup) {
+ if (dev->driver_type == RC_DRIVER_IR_RAW) {
/* Load raw decoders, if they aren't already */
if (!raw_init) {
IR_dprintk(1, "Loading raw decoders\n");
ir_raw_init();
raw_init = true;
}
- }
-
- if (dev->driver_type == RC_DRIVER_IR_RAW) {
/* calls ir_register_device so unlock mutex here*/
mutex_unlock(&dev->lock);
rc = ir_raw_event_register(dev);
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 93b315459098..a14c428f70e9 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -715,6 +715,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
break;
case VB2_BUF_STATE_PREPARING:
case VB2_BUF_STATE_DEQUEUED:
+ case VB2_BUF_STATE_REQUEUEING:
/* nothing */
break;
}
@@ -1182,7 +1183,8 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
if (WARN_ON(state != VB2_BUF_STATE_DONE &&
state != VB2_BUF_STATE_ERROR &&
- state != VB2_BUF_STATE_QUEUED))
+ state != VB2_BUF_STATE_QUEUED &&
+ state != VB2_BUF_STATE_REQUEUEING))
state = VB2_BUF_STATE_ERROR;
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -1199,22 +1201,30 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
for (plane = 0; plane < vb->num_planes; ++plane)
call_void_memop(vb, finish, vb->planes[plane].mem_priv);
- /* Add the buffer to the done buffers list */
spin_lock_irqsave(&q->done_lock, flags);
- vb->state = state;
- if (state != VB2_BUF_STATE_QUEUED)
+ if (state == VB2_BUF_STATE_QUEUED ||
+ state == VB2_BUF_STATE_REQUEUEING) {
+ vb->state = VB2_BUF_STATE_QUEUED;
+ } else {
+ /* Add the buffer to the done buffers list */
list_add_tail(&vb->done_entry, &q->done_list);
+ vb->state = state;
+ }
atomic_dec(&q->owned_by_drv_count);
spin_unlock_irqrestore(&q->done_lock, flags);
- if (state == VB2_BUF_STATE_QUEUED) {
+ switch (state) {
+ case VB2_BUF_STATE_QUEUED:
+ return;
+ case VB2_BUF_STATE_REQUEUEING:
if (q->start_streaming_called)
__enqueue_in_driver(vb);
return;
+ default:
+ /* Inform any processes that may be waiting for buffers */
+ wake_up(&q->done_wq);
+ break;
}
-
- /* Inform any processes that may be waiting for buffers */
- wake_up(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
@@ -1244,19 +1254,19 @@ EXPORT_SYMBOL_GPL(vb2_discard_done);
static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
{
- static bool __check_once __read_mostly;
+ static bool check_once;
- if (__check_once)
+ if (check_once)
return;
- __check_once = true;
- __WARN();
+ check_once = true;
+ WARN_ON(1);
- pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n");
+ pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
if (vb->vb2_queue->allow_zero_bytesused)
- pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
+ pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
else
- pr_warn_once("use the actual size instead.\n");
+ pr_warn("use the actual size instead.\n");
}
/**
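
The videobuf2 hunks add VB2_BUF_STATE_REQUEUEING: a driver such as cobalt can complete a buffer straight back into its own queue (for skipped or unstable frames) without putting it on the done list or waking userspace. The sketch below mirrors that three-way split with simplified, hypothetical names; it is not the vb2 API.

#include <stdio.h>

enum buf_state { BUF_DONE, BUF_ERROR, BUF_QUEUED, BUF_REQUEUEING };

struct buffer { int id; enum buf_state state; };

static void enqueue_in_driver(struct buffer *b)
{
    b->state = BUF_QUEUED;
    printf("buffer %d handed back to the driver\n", b->id);
}

static void wake_waiters(void)
{
    printf("userspace woken: a buffer is ready to dequeue\n");
}

static void buffer_done(struct buffer *b, enum buf_state state)
{
    switch (state) {
    case BUF_QUEUED:
        b->state = BUF_QUEUED;      /* stays with the driver, no wakeup */
        return;
    case BUF_REQUEUEING:
        b->state = BUF_QUEUED;
        enqueue_in_driver(b);       /* immediately resubmit to the hardware */
        return;
    default:
        b->state = state;           /* DONE or ERROR */
        wake_waiters();             /* userspace may now dequeue it */
    }
}

int main(void)
{
    struct buffer a = { 1, BUF_QUEUED }, b = { 2, BUF_QUEUED };

    buffer_done(&a, BUF_REQUEUEING);    /* e.g. an unstable frame being dropped */
    buffer_done(&b, BUF_DONE);
    return 0;
}
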
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 3a27a84ad3ec..9426276dbe14 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -2245,6 +2245,9 @@ void omap3_gpmc_save_context(void)
{
int i;
+ if (!gpmc_base)
+ return;
+
gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
@@ -2277,6 +2280,9 @@ void omap3_gpmc_restore_context(void)
{
int i;
+ if (!gpmc_base)
+ return;
+
gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 653815950aa2..3f68dd251ce8 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -115,7 +115,7 @@ config MFD_CROS_EC_I2C
config MFD_CROS_EC_SPI
tristate "ChromeOS Embedded Controller (SPI)"
- depends on MFD_CROS_EC && CROS_EC_PROTO && SPI && OF
+ depends on MFD_CROS_EC && CROS_EC_PROTO && SPI
---help---
If you say Y here, you get support for talking to the ChromeOS EC
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index bebf58a06a6b..a72ddb295078 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -651,7 +651,7 @@ static int arizona_runtime_suspend(struct device *dev)
arizona->has_fully_powered_off = true;
- disable_irq(arizona->irq);
+ disable_irq_nosync(arizona->irq);
arizona_enable_reset(arizona);
regulator_bulk_disable(arizona->num_core_supplies,
arizona->core_supplies);
@@ -1141,10 +1141,6 @@ int arizona_dev_init(struct arizona *arizona)
arizona->pdata.gpio_defaults[i]);
}
- pm_runtime_set_autosuspend_delay(arizona->dev, 100);
- pm_runtime_use_autosuspend(arizona->dev);
- pm_runtime_enable(arizona->dev);
-
/* Chip default */
if (!arizona->pdata.clk32k_src)
arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2;
@@ -1245,11 +1241,17 @@ int arizona_dev_init(struct arizona *arizona)
arizona->pdata.spk_fmt[i]);
}
+ pm_runtime_set_active(arizona->dev);
+ pm_runtime_enable(arizona->dev);
+
/* Set up for interrupts */
ret = arizona_irq_init(arizona);
if (ret != 0)
goto err_reset;
+ pm_runtime_set_autosuspend_delay(arizona->dev, 100);
+ pm_runtime_use_autosuspend(arizona->dev);
+
arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error",
arizona_clkgen_err, arizona);
arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked",
@@ -1278,10 +1280,6 @@ int arizona_dev_init(struct arizona *arizona)
goto err_irq;
}
-#ifdef CONFIG_PM
- regulator_disable(arizona->dcvdd);
-#endif
-
return 0;
err_irq:
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index 5c054031c3f8..e14c8c9d189b 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -6,7 +6,7 @@
*
* License Terms: GNU General Public License, version 2
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
- * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
+ * Author: Viresh Kumar <vireshk@kernel.org> for ST Microelectronics
*/
#include <linux/i2c.h>
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index a81badbaa917..6fdb30e84a2b 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -4,7 +4,7 @@
* Copyright (C) ST Microelectronics SA 2011
*
* License Terms: GNU General Public License, version 2
- * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
+ * Author: Viresh Kumar <vireshk@kernel.org> for ST Microelectronics
*/
#include <linux/spi/spi.h>
@@ -146,4 +146,4 @@ module_exit(stmpe_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 2d3db81be099..6ded3dc36644 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -438,9 +438,6 @@ static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
{
struct at24_data *at24;
- if (unlikely(off >= attr->size))
- return -EFBIG;
-
at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
return at24_write(at24, buf, off, count);
}
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 8eb0a9500a90..e9513d651cd3 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -682,7 +682,7 @@ int mei_register(struct mei_device *dev, struct device *parent)
/* Fill in the data structures */
devno = MKDEV(MAJOR(mei_devt), dev->minor);
cdev_init(&dev->cdev, &mei_fops);
- dev->cdev.owner = mei_fops.owner;
+ dev->cdev.owner = parent->driver->owner;
/* Add the device */
ret = cdev_add(&dev->cdev, devno, 1);
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
index 41e3bdb10061..6dfdae3452d6 100644
--- a/drivers/misc/mic/scif/scif_nodeqp.c
+++ b/drivers/misc/mic/scif/scif_nodeqp.c
@@ -357,7 +357,7 @@ static void scif_p2p_freesg(struct scatterlist *sg)
}
static struct scatterlist *
-scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt)
+scif_p2p_setsg(phys_addr_t pa, int page_size, int page_cnt)
{
struct scatterlist *sg;
struct page *page;
@@ -368,16 +368,11 @@ scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt)
return NULL;
sg_init_table(sg, page_cnt);
for (i = 0; i < page_cnt; i++) {
- page = vmalloc_to_page((void __force *)va);
- if (!page)
- goto p2p_sg_err;
+ page = pfn_to_page(pa >> PAGE_SHIFT);
sg_set_page(&sg[i], page, page_size, 0);
- va += page_size;
+ pa += page_size;
}
return sg;
-p2p_sg_err:
- kfree(sg);
- return NULL;
}
/* Init p2p mappings required to access peerdev from scifdev */
@@ -395,14 +390,14 @@ scif_init_p2p_info(struct scif_dev *scifdev, struct scif_dev *peerdev)
p2p = kzalloc(sizeof(*p2p), GFP_KERNEL);
if (!p2p)
return NULL;
- p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->va,
+ p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->pa,
PAGE_SIZE, num_mmio_pages);
if (!p2p->ppi_sg[SCIF_PPI_MMIO])
goto free_p2p;
p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages;
sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30)));
num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT);
- p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->va,
+ p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->pa,
1 << sg_page_shift,
num_aper_chunks);
p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks;
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c9c3d20b784b..a1b820fcb2a6 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -208,6 +208,8 @@ static ssize_t power_ro_lock_show(struct device *dev,
ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
+ mmc_blk_put(md);
+
return ret;
}
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index fd9a58e216a5..6a0f9c79be26 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -779,6 +779,7 @@ config MMC_TOSHIBA_PCI
config MMC_MTK
tristate "MediaTek SD/MMC Card Interface support"
+ depends on HAS_DMA
help
This selects the MediaTek(R) Secure digital and Multimedia card Interface.
If you have a machine with an integrated SD/MMC card reader, say Y or M here.
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index b2b411da297b..4d1203236890 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1062,9 +1062,14 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
if (status & (CTO_EN | CCRC_EN))
end_cmd = 1;
+ if (host->data || host->response_busy) {
+ end_trans = !end_cmd;
+ host->response_busy = 0;
+ }
if (status & (CTO_EN | DTO_EN))
hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
- else if (status & (CCRC_EN | DCRC_EN))
+ else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN |
+ BADA_EN))
hsmmc_command_incomplete(host, -EILSEQ, end_cmd);
if (status & ACE_EN) {
@@ -1081,10 +1086,6 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
}
dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
}
- if (host->data || host->response_busy) {
- end_trans = !end_cmd;
- host->response_busy = 0;
- }
}
OMAP_HSMMC_WRITE(host->base, STAT, status);
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index faf0cb910c96..c6b9f6492e1a 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -581,13 +581,8 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
- struct esdhc_platform_data *boarddata = &imx_data->boarddata;
- if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock))
- return boarddata->f_max;
- else
- return pltfm_host->clock;
+ return pltfm_host->clock;
}
static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
@@ -878,34 +873,19 @@ static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
static int
sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
struct sdhci_host *host,
- struct esdhc_platform_data *boarddata)
+ struct pltfm_imx_data *imx_data)
{
struct device_node *np = pdev->dev.of_node;
-
- if (!np)
- return -ENODEV;
-
- if (of_get_property(np, "non-removable", NULL))
- boarddata->cd_type = ESDHC_CD_PERMANENT;
-
- if (of_get_property(np, "fsl,cd-controller", NULL))
- boarddata->cd_type = ESDHC_CD_CONTROLLER;
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+ int ret;
if (of_get_property(np, "fsl,wp-controller", NULL))
boarddata->wp_type = ESDHC_WP_CONTROLLER;
- boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
- if (gpio_is_valid(boarddata->cd_gpio))
- boarddata->cd_type = ESDHC_CD_GPIO;
-
boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
if (gpio_is_valid(boarddata->wp_gpio))
boarddata->wp_type = ESDHC_WP_GPIO;
- of_property_read_u32(np, "bus-width", &boarddata->max_bus_width);
-
- of_property_read_u32(np, "max-frequency", &boarddata->f_max);
-
if (of_find_property(np, "no-1-8-v", NULL))
boarddata->support_vsel = false;
else
@@ -916,29 +896,119 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
mmc_of_parse_voltage(np, &host->ocr_mask);
+ /* sdr50 and sdr104 needs work on 1.8v signal voltage */
+ if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
+ !IS_ERR(imx_data->pins_default)) {
+ imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
+ ESDHC_PINCTRL_STATE_100MHZ);
+ imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
+ ESDHC_PINCTRL_STATE_200MHZ);
+ if (IS_ERR(imx_data->pins_100mhz) ||
+ IS_ERR(imx_data->pins_200mhz)) {
+ dev_warn(mmc_dev(host->mmc),
+ "could not get ultra high speed state, work on normal mode\n");
+ /*
+ * fall back to not support uhs by specify no 1.8v quirk
+ */
+ host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+ }
+ } else {
+ host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+ }
+
/* call to generic mmc_of_parse to support additional capabilities */
- return mmc_of_parse(host->mmc);
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ return ret;
+
+ if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+
+ return 0;
}
#else
static inline int
sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
struct sdhci_host *host,
- struct esdhc_platform_data *boarddata)
+ struct pltfm_imx_data *imx_data)
{
return -ENODEV;
}
#endif
+static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
+ struct sdhci_host *host,
+ struct pltfm_imx_data *imx_data)
+{
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+ int err;
+
+ if (!host->mmc->parent->platform_data) {
+ dev_err(mmc_dev(host->mmc), "no board data!\n");
+ return -EINVAL;
+ }
+
+ imx_data->boarddata = *((struct esdhc_platform_data *)
+ host->mmc->parent->platform_data);
+ /* write_protect */
+ if (boarddata->wp_type == ESDHC_WP_GPIO) {
+ err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
+ if (err) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to request write-protect gpio!\n");
+ return err;
+ }
+ host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+ }
+
+ /* card_detect */
+ switch (boarddata->cd_type) {
+ case ESDHC_CD_GPIO:
+ err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
+ if (err) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to request card-detect gpio!\n");
+ return err;
+ }
+ /* fall through */
+
+ case ESDHC_CD_CONTROLLER:
+ /* we have a working card_detect back */
+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ break;
+
+ case ESDHC_CD_PERMANENT:
+ host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+ break;
+
+ case ESDHC_CD_NONE:
+ break;
+ }
+
+ switch (boarddata->max_bus_width) {
+ case 8:
+ host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
+ break;
+ case 4:
+ host->mmc->caps |= MMC_CAP_4_BIT_DATA;
+ break;
+ case 1:
+ default:
+ host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
+ break;
+ }
+
+ return 0;
+}
+
static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(imx_esdhc_dt_ids, &pdev->dev);
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
- struct esdhc_platform_data *boarddata;
int err;
struct pltfm_imx_data *imx_data;
- bool dt = true;
host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0);
if (IS_ERR(host))
@@ -1030,84 +1100,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
- boarddata = &imx_data->boarddata;
- if (sdhci_esdhc_imx_probe_dt(pdev, host, boarddata) < 0) {
- if (!host->mmc->parent->platform_data) {
- dev_err(mmc_dev(host->mmc), "no board data!\n");
- err = -EINVAL;
- goto disable_clk;
- }
- imx_data->boarddata = *((struct esdhc_platform_data *)
- host->mmc->parent->platform_data);
- dt = false;
- }
- /* write_protect */
- if (boarddata->wp_type == ESDHC_WP_GPIO && !dt) {
- err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
- if (err) {
- dev_err(mmc_dev(host->mmc),
- "failed to request write-protect gpio!\n");
- goto disable_clk;
- }
- host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
- }
-
- /* card_detect */
- switch (boarddata->cd_type) {
- case ESDHC_CD_GPIO:
- if (dt)
- break;
- err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
- if (err) {
- dev_err(mmc_dev(host->mmc),
- "failed to request card-detect gpio!\n");
- goto disable_clk;
- }
- /* fall through */
-
- case ESDHC_CD_CONTROLLER:
- /* we have a working card_detect back */
- host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
- break;
-
- case ESDHC_CD_PERMANENT:
- host->mmc->caps |= MMC_CAP_NONREMOVABLE;
- break;
-
- case ESDHC_CD_NONE:
- break;
- }
-
- switch (boarddata->max_bus_width) {
- case 8:
- host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
- break;
- case 4:
- host->mmc->caps |= MMC_CAP_4_BIT_DATA;
- break;
- case 1:
- default:
- host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
- break;
- }
-
- /* sdr50 and sdr104 needs work on 1.8v signal voltage */
- if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
- !IS_ERR(imx_data->pins_default)) {
- imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
- ESDHC_PINCTRL_STATE_100MHZ);
- imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
- ESDHC_PINCTRL_STATE_200MHZ);
- if (IS_ERR(imx_data->pins_100mhz) ||
- IS_ERR(imx_data->pins_200mhz)) {
- dev_warn(mmc_dev(host->mmc),
- "could not get ultra high speed state, work on normal mode\n");
- /* fall back to not support uhs by specify no 1.8v quirk */
- host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
- }
- } else {
- host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
- }
+ if (of_id)
+ err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
+ else
+ err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data);
+ if (err)
+ goto disable_clk;
err = sdhci_add_host(host);
if (err)
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index 3497cfaf683c..a870c42731d7 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -45,6 +45,6 @@
#define ESDHC_DMA_SYSCTL 0x40c
#define ESDHC_DMA_SNOOP 0x00000040
-#define ESDHC_HOST_CONTROL_RES 0x05
+#define ESDHC_HOST_CONTROL_RES 0x01
#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 9cd5fc62f130..946d37f94a31 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -411,6 +411,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
goto err_of_parse;
sdhci_get_of_property(pdev);
pdata = pxav3_get_mmc_pdata(dev);
+ pdev->dev.platform_data = pdata;
} else if (pdata) {
/* on-chip device */
if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index df088343d60f..255a896769b8 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -4,7 +4,7 @@
* Support of SDHCI platform devices for spear soc family
*
* Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* Inspired by sdhci-pltfm.c
*
@@ -211,5 +211,5 @@ static struct platform_driver sdhci_driver = {
module_platform_driver(sdhci_driver);
MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index bc1445238fb3..1dbe93232030 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2866,6 +2866,7 @@ int sdhci_add_host(struct sdhci_host *host)
u32 max_current_caps;
unsigned int ocr_avail;
unsigned int override_timeout_clk;
+ u32 max_clk;
int ret;
WARN_ON(host == NULL);
@@ -2978,8 +2979,11 @@ int sdhci_add_host(struct sdhci_host *host)
GFP_KERNEL);
host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
if (!host->adma_table || !host->align_buffer) {
- dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
- host->adma_table, host->adma_addr);
+ if (host->adma_table)
+ dma_free_coherent(mmc_dev(mmc),
+ host->adma_table_sz,
+ host->adma_table,
+ host->adma_addr);
kfree(host->align_buffer);
pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
mmc_hostname(mmc));
@@ -3047,18 +3051,22 @@ int sdhci_add_host(struct sdhci_host *host)
* Set host parameters.
*/
mmc->ops = &sdhci_ops;
- mmc->f_max = host->max_clk;
+ max_clk = host->max_clk;
+
if (host->ops->get_min_clock)
mmc->f_min = host->ops->get_min_clock(host);
else if (host->version >= SDHCI_SPEC_300) {
if (host->clk_mul) {
mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
- mmc->f_max = host->max_clk * host->clk_mul;
+ max_clk = host->max_clk * host->clk_mul;
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
+ if (!mmc->f_max || (mmc->f_max && (mmc->f_max > max_clk)))
+ mmc->f_max = max_clk;
+
if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
SDHCI_TIMEOUT_CLK_SHIFT;
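
The sdhci_add_host() hunk stops overwriting mmc->f_max unconditionally: a maximum that arrived from DT or platform data is kept, but only if it is non-zero and no higher than what the controller clock (times clk_mul, when present) can actually deliver. The added condition `!mmc->f_max || (mmc->f_max && (mmc->f_max > max_clk))` reduces to the check in this small, self-contained sketch:

#include <stdio.h>

static unsigned int pick_f_max(unsigned int fw_f_max, unsigned int max_clk)
{
    if (!fw_f_max || fw_f_max > max_clk)
        return max_clk;    /* no usable override: fall back to the controller limit */
    return fw_f_max;
}

int main(void)
{
    printf("%u\n", pick_f_max(0, 200000000));           /* no DT value -> 200 MHz */
    printf("%u\n", pick_f_max(50000000, 200000000));    /* DT caps the bus at 50 MHz */
    return 0;
}
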
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 19eb990d398c..a98dd4f1b0e3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -625,6 +625,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
}
+static struct slave *bond_get_old_active(struct bonding *bond,
+ struct slave *new_active)
+{
+ struct slave *slave;
+ struct list_head *iter;
+
+ bond_for_each_slave(bond, slave, iter) {
+ if (slave == new_active)
+ continue;
+
+ if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
+ return slave;
+ }
+
+ return NULL;
+}
+
/* bond_do_fail_over_mac
*
* Perform special MAC address swapping for fail_over_mac settings
@@ -652,6 +669,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
if (!new_active)
return;
+ if (!old_active)
+ old_active = bond_get_old_active(bond, new_active);
+
if (old_active) {
ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
ether_addr_copy(saddr.sa_data,
@@ -689,40 +709,57 @@ out:
}
-static bool bond_should_change_active(struct bonding *bond)
+static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
struct slave *prim = rtnl_dereference(bond->primary_slave);
struct slave *curr = rtnl_dereference(bond->curr_active_slave);
- if (!prim || !curr || curr->link != BOND_LINK_UP)
- return true;
+ if (!prim || prim->link != BOND_LINK_UP) {
+ if (!curr || curr->link != BOND_LINK_UP)
+ return NULL;
+ return curr;
+ }
+
if (bond->force_primary) {
bond->force_primary = false;
- return true;
+ return prim;
+ }
+
+ if (!curr || curr->link != BOND_LINK_UP)
+ return prim;
+
+ /* At this point, prim and curr are both up */
+ switch (bond->params.primary_reselect) {
+ case BOND_PRI_RESELECT_ALWAYS:
+ return prim;
+ case BOND_PRI_RESELECT_BETTER:
+ if (prim->speed < curr->speed)
+ return curr;
+ if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
+ return curr;
+ return prim;
+ case BOND_PRI_RESELECT_FAILURE:
+ return curr;
+ default:
+ netdev_err(bond->dev, "impossible primary_reselect %d\n",
+ bond->params.primary_reselect);
+ return curr;
}
- if (bond->params.primary_reselect == BOND_PRI_RESELECT_BETTER &&
- (prim->speed < curr->speed ||
- (prim->speed == curr->speed && prim->duplex <= curr->duplex)))
- return false;
- if (bond->params.primary_reselect == BOND_PRI_RESELECT_FAILURE)
- return false;
- return true;
}
/**
- * find_best_interface - select the best available slave to be the active one
+ * bond_find_best_slave - select the best available slave to be the active one
* @bond: our bonding struct
*/
static struct slave *bond_find_best_slave(struct bonding *bond)
{
- struct slave *slave, *bestslave = NULL, *primary;
+ struct slave *slave, *bestslave = NULL;
struct list_head *iter;
int mintime = bond->params.updelay;
- primary = rtnl_dereference(bond->primary_slave);
- if (primary && primary->link == BOND_LINK_UP &&
- bond_should_change_active(bond))
- return primary;
+ slave = bond_choose_primary_or_current(bond);
+ if (slave)
+ return slave;
bond_for_each_slave(bond, slave, iter) {
if (slave->link == BOND_LINK_UP)
@@ -749,6 +786,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
slave ? slave->dev->name : "NULL");
if (!slave || !bond->send_peer_notif ||
+ !netif_carrier_ok(bond->dev) ||
test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
return false;
@@ -1708,9 +1746,16 @@ err_free:
err_undo_flags:
/* Enslave of first slave has failed and we need to fix master's mac */
- if (!bond_has_slaves(bond) &&
- ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr))
- eth_hw_addr_random(bond_dev);
+ if (!bond_has_slaves(bond)) {
+ if (ether_addr_equal_64bits(bond_dev->dev_addr,
+ slave_dev->dev_addr))
+ eth_hw_addr_random(bond_dev);
+ if (bond_dev->type != ARPHRD_ETHER) {
+ ether_setup(bond_dev);
+ bond_dev->flags |= IFF_MASTER;
+ bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ }
+ }
return res;
}
@@ -1899,6 +1944,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
netdev_info(bond_dev, "Destroying bond %s\n",
bond_dev->name);
+ bond_remove_proc_entry(bond);
unregister_netdevice(bond_dev);
}
return ret;
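
Editor's note on the bonding hunks above: the refactor turns the old boolean bond_should_change_active() into a helper that returns the slave to activate, folding the primary_reselect policy into one switch. The following is a minimal userspace sketch of that selection policy only, using simplified stand-in types (slave_state, the RESELECT_* constants) that are illustrative assumptions, not the kernel structures, and omitting the force_primary handling.

#include <stdio.h>

enum reselect { RESELECT_ALWAYS, RESELECT_BETTER, RESELECT_FAILURE };

struct slave_state {
    int link_up;
    unsigned int speed;
    unsigned int duplex;
    const char *name;
};

/* Return the slave that should be active, or NULL if neither is usable.
 * Mirrors the policy of bond_choose_primary_or_current() in the hunk above. */
static const struct slave_state *
choose_primary_or_current(const struct slave_state *prim,
                          const struct slave_state *curr,
                          enum reselect policy)
{
    if (!prim || !prim->link_up)
        return (curr && curr->link_up) ? curr : NULL;
    if (!curr || !curr->link_up)
        return prim;

    /* Both up: the policy decides whether to fail back to the primary. */
    switch (policy) {
    case RESELECT_ALWAYS:
        return prim;
    case RESELECT_BETTER:
        if (prim->speed < curr->speed)
            return curr;
        if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
            return curr;
        return prim;
    case RESELECT_FAILURE:
    default:
        return curr;
    }
}

int main(void)
{
    struct slave_state prim = { 1, 1000, 1, "eth0" };
    struct slave_state curr = { 1, 10000, 1, "eth1" };
    const struct slave_state *s =
        choose_primary_or_current(&prim, &curr, RESELECT_BETTER);

    /* Prints "eth1": the current slave is faster than the primary. */
    printf("active: %s\n", s ? s->name : "none");
    return 0;
}
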
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index f4e40aa4d2a2..945c0955a967 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -577,10 +577,10 @@ static void at91_rx_overflow_err(struct net_device *dev)
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
}
/**
@@ -642,10 +642,10 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
}
at91_read_mb(dev, mb, cf);
- netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
can_led_event(dev, CAN_LED_EVENT_RX);
}
@@ -802,10 +802,10 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
return 0;
at91_poll_err_frame(dev, cf, reg_sr);
- netif_receive_skb(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
return 1;
}
@@ -1067,10 +1067,10 @@ static void at91_irq_err(struct net_device *dev)
return;
at91_irq_err_state(dev, cf, new_state);
- netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->can_dlc;
+ netif_rx(skb);
priv->can.state = new_state;
}
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 27ad312e7abf..57dadd52b428 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -424,10 +424,9 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc)
cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
}
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
@@ -508,10 +507,9 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
priv->can.state = state;
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
return 0;
}
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 041525d2595c..5d214d135332 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -592,6 +592,7 @@ static int c_can_start(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
int err;
+ struct pinctrl *p;
/* basic c_can configuration */
err = c_can_chip_config(dev);
@@ -604,8 +605,13 @@ static int c_can_start(struct net_device *dev)
priv->can.state = CAN_STATE_ERROR_ACTIVE;
- /* activate pins */
- pinctrl_pm_select_default_state(dev->dev.parent);
+ /* Attempt to use "active" if available else use "default" */
+ p = pinctrl_get_select(priv->device, "active");
+ if (!IS_ERR(p))
+ pinctrl_put(p);
+ else
+ pinctrl_pm_select_default_state(priv->device);
+
return 0;
}
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index c11d44984036..70a8cbb29e75 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -504,10 +504,10 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
for (i = 0; i < cf->can_dlc; i++)
cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
}
- netif_rx(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
static int cc770_err(struct net_device *dev, u8 status)
@@ -584,10 +584,10 @@ static int cc770_err(struct net_device *dev, u8 status)
}
}
- netif_rx(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
return 0;
}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index e9b1810d319f..aede704605c6 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -440,9 +440,6 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
struct can_frame *cf = (struct can_frame *)skb->data;
u8 dlc = cf->can_dlc;
- if (!(skb->tstamp.tv64))
- __net_timestamp(skb);
-
netif_rx(priv->echo_skb[idx]);
priv->echo_skb[idx] = NULL;
@@ -578,7 +575,6 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
if (unlikely(!skb))
return NULL;
- __net_timestamp(skb);
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -589,6 +585,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
*cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
memset(*cf, 0, sizeof(struct can_frame));
@@ -607,7 +604,6 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
if (unlikely(!skb))
return NULL;
- __net_timestamp(skb);
skb->protocol = htons(ETH_P_CANFD);
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -618,6 +614,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
*cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame));
memset(*cfd, 0, sizeof(struct canfd_frame));
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 6201c5a1a884..b1e8d729851c 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -577,10 +577,10 @@ static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr)
return 0;
do_bus_err(dev, cf, reg_esr);
- netif_receive_skb(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
return 1;
}
@@ -622,10 +622,9 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
if (unlikely(new_state == CAN_STATE_BUS_OFF))
can_bus_off(dev);
- netif_receive_skb(skb);
-
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
return 1;
}
@@ -670,10 +669,10 @@ static int flexcan_read_frame(struct net_device *dev)
}
flexcan_read_fifo(dev, cf);
- netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
can_led_event(dev, CAN_LED_EVENT_RX);
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index e3d7e22a4fa0..db9538d4b358 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1216,11 +1216,12 @@ static int grcan_receive(struct net_device *dev, int budget)
cf->data[i] = (u8)(slot[j] >> shift);
}
}
- netif_receive_skb(skb);
/* Update statistics and read pointer */
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
+
rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
}
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
index 7deb80dcbe8c..7bd54191f962 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar_can.c
@@ -508,7 +508,8 @@ static int rcar_can_open(struct net_device *ndev)
err = clk_prepare_enable(priv->clk);
if (err) {
- netdev_err(ndev, "failed to enable periperal clock, error %d\n",
+ netdev_err(ndev,
+ "failed to enable peripheral clock, error %d\n",
err);
goto out;
}
@@ -526,7 +527,8 @@ static int rcar_can_open(struct net_device *ndev)
napi_enable(&priv->napi);
err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
if (err) {
- netdev_err(ndev, "error requesting interrupt %x\n", ndev->irq);
+ netdev_err(ndev, "request_irq(%d) failed, error %d\n",
+ ndev->irq, err);
goto out_close;
}
can_led_event(ndev, CAN_LED_EVENT_OPEN);
@@ -758,8 +760,9 @@ static int rcar_can_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (!irq) {
+ if (irq < 0) {
dev_err(&pdev->dev, "No IRQ resource\n");
+ err = irq;
goto fail;
}
@@ -782,7 +785,8 @@ static int rcar_can_probe(struct platform_device *pdev)
priv->clk = devm_clk_get(&pdev->dev, "clkp1");
if (IS_ERR(priv->clk)) {
err = PTR_ERR(priv->clk);
- dev_err(&pdev->dev, "cannot get peripheral clock: %d\n", err);
+ dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
+ err);
goto fail_clk;
}
@@ -794,7 +798,7 @@ static int rcar_can_probe(struct platform_device *pdev)
priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]);
if (IS_ERR(priv->can_clk)) {
err = PTR_ERR(priv->can_clk);
- dev_err(&pdev->dev, "cannot get CAN clock: %d\n", err);
+ dev_err(&pdev->dev, "cannot get CAN clock, error %d\n", err);
goto fail_clk;
}
@@ -823,7 +827,7 @@ static int rcar_can_probe(struct platform_device *pdev)
devm_can_led_init(ndev);
- dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
+ dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n",
priv->regs, ndev->irq);
return 0;
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 32bd7f451aa4..7b92e911a616 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -377,10 +377,9 @@ static void sja1000_rx(struct net_device *dev)
/* release receive buffer */
sja1000_write_cmdreg(priv, CMD_RRB);
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
can_led_event(dev, CAN_LED_EVENT_RX);
}
@@ -484,10 +483,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
can_bus_off(dev);
}
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
return 0;
}
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index f64f5290d6f8..9a3f15cb7ef4 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -207,7 +207,6 @@ static void slc_bump(struct slcan *sl)
if (!skb)
return;
- __net_timestamp(skb);
skb->dev = sl->dev;
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
@@ -215,13 +214,14 @@ static void slc_bump(struct slcan *sl)
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = sl->dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
memcpy(skb_put(skb, sizeof(struct can_frame)),
&cf, sizeof(struct can_frame));
- netif_rx_ni(skb);
sl->dev->stats.rx_packets++;
sl->dev->stats.rx_bytes += cf.can_dlc;
+ netif_rx_ni(skb);
}
/* parse tty input stream */
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index c1a95a34d62e..b7e83c212023 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1086,8 +1086,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
if (ret)
goto out_clk;
- priv->power = devm_regulator_get(&spi->dev, "vdd");
- priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
+ priv->power = devm_regulator_get_optional(&spi->dev, "vdd");
+ priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
(PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
ret = -EPROBE_DEFER;
@@ -1222,17 +1222,16 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev)
struct spi_device *spi = to_spi_device(dev);
struct mcp251x_priv *priv = spi_get_drvdata(spi);
- if (priv->after_suspend & AFTER_SUSPEND_POWER) {
+ if (priv->after_suspend & AFTER_SUSPEND_POWER)
mcp251x_power_enable(priv->power, 1);
+
+ if (priv->after_suspend & AFTER_SUSPEND_UP) {
+ mcp251x_power_enable(priv->transceiver, 1);
queue_work(priv->wq, &priv->restart_work);
} else {
- if (priv->after_suspend & AFTER_SUSPEND_UP) {
- mcp251x_power_enable(priv->transceiver, 1);
- queue_work(priv->wq, &priv->restart_work);
- } else {
- priv->after_suspend = 0;
- }
+ priv->after_suspend = 0;
}
+
priv->force_quit = 0;
enable_irq(spi->irq);
return 0;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index e95a9e1a889f..cf345cbfe819 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -747,9 +747,9 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
}
}
- netif_rx(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
return 0;
}
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 866bac0ae7e9..2d390384ef3b 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -324,10 +324,9 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
cf->data[i] = msg->msg.can_msg.msg[i];
}
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
@@ -400,10 +399,9 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
stats->rx_errors++;
}
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
/*
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 411c1af92c62..0e5a4493ba4f 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -301,13 +301,12 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
cf->data[7] = rxerr;
}
- netif_rx(skb);
-
priv->bec.txerr = txerr;
priv->bec.rxerr = rxerr;
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
}
@@ -347,10 +346,9 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
cf->data[i] = msg->msg.rx.data[i];
}
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
return;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 72427f21edff..6b94007ae052 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -526,9 +526,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
hwts->hwtstamp = timeval_to_ktime(tv);
}
- netif_rx(skb);
mc->netdev->stats.rx_packets++;
mc->netdev->stats.rx_bytes += cf->can_dlc;
+ netif_rx(skb);
return 0;
}
@@ -659,12 +659,11 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
hwts = skb_hwtstamps(skb);
hwts->hwtstamp = timeval_to_ktime(tv);
- /* push the skb */
- netif_rx(skb);
-
/* update statistics */
mc->netdev->stats.rx_packets++;
mc->netdev->stats.rx_bytes += cf->can_dlc;
+ /* push the skb */
+ netif_rx(skb);
return 0;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index dec51717635e..7d61b3279798 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -553,9 +553,9 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
hwts = skb_hwtstamps(skb);
hwts->hwtstamp = timeval_to_ktime(tv);
- netif_rx(skb);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += can_frame->can_dlc;
+ netif_rx(skb);
return 0;
}
@@ -670,9 +670,9 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
hwts = skb_hwtstamps(skb);
hwts->hwtstamp = timeval_to_ktime(tv);
- netif_rx(skb);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += can_frame->can_dlc;
+ netif_rx(skb);
return 0;
}
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index dd52c7a4c80d..de95b1ccba3e 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -461,10 +461,9 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
priv->bec.txerr = txerr;
priv->bec.rxerr = rxerr;
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
/* Read data and status frames */
@@ -494,10 +493,9 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
else
memcpy(cf->data, msg->data, cf->can_dlc);
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
can_led_event(priv->netdev, CAN_LED_EVENT_RX);
} else {
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 0ce868de855d..674f367087c5 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -78,9 +78,6 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
skb->dev = dev;
skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (!(skb->tstamp.tv64))
- __net_timestamp(skb);
-
netif_rx_ni(skb);
}
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 972982f8bea7..079897b3a955 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -696,9 +696,20 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
}
/* Include the pseudo-PHY address and the broadcast PHY address to
- * divert reads towards our workaround
+ * divert reads towards our workaround. This is only required for
+ * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
+ * that we can use the regular SWITCH_MDIO master controller instead.
+ *
+ * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask
+ * to have a 1:1 mapping between Port address and PHY address in order
+ * to utilize the slave_mii_bus instance to read from Port PHYs. This is
+ * not what we want here, so we initialize phys_mii_mask to 0 to always
+ * utilize the "master" MDIO bus backed by the "mdio-unimac" driver.
*/
- ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
+ if (of_machine_is_compatible("brcm,bcm7445d0"))
+ ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
+ else
+ ds->phys_mii_mask = 0;
rev = reg_readl(priv, REG_SWITCH_REVISION);
priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index fd8547c2b79d..561342466076 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -1163,7 +1163,7 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
newfid = __ffs(ps->fid_mask);
ps->fid[port] = newfid;
- ps->fid_mask &= (1 << newfid);
+ ps->fid_mask &= ~(1 << newfid);
ps->bridge_mask[fid] &= ~(1 << port);
ps->bridge_mask[newfid] = 1 << port;
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 41095ebad97f..753887d02b46 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1763,16 +1763,9 @@ vortex_open(struct net_device *dev)
vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
}
if (i != RX_RING_SIZE) {
- int j;
pr_emerg("%s: no memory for rx ring\n", dev->name);
- for (j = 0; j < i; j++) {
- if (vp->rx_skbuff[j]) {
- dev_kfree_skb(vp->rx_skbuff[j]);
- vp->rx_skbuff[j] = NULL;
- }
- }
retval = -ENOMEM;
- goto err_free_irq;
+ goto err_free_skb;
}
/* Wrap the ring. */
vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
@@ -1782,7 +1775,13 @@ vortex_open(struct net_device *dev)
if (!retval)
goto out;
-err_free_irq:
+err_free_skb:
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (vp->rx_skbuff[i]) {
+ dev_kfree_skb(vp->rx_skbuff[i]);
+ vp->rx_skbuff[i] = NULL;
+ }
+ }
free_irq(dev->irq, dev);
err:
if (vortex_debug > 1)
@@ -2382,6 +2381,7 @@ boomerang_interrupt(int irq, void *dev_id)
void __iomem *ioaddr;
int status;
int work_done = max_interrupt_work;
+ int handled = 0;
ioaddr = vp->ioaddr;
@@ -2400,6 +2400,7 @@ boomerang_interrupt(int irq, void *dev_id)
if ((status & IntLatch) == 0)
goto handler_exit; /* No interrupt: shared IRQs can cause this */
+ handled = 1;
if (status == 0xffff) { /* h/w no longer present (hotplug)? */
if (vortex_debug > 1)
@@ -2501,7 +2502,7 @@ boomerang_interrupt(int irq, void *dev_id)
handler_exit:
vp->handling_irq = 0;
spin_unlock(&vp->lock);
- return IRQ_HANDLED;
+ return IRQ_RETVAL(handled);
}
static int vortex_rx(struct net_device *dev)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 661cdaa7ea96..b3bc87fe3764 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -303,7 +303,8 @@ static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
get_page(pa->pages);
bd->pa = *pa;
- bd->dma = pa->pages_dma + pa->pages_offset;
+ bd->dma_base = pa->pages_dma;
+ bd->dma_off = pa->pages_offset;
bd->dma_len = len;
pa->pages_offset += len;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 506e832c9e9a..a4473d8ff4fa 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1110,6 +1110,7 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
unsigned int rx_usecs = pdata->rx_usecs;
unsigned int rx_frames = pdata->rx_frames;
unsigned int inte;
+ dma_addr_t hdr_dma, buf_dma;
if (!rx_usecs && !rx_frames) {
/* No coalescing, interrupt for every descriptor */
@@ -1129,10 +1130,12 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
* Set buffer 2 (hi) address to buffer dma address (hi) and
* set control bits OWN and INTE
*/
- rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma));
- rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
- rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
+ hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
+ buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
+ rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
+ rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 1e9c28d19ef8..aae9d5ecd182 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1765,8 +1765,9 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
/* Start with the header buffer which may contain just the header
* or the header plus data
*/
- dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma,
- rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
+ dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
+ rdata->rx.hdr.dma_off,
+ rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
packet = page_address(rdata->rx.hdr.pa.pages) +
rdata->rx.hdr.pa.pages_offset;
@@ -1778,8 +1779,11 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
len -= copy_len;
if (len) {
/* Add the remaining data as a frag */
- dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma,
- rdata->rx.buf.dma_len, DMA_FROM_DEVICE);
+ dma_sync_single_range_for_cpu(pdata->dev,
+ rdata->rx.buf.dma_base,
+ rdata->rx.buf.dma_off,
+ rdata->rx.buf.dma_len,
+ DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
rdata->rx.buf.pa.pages,
@@ -1945,8 +1949,9 @@ read_again:
if (!skb)
error = 1;
} else if (rdesc_len) {
- dma_sync_single_for_cpu(pdata->dev,
- rdata->rx.buf.dma,
+ dma_sync_single_range_for_cpu(pdata->dev,
+ rdata->rx.buf.dma_base,
+ rdata->rx.buf.dma_off,
rdata->rx.buf.dma_len,
DMA_FROM_DEVICE);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 63d72a140053..717ce21b6077 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -337,7 +337,8 @@ struct xgbe_buffer_data {
struct xgbe_page_alloc pa;
struct xgbe_page_alloc pa_unmap;
- dma_addr_t dma;
+ dma_addr_t dma_base;
+ unsigned long dma_off;
unsigned int dma_len;
};
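
Editor's note on the xgbe hunks above: splitting the stored address into dma_base plus dma_off lets the driver call dma_sync_single_range_for_cpu() with the original mapping address and an offset, rather than a pre-added address, which is presumably what the range-sync API expects. A minimal userspace model of just the bookkeeping, with a stub sync function standing in for the DMA API; names and types here are illustrative only:

#include <stdio.h>
#include <stddef.h>

/* Sub-buffer carved out of one large mapped allocation: keep the mapping
 * base and the offset separately, as the xgbe_buffer_data change does. */
struct buffer_data {
    unsigned long dma_base;   /* address the whole page set was mapped at */
    unsigned long dma_off;    /* where this buffer starts inside it */
    size_t        dma_len;
};

/* Stand-in for dma_sync_single_range_for_cpu(dev, base, off, len, dir). */
static void sync_range_for_cpu(unsigned long base, unsigned long off, size_t len)
{
    printf("sync [%#lx + %#lx, %zu bytes]\n", base, off, len);
}

static void carve(struct buffer_data *bd, unsigned long pages_dma,
                  unsigned long *pages_offset, size_t len)
{
    bd->dma_base = pages_dma;
    bd->dma_off  = *pages_offset;
    bd->dma_len  = len;
    *pages_offset += len;
}

int main(void)
{
    unsigned long pages_dma = 0x80000000UL;  /* pretend mapping address */
    unsigned long offset = 0;
    struct buffer_data hdr, buf;

    carve(&hdr, pages_dma, &offset, 256);
    carve(&buf, pages_dma, &offset, 2048);

    sync_range_for_cpu(hdr.dma_base, hdr.dma_off, hdr.dma_len);
    sync_range_for_cpu(buf.dma_base, buf.dma_off, buf.dma_len);
    return 0;
}
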
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 909ad7a0d480..4566cdf0bc39 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1793,7 +1793,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
macaddr = of_get_mac_address(dn);
if (!macaddr || !is_valid_ether_addr(macaddr)) {
dev_warn(&pdev->dev, "using random Ethernet MAC\n");
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
} else {
ether_addr_copy(dev->dev_addr, macaddr);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index a90d7364334f..f7fbdc9d1325 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -262,9 +262,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
if (likely(skb)) {
(*pkts_compl)++;
(*bytes_compl) += skb->len;
+ dev_kfree_skb_any(skb);
}
- dev_kfree_skb_any(skb);
tx_buf->first_bd = 0;
tx_buf->skb = NULL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 76b9052a961c..5907c821d131 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1718,6 +1718,22 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
offset += sizeof(u32);
data_buf += sizeof(u32);
written_so_far += sizeof(u32);
+
+ /* At end of each 4Kb page, release nvram lock to allow MFW
+ * chance to take it for its own use.
+ */
+ if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
+ (written_so_far < buf_size)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "Releasing NVM lock after offset 0x%x\n",
+ (u32)(offset - sizeof(u32)));
+ bnx2x_release_nvram_lock(bp);
+ usleep_range(1000, 2000);
+ rc = bnx2x_acquire_nvram_lock(bp);
+ if (rc)
+ return rc;
+ }
+
cmd_flags = 0;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index b43b2cb9b830..64c1e9db6b0b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1230,7 +1230,6 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
new_skb = skb_realloc_headroom(skb, sizeof(*status));
dev_kfree_skb(skb);
if (!new_skb) {
- dev->stats.tx_errors++;
dev->stats.tx_dropped++;
return NULL;
}
@@ -1465,7 +1464,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(!skb)) {
dev->stats.rx_dropped++;
- dev->stats.rx_errors++;
goto next;
}
@@ -1493,7 +1491,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
- dev->stats.rx_dropped++;
dev->stats.rx_errors++;
dev_kfree_skb_any(skb);
goto next;
@@ -1515,7 +1512,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
dev->stats.rx_frame_errors++;
if (dma_flag & DMA_RX_LG)
dev->stats.rx_length_errors++;
- dev->stats.rx_dropped++;
dev->stats.rx_errors++;
dev_kfree_skb_any(skb);
goto next;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index ac27e24264a5..f557a2aaec23 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -1508,16 +1508,7 @@ static void sbmac_channel_start(struct sbmac_softc *s)
__raw_writeq(reg, port);
port = s->sbm_base + R_MAC_ETHERNET_ADDR;
-#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
- /*
- * Pass1 SOCs do not receive packets addressed to the
- * destination address in the R_MAC_ETHERNET_ADDR register.
- * Set the value to zero.
- */
- __raw_writeq(0, port);
-#else
__raw_writeq(reg, port);
-#endif
/*
* Set the receive filter for no packets, and write values
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 0612b19f6313..506047c38607 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -676,6 +676,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
if (!next_cmpl->valid)
break;
}
+ packets++;
/* TODO: BNA_CQ_EF_LOCAL ? */
if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
@@ -692,7 +693,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
else
bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
- packets++;
rcb->rxq->rx_packets++;
rcb->rxq->rx_bytes += totlen;
ccb->bytes_per_intr += totlen;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index caeb39561567..bf9eb2ecf960 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -104,6 +104,57 @@ static void *macb_rx_buffer(struct macb *bp, unsigned int index)
return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
}
+/* I/O accessors */
+static u32 hw_readl_native(struct macb *bp, int offset)
+{
+ return __raw_readl(bp->regs + offset);
+}
+
+static void hw_writel_native(struct macb *bp, int offset, u32 value)
+{
+ __raw_writel(value, bp->regs + offset);
+}
+
+static u32 hw_readl(struct macb *bp, int offset)
+{
+ return readl_relaxed(bp->regs + offset);
+}
+
+static void hw_writel(struct macb *bp, int offset, u32 value)
+{
+ writel_relaxed(value, bp->regs + offset);
+}
+
+/*
+ * Find the CPU endianness by using the loopback bit of NCR register. When the
+ * CPU is in big endian we need to program swapped mode for management
+ * descriptor access.
+ */
+static bool hw_is_native_io(void __iomem *addr)
+{
+ u32 value = MACB_BIT(LLB);
+
+ __raw_writel(value, addr + MACB_NCR);
+ value = __raw_readl(addr + MACB_NCR);
+
+ /* Write 0 back to disable everything */
+ __raw_writel(0, addr + MACB_NCR);
+
+ return value == MACB_BIT(LLB);
+}
+
+static bool hw_is_gem(void __iomem *addr, bool native_io)
+{
+ u32 id;
+
+ if (native_io)
+ id = __raw_readl(addr + MACB_MID);
+ else
+ id = readl_relaxed(addr + MACB_MID);
+
+ return MACB_BFEXT(IDNUM, id) >= 0x2;
+}
+
static void macb_set_hwaddr(struct macb *bp)
{
u32 bottom;
@@ -160,7 +211,7 @@ static void macb_get_hwaddr(struct macb *bp)
}
}
- netdev_info(bp->dev, "invalid hw address, using random\n");
+ dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
eth_hw_addr_random(bp->dev);
}
@@ -252,7 +303,6 @@ static void macb_handle_link_change(struct net_device *dev)
struct macb *bp = netdev_priv(dev);
struct phy_device *phydev = bp->phy_dev;
unsigned long flags;
-
int status_change = 0;
spin_lock_irqsave(&bp->lock, flags);
@@ -449,14 +499,14 @@ err_out:
static void macb_update_stats(struct macb *bp)
{
- u32 __iomem *reg = bp->regs + MACB_PFR;
u32 *p = &bp->hw_stats.macb.rx_pause_frames;
u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
+ int offset = MACB_PFR;
WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
- for(; p < end; p++, reg++)
- *p += readl_relaxed(reg);
+ for(; p < end; p++, offset += 4)
+ *p += bp->macb_reg_readl(bp, offset);
}
static int macb_halt_tx(struct macb *bp)
@@ -1107,12 +1157,6 @@ static void macb_poll_controller(struct net_device *dev)
}
#endif
-static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
- unsigned int len)
-{
- return (len + bp->max_tx_length - 1) / bp->max_tx_length;
-}
-
static unsigned int macb_tx_map(struct macb *bp,
struct macb_queue *queue,
struct sk_buff *skb)
@@ -1263,11 +1307,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
* socket buffer: skb fragments of jumbo frames may need to be
* splitted into many buffer descriptors.
*/
- count = macb_count_tx_descriptors(bp, skb_headlen(skb));
+ count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++) {
frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
- count += macb_count_tx_descriptors(bp, frag_size);
+ count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
}
spin_lock_irqsave(&bp->lock, flags);
@@ -1603,7 +1647,6 @@ static u32 macb_dbw(struct macb *bp)
static void macb_configure_dma(struct macb *bp)
{
u32 dmacfg;
- u32 tmp, ncr;
if (macb_is_gem(bp)) {
dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
@@ -1613,22 +1656,11 @@ static void macb_configure_dma(struct macb *bp)
dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
dmacfg &= ~GEM_BIT(ENDIA_PKT);
- /* Find the CPU endianness by using the loopback bit of net_ctrl
- * register. save it first. When the CPU is in big endian we
- * need to program swaped mode for management descriptor access.
- */
- ncr = macb_readl(bp, NCR);
- __raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR);
- tmp = __raw_readl(bp->regs + MACB_NCR);
-
- if (tmp == MACB_BIT(LLB))
+ if (bp->native_io)
dmacfg &= ~GEM_BIT(ENDIA_DESC);
else
dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
- /* Restore net_ctrl */
- macb_writel(bp, NCR, ncr);
-
if (bp->dev->features & NETIF_F_HW_CSUM)
dmacfg |= GEM_BIT(TXCOEN);
else
@@ -1897,19 +1929,19 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
static void gem_update_stats(struct macb *bp)
{
- int i;
+ unsigned int i;
u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
u32 offset = gem_statistics[i].offset;
- u64 val = readl_relaxed(bp->regs + offset);
+ u64 val = bp->macb_reg_readl(bp, offset);
bp->ethtool_stats[i] += val;
*p += val;
if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
/* Add GEM_OCTTXH, GEM_OCTRXH */
- val = readl_relaxed(bp->regs + offset + 4);
+ val = bp->macb_reg_readl(bp, offset + 4);
bp->ethtool_stats[i] += ((u64)val) << 32;
*(++p) += val;
}
@@ -1976,7 +2008,7 @@ static int gem_get_sset_count(struct net_device *dev, int sset)
static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
{
- int i;
+ unsigned int i;
switch (sset) {
case ETH_SS_STATS:
@@ -2190,7 +2222,7 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co
if (dt_conf)
bp->caps = dt_conf->caps;
- if (macb_is_gem_hw(bp->regs)) {
+ if (hw_is_gem(bp->regs, bp->native_io)) {
bp->caps |= MACB_CAPS_MACB_IS_GEM;
dcfg = gem_readl(bp, DCFG1);
@@ -2201,10 +2233,11 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co
bp->caps |= MACB_CAPS_FIFO_MODE;
}
- netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps);
+ dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
}
static void macb_probe_queues(void __iomem *mem,
+ bool native_io,
unsigned int *queue_mask,
unsigned int *num_queues)
{
@@ -2219,7 +2252,7 @@ static void macb_probe_queues(void __iomem *mem,
* we are early in the probe process and don't have the
* MACB_CAPS_MACB_IS_GEM flag positioned
*/
- if (!macb_is_gem_hw(mem))
+ if (!hw_is_gem(mem, native_io))
return;
/* bit 0 is never set but queue 0 always exists */
@@ -2786,6 +2819,7 @@ static int macb_probe(struct platform_device *pdev)
struct clk *pclk, *hclk, *tx_clk;
unsigned int queue_mask, num_queues;
struct macb_platform_data *pdata;
+ bool native_io;
struct phy_device *phydev;
struct net_device *dev;
struct resource *regs;
@@ -2794,6 +2828,11 @@ static int macb_probe(struct platform_device *pdev)
struct macb *bp;
int err;
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mem = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
if (np) {
const struct of_device_id *match;
@@ -2809,14 +2848,9 @@ static int macb_probe(struct platform_device *pdev)
if (err)
return err;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mem = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(mem)) {
- err = PTR_ERR(mem);
- goto err_disable_clocks;
- }
+ native_io = hw_is_native_io(mem);
- macb_probe_queues(mem, &queue_mask, &num_queues);
+ macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
if (!dev) {
err = -ENOMEM;
@@ -2831,6 +2865,14 @@ static int macb_probe(struct platform_device *pdev)
bp->pdev = pdev;
bp->dev = dev;
bp->regs = mem;
+ bp->native_io = native_io;
+ if (native_io) {
+ bp->macb_reg_readl = hw_readl_native;
+ bp->macb_reg_writel = hw_writel_native;
+ } else {
+ bp->macb_reg_readl = hw_readl;
+ bp->macb_reg_writel = hw_writel;
+ }
bp->num_queues = num_queues;
bp->queue_mask = queue_mask;
if (macb_config)
@@ -2838,9 +2880,8 @@ static int macb_probe(struct platform_device *pdev)
bp->pclk = pclk;
bp->hclk = hclk;
bp->tx_clk = tx_clk;
- if (macb_config->jumbo_max_len) {
+ if (macb_config)
bp->jumbo_max_len = macb_config->jumbo_max_len;
- }
spin_lock_init(&bp->lock);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d74655993d4b..1895b6b2addd 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -429,18 +429,12 @@
| GEM_BF(name, value))
/* Register access macros */
-#define macb_readl(port,reg) \
- readl_relaxed((port)->regs + MACB_##reg)
-#define macb_writel(port,reg,value) \
- writel_relaxed((value), (port)->regs + MACB_##reg)
-#define gem_readl(port, reg) \
- readl_relaxed((port)->regs + GEM_##reg)
-#define gem_writel(port, reg, value) \
- writel_relaxed((value), (port)->regs + GEM_##reg)
-#define queue_readl(queue, reg) \
- readl_relaxed((queue)->bp->regs + (queue)->reg)
-#define queue_writel(queue, reg, value) \
- writel_relaxed((value), (queue)->bp->regs + (queue)->reg)
+#define macb_readl(port, reg) (port)->macb_reg_readl((port), MACB_##reg)
+#define macb_writel(port, reg, value) (port)->macb_reg_writel((port), MACB_##reg, (value))
+#define gem_readl(port, reg) (port)->macb_reg_readl((port), GEM_##reg)
+#define gem_writel(port, reg, value) (port)->macb_reg_writel((port), GEM_##reg, (value))
+#define queue_readl(queue, reg) (queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg)
+#define queue_writel(queue, reg, value) (queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value))
/* Conditional GEM/MACB macros. These perform the operation to the correct
* register dependent on whether the device is a GEM or a MACB. For registers
@@ -785,6 +779,11 @@ struct macb_queue {
struct macb {
void __iomem *regs;
+ bool native_io;
+
+ /* hardware IO accessors */
+ u32 (*macb_reg_readl)(struct macb *bp, int offset);
+ void (*macb_reg_writel)(struct macb *bp, int offset, u32 value);
unsigned int rx_tail;
unsigned int rx_prepared_head;
@@ -817,9 +816,9 @@ struct macb {
struct mii_bus *mii_bus;
struct phy_device *phy_dev;
- unsigned int link;
- unsigned int speed;
- unsigned int duplex;
+ int link;
+ int speed;
+ int duplex;
u32 caps;
unsigned int dma_burst_length;
@@ -843,9 +842,4 @@ static inline bool macb_is_gem(struct macb *bp)
return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
}
-static inline bool macb_is_gem_hw(void __iomem *addr)
-{
- return !!(MACB_BFEXT(IDNUM, readl_relaxed(addr + MACB_MID)) >= 0x2);
-}
-
#endif /* _MACB_H */
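
Editor's note on the macb hunks above: the compile-time readl_relaxed/writel_relaxed macros become per-device function pointers chosen once at probe time, after the loopback-bit test decides whether native (__raw) accessors are needed. A minimal sketch of the same dispatch pattern, with plain memory in place of MMIO so it stays runnable; the dev struct and accessor names are illustrative assumptions, not the driver's:

#include <stdio.h>
#include <stdint.h>

struct dev {
    uint32_t regs[16];
    uint32_t (*reg_readl)(struct dev *d, int offset);
    void (*reg_writel)(struct dev *d, int offset, uint32_t value);
};

static uint32_t readl_plain(struct dev *d, int offset)
{
    return d->regs[offset / 4];
}

static void writel_plain(struct dev *d, int offset, uint32_t value)
{
    d->regs[offset / 4] = value;
}

static uint32_t readl_swapped(struct dev *d, int offset)
{
    return __builtin_bswap32(d->regs[offset / 4]);
}

static void writel_swapped(struct dev *d, int offset, uint32_t value)
{
    d->regs[offset / 4] = __builtin_bswap32(value);
}

/* Probe-time selection, analogous to the native_io test in macb_probe(). */
static void dev_init(struct dev *d, int native_io)
{
    for (int i = 0; i < 16; i++)
        d->regs[i] = 0;
    d->reg_readl  = native_io ? readl_swapped : readl_plain;
    d->reg_writel = native_io ? writel_swapped : writel_plain;
}

int main(void)
{
    struct dev d;

    dev_init(&d, 1);
    d.reg_writel(&d, 0x8, 0x12345678);
    /* Round-trips to 0x12345678 regardless of which accessor pair is used. */
    printf("reg 0x8 = 0x%08x\n", d.reg_readl(&d, 0x8));
    return 0;
}
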
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index c4d6bbe9458d..02e23e6f1424 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -16,7 +16,6 @@ if NET_VENDOR_CAVIUM
config THUNDER_NIC_PF
tristate "Thunder Physical function driver"
depends on 64BIT
- default ARCH_THUNDER
select THUNDER_NIC_BGX
---help---
This driver supports Thunder's NIC physical function.
@@ -29,14 +28,12 @@ config THUNDER_NIC_PF
config THUNDER_NIC_VF
tristate "Thunder Virtual function driver"
depends on 64BIT
- default ARCH_THUNDER
---help---
This driver supports Thunder's NIC virtual function
config THUNDER_NIC_BGX
tristate "Thunder MAC interface driver (BGX)"
depends on 64BIT
- default ARCH_THUNDER
---help---
This driver supports programming and controlling of MAC
interface from NIC physical function driver.
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index dda8a02b7322..8aee250904ec 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -125,6 +125,15 @@
*/
#define NICPF_CLK_PER_INT_TICK 2
+/* Time to wait before we decide that a SQ is stuck.
+ *
+ * Since both pkt rx and tx notifications are done with the same CQ,
+ * when packets are being received at a very high rate (e.g. L2 forwarding),
+ * freeing transmitted skbs will be delayed and the watchdog
+ * will kick in, resetting the interface. Hence keep this value high.
+ */
+#define NICVF_TX_TIMEOUT (50 * HZ)
+
struct nicvf_cq_poll {
u8 cq_idx; /* Completion queue index */
struct napi_struct napi;
@@ -216,8 +225,9 @@ struct nicvf_drv_stats {
/* Tx */
u64 tx_frames_ok;
u64 tx_drops;
- u64 tx_busy;
u64 tx_tso;
+ u64 txq_stop;
+ u64 txq_wake;
};
struct nicvf {
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 16bd2d772db9..a4228e664567 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -66,9 +66,10 @@ static const struct nicvf_stat nicvf_drv_stats[] = {
NICVF_DRV_STAT(rx_frames_jumbo),
NICVF_DRV_STAT(rx_drops),
NICVF_DRV_STAT(tx_frames_ok),
- NICVF_DRV_STAT(tx_busy),
NICVF_DRV_STAT(tx_tso),
NICVF_DRV_STAT(tx_drops),
+ NICVF_DRV_STAT(txq_stop),
+ NICVF_DRV_STAT(txq_wake),
};
static const struct nicvf_stat nicvf_queue_stats[] = {
@@ -126,6 +127,7 @@ static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
+ struct nicvf *nic = netdev_priv(netdev);
int stats, qidx;
if (sset != ETH_SS_STATS)
@@ -141,7 +143,7 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
data += ETH_GSTRING_LEN;
}
- for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
+ for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
sprintf(data, "rxq%d: %s", qidx,
nicvf_queue_stats[stats].name);
@@ -149,7 +151,7 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
}
}
- for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+ for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
sprintf(data, "txq%d: %s", qidx,
nicvf_queue_stats[stats].name);
@@ -170,12 +172,14 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
static int nicvf_get_sset_count(struct net_device *netdev, int sset)
{
+ struct nicvf *nic = netdev_priv(netdev);
+
if (sset != ETH_SS_STATS)
return -EINVAL;
return nicvf_n_hw_stats + nicvf_n_drv_stats +
(nicvf_n_queue_stats *
- (MAX_RCV_QUEUES_PER_QS + MAX_SND_QUEUES_PER_QS)) +
+ (nic->qs->rq_cnt + nic->qs->sq_cnt)) +
BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
}
@@ -197,13 +201,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
*(data++) = ((u64 *)&nic->drv_stats)
[nicvf_drv_stats[stat].index];
- for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
+ for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
for (stat = 0; stat < nicvf_n_queue_stats; stat++)
*(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
[nicvf_queue_stats[stat].index];
}
- for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+ for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
for (stat = 0; stat < nicvf_n_queue_stats; stat++)
*(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
[nicvf_queue_stats[stat].index];
@@ -543,6 +547,7 @@ static int nicvf_set_channels(struct net_device *dev,
{
struct nicvf *nic = netdev_priv(dev);
int err = 0;
+ bool if_up = netif_running(dev);
if (!channel->rx_count || !channel->tx_count)
return -EINVAL;
@@ -551,6 +556,9 @@ static int nicvf_set_channels(struct net_device *dev,
if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
return -EINVAL;
+ if (if_up)
+ nicvf_stop(dev);
+
nic->qs->rq_cnt = channel->rx_count;
nic->qs->sq_cnt = channel->tx_count;
nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
@@ -559,11 +567,9 @@ static int nicvf_set_channels(struct net_device *dev,
if (err)
return err;
- if (!netif_running(dev))
- return err;
+ if (if_up)
+ nicvf_open(dev);
- nicvf_stop(dev);
- nicvf_open(dev);
netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
nic->qs->sq_cnt, nic->qs->rq_cnt);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 8b119a035b7e..3b90afb8c293 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -234,7 +234,7 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
nic->duplex == DUPLEX_FULL ?
"Full duplex" : "Half duplex");
netif_carrier_on(nic->netdev);
- netif_tx_wake_all_queues(nic->netdev);
+ netif_tx_start_all_queues(nic->netdev);
} else {
netdev_info(nic->netdev, "%s: Link is Down\n",
nic->netdev->name);
@@ -425,6 +425,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
if (skb) {
prefetch(skb);
dev_consume_skb_any(skb);
+ sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
}
}
@@ -476,12 +477,13 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
struct napi_struct *napi, int budget)
{
- int processed_cqe, work_done = 0;
+ int processed_cqe, work_done = 0, tx_done = 0;
int cqe_count, cqe_head;
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
struct cmp_queue *cq = &qs->cq[cq_idx];
struct cqe_rx_t *cq_desc;
+ struct netdev_queue *txq;
spin_lock_bh(&cq->lock);
loop:
@@ -496,8 +498,8 @@ loop:
cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
cqe_head &= 0xFFFF;
- netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n",
- __func__, cqe_count, cqe_head);
+ netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
+ __func__, cq_idx, cqe_count, cqe_head);
while (processed_cqe < cqe_count) {
/* Get the CQ descriptor */
cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
@@ -511,8 +513,8 @@ loop:
break;
}
- netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n",
- cq_desc->cqe_type);
+ netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
+ cq_idx, cq_desc->cqe_type);
switch (cq_desc->cqe_type) {
case CQE_TYPE_RX:
nicvf_rcv_pkt_handler(netdev, napi, cq,
@@ -522,6 +524,7 @@ loop:
case CQE_TYPE_SEND:
nicvf_snd_pkt_handler(netdev, cq,
(void *)cq_desc, CQE_TYPE_SEND);
+ tx_done++;
break;
case CQE_TYPE_INVALID:
case CQE_TYPE_RX_SPLIT:
@@ -532,8 +535,9 @@ loop:
}
processed_cqe++;
}
- netdev_dbg(nic->netdev, "%s processed_cqe %d work_done %d budget %d\n",
- __func__, processed_cqe, work_done, budget);
+ netdev_dbg(nic->netdev,
+ "%s CQ%d processed_cqe %d work_done %d budget %d\n",
+ __func__, cq_idx, processed_cqe, work_done, budget);
/* Ring doorbell to inform H/W to reuse processed CQEs */
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
@@ -543,6 +547,19 @@ loop:
goto loop;
done:
+ /* Wakeup TXQ if its stopped earlier due to SQ full */
+ if (tx_done) {
+ txq = netdev_get_tx_queue(netdev, cq_idx);
+ if (netif_tx_queue_stopped(txq)) {
+ netif_tx_start_queue(txq);
+ nic->drv_stats.txq_wake++;
+ if (netif_msg_tx_err(nic))
+ netdev_warn(netdev,
+ "%s: Transmit queue wakeup SQ%d\n",
+ netdev->name, cq_idx);
+ }
+ }
+
spin_unlock_bh(&cq->lock);
return work_done;
}
@@ -554,15 +571,10 @@ static int nicvf_poll(struct napi_struct *napi, int budget)
struct net_device *netdev = napi->dev;
struct nicvf *nic = netdev_priv(netdev);
struct nicvf_cq_poll *cq;
- struct netdev_queue *txq;
cq = container_of(napi, struct nicvf_cq_poll, napi);
work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
- txq = netdev_get_tx_queue(netdev, cq->cq_idx);
- if (netif_tx_queue_stopped(txq))
- netif_tx_wake_queue(txq);
-
if (work_done < budget) {
/* Slow packet rate, exit polling */
napi_complete(napi);
@@ -833,9 +845,9 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- if (!nicvf_sq_append_skb(nic, skb) && !netif_tx_queue_stopped(txq)) {
+ if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
netif_tx_stop_queue(txq);
- nic->drv_stats.tx_busy++;
+ nic->drv_stats.txq_stop++;
if (netif_msg_tx_err(nic))
netdev_warn(netdev,
"%s: Transmit ring full, stopping SQ%d\n",
@@ -859,7 +871,6 @@ int nicvf_stop(struct net_device *netdev)
nicvf_send_msg_to_pf(nic, &mbx);
netif_carrier_off(netdev);
- netif_tx_disable(netdev);
/* Disable RBDR & QS error interrupts */
for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
@@ -894,6 +905,8 @@ int nicvf_stop(struct net_device *netdev)
kfree(cq_poll);
}
+ netif_tx_disable(netdev);
+
/* Free resources */
nicvf_config_data_transfer(nic, false);
@@ -988,6 +1001,9 @@ int nicvf_open(struct net_device *netdev)
for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
+ nic->drv_stats.txq_stop = 0;
+ nic->drv_stats.txq_wake = 0;
+
netif_carrier_on(netdev);
netif_tx_start_all_queues(netdev);
@@ -1278,6 +1294,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features = netdev->features;
netdev->netdev_ops = &nicvf_netdev_ops;
+ netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
INIT_WORK(&nic->reset_task, nicvf_reset_task);
@@ -1318,11 +1335,17 @@ static void nicvf_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static void nicvf_shutdown(struct pci_dev *pdev)
+{
+ nicvf_remove(pdev);
+}
+
static struct pci_driver nicvf_driver = {
.name = DRV_NAME,
.id_table = nicvf_id_table,
.probe = nicvf_probe,
.remove = nicvf_remove,
+ .shutdown = nicvf_shutdown,
};
static int __init nicvf_init_module(void)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d69d228d11a0..ca4240aa6d15 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -103,9 +103,11 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
/* Allocate a new page */
if (!nic->rb_page) {
- nic->rb_page = alloc_pages(gfp | __GFP_COMP, order);
+ nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
+ order);
if (!nic->rb_page) {
- netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n");
+ netdev_err(nic->netdev,
+ "Failed to allocate new rcv buffer\n");
return -ENOMEM;
}
nic->rb_page_offset = 0;
@@ -382,7 +384,8 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
return;
if (sq->tso_hdrs)
- dma_free_coherent(&nic->pdev->dev, sq->dmem.q_len,
+ dma_free_coherent(&nic->pdev->dev,
+ sq->dmem.q_len * TSO_HEADER_SIZE,
sq->tso_hdrs, sq->tso_hdrs_phys);
kfree(sq->skbuff);
@@ -863,10 +866,11 @@ void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
continue;
}
skb = (struct sk_buff *)sq->skbuff[sq->head];
+ if (skb)
+ dev_kfree_skb_any(skb);
atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
atomic64_add(hdr->tot_len,
(atomic64_t *)&netdev->stats.tx_bytes);
- dev_kfree_skb_any(skb);
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
}
}
@@ -992,7 +996,7 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
memset(gather, 0, SND_QUEUE_DESC_SIZE);
gather->subdesc_type = SQ_DESC_TYPE_GATHER;
- gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB;
+ gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
gather->size = size;
gather->addr = data;
}
@@ -1048,7 +1052,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
}
nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
seg_subdescs - 1, skb, seg_len);
- sq->skbuff[hdr_qentry] = 0;
+ sq->skbuff[hdr_qentry] = (u64)NULL;
qentry = nicvf_get_nxt_sqentry(sq, qentry);
desc_cnt += seg_subdescs;
@@ -1062,6 +1066,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
/* Inform HW to xmit all TSO segments */
nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
skb_get_queue_mapping(skb), desc_cnt);
+ nic->drv_stats.tx_tso++;
return 1;
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 8341bdf755d1..f0937b7bfe9f 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -62,7 +62,7 @@
#define SND_QUEUE_CNT 8
#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */
-#define SND_QSIZE SND_QUEUE_SIZE4
+#define SND_QSIZE SND_QUEUE_SIZE2
#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10))
#define SND_QUEUE_THRESH 2ULL
@@ -70,7 +70,10 @@
/* Since timestamp not enabled, otherwise 2 */
#define MAX_CQE_PER_PKT_XMIT 1
-#define CMP_QSIZE CMP_QUEUE_SIZE4
+/* Keep CQ and SQ sizes the same; if timestamping
+ * is enabled this equation will change.
+ */
+#define CMP_QSIZE CMP_QUEUE_SIZE2
#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
#define CMP_QUEUE_CQE_THRESH 0
#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */
@@ -87,7 +90,12 @@
#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
MAX_CQE_PER_PKT_XMIT)
-#define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256)
+/* Calculate number of CQEs to reserve for all SQEs.
+ * It's 1/256th level of CQ size.
+ * '+ 1' to account for pipelining
+ */
+#define RQ_CQ_DROP ((256 / (CMP_QUEUE_LEN / \
+ (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
/* Descriptor size in bytes */
#define SND_QUEUE_DESC_SIZE 16
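
Editor's note on the reworked RQ_CQ_DROP above: the new expression converts the CQE headroom reserved for transmit completions into 1/256th units of the CQ size, plus one for pipelining. The small program below just evaluates the macro under assumed sizes; the numeric values of CMP_QUEUE_SIZE2, SND_QUEUE_SIZE2 and MIN_SQ_DESC_PER_PKT_XMIT are placeholders for illustration, not taken from the header.

#include <stdio.h>

/* Assumed values, for illustration only; the real constants live in
 * nicvf_queues.h and may differ. */
#define CMP_QSIZE                 2                           /* CMP_QUEUE_SIZE2 */
#define CMP_QUEUE_LEN             (1ULL << (CMP_QSIZE + 10))  /* 4096 */
#define SND_QSIZE                 2                           /* SND_QUEUE_SIZE2 */
#define SND_QUEUE_LEN             (1ULL << (SND_QSIZE + 10))  /* 4096 */
#define MIN_SQ_DESC_PER_PKT_XMIT  2
#define MAX_CQE_PER_PKT_XMIT      1

#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
                         MAX_CQE_PER_PKT_XMIT)

/* New formula from the hunk above: headroom in 1/256th units of CQ size,
 * plus one entry to account for pipelining. */
#define RQ_CQ_DROP ((256 / (CMP_QUEUE_LEN / \
                    (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)

int main(void)
{
    printf("CMP_QUEUE_LEN   = %llu\n", CMP_QUEUE_LEN);
    printf("MAX_CQES_FOR_TX = %llu\n", MAX_CQES_FOR_TX);
    printf("RQ_CQ_DROP      = %llu\n", (unsigned long long)RQ_CQ_DROP);
    return 0;
}
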
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 633ec05dfe05..b961a89dc626 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -673,7 +673,10 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
bgx_flush_dmac_addrs(bgx, lmacid);
- if (lmac->phydev)
+ if ((bgx->lmac_type != BGX_MODE_XFI) &&
+ (bgx->lmac_type != BGX_MODE_XLAUI) &&
+ (bgx->lmac_type != BGX_MODE_40G_KR) &&
+ (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
phy_disconnect(lmac->phydev);
lmac->phydev = NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 484eb8c37489..c3c7db41819d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -952,16 +952,23 @@ static int devlog_show(struct seq_file *seq, void *v)
* eventually have to put a format interpreter in here ...
*/
seq_printf(seq, "%10d %15llu %8s %8s ",
- e->seqno, e->timestamp,
+ be32_to_cpu(e->seqno),
+ be64_to_cpu(e->timestamp),
(e->level < ARRAY_SIZE(devlog_level_strings)
? devlog_level_strings[e->level]
: "UNKNOWN"),
(e->facility < ARRAY_SIZE(devlog_facility_strings)
? devlog_facility_strings[e->facility]
: "UNKNOWN"));
- seq_printf(seq, e->fmt, e->params[0], e->params[1],
- e->params[2], e->params[3], e->params[4],
- e->params[5], e->params[6], e->params[7]);
+ seq_printf(seq, e->fmt,
+ be32_to_cpu(e->params[0]),
+ be32_to_cpu(e->params[1]),
+ be32_to_cpu(e->params[2]),
+ be32_to_cpu(e->params[3]),
+ be32_to_cpu(e->params[4]),
+ be32_to_cpu(e->params[5]),
+ be32_to_cpu(e->params[6]),
+ be32_to_cpu(e->params[7]));
}
return 0;
}
@@ -1043,23 +1050,17 @@ static int devlog_open(struct inode *inode, struct file *file)
return ret;
}
- /* Translate log multi-byte integral elements into host native format
- * and determine where the first entry in the log is.
+ /* Find the earliest (lowest Sequence Number) log entry in the
+ * circular Device Log.
*/
for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) {
struct fw_devlog_e *e = &dinfo->log[index];
- int i;
__u32 seqno;
if (e->timestamp == 0)
continue;
- e->timestamp = (__force __be64)be64_to_cpu(e->timestamp);
seqno = be32_to_cpu(e->seqno);
- for (i = 0; i < 8; i++)
- e->params[i] =
- (__force __be32)be32_to_cpu(e->params[i]);
-
if (seqno < fseqno) {
fseqno = seqno;
dinfo->first = index;
@@ -2331,10 +2332,11 @@ int t4_setup_debugfs(struct adapter *adap)
EXT_MEM1_SIZE_G(size));
}
} else {
- if (i & EXT_MEM_ENABLE_F)
+ if (i & EXT_MEM_ENABLE_F) {
size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
add_debugfs_mem(adap, "mc", MEM_MC,
EXT_MEM_SIZE_G(size));
+ }
}
de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
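
Editor's note on the cxgb4 devlog hunks above: the old code converted the big-endian firmware log fields in place during open, which left the shared buffer mangled for later reads; the fix keeps the entries in wire order and converts each field with be32_to_cpu()/be64_to_cpu() only at display time. A minimal userspace sketch of the same "store raw big-endian, convert on read" idea, using Linux's endian.h helpers as stand-ins for the kernel accessors; the log_entry layout is illustrative only:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Toy log entry kept in wire (big-endian) byte order, as fw_devlog_e now is
 * after the patch above. */
struct log_entry {
    uint32_t seqno;      /* big-endian on the wire */
    uint64_t timestamp;  /* big-endian on the wire */
};

static void show_entry(const struct log_entry *e)
{
    /* Convert at display time; the stored entry is left untouched, so a
     * second pass over the same buffer sees consistent data. */
    printf("seqno=%u ts=%llu\n",
           be32toh(e->seqno),
           (unsigned long long)be64toh(e->timestamp));
}

int main(void)
{
    struct log_entry e;

    e.seqno = htobe32(42);
    e.timestamp = htobe64(123456789ULL);

    show_entry(&e);
    show_entry(&e);   /* same output both times: no in-place conversion */
    return 0;
}
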
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index da2004e2a741..918a8e42139b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1170,7 +1170,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
wq_work_done,
0 /* dont unmask intr */,
0 /* dont reset intr timer */);
- return rq_work_done;
+ return budget;
}
if (budget > 0)
@@ -1191,6 +1191,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
0 /* don't reset intr timer */);
err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+ enic_poll_unlock_napi(&enic->rq[cq_rq], napi);
/* Buffer allocation failed. Stay in polling
* mode so we can try to fill the ring again.
@@ -1208,7 +1209,6 @@ static int enic_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
vnic_intr_unmask(&enic->intr[intr]);
}
- enic_poll_unlock_napi(&enic->rq[cq_rq], napi);
return rq_work_done;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 2716e6f30d9a..00e3a6b6b822 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -620,6 +620,11 @@ enum be_if_flags {
BE_IF_FLAGS_VLAN_PROMISCUOUS |\
BE_IF_FLAGS_MCAST_PROMISCUOUS)
+#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\
+ BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED)
+
+#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS)
+
/* An RX interface is an object with one or more MAC addresses and
* filtering capabilities. */
struct be_cmd_req_if_create {
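
These grouped macros pair with the be_main.c changes below, where the enable set is switched on in open, masked by the reported capability flags, and the full set is cleared again in close. A small standalone sketch of that usage, with invented bit values rather than the firmware encoding:

#include <stdio.h>
#include <stdint.h>

#define IF_FLAGS_BROADCAST	0x01u
#define IF_FLAGS_MULTICAST	0x02u
#define IF_FLAGS_UNTAGGED	0x04u
#define IF_FLAGS_PASS_ERRORS	0x08u
#define IF_FLAGS_PROMISCUOUS	0x10u

#define IF_EN_FLAGS	(IF_FLAGS_BROADCAST | IF_FLAGS_MULTICAST | \
			 IF_FLAGS_UNTAGGED | IF_FLAGS_PASS_ERRORS)
#define IF_ALL_FILT_FLAGS	(IF_EN_FLAGS | IF_FLAGS_PROMISCUOUS)

int main(void)
{
	uint32_t cap_flags = IF_EN_FLAGS;	/* device lacks promisc here */
	uint32_t if_flags = 0;

	/* open: enable only the filters the interface actually supports */
	if_flags |= IF_EN_FLAGS & cap_flags;
	printf("after open:  0x%02x\n", if_flags);

	/* close: clear every filter flag in one go */
	if_flags &= ~IF_ALL_FILT_FLAGS;
	printf("after close: 0x%02x\n", if_flags);
	return 0;
}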
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6f642426308c..6ca693b03f33 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -273,6 +273,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
return 0;
+ /* if device is not running, copy MAC to netdev->dev_addr */
+ if (!netif_running(netdev))
+ goto done;
+
/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
* privilege or if PF did not provision the new MAC address.
* On BE3, this cmd will always fail if the VF doesn't have the
@@ -307,9 +311,9 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
status = -EPERM;
goto err;
}
-
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- dev_info(dev, "MAC address changed to %pM\n", mac);
+done:
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
return 0;
err:
dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
@@ -2447,10 +2451,24 @@ static void be_eq_clean(struct be_eq_obj *eqo)
be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
}
-static void be_rx_cq_clean(struct be_rx_obj *rxo)
+/* Free posted rx buffers that were not used */
+static void be_rxq_clean(struct be_rx_obj *rxo)
{
- struct be_rx_page_info *page_info;
struct be_queue_info *rxq = &rxo->q;
+ struct be_rx_page_info *page_info;
+
+ while (atomic_read(&rxq->used) > 0) {
+ page_info = get_rx_page_info(rxo);
+ put_page(page_info->page);
+ memset(page_info, 0, sizeof(*page_info));
+ }
+ BUG_ON(atomic_read(&rxq->used));
+ rxq->tail = 0;
+ rxq->head = 0;
+}
+
+static void be_rx_cq_clean(struct be_rx_obj *rxo)
+{
struct be_queue_info *rx_cq = &rxo->cq;
struct be_rx_compl_info *rxcp;
struct be_adapter *adapter = rxo->adapter;
@@ -2487,16 +2505,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
/* After cleanup, leave the CQ in unarmed state */
be_cq_notify(adapter, rx_cq->id, false, 0);
-
- /* Then free posted rx buffers that were not used */
- while (atomic_read(&rxq->used) > 0) {
- page_info = get_rx_page_info(rxo);
- put_page(page_info->page);
- memset(page_info, 0, sizeof(*page_info));
- }
- BUG_ON(atomic_read(&rxq->used));
- rxq->tail = 0;
- rxq->head = 0;
}
static void be_tx_compl_clean(struct be_adapter *adapter)
@@ -2576,8 +2584,8 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
napi_hash_del(&eqo->napi);
netif_napi_del(&eqo->napi);
+ free_cpumask_var(eqo->affinity_mask);
}
- free_cpumask_var(eqo->affinity_mask);
be_queue_free(adapter, &eqo->q);
}
}
@@ -2594,13 +2602,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
for_all_evt_queues(adapter, eqo, i) {
int numa_node = dev_to_node(&adapter->pdev->dev);
- if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
- return -ENOMEM;
- cpumask_set_cpu(cpumask_local_spread(i, numa_node),
- eqo->affinity_mask);
- netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
- BE_NAPI_WEIGHT);
- napi_hash_add(&eqo->napi);
+
aic = &adapter->aic_obj[i];
eqo->adapter = adapter;
eqo->idx = i;
@@ -2616,6 +2618,14 @@ static int be_evt_queues_create(struct be_adapter *adapter)
rc = be_cmd_eq_create(adapter, eqo);
if (rc)
return rc;
+
+ if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ eqo->affinity_mask);
+ netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
+ BE_NAPI_WEIGHT);
+ napi_hash_add(&eqo->napi);
}
return 0;
}
@@ -3354,13 +3364,54 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
for_all_rx_queues(adapter, rxo, i) {
q = &rxo->q;
if (q->created) {
+ /* If RXQs are destroyed while in an "out of buffer"
+ * state, there is a possibility of an HW stall on
+ * Lancer. So, post 64 buffers to each queue to relieve
+ * the "out of buffer" condition.
+ * Make sure there's space in the RXQ before posting.
+ */
+ if (lancer_chip(adapter)) {
+ be_rx_cq_clean(rxo);
+ if (atomic_read(&q->used) == 0)
+ be_post_rx_frags(rxo, GFP_KERNEL,
+ MAX_RX_POST);
+ }
+
be_cmd_rxq_destroy(adapter, q);
be_rx_cq_clean(rxo);
+ be_rxq_clean(rxo);
}
be_queue_free(adapter, q);
}
}
+static void be_disable_if_filters(struct be_adapter *adapter)
+{
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id[0], 0);
+
+ be_clear_uc_list(adapter);
+
+ /* The IFACE flags are enabled in the open path and cleared
+ * in the close path. When a VF gets detached from the host and
+ * assigned to a VM the following happens:
+ * - VF's IFACE flags get cleared in the detach path
+ * - IFACE create is issued by the VF in the attach path
+ * Due to a bug in the BE3/Skyhawk-R FW
+ * (Lancer FW doesn't have the bug), the IFACE capability flags
+ * specified along with the IFACE create cmd issued by a VF are not
+ * honoured by FW. As a consequence, if a *new* driver
+ * (that enables/disables IFACE flags in open/close)
+ * is loaded in the host and an *old* driver is used by a VM/VF,
+ * the IFACE gets created *without* the needed flags.
+ * To avoid this, disable RX-filter flags only for Lancer.
+ */
+ if (lancer_chip(adapter)) {
+ be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
+ adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
+ }
+}
+
static int be_close(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -3373,6 +3424,8 @@ static int be_close(struct net_device *netdev)
if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
return 0;
+ be_disable_if_filters(adapter);
+
be_roce_dev_close(adapter);
if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3392,7 +3445,6 @@ static int be_close(struct net_device *netdev)
be_tx_compl_clean(adapter);
be_rx_qs_destroy(adapter);
- be_clear_uc_list(adapter);
for_all_evt_queues(adapter, eqo, i) {
if (msix_enabled(adapter))
@@ -3477,6 +3529,31 @@ static int be_rx_qs_create(struct be_adapter *adapter)
return 0;
}
+static int be_enable_if_filters(struct be_adapter *adapter)
+{
+ int status;
+
+ status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
+ if (status)
+ return status;
+
+ /* For BE3 VFs, the PF programs the initial MAC address */
+ if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
+ status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
+ adapter->if_handle,
+ &adapter->pmac_id[0], 0);
+ if (status)
+ return status;
+ }
+
+ if (adapter->vlans_added)
+ be_vid_config(adapter);
+
+ be_set_rx_mode(adapter->netdev);
+
+ return 0;
+}
+
static int be_open(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -3490,6 +3567,10 @@ static int be_open(struct net_device *netdev)
if (status)
goto err;
+ status = be_enable_if_filters(adapter);
+ if (status)
+ goto err;
+
status = be_irq_register(adapter);
if (status)
goto err;
@@ -3686,16 +3767,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
}
}
-static void be_mac_clear(struct be_adapter *adapter)
-{
- if (adapter->pmac_id) {
- be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[0], 0);
- kfree(adapter->pmac_id);
- adapter->pmac_id = NULL;
- }
-}
-
#ifdef CONFIG_BE2NET_VXLAN
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
@@ -3770,8 +3841,8 @@ static int be_clear(struct be_adapter *adapter)
#ifdef CONFIG_BE2NET_VXLAN
be_disable_vxlan_offloads(adapter);
#endif
- /* delete the primary mac along with the uc-mac list */
- be_mac_clear(adapter);
+ kfree(adapter->pmac_id);
+ adapter->pmac_id = NULL;
be_cmd_if_destroy(adapter, adapter->if_handle, 0);
@@ -3782,25 +3853,11 @@ static int be_clear(struct be_adapter *adapter)
return 0;
}
-static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
- u32 cap_flags, u32 vf)
-{
- u32 en_flags;
-
- en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
- BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
-
- en_flags &= cap_flags;
-
- return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
-}
-
static int be_vfs_if_create(struct be_adapter *adapter)
{
struct be_resources res = {0};
+ u32 cap_flags, en_flags, vf;
struct be_vf_cfg *vf_cfg;
- u32 cap_flags, vf;
int status;
/* If a FW profile exists, then cap_flags are updated */
@@ -3821,8 +3878,12 @@ static int be_vfs_if_create(struct be_adapter *adapter)
}
}
- status = be_if_create(adapter, &vf_cfg->if_handle,
- cap_flags, vf + 1);
+ en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST |
+ BE_IF_FLAGS_PASS_L3L4_ERRORS);
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ &vf_cfg->if_handle, vf + 1);
if (status)
return status;
}
@@ -4194,15 +4255,8 @@ static int be_mac_setup(struct be_adapter *adapter)
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- } else {
- /* Maybe the HW was reset; dev_addr must be re-programmed */
- memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
}
- /* For BE3-R VFs, the PF programs the initial MAC address */
- if (!(BEx_chip(adapter) && be_virtfn(adapter)))
- be_cmd_pmac_add(adapter, mac, adapter->if_handle,
- &adapter->pmac_id[0], 0);
return 0;
}
@@ -4342,6 +4396,7 @@ static int be_func_init(struct be_adapter *adapter)
static int be_setup(struct be_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
+ u32 en_flags;
int status;
status = be_func_init(adapter);
@@ -4364,8 +4419,11 @@ static int be_setup(struct be_adapter *adapter)
if (status)
goto err;
- status = be_if_create(adapter, &adapter->if_handle,
- be_if_cap_flags(adapter), 0);
+ /* will enable all the needed filter flags in be_open() */
+ en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
+ en_flags = en_flags & be_if_cap_flags(adapter);
+ status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
+ &adapter->if_handle, 0);
if (status)
goto err;
@@ -4391,11 +4449,6 @@ static int be_setup(struct be_adapter *adapter)
dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
}
- if (adapter->vlans_added)
- be_vid_config(adapter);
-
- be_set_rx_mode(adapter->netdev);
-
status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
if (status)
@@ -5121,7 +5174,7 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
struct device *dev = &adapter->pdev->dev;
int status;
- if (lancer_chip(adapter) || BEx_chip(adapter))
+ if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
@@ -5168,7 +5221,7 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
{
struct be_adapter *adapter = netdev_priv(netdev);
- if (lancer_chip(adapter) || BEx_chip(adapter))
+ if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
if (adapter->vxlan_port != port)
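
One piece of the be_main.c rework above is be_mac_addr_set(): when the interface is down the new MAC is only recorded in dev_addr, and the hardware is programmed later from the open path via be_enable_if_filters(). A simplified userspace sketch of that deferral; the structures and the "hardware" call are stand-ins.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct fake_netdev {
	unsigned char dev_addr[6];
	bool running;
};

static int hw_program_mac(const unsigned char *mac)
{
	printf("HW: programming %02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}

static int mac_addr_set(struct fake_netdev *nd, const unsigned char *mac)
{
	if (!nd->running) {
		/* Device is down: just remember the address for later. */
		memcpy(nd->dev_addr, mac, 6);
		return 0;
	}

	/* Device is up: program the hardware, then commit to dev_addr. */
	if (hw_program_mac(mac))
		return -1;
	memcpy(nd->dev_addr, mac, 6);
	return 0;
}

static void netdev_open(struct fake_netdev *nd)
{
	nd->running = true;
	hw_program_mac(nd->dev_addr);	/* filters/PMAC set up at open time */
}

int main(void)
{
	struct fake_netdev nd = { .dev_addr = { 0xde, 0xad, 0, 0, 0, 1 } };
	const unsigned char new_mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

	mac_addr_set(&nd, new_mac);	/* device down: only dev_addr updated */
	netdev_open(&nd);		/* MAC programmed here */
	return 0;
}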
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 1eee73cccdf5..99d33e2d35e6 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -562,6 +562,7 @@ struct fec_enet_private {
};
void fec_ptp_init(struct platform_device *pdev);
+void fec_ptp_stop(struct platform_device *pdev);
void fec_ptp_start_cyclecounter(struct net_device *ndev);
int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1f89c59b4353..271bb5862346 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
@@ -77,6 +78,7 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
#define FEC_ENET_RAEM_V 0x8
#define FEC_ENET_RAFL_V 0x8
#define FEC_ENET_OPD_V 0xFFF0
+#define FEC_MDIO_PM_TIMEOUT 100 /* ms */
static struct platform_device_id fec_devtype[] = {
{
@@ -1767,7 +1769,13 @@ static void fec_enet_adjust_link(struct net_device *ndev)
static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
struct fec_enet_private *fep = bus->priv;
+ struct device *dev = &fep->pdev->dev;
unsigned long time_left;
+ int ret = 0;
+
+ ret = pm_runtime_get_sync(dev);
+ if (IS_ERR_VALUE(ret))
+ return ret;
fep->mii_timeout = 0;
init_completion(&fep->mdio_done);
@@ -1783,18 +1791,30 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (time_left == 0) {
fep->mii_timeout = 1;
netdev_err(fep->netdev, "MDIO read timeout\n");
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto out;
}
- /* return value */
- return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
+ ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
+
+out:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
}
static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
u16 value)
{
struct fec_enet_private *fep = bus->priv;
+ struct device *dev = &fep->pdev->dev;
unsigned long time_left;
+ int ret = 0;
+
+ ret = pm_runtime_get_sync(dev);
+ if (IS_ERR_VALUE(ret))
+ return ret;
fep->mii_timeout = 0;
init_completion(&fep->mdio_done);
@@ -1811,10 +1831,13 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
if (time_left == 0) {
fep->mii_timeout = 1;
netdev_err(fep->netdev, "MDIO write timeout\n");
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
}
- return 0;
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
}
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
@@ -1826,9 +1849,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
ret = clk_prepare_enable(fep->clk_ahb);
if (ret)
return ret;
- ret = clk_prepare_enable(fep->clk_ipg);
- if (ret)
- goto failed_clk_ipg;
if (fep->clk_enet_out) {
ret = clk_prepare_enable(fep->clk_enet_out);
if (ret)
@@ -1852,7 +1872,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
}
} else {
clk_disable_unprepare(fep->clk_ahb);
- clk_disable_unprepare(fep->clk_ipg);
if (fep->clk_enet_out)
clk_disable_unprepare(fep->clk_enet_out);
if (fep->clk_ptp) {
@@ -1874,8 +1893,6 @@ failed_clk_ptp:
if (fep->clk_enet_out)
clk_disable_unprepare(fep->clk_enet_out);
failed_clk_enet_out:
- clk_disable_unprepare(fep->clk_ipg);
-failed_clk_ipg:
clk_disable_unprepare(fep->clk_ahb);
return ret;
@@ -2847,10 +2864,14 @@ fec_enet_open(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
+ ret = pm_runtime_get_sync(&fep->pdev->dev);
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
pinctrl_pm_select_default_state(&fep->pdev->dev);
ret = fec_enet_clk_enable(ndev, true);
if (ret)
- return ret;
+ goto clk_enable;
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
@@ -2881,6 +2902,9 @@ err_enet_mii_probe:
fec_enet_free_buffers(ndev);
err_enet_alloc:
fec_enet_clk_enable(ndev, false);
+clk_enable:
+ pm_runtime_mark_last_busy(&fep->pdev->dev);
+ pm_runtime_put_autosuspend(&fep->pdev->dev);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
return ret;
}
@@ -2903,6 +2927,9 @@ fec_enet_close(struct net_device *ndev)
fec_enet_clk_enable(ndev, false);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+ pm_runtime_mark_last_busy(&fep->pdev->dev);
+ pm_runtime_put_autosuspend(&fep->pdev->dev);
+
fec_enet_free_buffers(ndev);
return 0;
@@ -3115,8 +3142,8 @@ static int fec_enet_init(struct net_device *ndev)
fep->bufdesc_size;
/* Allocate memory for buffer descriptors. */
- cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma,
- GFP_KERNEL);
+ cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
+ GFP_KERNEL);
if (!cbd_base) {
return -ENOMEM;
}
@@ -3388,6 +3415,10 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_clk;
+ ret = clk_prepare_enable(fep->clk_ipg);
+ if (ret)
+ goto failed_clk_ipg;
+
fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
if (!IS_ERR(fep->reg_phy)) {
ret = regulator_enable(fep->reg_phy);
@@ -3400,6 +3431,12 @@ fec_probe(struct platform_device *pdev)
fep->reg_phy = NULL;
}
+ pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
fec_reset_phy(pdev);
if (fep->bufdesc_ex)
@@ -3447,6 +3484,10 @@ fec_probe(struct platform_device *pdev)
fep->rx_copybreak = COPYBREAK_DEFAULT;
INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
return 0;
failed_register:
@@ -3454,9 +3495,12 @@ failed_register:
failed_mii_init:
failed_irq:
failed_init:
+ fec_ptp_stop(pdev);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
failed_regulator:
+ clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
fec_enet_clk_enable(ndev, false);
failed_clk:
failed_phy:
@@ -3473,14 +3517,12 @@ fec_drv_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
- cancel_delayed_work_sync(&fep->time_keep);
cancel_work_sync(&fep->tx_timeout_work);
+ fec_ptp_stop(pdev);
unregister_netdev(ndev);
fec_enet_mii_remove(fep);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
- if (fep->ptp_clock)
- ptp_clock_unregister(fep->ptp_clock);
of_node_put(fep->phy_node);
free_netdev(ndev);
@@ -3568,7 +3610,28 @@ failed_clk:
return ret;
}
-static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);
+static int __maybe_unused fec_runtime_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ clk_disable_unprepare(fep->clk_ipg);
+
+ return 0;
+}
+
+static int __maybe_unused fec_runtime_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ return clk_prepare_enable(fep->clk_ipg);
+}
+
+static const struct dev_pm_ops fec_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
+ SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
+};
static struct platform_driver fec_driver = {
.driver = {
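
The FEC MDIO paths above now bracket every register access with pm_runtime_get_sync()/pm_runtime_put_autosuspend(), and the new runtime PM callbacks gate fep->clk_ipg. A toy userspace model of that reference-counted clock handling; the counter and flag below only illustrate the idea and are not kernel API.

#include <stdio.h>
#include <stdbool.h>

static int pm_usage;		/* models the runtime-PM usage count */
static bool clk_ipg_on;		/* models the gated peripheral clock */

static int pm_get(void)
{
	if (pm_usage++ == 0)
		clk_ipg_on = true;	/* runtime resume: ungate the clock */
	return 0;
}

static void pm_put(void)
{
	if (--pm_usage == 0)
		clk_ipg_on = false;	/* runtime suspend: gate the clock */
}

static int mdio_read(int reg)
{
	int ret;

	if (pm_get())
		return -1;

	/* Register access is only legal while the clock is running. */
	ret = clk_ipg_on ? reg * 2 : -1;

	pm_put();
	return ret;
}

int main(void)
{
	printf("read reg 3 -> %d (clock on after call: %s)\n",
	       mdio_read(3), clk_ipg_on ? "yes" : "no");
	return 0;
}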
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index a15663ad7f5e..f457a23d0bfb 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -604,6 +604,16 @@ void fec_ptp_init(struct platform_device *pdev)
schedule_delayed_work(&fep->time_keep, HZ);
}
+void fec_ptp_stop(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ cancel_delayed_work_sync(&fep->time_keep);
+ if (fep->ptp_clock)
+ ptp_clock_unregister(fep->ptp_clock);
+}
+
/**
* fec_ptp_check_pps_event
* @fep: the fec_enet_private structure handle
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 56316db6c5a6..cf8e54652df9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -586,7 +586,8 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
frag = skb_shinfo(skb)->frags;
while (nr_frags) {
CBDC_SC(bdp,
- BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
+ BD_ENET_TX_TC);
CBDS_SC(bdp, BD_ENET_TX_READY);
if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index b34214e2df5f..016743e355de 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -110,7 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
}
#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
-#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB)
+#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF)
#define FEC_RX_EVENT (FEC_ENET_RXF)
#define FEC_TX_EVENT (FEC_ENET_TXF)
#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ff875028fdff..10b3bbbbac8e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -565,22 +565,6 @@ static void gfar_ints_enable(struct gfar_private *priv)
}
}
-static void lock_tx_qs(struct gfar_private *priv)
-{
- int i;
-
- for (i = 0; i < priv->num_tx_queues; i++)
- spin_lock(&priv->tx_queue[i]->txlock);
-}
-
-static void unlock_tx_qs(struct gfar_private *priv)
-{
- int i;
-
- for (i = 0; i < priv->num_tx_queues; i++)
- spin_unlock(&priv->tx_queue[i]->txlock);
-}
-
static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
int i;
@@ -1376,7 +1360,6 @@ static int gfar_probe(struct platform_device *ofdev)
priv->dev = &ofdev->dev;
SET_NETDEV_DEV(dev, &ofdev->dev);
- spin_lock_init(&priv->bflock);
INIT_WORK(&priv->reset_task, gfar_reset_task);
platform_set_drvdata(ofdev, priv);
@@ -1470,9 +1453,8 @@ static int gfar_probe(struct platform_device *ofdev)
goto register_fail;
}
- device_init_wakeup(&dev->dev,
- priv->device_flags &
- FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ device_set_wakeup_capable(&dev->dev, priv->device_flags &
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
/* fill out IRQ number and name fields */
for (i = 0; i < priv->num_grps; i++) {
@@ -1540,48 +1522,37 @@ static int gfar_suspend(struct device *dev)
struct gfar_private *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->ndev;
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned long flags;
u32 tempval;
-
int magic_packet = priv->wol_en &&
(priv->device_flags &
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ if (!netif_running(ndev))
+ return 0;
+
+ disable_napi(priv);
+ netif_tx_lock(ndev);
netif_device_detach(ndev);
+ netif_tx_unlock(ndev);
- if (netif_running(ndev)) {
+ gfar_halt(priv);
- local_irq_save(flags);
- lock_tx_qs(priv);
+ if (magic_packet) {
+ /* Enable interrupt on Magic Packet */
+ gfar_write(&regs->imask, IMASK_MAG);
- gfar_halt_nodisable(priv);
+ /* Enable Magic Packet mode */
+ tempval = gfar_read(&regs->maccfg2);
+ tempval |= MACCFG2_MPEN;
+ gfar_write(&regs->maccfg2, tempval);
- /* Disable Tx, and Rx if wake-on-LAN is disabled. */
+ /* re-enable the Rx block */
tempval = gfar_read(&regs->maccfg1);
-
- tempval &= ~MACCFG1_TX_EN;
-
- if (!magic_packet)
- tempval &= ~MACCFG1_RX_EN;
-
+ tempval |= MACCFG1_RX_EN;
gfar_write(&regs->maccfg1, tempval);
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
- disable_napi(priv);
-
- if (magic_packet) {
- /* Enable interrupt on Magic Packet */
- gfar_write(&regs->imask, IMASK_MAG);
-
- /* Enable Magic Packet mode */
- tempval = gfar_read(&regs->maccfg2);
- tempval |= MACCFG2_MPEN;
- gfar_write(&regs->maccfg2, tempval);
- } else {
- phy_stop(priv->phydev);
- }
+ } else {
+ phy_stop(priv->phydev);
}
return 0;
@@ -1592,37 +1563,26 @@ static int gfar_resume(struct device *dev)
struct gfar_private *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->ndev;
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned long flags;
u32 tempval;
int magic_packet = priv->wol_en &&
(priv->device_flags &
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
- if (!netif_running(ndev)) {
- netif_device_attach(ndev);
+ if (!netif_running(ndev))
return 0;
- }
- if (!magic_packet && priv->phydev)
+ if (magic_packet) {
+ /* Disable Magic Packet mode */
+ tempval = gfar_read(&regs->maccfg2);
+ tempval &= ~MACCFG2_MPEN;
+ gfar_write(&regs->maccfg2, tempval);
+ } else {
phy_start(priv->phydev);
-
- /* Disable Magic Packet mode, in case something
- * else woke us up.
- */
- local_irq_save(flags);
- lock_tx_qs(priv);
-
- tempval = gfar_read(&regs->maccfg2);
- tempval &= ~MACCFG2_MPEN;
- gfar_write(&regs->maccfg2, tempval);
+ }
gfar_start(priv);
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
netif_device_attach(ndev);
-
enable_napi(priv);
return 0;
@@ -2045,7 +2005,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
/* Install our interrupt handlers for Error,
* Transmit, and Receive
*/
- err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
+ err = request_irq(gfar_irq(grp, ER)->irq, gfar_error,
+ IRQF_NO_SUSPEND,
gfar_irq(grp, ER)->name, grp);
if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2068,7 +2029,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
goto rx_irq_fail;
}
} else {
- err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
+ err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt,
+ IRQF_NO_SUSPEND,
gfar_irq(grp, TX)->name, grp);
if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2140,6 +2102,11 @@ int startup_gfar(struct net_device *ndev)
/* Start Rx/Tx DMA and enable the interrupts */
gfar_start(priv);
+ /* force link state update after mac reset */
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
phy_start(priv->phydev);
enable_napi(priv);
@@ -2169,8 +2136,6 @@ static int gfar_enet_open(struct net_device *dev)
if (err)
return err;
- device_set_wakeup_enable(&dev->dev, priv->wol_en);
-
return err;
}
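
The reworked gfar_suspend()/gfar_resume() boil down to read-modify-write sequences on MACCFG1/MACCFG2: arm Magic Packet detection and keep the RX block running on suspend, clear it again on resume. A standalone sketch of that sequence, with bit positions chosen arbitrarily for illustration:

#include <stdio.h>
#include <stdint.h>

#define MACCFG1_RX_EN	0x00000004u	/* illustrative bit positions */
#define MACCFG2_MPEN	0x00000008u

struct fake_regs {
	uint32_t maccfg1;
	uint32_t maccfg2;
};

static void wol_suspend(struct fake_regs *r)
{
	r->maccfg2 |= MACCFG2_MPEN;	/* arm Magic Packet detection */
	r->maccfg1 |= MACCFG1_RX_EN;	/* RX must stay on to see the frame */
}

static void wol_resume(struct fake_regs *r)
{
	r->maccfg2 &= ~MACCFG2_MPEN;	/* leave Magic Packet mode */
}

int main(void)
{
	struct fake_regs regs = { 0 };

	wol_suspend(&regs);
	printf("suspended: maccfg1=0x%x maccfg2=0x%x\n",
	       regs.maccfg1, regs.maccfg2);
	wol_resume(&regs);
	printf("resumed:   maccfg1=0x%x maccfg2=0x%x\n",
	       regs.maccfg1, regs.maccfg2);
	return 0;
}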
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index daa1d37de642..5545e4103368 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1145,9 +1145,6 @@ struct gfar_private {
int oldduplex;
int oldlink;
- /* Bitfield update lock */
- spinlock_t bflock;
-
uint32_t msg_enable;
struct work_struct reset_task;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index fda12fb32ec7..5b90fcf96265 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -653,7 +653,6 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct gfar_private *priv = netdev_priv(dev);
- unsigned long flags;
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
wol->wolopts != 0)
@@ -664,9 +663,7 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
- spin_lock_irqsave(&priv->bflock, flags);
- priv->wol_en = !!device_may_wakeup(&dev->dev);
- spin_unlock_irqrestore(&priv->bflock, flags);
+ priv->wol_en = !!device_may_wakeup(&dev->dev);
return 0;
}
@@ -903,27 +900,6 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
return 0;
}
-static int gfar_comp_asc(const void *a, const void *b)
-{
- return memcmp(a, b, 4);
-}
-
-static int gfar_comp_desc(const void *a, const void *b)
-{
- return -memcmp(a, b, 4);
-}
-
-static void gfar_swap(void *a, void *b, int size)
-{
- u32 *_a = a;
- u32 *_b = b;
-
- swap(_a[0], _b[0]);
- swap(_a[1], _b[1]);
- swap(_a[2], _b[2]);
- swap(_a[3], _b[3]);
-}
-
/* Write a mask to filer cache */
static void gfar_set_mask(u32 mask, struct filer_table *tab)
{
@@ -1273,310 +1249,6 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
return 0;
}
-/* Copy size filer entries */
-static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
- struct gfar_filer_entry src[0], s32 size)
-{
- while (size > 0) {
- size--;
- dst[size].ctrl = src[size].ctrl;
- dst[size].prop = src[size].prop;
- }
-}
-
-/* Delete the contents of the filer-table between start and end
- * and collapse them
- */
-static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
-{
- int length;
-
- if (end > MAX_FILER_CACHE_IDX || end < begin)
- return -EINVAL;
-
- end++;
- length = end - begin;
-
- /* Copy */
- while (end < tab->index) {
- tab->fe[begin].ctrl = tab->fe[end].ctrl;
- tab->fe[begin++].prop = tab->fe[end++].prop;
-
- }
- /* Fill up with don't cares */
- while (begin < tab->index) {
- tab->fe[begin].ctrl = 0x60;
- tab->fe[begin].prop = 0xFFFFFFFF;
- begin++;
- }
-
- tab->index -= length;
- return 0;
-}
-
-/* Make space on the wanted location */
-static int gfar_expand_filer_entries(u32 begin, u32 length,
- struct filer_table *tab)
-{
- if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
- begin > MAX_FILER_CACHE_IDX)
- return -EINVAL;
-
- gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
- tab->index - length + 1);
-
- tab->index += length;
- return 0;
-}
-
-static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
-{
- for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
- start++) {
- if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
- (RQFCR_AND | RQFCR_CLE))
- return start;
- }
- return -1;
-}
-
-static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
-{
- for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
- start++) {
- if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
- (RQFCR_CLE))
- return start;
- }
- return -1;
-}
-
-/* Uses hardwares clustering option to reduce
- * the number of filer table entries
- */
-static void gfar_cluster_filer(struct filer_table *tab)
-{
- s32 i = -1, j, iend, jend;
-
- while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
- j = i;
- while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
- /* The cluster entries self and the previous one
- * (a mask) must be identical!
- */
- if (tab->fe[i].ctrl != tab->fe[j].ctrl)
- break;
- if (tab->fe[i].prop != tab->fe[j].prop)
- break;
- if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
- break;
- if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
- break;
- iend = gfar_get_next_cluster_end(i, tab);
- jend = gfar_get_next_cluster_end(j, tab);
- if (jend == -1 || iend == -1)
- break;
-
- /* First we make some free space, where our cluster
- * element should be. Then we copy it there and finally
- * delete in from its old location.
- */
- if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
- -EINVAL)
- break;
-
- gfar_copy_filer_entries(&(tab->fe[iend + 1]),
- &(tab->fe[jend + 1]), jend - j);
-
- if (gfar_trim_filer_entries(jend - 1,
- jend + (jend - j),
- tab) == -EINVAL)
- return;
-
- /* Mask out cluster bit */
- tab->fe[iend].ctrl &= ~(RQFCR_CLE);
- }
- }
-}
-
-/* Swaps the masked bits of a1<>a2 and b1<>b2 */
-static void gfar_swap_bits(struct gfar_filer_entry *a1,
- struct gfar_filer_entry *a2,
- struct gfar_filer_entry *b1,
- struct gfar_filer_entry *b2, u32 mask)
-{
- u32 temp[4];
- temp[0] = a1->ctrl & mask;
- temp[1] = a2->ctrl & mask;
- temp[2] = b1->ctrl & mask;
- temp[3] = b2->ctrl & mask;
-
- a1->ctrl &= ~mask;
- a2->ctrl &= ~mask;
- b1->ctrl &= ~mask;
- b2->ctrl &= ~mask;
-
- a1->ctrl |= temp[1];
- a2->ctrl |= temp[0];
- b1->ctrl |= temp[3];
- b2->ctrl |= temp[2];
-}
-
-/* Generate a list consisting of masks values with their start and
- * end of validity and block as indicator for parts belonging
- * together (glued by ANDs) in mask_table
- */
-static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
- struct filer_table *tab)
-{
- u32 i, and_index = 0, block_index = 1;
-
- for (i = 0; i < tab->index; i++) {
-
- /* LSByte of control = 0 sets a mask */
- if (!(tab->fe[i].ctrl & 0xF)) {
- mask_table[and_index].mask = tab->fe[i].prop;
- mask_table[and_index].start = i;
- mask_table[and_index].block = block_index;
- if (and_index >= 1)
- mask_table[and_index - 1].end = i - 1;
- and_index++;
- }
- /* cluster starts and ends will be separated because they should
- * hold their position
- */
- if (tab->fe[i].ctrl & RQFCR_CLE)
- block_index++;
- /* A not set AND indicates the end of a depended block */
- if (!(tab->fe[i].ctrl & RQFCR_AND))
- block_index++;
- }
-
- mask_table[and_index - 1].end = i - 1;
-
- return and_index;
-}
-
-/* Sorts the entries of mask_table by the values of the masks.
- * Important: The 0xFF80 flags of the first and last entry of a
- * block must hold their position (which queue, CLusterEnable, ReJEct,
- * AND)
- */
-static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
- struct filer_table *temp_table, u32 and_index)
-{
- /* Pointer to compare function (_asc or _desc) */
- int (*gfar_comp)(const void *, const void *);
-
- u32 i, size = 0, start = 0, prev = 1;
- u32 old_first, old_last, new_first, new_last;
-
- gfar_comp = &gfar_comp_desc;
-
- for (i = 0; i < and_index; i++) {
- if (prev != mask_table[i].block) {
- old_first = mask_table[start].start + 1;
- old_last = mask_table[i - 1].end;
- sort(mask_table + start, size,
- sizeof(struct gfar_mask_entry),
- gfar_comp, &gfar_swap);
-
- /* Toggle order for every block. This makes the
- * thing more efficient!
- */
- if (gfar_comp == gfar_comp_desc)
- gfar_comp = &gfar_comp_asc;
- else
- gfar_comp = &gfar_comp_desc;
-
- new_first = mask_table[start].start + 1;
- new_last = mask_table[i - 1].end;
-
- gfar_swap_bits(&temp_table->fe[new_first],
- &temp_table->fe[old_first],
- &temp_table->fe[new_last],
- &temp_table->fe[old_last],
- RQFCR_QUEUE | RQFCR_CLE |
- RQFCR_RJE | RQFCR_AND);
-
- start = i;
- size = 0;
- }
- size++;
- prev = mask_table[i].block;
- }
-}
-
-/* Reduces the number of masks needed in the filer table to save entries
- * This is done by sorting the masks of a depended block. A depended block is
- * identified by gluing ANDs or CLE. The sorting order toggles after every
- * block. Of course entries in scope of a mask must change their location with
- * it.
- */
-static int gfar_optimize_filer_masks(struct filer_table *tab)
-{
- struct filer_table *temp_table;
- struct gfar_mask_entry *mask_table;
-
- u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
- s32 ret = 0;
-
- /* We need a copy of the filer table because
- * we want to change its order
- */
- temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
- if (temp_table == NULL)
- return -ENOMEM;
-
- mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
- sizeof(struct gfar_mask_entry), GFP_KERNEL);
-
- if (mask_table == NULL) {
- ret = -ENOMEM;
- goto end;
- }
-
- and_index = gfar_generate_mask_table(mask_table, tab);
-
- gfar_sort_mask_table(mask_table, temp_table, and_index);
-
- /* Now we can copy the data from our duplicated filer table to
- * the real one in the order the mask table says
- */
- for (i = 0; i < and_index; i++) {
- size = mask_table[i].end - mask_table[i].start + 1;
- gfar_copy_filer_entries(&(tab->fe[j]),
- &(temp_table->fe[mask_table[i].start]), size);
- j += size;
- }
-
- /* And finally we just have to check for duplicated masks and drop the
- * second ones
- */
- for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
- if (tab->fe[i].ctrl == 0x80) {
- previous_mask = i++;
- break;
- }
- }
- for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
- if (tab->fe[i].ctrl == 0x80) {
- if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
- /* Two identical ones found!
- * So drop the second one!
- */
- gfar_trim_filer_entries(i, i, tab);
- } else
- /* Not identical! */
- previous_mask = i;
- }
- }
-
- kfree(mask_table);
-end: kfree(temp_table);
- return ret;
-}
-
/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
struct filer_table *tab)
@@ -1586,11 +1258,10 @@ static int gfar_write_filer_table(struct gfar_private *priv,
return -EBUSY;
/* Fill regular entries */
- for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
- i++)
+ for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
/* Fill the rest with fall-throughs */
- for (; i < MAX_FILER_IDX - 1; i++)
+ for (; i < MAX_FILER_IDX; i++)
gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
/* Last entry must be default accept
* because that's what people expect
@@ -1624,7 +1295,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
{
struct ethtool_flow_spec_container *j;
struct filer_table *tab;
- s32 i = 0;
s32 ret = 0;
/* So index is set to zero, too! */
@@ -1649,17 +1319,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
}
}
- i = tab->index;
-
- /* Optimizations to save entries */
- gfar_cluster_filer(tab);
- gfar_optimize_filer_masks(tab);
-
- pr_debug("\tSummary:\n"
- "\tData on hardware: %d\n"
- "\tCompression rate: %d%%\n",
- tab->index, 100 - (100 * tab->index) / i);
-
/* Write everything to hardware */
ret = gfar_write_filer_table(priv, tab);
if (ret == -EBUSY) {
@@ -1725,13 +1384,14 @@ static int gfar_add_cls(struct gfar_private *priv,
}
process:
+ priv->rx_list.count++;
ret = gfar_process_filer_changes(priv);
if (ret)
goto clean_list;
- priv->rx_list.count++;
return ret;
clean_list:
+ priv->rx_list.count--;
list_del(&temp->list);
clean_mem:
kfree(temp);
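
With the optimizer gone, gfar_write_filer_table() simply writes the rules, pads the remaining regular slots with fall-through entries, and leaves the final slot as default accept; the off-by-one fix makes the padding cover the whole regular range. A userspace sketch of that fill follows, with sizes and ctrl/prop encodings invented for illustration.

#include <stdio.h>
#include <stdint.h>

#define MAX_IDX		8	/* last index reserved for default accept */
#define CTRL_FALLTHROUGH	0x60u
#define PROP_DONTCARE	0xFFFFFFFFu

struct filer_entry {
	uint32_t ctrl;
	uint32_t prop;
};

int main(void)
{
	struct filer_entry table[MAX_IDX + 1] = { { 0 } };
	const struct filer_entry rules[] = { { 0x81, 0x10 }, { 0x82, 0x20 } };
	unsigned int nrules = sizeof(rules) / sizeof(rules[0]);
	unsigned int i = 0;

	/* Fill regular entries. */
	for (; i < MAX_IDX && i < nrules; i++)
		table[i] = rules[i];

	/* Pad the rest of the regular range with fall-throughs. */
	for (; i < MAX_IDX; i++) {
		table[i].ctrl = CTRL_FALLTHROUGH;
		table[i].prop = PROP_DONTCARE;
	}

	/* Last entry is the default accept everybody expects. */
	table[MAX_IDX].ctrl = 0x20;
	table[MAX_IDX].prop = 0x0;

	for (i = 0; i <= MAX_IDX; i++)
		printf("%2u: ctrl=0x%02x prop=0x%08x\n",
		       i, table[i].ctrl, table[i].prop);
	return 0;
}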
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 982fdcdc795b..b5b2925103ec 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
static inline bool fm10k_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 2f70a9b152bd..830466c49987 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6566,7 +6566,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
static inline bool igb_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 9aa6104e34ea..ae21e0b06c3a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1832,7 +1832,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
static inline bool ixgbe_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index e71cdde9cb01..1d7b00b038a2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -765,7 +765,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
static inline bool ixgbevf_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
/**
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 370e20ed224c..62e48bc0cb23 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1462,7 +1462,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
struct mvneta_rx_queue *rxq)
{
struct net_device *dev = pp->dev;
- int rx_done, rx_filled;
+ int rx_done;
u32 rcvd_pkts = 0;
u32 rcvd_bytes = 0;
@@ -1473,7 +1473,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
rx_todo = rx_done;
rx_done = 0;
- rx_filled = 0;
/* Fairness NAPI loop */
while (rx_done < rx_todo) {
@@ -1484,7 +1483,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
int rx_bytes, err;
rx_done++;
- rx_filled++;
rx_status = rx_desc->status;
rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
data = (unsigned char *)rx_desc->buf_cookie;
@@ -1524,6 +1522,14 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
continue;
}
+ /* Refill processing */
+ err = mvneta_rx_refill(pp, rx_desc);
+ if (err) {
+ netdev_err(dev, "Linux processing - Can't refill\n");
+ rxq->missed++;
+ goto err_drop_frame;
+ }
+
skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
if (!skb)
goto err_drop_frame;
@@ -1543,14 +1549,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
mvneta_rx_csum(pp, rx_status, skb);
napi_gro_receive(&pp->napi, skb);
-
- /* Refill processing */
- err = mvneta_rx_refill(pp, rx_desc);
- if (err) {
- netdev_err(dev, "Linux processing - Can't refill\n");
- rxq->missed++;
- rx_filled--;
- }
}
if (rcvd_pkts) {
@@ -1563,7 +1561,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
}
/* Update rxq management counters */
- mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
+ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
return rx_done;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 3e8b1bfb1f2e..d9884fd15b45 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -27,6 +27,8 @@
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -299,6 +301,7 @@
/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
+#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
#define MVPP2_RX_COAL_PKTS 32
#define MVPP2_RX_COAL_USEC 100
@@ -660,6 +663,14 @@ struct mvpp2_pcpu_stats {
u64 tx_bytes;
};
+/* Per-CPU port control */
+struct mvpp2_port_pcpu {
+ struct hrtimer tx_done_timer;
+ bool timer_scheduled;
+ /* Tasklet for egress finalization */
+ struct tasklet_struct tx_done_tasklet;
+};
+
struct mvpp2_port {
u8 id;
@@ -679,6 +690,9 @@ struct mvpp2_port {
u32 pending_cause_rx;
struct napi_struct napi;
+ /* Per-CPU port control */
+ struct mvpp2_port_pcpu __percpu *pcpu;
+
/* Flags */
unsigned long flags;
@@ -776,6 +790,9 @@ struct mvpp2_txq_pcpu {
/* Array of transmitted skb */
struct sk_buff **tx_skb;
+ /* Array of transmitted buffers' physical addresses */
+ dma_addr_t *tx_buffs;
+
/* Index of last TX DMA descriptor that was inserted */
int txq_put_index;
@@ -913,8 +930,6 @@ struct mvpp2_bm_pool {
/* Occupied buffers indicator */
atomic_t in_use;
int in_use_thresh;
-
- spinlock_t lock;
};
struct mvpp2_buff_hdr {
@@ -963,9 +978,13 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
}
static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct mvpp2_tx_desc *tx_desc)
{
txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
+ if (skb)
+ txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
+ tx_desc->buf_phys_addr;
txq_pcpu->txq_put_index++;
if (txq_pcpu->txq_put_index == txq_pcpu->size)
txq_pcpu->txq_put_index = 0;
@@ -3376,7 +3395,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
bm_pool->pkt_size = 0;
bm_pool->buf_num = 0;
atomic_set(&bm_pool->in_use, 0);
- spin_lock_init(&bm_pool->lock);
return 0;
}
@@ -3647,7 +3665,6 @@ static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
int pkt_size)
{
- unsigned long flags = 0;
struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
int num;
@@ -3656,8 +3673,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
return NULL;
}
- spin_lock_irqsave(&new_pool->lock, flags);
-
if (new_pool->type == MVPP2_BM_FREE)
new_pool->type = type;
@@ -3686,8 +3701,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
if (num != pkts_num) {
WARN(1, "pool %d: %d of %d allocated\n",
new_pool->id, num, pkts_num);
- /* We need to undo the bufs_add() allocations */
- spin_unlock_irqrestore(&new_pool->lock, flags);
return NULL;
}
}
@@ -3695,15 +3708,12 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
- spin_unlock_irqrestore(&new_pool->lock, flags);
-
return new_pool;
}
/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
- unsigned long flags = 0;
int rxq;
if (!port->pool_long) {
@@ -3714,9 +3724,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
if (!port->pool_long)
return -ENOMEM;
- spin_lock_irqsave(&port->pool_long->lock, flags);
port->pool_long->port_map |= (1 << port->id);
- spin_unlock_irqrestore(&port->pool_long->lock, flags);
for (rxq = 0; rxq < rxq_number; rxq++)
mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
@@ -3730,9 +3738,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
if (!port->pool_short)
return -ENOMEM;
- spin_lock_irqsave(&port->pool_short->lock, flags);
port->pool_short->port_map |= (1 << port->id);
- spin_unlock_irqrestore(&port->pool_short->lock, flags);
for (rxq = 0; rxq < rxq_number; rxq++)
mvpp2_rxq_short_pool_set(port, rxq,
@@ -3806,7 +3812,6 @@ static void mvpp2_interrupts_unmask(void *arg)
mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
(MVPP2_CAUSE_MISC_SUM_MASK |
- MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
}
@@ -4382,23 +4387,6 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
rxq->time_coal = usec;
}
-/* Set threshold for TX_DONE pkts coalescing */
-static void mvpp2_tx_done_pkts_coal_set(void *arg)
-{
- struct mvpp2_port *port = arg;
- int queue;
- u32 val;
-
- for (queue = 0; queue < txq_number; queue++) {
- struct mvpp2_tx_queue *txq = port->txqs[queue];
-
- val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
- MVPP2_TRANSMITTED_THRESH_MASK;
- mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
- }
-}
-
/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq,
@@ -4407,8 +4395,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
int i;
for (i = 0; i < num; i++) {
- struct mvpp2_tx_desc *tx_desc = txq->descs +
- txq_pcpu->txq_get_index;
+ dma_addr_t buf_phys_addr =
+ txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
mvpp2_txq_inc_get(txq_pcpu);
@@ -4416,8 +4404,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
if (!skb)
continue;
- dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr,
- tx_desc->data_size, DMA_TO_DEVICE);
+ dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
}
@@ -4433,7 +4421,7 @@ static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
u32 cause)
{
- int queue = fls(cause >> 16) - 1;
+ int queue = fls(cause) - 1;
return port->txqs[queue];
}
@@ -4460,6 +4448,29 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
netif_tx_wake_queue(nq);
}
+static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
+{
+ struct mvpp2_tx_queue *txq;
+ struct mvpp2_txq_pcpu *txq_pcpu;
+ unsigned int tx_todo = 0;
+
+ while (cause) {
+ txq = mvpp2_get_tx_queue(port, cause);
+ if (!txq)
+ break;
+
+ txq_pcpu = this_cpu_ptr(txq->pcpu);
+
+ if (txq_pcpu->count) {
+ mvpp2_txq_done(port, txq, txq_pcpu);
+ tx_todo += txq_pcpu->count;
+ }
+
+ cause &= ~(1 << txq->log_id);
+ }
+ return tx_todo;
+}
+
/* Rx/Tx queue initialization/cleanup methods */
/* Allocate and initialize descriptors for aggr TXQ */
@@ -4649,12 +4660,13 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
sizeof(*txq_pcpu->tx_skb),
GFP_KERNEL);
- if (!txq_pcpu->tx_skb) {
- dma_free_coherent(port->dev->dev.parent,
- txq->size * MVPP2_DESC_ALIGNED_SIZE,
- txq->descs, txq->descs_phys);
- return -ENOMEM;
- }
+ if (!txq_pcpu->tx_skb)
+ goto error;
+
+ txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
+ sizeof(dma_addr_t), GFP_KERNEL);
+ if (!txq_pcpu->tx_buffs)
+ goto error;
txq_pcpu->count = 0;
txq_pcpu->reserved_num = 0;
@@ -4663,6 +4675,19 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
}
return 0;
+
+error:
+ for_each_present_cpu(cpu) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+ kfree(txq_pcpu->tx_skb);
+ kfree(txq_pcpu->tx_buffs);
+ }
+
+ dma_free_coherent(port->dev->dev.parent,
+ txq->size * MVPP2_DESC_ALIGNED_SIZE,
+ txq->descs, txq->descs_phys);
+
+ return -ENOMEM;
}
/* Free allocated TXQ resources */
@@ -4675,6 +4700,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
kfree(txq_pcpu->tx_skb);
+ kfree(txq_pcpu->tx_buffs);
}
if (txq->descs)
@@ -4805,7 +4831,6 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port)
goto err_cleanup;
}
- on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
return 0;
@@ -4887,6 +4912,49 @@ static void mvpp2_link_event(struct net_device *dev)
}
}
+static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
+{
+ ktime_t interval;
+
+ if (!port_pcpu->timer_scheduled) {
+ port_pcpu->timer_scheduled = true;
+ interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+ hrtimer_start(&port_pcpu->tx_done_timer, interval,
+ HRTIMER_MODE_REL_PINNED);
+ }
+}
+
+static void mvpp2_tx_proc_cb(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+ unsigned int tx_todo, cause;
+
+ if (!netif_running(dev))
+ return;
+ port_pcpu->timer_scheduled = false;
+
+ /* Process all the Tx queues */
+ cause = (1 << txq_number) - 1;
+ tx_todo = mvpp2_tx_done(port, cause);
+
+ /* Set the timer in case not all the packets were processed */
+ if (tx_todo)
+ mvpp2_timer_set(port_pcpu);
+}
+
+static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
+{
+ struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
+ struct mvpp2_port_pcpu,
+ tx_done_timer);
+
+ tasklet_schedule(&port_pcpu->tx_done_tasklet);
+
+ return HRTIMER_NORESTART;
+}
+
/* Main RX/TX processing routines */
/* Display more error info */
@@ -5144,11 +5212,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
if (i == (skb_shinfo(skb)->nr_frags - 1)) {
/* Last descriptor */
tx_desc->command = MVPP2_TXD_L_DESC;
- mvpp2_txq_inc_put(txq_pcpu, skb);
+ mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
} else {
/* Descriptor in the middle: Not First, Not Last */
tx_desc->command = 0;
- mvpp2_txq_inc_put(txq_pcpu, NULL);
+ mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
}
}
@@ -5214,12 +5282,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
/* First and Last descriptor */
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
tx_desc->command = tx_cmd;
- mvpp2_txq_inc_put(txq_pcpu, skb);
+ mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
} else {
/* First but not Last */
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
tx_desc->command = tx_cmd;
- mvpp2_txq_inc_put(txq_pcpu, NULL);
+ mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
/* Continue with other skb fragments */
if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
@@ -5255,6 +5323,17 @@ out:
dev_kfree_skb_any(skb);
}
+ /* Finalize TX processing */
+ if (txq_pcpu->count >= txq->done_pkts_coal)
+ mvpp2_txq_done(port, txq, txq_pcpu);
+
+ /* Set the timer in case not all frags were processed */
+ if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
+ struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+
+ mvpp2_timer_set(port_pcpu);
+ }
+
return NETDEV_TX_OK;
}
@@ -5268,10 +5347,11 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause)
netdev_err(dev, "tx fifo underrun error\n");
}
-static void mvpp2_txq_done_percpu(void *arg)
+static int mvpp2_poll(struct napi_struct *napi, int budget)
{
- struct mvpp2_port *port = arg;
- u32 cause_rx_tx, cause_tx, cause_misc;
+ u32 cause_rx_tx, cause_rx, cause_misc;
+ int rx_done = 0;
+ struct mvpp2_port *port = netdev_priv(napi->dev);
/* Rx/Tx cause register
*
@@ -5285,7 +5365,7 @@ static void mvpp2_txq_done_percpu(void *arg)
*/
cause_rx_tx = mvpp2_read(port->priv,
MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
- cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+ cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
if (cause_misc) {
@@ -5297,26 +5377,6 @@ static void mvpp2_txq_done_percpu(void *arg)
cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
}
- /* Release TX descriptors */
- if (cause_tx) {
- struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
- struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
-
- if (txq_pcpu->count)
- mvpp2_txq_done(port, txq, txq_pcpu);
- }
-}
-
-static int mvpp2_poll(struct napi_struct *napi, int budget)
-{
- u32 cause_rx_tx, cause_rx;
- int rx_done = 0;
- struct mvpp2_port *port = netdev_priv(napi->dev);
-
- on_each_cpu(mvpp2_txq_done_percpu, port, 1);
-
- cause_rx_tx = mvpp2_read(port->priv,
- MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
/* Process RX packets */
@@ -5561,6 +5621,8 @@ err_cleanup_rxqs:
static int mvpp2_stop(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port_pcpu *port_pcpu;
+ int cpu;
mvpp2_stop_dev(port);
mvpp2_phy_disconnect(port);
@@ -5569,6 +5631,13 @@ static int mvpp2_stop(struct net_device *dev)
on_each_cpu(mvpp2_interrupts_mask, port, 1);
free_irq(port->irq, port);
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+ hrtimer_cancel(&port_pcpu->tx_done_timer);
+ port_pcpu->timer_scheduled = false;
+ tasklet_kill(&port_pcpu->tx_done_tasklet);
+ }
mvpp2_cleanup_rxqs(port);
mvpp2_cleanup_txqs(port);
@@ -5784,7 +5853,6 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
txq->done_pkts_coal = c->tx_max_coalesced_frames;
}
- on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
return 0;
}
@@ -6035,6 +6103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
{
struct device_node *phy_node;
struct mvpp2_port *port;
+ struct mvpp2_port_pcpu *port_pcpu;
struct net_device *dev;
struct resource *res;
const char *dt_mac_addr;
@@ -6044,7 +6113,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
int features;
int phy_mode;
int priv_common_regs_num = 2;
- int err, i;
+ int err, i, cpu;
dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
rxq_number);
@@ -6135,6 +6204,24 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
mvpp2_port_power_up(port);
+ port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
+ if (!port->pcpu) {
+ err = -ENOMEM;
+ goto err_free_txq_pcpu;
+ }
+
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+ hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+ port_pcpu->timer_scheduled = false;
+
+ tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
+ (unsigned long)dev);
+ }
+
netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
features = NETIF_F_SG | NETIF_F_IP_CSUM;
dev->features = features | NETIF_F_RXCSUM;
@@ -6144,7 +6231,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register netdev\n");
- goto err_free_txq_pcpu;
+ goto err_free_port_pcpu;
}
netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
@@ -6153,6 +6240,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
priv->port_list[id] = port;
return 0;
+err_free_port_pcpu:
+ free_percpu(port->pcpu);
err_free_txq_pcpu:
for (i = 0; i < txq_number; i++)
free_percpu(port->txqs[i]->pcpu);
@@ -6171,6 +6260,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
int i;
unregister_netdev(port->dev);
+ free_percpu(port->pcpu);
free_percpu(port->stats);
for (i = 0; i < txq_number; i++)
free_percpu(port->txqs[i]->pcpu);
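
mvpp2_tx_done() above walks the TX cause bitmap queue by queue: take the highest set bit, service that queue, clear the bit, repeat until nothing is left. A standalone sketch of that walk; the per-queue bookkeeping and fls() replacement are invented for illustration.

#include <stdio.h>

static int pending[8] = { 0, 3, 0, 7, 0, 0, 1, 0 };

static int fls32(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static unsigned int tx_done(unsigned int cause)
{
	unsigned int tx_todo = 0;

	while (cause) {
		int queue = fls32(cause) - 1;	/* highest pending queue */

		if (pending[queue]) {
			printf("queue %d: completing %d descriptors\n",
			       queue, pending[queue]);
			tx_todo += pending[queue];
			pending[queue] = 0;
		}
		cause &= ~(1u << queue);	/* done with this queue */
	}
	return tx_todo;
}

int main(void)
{
	/* Bits 1, 3 and 6 pretend to have TX-done work. */
	printf("total completed: %u\n",
	       tx_done((1u << 1) | (1u << 3) | (1u << 6)));
	return 0;
}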
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 82040137d7d9..0a3202047569 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -686,6 +686,7 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
{
struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
struct mlx4_cmd_context *context;
+ long ret_wait;
int err = 0;
down(&cmd->event_sem);
@@ -711,8 +712,20 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
if (err)
goto out_reset;
- if (!wait_for_completion_timeout(&context->done,
- msecs_to_jiffies(timeout))) {
+ if (op == MLX4_CMD_SENSE_PORT) {
+ ret_wait =
+ wait_for_completion_interruptible_timeout(&context->done,
+ msecs_to_jiffies(timeout));
+ if (ret_wait < 0) {
+ context->fw_status = 0;
+ context->out_param = 0;
+ context->result = 0;
+ }
+ } else {
+ ret_wait = (long)wait_for_completion_timeout(&context->done,
+ msecs_to_jiffies(timeout));
+ }
+ if (!ret_wait) {
mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
op);
if (op == MLX4_CMD_NOP) {
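For context, the two completion waits used in the mlx4 hunk above differ in how they report the outcome. A minimal sketch of those return conventions, using a hypothetical helper name:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Hypothetical helper: wait on 'done' for timeout_ms milliseconds. */
static int my_wait(struct completion *done, unsigned int timeout_ms,
		   bool interruptible)
{
	long ret;

	if (interruptible) {
		/* long: <0 if a signal arrived (-ERESTARTSYS), 0 on timeout,
		 * >0 jiffies remaining on success.
		 */
		ret = wait_for_completion_interruptible_timeout(done,
					msecs_to_jiffies(timeout_ms));
		if (ret < 0)
			return ret;
	} else {
		/* unsigned long: 0 on timeout, >0 jiffies remaining. */
		ret = (long)wait_for_completion_timeout(done,
					msecs_to_jiffies(timeout_ms));
	}
	return ret ? 0 : -ETIMEDOUT;
}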
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 7a4f20bb7fcb..9c145dddd717 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -246,7 +246,6 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
{
- BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
return ring->prod == ring->cons;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index aae13adfb492..8e81e53c370e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -601,7 +601,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
continue;
mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
__func__, i, port);
- s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
+ s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
eqe->event.port_change.port =
cpu_to_be32(
@@ -640,7 +640,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
continue;
if (i == mlx4_master_func_num(dev))
continue;
- s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
+ s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
eqe->event.port_change.port =
cpu_to_be32(
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 12fbfcb44d8a..29c2a017a450 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2273,6 +2273,11 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
} else if (err == -ENOENT) {
err = 0;
continue;
+ } else if (mlx4_is_slave(dev) && err == -EINVAL) {
+ priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
+ mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
+ MLX4_SINK_COUNTER_INDEX(dev));
+ err = 0;
} else {
mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
__func__, port + 1, err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index afad529838de..06e3e1e54c35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -391,6 +391,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
/* disable cmdif checksum */
MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
+ MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
+
err = set_caps(dev, set_ctx, set_sz);
query_ex:
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index f78909a00f15..09d2e16fd6b0 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -952,9 +952,8 @@ static int ks8842_alloc_dma_bufs(struct net_device *netdev)
sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
- err = dma_mapping_error(adapter->dev,
- sg_dma_address(&tx_ctl->sg));
- if (err) {
+ if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
+ err = -ENOMEM;
sg_dma_address(&tx_ctl->sg) = 0;
goto err;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 33669c29b341..753ea8bad953 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1415,7 +1415,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
if (fw->size & 0xF) {
addr = dest + size;
for (i = 0; i < (fw->size & 0xF); i++)
- data[i] = temp[size + i];
+ data[i] = ((u8 *)temp)[size + i];
for (; i < 16; i++)
data[i] = 0;
ret = qlcnic_ms_mem_write128(adapter, addr,
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3df51faf18ae..f790f61ea78a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4875,10 +4875,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_46:
case RTL_GIGA_MAC_VER_47:
case RTL_GIGA_MAC_VER_48:
+ RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ break;
case RTL_GIGA_MAC_VER_49:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
- RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
default:
RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index fd9745714d90..78849dd4ef8e 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -228,9 +228,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
struct ravb_desc *desc = NULL;
int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
- struct sk_buff *skb;
dma_addr_t dma_addr;
- void *buffer;
int i;
priv->cur_rx[q] = 0;
@@ -241,41 +239,28 @@ static void ravb_ring_format(struct net_device *ndev, int q)
memset(priv->rx_ring[q], 0, rx_ring_size);
/* Build RX ring buffer */
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- priv->rx_skb[q][i] = NULL;
- skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
- if (!skb)
- break;
- ravb_set_buffer_align(skb);
/* RX descriptor */
rx_desc = &priv->rx_ring[q][i];
/* The size of the buffer should be on 16-byte boundary. */
rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
- dma_addr = dma_map_single(&ndev->dev, skb->data,
+ dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
ALIGN(PKT_BUF_SZ, 16),
DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, dma_addr)) {
- dev_kfree_skb(skb);
- break;
- }
- priv->rx_skb[q][i] = skb;
+ /* We just set the data size to 0 for a failed mapping which
+ * should prevent DMA from happening...
+ */
+ if (dma_mapping_error(&ndev->dev, dma_addr))
+ rx_desc->ds_cc = cpu_to_le16(0);
rx_desc->dptr = cpu_to_le32(dma_addr);
rx_desc->die_dt = DT_FEMPTY;
}
rx_desc = &priv->rx_ring[q][i];
rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
rx_desc->die_dt = DT_LINKFIX; /* type */
- priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]);
memset(priv->tx_ring[q], 0, tx_ring_size);
/* Build TX ring buffer */
for (i = 0; i < priv->num_tx_ring[q]; i++) {
- priv->tx_skb[q][i] = NULL;
- priv->tx_buffers[q][i] = NULL;
- buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
- if (!buffer)
- break;
- /* Aligned TX buffer */
- priv->tx_buffers[q][i] = buffer;
tx_desc = &priv->tx_ring[q][i];
tx_desc->die_dt = DT_EEMPTY;
}
@@ -298,7 +283,10 @@ static void ravb_ring_format(struct net_device *ndev, int q)
static int ravb_ring_init(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
+ struct sk_buff *skb;
int ring_size;
+ void *buffer;
+ int i;
/* Allocate RX and TX skb rings */
priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -308,12 +296,28 @@ static int ravb_ring_init(struct net_device *ndev, int q)
if (!priv->rx_skb[q] || !priv->tx_skb[q])
goto error;
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
+ if (!skb)
+ goto error;
+ ravb_set_buffer_align(skb);
+ priv->rx_skb[q][i] = skb;
+ }
+
/* Allocate rings for the aligned buffers */
priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
if (!priv->tx_buffers[q])
goto error;
+ for (i = 0; i < priv->num_tx_ring[q]; i++) {
+ buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
+ if (!buffer)
+ goto error;
+ /* Aligned TX buffer */
+ priv->tx_buffers[q][i] = buffer;
+ }
+
/* Allocate all RX descriptors. */
ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
@@ -524,6 +528,10 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
if (--boguscnt < 0)
break;
+ /* We use 0-byte descriptors to mark the DMA mapping errors */
+ if (!pkt_len)
+ continue;
+
if (desc_status & MSC_MC)
stats->multicast++;
@@ -543,10 +551,9 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
skb = priv->rx_skb[q][entry];
priv->rx_skb[q][entry] = NULL;
- dma_sync_single_for_cpu(&ndev->dev,
- le32_to_cpu(desc->dptr),
- ALIGN(PKT_BUF_SZ, 16),
- DMA_FROM_DEVICE);
+ dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+ ALIGN(PKT_BUF_SZ, 16),
+ DMA_FROM_DEVICE);
get_ts &= (q == RAVB_NC) ?
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
@@ -584,17 +591,15 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
if (!skb)
break; /* Better luck next round. */
ravb_set_buffer_align(skb);
- dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
- ALIGN(PKT_BUF_SZ, 16),
- DMA_FROM_DEVICE);
dma_addr = dma_map_single(&ndev->dev, skb->data,
le16_to_cpu(desc->ds_cc),
DMA_FROM_DEVICE);
skb_checksum_none_assert(skb);
- if (dma_mapping_error(&ndev->dev, dma_addr)) {
- dev_kfree_skb_any(skb);
- break;
- }
+ /* We just set the data size to 0 for a failed mapping
+ * which should prevent DMA from happening...
+ */
+ if (dma_mapping_error(&ndev->dev, dma_addr))
+ desc->ds_cc = cpu_to_le16(0);
desc->dptr = cpu_to_le32(dma_addr);
priv->rx_skb[q][entry] = skb;
}
@@ -1279,7 +1284,6 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 dma_addr;
void *buffer;
u32 entry;
- u32 tccr;
spin_lock_irqsave(&priv->lock, flags);
if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
@@ -1328,9 +1332,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
dma_wmb();
desc->die_dt = DT_FSINGLE;
- tccr = ravb_read(ndev, TCCR);
- if (!(tccr & (TCCR_TSRQ0 << q)))
- ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR);
+ ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
priv->cur_tx[q]++;
if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
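The ravb rework above follows the usual streaming-DMA rule: every dma_map_single() is checked with dma_mapping_error() and later paired with dma_unmap_single(). A small sketch of that pattern, with hypothetical helper names:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

/* Hypothetical helpers: map/unmap one RX buffer for the device. */
static int my_map_rx(struct device *dev, struct sk_buff *skb,
		     size_t len, dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;		/* never hand a failed mapping to HW */

	*out = addr;
	return 0;
}

static void my_unmap_rx(struct device *dev, dma_addr_t addr, size_t len)
{
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
}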
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 2d8578cade03..2e7f9a2834be 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -4821,6 +4821,7 @@ static void rocker_remove_ports(const struct rocker *rocker)
rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
ROCKER_OP_FLAG_REMOVE);
unregister_netdev(rocker_port->dev);
+ free_netdev(rocker_port->dev);
}
kfree(rocker->ports);
}
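The rocker one-liner above restores the normal net_device lifecycle, in which unregister_netdev() is followed by free_netdev(). A minimal sketch of that pairing, with hypothetical names:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

struct my_priv { int dummy; };		/* hypothetical private data */

static struct net_device *my_create(void)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct my_priv));

	if (!dev)
		return NULL;
	if (register_netdev(dev)) {
		free_netdev(dev);	/* registration failed: free now */
		return NULL;
	}
	return dev;
}

static void my_destroy(struct net_device *dev)
{
	unregister_netdev(dev);		/* detach from the stack first */
	free_netdev(dev);		/* then release the memory */
}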
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 847643455468..605cc8948594 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -101,6 +101,11 @@ static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
return resource_size(&efx->pci_dev->resource[bar]);
}
+static bool efx_ef10_is_vf(struct efx_nic *efx)
+{
+ return efx->type->is_vf;
+}
+
static int efx_ef10_get_pf_index(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
@@ -677,6 +682,48 @@ static int efx_ef10_probe_pf(struct efx_nic *efx)
return efx_ef10_probe(efx);
}
+int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
+ return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+int efx_ef10_vport_add_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
+ ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+}
+
+int efx_ef10_vport_del_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
+ ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+}
+
#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_probe_vf(struct efx_nic *efx)
{
@@ -3804,6 +3851,72 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
WARN_ON(remove_failed);
}
+static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u8 mac_old[ETH_ALEN];
+ int rc, rc2;
+
+ /* Only reconfigure a PF-created vport */
+ if (is_zero_ether_addr(nic_data->vport_mac))
+ return 0;
+
+ efx_device_detach_sync(efx);
+ efx_net_stop(efx->net_dev);
+ down_write(&efx->filter_sem);
+ efx_ef10_filter_table_remove(efx);
+ up_write(&efx->filter_sem);
+
+ rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
+ if (rc)
+ goto restore_filters;
+
+ ether_addr_copy(mac_old, nic_data->vport_mac);
+ rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
+ nic_data->vport_mac);
+ if (rc)
+ goto restore_vadaptor;
+
+ rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
+ efx->net_dev->dev_addr);
+ if (!rc) {
+ ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
+ } else {
+ rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
+ if (rc2) {
+ /* Failed to add original MAC, so clear vport_mac */
+ eth_zero_addr(nic_data->vport_mac);
+ goto reset_nic;
+ }
+ }
+
+restore_vadaptor:
+ rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ if (rc2)
+ goto reset_nic;
+restore_filters:
+ down_write(&efx->filter_sem);
+ rc2 = efx_ef10_filter_table_probe(efx);
+ up_write(&efx->filter_sem);
+ if (rc2)
+ goto reset_nic;
+
+ rc2 = efx_net_open(efx->net_dev);
+ if (rc2)
+ goto reset_nic;
+
+ netif_device_attach(efx->net_dev);
+
+ return rc;
+
+reset_nic:
+ netif_err(efx, drv, efx->net_dev,
+ "Failed to restore when changing MAC address - scheduling reset\n");
+ efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
+
+ return rc ? rc : rc2;
+}
+
static int efx_ef10_set_mac_address(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
@@ -3820,8 +3933,8 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
efx->net_dev->dev_addr);
MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
nic_data->vport_id);
- rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
- sizeof(inbuf), NULL, 0, NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
efx_ef10_filter_table_probe(efx);
up_write(&efx->filter_sem);
@@ -3829,38 +3942,27 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
efx_net_open(efx->net_dev);
netif_device_attach(efx->net_dev);
-#if !defined(CONFIG_SFC_SRIOV)
- if (rc == -EPERM)
- netif_err(efx, drv, efx->net_dev,
- "Cannot change MAC address; use sfboot to enable mac-spoofing"
- " on this interface\n");
-#else
- if (rc == -EPERM) {
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
- /* Switch to PF and change MAC address on vport */
- if (efx->pci_dev->is_virtfn && pci_dev_pf) {
- struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+ if (rc == -EPERM) {
+ struct efx_nic *efx_pf;
- if (!efx_ef10_sriov_set_vf_mac(efx_pf,
- nic_data->vf_index,
- efx->net_dev->dev_addr))
- return 0;
- }
- netif_err(efx, drv, efx->net_dev,
- "Cannot change MAC address; use sfboot to enable mac-spoofing"
- " on this interface\n");
- } else if (efx->pci_dev->is_virtfn) {
- /* Successfully changed by VF (with MAC spoofing), so update the
- * parent PF if possible.
- */
- struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+ /* Switch to PF and change MAC address on vport */
+ efx_pf = pci_get_drvdata(pci_dev_pf);
- if (pci_dev_pf) {
+ rc = efx_ef10_sriov_set_vf_mac(efx_pf,
+ nic_data->vf_index,
+ efx->net_dev->dev_addr);
+ } else if (!rc) {
struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
unsigned int i;
+ /* MAC address successfully changed by VF (with MAC
+ * spoofing) so update the parent PF if possible.
+ */
for (i = 0; i < efx_pf->vf_count; ++i) {
struct ef10_vf *vf = nic_data->vf + i;
@@ -3871,8 +3973,24 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
}
}
}
- }
+ } else
#endif
+ if (rc == -EPERM) {
+ netif_err(efx, drv, efx->net_dev,
+ "Cannot change MAC address; use sfboot to enable"
+ " mac-spoofing on this interface\n");
+ } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
+ /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC,
+ * fall back to the method of changing the MAC address on the
+ * vport. This only applies to PFs because such versions of
+ * MCFW do not support VFs.
+ */

+ rc = efx_ef10_vport_set_mac_address(efx);
+ } else {
+ efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
+ sizeof(inbuf), NULL, 0, rc);
+ }
+
return rc;
}
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 6c9b6e45509a..3c17f274e802 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -29,30 +29,6 @@ static int efx_ef10_evb_port_assign(struct efx_nic *efx, unsigned int port_id,
NULL, 0, NULL);
}
-static int efx_ef10_vport_add_mac(struct efx_nic *efx,
- unsigned int port_id, u8 *mac)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
-
- MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
- ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
-
- return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
- sizeof(inbuf), NULL, 0, NULL);
-}
-
-static int efx_ef10_vport_del_mac(struct efx_nic *efx,
- unsigned int port_id, u8 *mac)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
-
- MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
- ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
-
- return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
- sizeof(inbuf), NULL, 0, NULL);
-}
-
static int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id,
unsigned int vswitch_type)
{
@@ -136,24 +112,6 @@ static int efx_ef10_vport_free(struct efx_nic *efx, unsigned int port_id)
NULL, 0, NULL);
}
-static int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
-
- MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
- return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
-}
-
-static int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
-
- MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
- return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
-}
-
static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -640,21 +598,21 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
vf->vlan, &vf->vport_id);
if (rc)
- goto reset_nic;
+ goto reset_nic_up_write;
restore_mac:
if (!is_zero_ether_addr(vf->mac)) {
rc2 = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac);
if (rc2) {
eth_zero_addr(vf->mac);
- goto reset_nic;
+ goto reset_nic_up_write;
}
}
restore_evb_port:
rc2 = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
if (rc2)
- goto reset_nic;
+ goto reset_nic_up_write;
else
vf->vport_assigned = 1;
@@ -662,14 +620,16 @@ restore_vadaptor:
if (vf->efx) {
rc2 = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
if (rc2)
- goto reset_nic;
+ goto reset_nic_up_write;
}
restore_filters:
if (vf->efx) {
rc2 = vf->efx->type->filter_table_probe(vf->efx);
if (rc2)
- goto reset_nic;
+ goto reset_nic_up_write;
+
+ up_write(&vf->efx->filter_sem);
up_write(&vf->efx->filter_sem);
@@ -681,9 +641,12 @@ restore_filters:
}
return rc;
+reset_nic_up_write:
+ if (vf->efx)
+ up_write(&vf->efx->filter_sem);
+
reset_nic:
if (vf->efx) {
- up_write(&vf->efx->filter_sem);
netif_err(efx, drv, efx->net_dev,
"Failed to restore VF - scheduling reset.\n");
efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH);
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
index db4ef537c610..6d25b92cb45e 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.h
+++ b/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -65,5 +65,11 @@ int efx_ef10_vswitching_restore_pf(struct efx_nic *efx);
int efx_ef10_vswitching_restore_vf(struct efx_nic *efx);
void efx_ef10_vswitching_remove_pf(struct efx_nic *efx);
void efx_ef10_vswitching_remove_vf(struct efx_nic *efx);
+int efx_ef10_vport_add_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac);
+int efx_ef10_vport_del_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac);
+int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id);
+int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id);
#endif /* EF10_SRIOV_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 804b9ad553d3..03bc03b67f08 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -245,11 +245,17 @@ static int efx_check_disabled(struct efx_nic *efx)
*/
static int efx_process_channel(struct efx_channel *channel, int budget)
{
+ struct efx_tx_queue *tx_queue;
int spent;
if (unlikely(!channel->enabled))
return 0;
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->pkts_compl = 0;
+ tx_queue->bytes_compl = 0;
+ }
+
spent = efx_nic_process_eventq(channel, budget);
if (spent && efx_channel_has_rx_queue(channel)) {
struct efx_rx_queue *rx_queue =
@@ -259,6 +265,14 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
efx_fast_push_rx_descriptors(rx_queue, true);
}
+ /* Update BQL */
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->bytes_compl) {
+ netdev_tx_completed_queue(tx_queue->core_txq,
+ tx_queue->pkts_compl, tx_queue->bytes_compl);
+ }
+ }
+
return spent;
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index d72f522bf9c3..47d1e3a96522 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -241,6 +241,8 @@ struct efx_tx_queue {
unsigned int read_count ____cacheline_aligned_in_smp;
unsigned int old_write_count;
unsigned int merge_events;
+ unsigned int bytes_compl;
+ unsigned int pkts_compl;
/* Members used only on the xmit path */
unsigned int insert_count ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index aaf2987512b5..1833a0146571 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -617,7 +617,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
- netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
+ tx_queue->pkts_compl += pkts_compl;
+ tx_queue->bytes_compl += bytes_compl;
if (pkts_compl > 1)
++tx_queue->merge_events;
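The sfc hunks above batch BQL accounting: completions are accumulated per TX queue during a poll and reported once with netdev_tx_completed_queue(). A small sketch of that accumulate-then-report shape, using hypothetical field names:

#include <linux/netdevice.h>

/* Hypothetical per-queue accumulator fields. */
struct my_txq {
	struct netdev_queue *core_txq;
	unsigned int pkts_compl;
	unsigned int bytes_compl;
};

/* Called for each completed packet, possibly many times per poll. */
static void my_tx_done(struct my_txq *txq, unsigned int bytes)
{
	txq->pkts_compl++;
	txq->bytes_compl += bytes;
}

/* Called once at the end of the NAPI poll to report to BQL. */
static void my_tx_flush_bql(struct my_txq *txq)
{
	if (!txq->bytes_compl)
		return;

	netdev_tx_completed_queue(txq->core_txq,
				  txq->pkts_compl, txq->bytes_compl);
	txq->pkts_compl = 0;
	txq->bytes_compl = 0;
}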
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 7e3129e7f143..f0e4bb4e3ec5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -42,7 +42,7 @@
#define NSS_COMMON_CLK_DIV_MASK 0x7f
#define NSS_COMMON_CLK_SRC_CTRL 0x14
-#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (1 << x)
+#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (x)
/* Mode is coded on 1 bit but is different depending on the MAC ID:
* MAC0: QSGMII=0 RGMII=1
* MAC1: QSGMII=0 SGMII=0 RGMII=1
@@ -291,7 +291,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
/* Configure the clock src according to the mode */
regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
- val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+ val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
switch (gmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
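After the ipq806x change above, NSS_COMMON_CLK_SRC_CTRL_OFFSET() yields a bit position rather than a ready-made mask, so mask users shift it while shift users take it directly. A tiny, runnable illustration with example values (the shortened macro name and values are only for illustration):

#include <stdint.h>
#include <stdio.h>

/* Post-change style: the macro is a bit position, not a ready-made mask. */
#define CLK_SRC_CTRL_OFFSET(x)	(x)

int main(void)
{
	uint32_t val = 0x0f;
	unsigned int id = 1;	/* example MAC id */
	uint32_t mode = 0;	/* example mode bit value */

	/* Mask use: shift 1 into place before clearing the bit. */
	val &= ~(1u << CLK_SRC_CTRL_OFFSET(id));
	/* Shift use: the position can be used directly. */
	val |= mode << CLK_SRC_CTRL_OFFSET(id);

	printf("val=0x%x\n", val);	/* prints val=0xd */
	return 0;
}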
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 50f7a7a26821..864b476f7fd5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2843,7 +2843,7 @@ int stmmac_dvr_probe(struct device *device,
if (res->mac)
memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
- dev_set_drvdata(device, priv);
+ dev_set_drvdata(device, priv->dev);
/* Verify driver arguments */
stmmac_verify_args();
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index f3918c7e7eeb..bcdc8955c719 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -413,3 +413,7 @@ static int stmmac_pltfr_resume(struct device *dev)
SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
stmmac_pltfr_resume);
EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 0c5842aeb807..ab6051a43134 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6658,10 +6658,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
struct sk_buff *skb_new;
skb_new = skb_realloc_headroom(skb, len);
- if (!skb_new) {
- rp->tx_errors++;
+ if (!skb_new)
goto out_drop;
- }
kfree_skb(skb);
skb = skb_new;
} else
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 462820514fae..d155bf2573cd 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -138,19 +138,6 @@ do { \
#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
-#define cpsw_enable_irq(priv) \
- do { \
- u32 i; \
- for (i = 0; i < priv->num_irqs; i++) \
- enable_irq(priv->irqs_table[i]); \
- } while (0)
-#define cpsw_disable_irq(priv) \
- do { \
- u32 i; \
- for (i = 0; i < priv->num_irqs; i++) \
- disable_irq_nosync(priv->irqs_table[i]); \
- } while (0)
-
#define cpsw_slave_index(priv) \
((priv->data.dual_emac) ? priv->emac_port : \
priv->data.active_slave)
@@ -509,9 +496,11 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = {
(func)(slave++, ##arg); \
} while (0)
#define cpsw_get_slave_ndev(priv, __slave_no__) \
- (priv->slaves[__slave_no__].ndev)
+ ((__slave_no__ < priv->data.slaves) ? \
+ priv->slaves[__slave_no__].ndev : NULL)
#define cpsw_get_slave_priv(priv, __slave_no__) \
- ((priv->slaves[__slave_no__].ndev) ? \
+ (((__slave_no__ < priv->data.slaves) && \
+ (priv->slaves[__slave_no__].ndev)) ? \
netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \
#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \
@@ -781,7 +770,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
cpsw_intr_disable(priv);
if (priv->irq_enabled == true) {
- cpsw_disable_irq(priv);
+ disable_irq_nosync(priv->irqs_table[0]);
priv->irq_enabled = false;
}
@@ -804,9 +793,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
static int cpsw_poll(struct napi_struct *napi, int budget)
{
struct cpsw_priv *priv = napi_to_priv(napi);
- int num_tx, num_rx;
-
- num_tx = cpdma_chan_process(priv->txch, 128);
+ int num_rx;
num_rx = cpdma_chan_process(priv->rxch, budget);
if (num_rx < budget) {
@@ -817,13 +804,12 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
prim_cpsw = cpsw_get_slave_priv(priv, 0);
if (prim_cpsw->irq_enabled == false) {
prim_cpsw->irq_enabled = true;
- cpsw_enable_irq(priv);
+ enable_irq(priv->irqs_table[0]);
}
}
- if (num_rx || num_tx)
- cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
- num_rx, num_tx);
+ if (num_rx)
+ cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx);
return num_rx;
}
@@ -1333,7 +1319,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (prim_cpsw->irq_enabled == false) {
if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
prim_cpsw->irq_enabled = true;
- cpsw_enable_irq(prim_cpsw);
+ enable_irq(prim_cpsw->irqs_table[0]);
}
}
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index bbacf5cccec2..bb1bb72121c0 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -85,7 +85,6 @@ struct netcp_intf {
struct list_head rxhook_list_head;
unsigned int rx_queue_id;
void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
- u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
struct napi_struct rx_napi;
struct napi_struct tx_napi;
@@ -223,6 +222,7 @@ void *netcp_device_find_module(struct netcp_device *netcp_device,
/* SGMII functions */
int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port);
+bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set);
int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port);
int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 5ec4ed3f6c8d..4755838c6137 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -34,6 +34,7 @@
#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD)
#define NETCP_NAPI_WEIGHT 64
#define NETCP_TX_TIMEOUT (5 * HZ)
+#define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN)
#define NETCP_MIN_PACKET_SIZE ETH_ZLEN
#define NETCP_MAX_MCAST_ADDR 16
@@ -804,30 +805,28 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
if (likely(fdq == 0)) {
unsigned int primary_buf_len;
/* Allocate a primary receive queue entry */
- buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET;
+ buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
primary_buf_len = SKB_DATA_ALIGN(buf_len) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- if (primary_buf_len <= PAGE_SIZE) {
- bufptr = netdev_alloc_frag(primary_buf_len);
- pad[1] = primary_buf_len;
- } else {
- bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
- GFP_DMA32 | __GFP_COLD);
- pad[1] = 0;
- }
+ bufptr = netdev_alloc_frag(primary_buf_len);
+ pad[1] = primary_buf_len;
if (unlikely(!bufptr)) {
- dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n");
+ dev_warn_ratelimited(netcp->ndev_dev,
+ "Primary RX buffer alloc failed\n");
goto fail;
}
dma = dma_map_single(netcp->dev, bufptr, buf_len,
DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(netcp->dev, dma)))
+ goto fail;
+
pad[0] = (u32)bufptr;
} else {
/* Allocate a secondary receive queue entry */
- page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD);
+ page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
if (unlikely(!page)) {
dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
goto fail;
@@ -1010,7 +1009,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
/* Map the linear buffer */
dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
- if (unlikely(!dma_addr)) {
+ if (unlikely(dma_mapping_error(dev, dma_addr))) {
dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
return NULL;
}
@@ -1546,8 +1545,8 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
knav_queue_disable_notify(netcp->rx_queue);
/* open Rx FDQs */
- for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
- netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) {
+ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
+ ++i) {
snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
@@ -1617,11 +1616,11 @@ static int netcp_ndo_open(struct net_device *ndev)
}
mutex_unlock(&netcp_modules_lock);
- netcp_rxpool_refill(netcp);
napi_enable(&netcp->rx_napi);
napi_enable(&netcp->tx_napi);
knav_queue_enable_notify(netcp->tx_compl_q);
knav_queue_enable_notify(netcp->rx_queue);
+ netcp_rxpool_refill(netcp);
netif_tx_wake_all_queues(ndev);
dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
return 0;
@@ -1941,14 +1940,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
netcp->rx_queue_depths[0] = 128;
}
- ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
- netcp->rx_buffer_sizes,
- KNAV_DMA_FDQ_PER_CHAN);
- if (ret) {
- dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
- netcp->rx_buffer_sizes[0] = 1536;
- }
-
ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
if (ret < 0) {
dev_err(dev, "missing \"rx-pool\" parameter\n");
@@ -2112,6 +2103,7 @@ probe_quit:
static int netcp_remove(struct platform_device *pdev)
{
struct netcp_device *netcp_device = platform_get_drvdata(pdev);
+ struct netcp_intf *netcp_intf, *netcp_tmp;
struct netcp_inst_modpriv *inst_modpriv, *tmp;
struct netcp_module *module;
@@ -2123,10 +2115,17 @@ static int netcp_remove(struct platform_device *pdev)
list_del(&inst_modpriv->inst_list);
kfree(inst_modpriv);
}
- WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n",
- pdev->name);
- devm_kfree(&pdev->dev, netcp_device);
+ /* now that all modules are removed, clean up the interfaces */
+ list_for_each_entry_safe(netcp_intf, netcp_tmp,
+ &netcp_device->interface_head,
+ interface_list) {
+ netcp_delete_interface(netcp_device, netcp_intf->ndev);
+ }
+
+ WARN(!list_empty(&netcp_device->interface_head),
+ "%s interface list not empty!\n", pdev->name);
+
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 9b7e0a34c98b..1974a8ae764a 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -1901,11 +1901,28 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
}
+static void gbe_sgmii_rtreset(struct gbe_priv *priv,
+ struct gbe_slave *slave, bool set)
+{
+ void __iomem *sgmii_port_regs;
+
+ if (SLAVE_LINK_IS_XGMII(slave))
+ return;
+
+ if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
+ sgmii_port_regs = priv->sgmii_port34_regs;
+ else
+ sgmii_port_regs = priv->sgmii_port_regs;
+
+ netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set);
+}
+
static void gbe_slave_stop(struct gbe_intf *intf)
{
struct gbe_priv *gbe_dev = intf->gbe_dev;
struct gbe_slave *slave = intf->slave;
+ gbe_sgmii_rtreset(gbe_dev, slave, true);
gbe_port_reset(slave);
/* Disable forwarding */
cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
@@ -1947,6 +1964,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
gbe_sgmii_config(priv, slave);
gbe_port_reset(slave);
+ gbe_sgmii_rtreset(priv, slave, false);
gbe_port_config(priv, slave, priv->rx_packet_max);
gbe_set_slave_mac(slave, gbe_intf);
/* enable forwarding */
@@ -2490,10 +2508,9 @@ static void free_secondary_ports(struct gbe_priv *gbe_dev)
{
struct gbe_slave *slave;
- for (;;) {
+ while (!list_empty(&gbe_dev->secondary_slaves)) {
slave = first_sec_slave(gbe_dev);
- if (!slave)
- break;
+
if (slave->phy)
phy_disconnect(slave->phy);
list_del(&slave->slave_list);
@@ -2839,14 +2856,13 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
&gbe_dev->dma_chan_name);
if (ret < 0) {
dev_err(dev, "missing \"tx-channel\" parameter\n");
- ret = -ENODEV;
- goto quit;
+ return -EINVAL;
}
if (!strcmp(node->name, "gbe")) {
ret = get_gbe_resource_version(gbe_dev, node);
if (ret)
- goto quit;
+ return ret;
dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
@@ -2857,22 +2873,20 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
else
ret = -ENODEV;
- if (ret)
- goto quit;
} else if (!strcmp(node->name, "xgbe")) {
ret = set_xgbe_ethss10_priv(gbe_dev, node);
if (ret)
- goto quit;
+ return ret;
ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
gbe_dev->ss_regs);
- if (ret)
- goto quit;
} else {
dev_err(dev, "unknown GBE node(%s)\n", node->name);
ret = -ENODEV;
- goto quit;
}
+ if (ret)
+ return ret;
+
interfaces = of_get_child_by_name(node, "interfaces");
if (!interfaces)
dev_err(dev, "could not find interfaces\n");
@@ -2880,11 +2894,11 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
if (ret)
- goto quit;
+ return ret;
ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
if (ret)
- goto quit;
+ return ret;
/* Create network interfaces */
INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
@@ -2899,6 +2913,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
break;
}
+ of_node_put(interfaces);
if (!gbe_dev->num_slaves)
dev_warn(dev, "No network interface configured\n");
@@ -2911,9 +2926,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
of_node_put(secondary_ports);
if (!gbe_dev->num_slaves) {
- dev_err(dev, "No network interface or secondary ports configured\n");
+ dev_err(dev,
+ "No network interface or secondary ports configured\n");
ret = -ENODEV;
- goto quit;
+ goto free_sec_ports;
}
memset(&ale_params, 0, sizeof(ale_params));
@@ -2927,7 +2943,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
if (!gbe_dev->ale) {
dev_err(gbe_dev->dev, "error initializing ale engine\n");
ret = -ENODEV;
- goto quit;
+ goto free_sec_ports;
} else {
dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
}
@@ -2943,14 +2959,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
*inst_priv = gbe_dev;
return 0;
-quit:
- if (gbe_dev->hw_stats)
- devm_kfree(dev, gbe_dev->hw_stats);
- cpsw_ale_destroy(gbe_dev->ale);
- if (gbe_dev->ss_regs)
- devm_iounmap(dev, gbe_dev->ss_regs);
- of_node_put(interfaces);
- devm_kfree(dev, gbe_dev);
+free_sec_ports:
+ free_secondary_ports(gbe_dev);
return ret;
}
@@ -3023,12 +3033,9 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
free_secondary_ports(gbe_dev);
if (!list_empty(&gbe_dev->gbe_intf_head))
- dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n");
+ dev_alert(gbe_dev->dev,
+ "unreleased ethss interfaces present\n");
- devm_kfree(gbe_dev->dev, gbe_dev->hw_stats);
- devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs);
- memset(gbe_dev, 0x00, sizeof(*gbe_dev));
- devm_kfree(gbe_dev->dev, gbe_dev);
return 0;
}
diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c
index dbeb14266e2f..5d8419f658d0 100644
--- a/drivers/net/ethernet/ti/netcp_sgmii.c
+++ b/drivers/net/ethernet/ti/netcp_sgmii.c
@@ -18,6 +18,9 @@
#include "netcp.h"
+#define SGMII_SRESET_RESET BIT(0)
+#define SGMII_SRESET_RTRESET BIT(1)
+
#define SGMII_REG_STATUS_LOCK BIT(4)
#define SGMII_REG_STATUS_LINK BIT(0)
#define SGMII_REG_STATUS_AUTONEG BIT(2)
@@ -51,12 +54,35 @@ static void sgmii_write_reg_bit(void __iomem *base, int reg, u32 val)
int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port)
{
/* Soft reset */
- sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port), 0x1);
- while (sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) != 0x0)
+ sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port),
+ SGMII_SRESET_RESET);
+
+ while ((sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) &
+ SGMII_SRESET_RESET) != 0x0)
;
+
return 0;
}
+/* port is 0 based */
+bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set)
+{
+ u32 reg;
+ bool oldval;
+
+ /* Initiate a soft reset */
+ reg = sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port));
+ oldval = (reg & SGMII_SRESET_RTRESET) != 0x0;
+ if (set)
+ reg |= SGMII_SRESET_RTRESET;
+ else
+ reg &= ~SGMII_SRESET_RTRESET;
+ sgmii_write_reg(sgmii_ofs, SGMII_SRESET_REG(port), reg);
+ wmb();
+
+ return oldval;
+}
+
int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port)
{
u32 status = 0, link = 0;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 4208dd7ef101..d95f9aae95e7 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1530,9 +1530,9 @@ static int axienet_probe(struct platform_device *pdev)
/* Map device registers */
ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
- if (!lp->regs) {
+ if (IS_ERR(lp->regs)) {
dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(lp->regs);
goto free_netdev;
}
@@ -1599,9 +1599,9 @@ static int axienet_probe(struct platform_device *pdev)
goto free_netdev;
}
lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
- if (!lp->dma_regs) {
+ if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(lp->dma_regs);
goto free_netdev;
}
lp->rx_irq = irq_of_parse_and_map(np, 1);
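The axienet fix above reflects that devm_ioremap_resource() reports failure via ERR_PTR(), not NULL, so IS_ERR()/PTR_ERR() is the right check. A minimal sketch with a hypothetical helper:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Hypothetical probe fragment mapping memory resource 0. */
static int my_map_regs(struct platform_device *pdev, void __iomem **regs)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void __iomem *base = devm_ioremap_resource(&pdev->dev, res);

	if (IS_ERR(base))		/* failure is ERR_PTR(), never NULL */
		return PTR_ERR(base);	/* e.g. -EBUSY, -EINVAL, -ENOMEM */

	*regs = base;
	return 0;
}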
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 7856b6ccf5c5..d95a50ae996d 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -482,6 +482,7 @@ static void bpq_setup(struct net_device *dev)
memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
dev->flags = 0;
+ dev->features = NETIF_F_LLTX; /* Allow recursion */
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
dev->header_ops = &ax25_header_ops;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 2ffbf13471d0..216bfd350169 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -728,11 +728,12 @@ static int mkiss_open(struct tty_struct *tty)
dev->type = ARPHRD_AX25;
/* Perform the low-level AX25 initialization. */
- if ((err = ax_open(ax->dev))) {
+ err = ax_open(ax->dev);
+ if (err)
goto out_free_netdev;
- }
- if (register_netdev(dev))
+ err = register_netdev(dev);
+ if (err)
goto out_free_buffers;
/* after register_netdev() - because else printk smashes the kernel */
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 953a97492fab..9542b7bac61a 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -67,8 +67,6 @@ struct ipvl_dev {
struct ipvl_port *port;
struct net_device *phy_dev;
struct list_head addrs;
- int ipv4cnt;
- int ipv6cnt;
struct ipvl_pcpu_stats __percpu *pcpu_stats;
DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
netdev_features_t sfeatures;
@@ -106,6 +104,11 @@ static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
return rcu_dereference(d->rx_handler_data);
}
+static inline struct ipvl_port *ipvlan_port_get_rcu_bh(const struct net_device *d)
+{
+ return rcu_dereference_bh(d->rx_handler_data);
+}
+
static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d)
{
return rtnl_dereference(d->rx_handler_data);
@@ -124,5 +127,5 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
const void *iaddr, bool is_v6);
-void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync);
+void ipvlan_ht_addr_del(struct ipvl_addr *addr);
#endif /* __IPVLAN_H */
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 8afbedad620d..207f62e8de9a 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -85,11 +85,9 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}
-void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync)
+void ipvlan_ht_addr_del(struct ipvl_addr *addr)
{
hlist_del_init_rcu(&addr->hlnode);
- if (sync)
- synchronize_rcu();
}
struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
@@ -531,7 +529,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
- struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev);
+ struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);
if (!port)
goto out;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 1acc283160d9..20b58bdecf75 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -153,10 +153,9 @@ static int ipvlan_open(struct net_device *dev)
else
dev->flags &= ~IFF_NOARP;
- if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
- list_for_each_entry(addr, &ipvlan->addrs, anode)
- ipvlan_ht_addr_add(ipvlan, addr);
- }
+ list_for_each_entry(addr, &ipvlan->addrs, anode)
+ ipvlan_ht_addr_add(ipvlan, addr);
+
return dev_uc_add(phy_dev, phy_dev->dev_addr);
}
@@ -171,10 +170,9 @@ static int ipvlan_stop(struct net_device *dev)
dev_uc_del(phy_dev, phy_dev->dev_addr);
- if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
- list_for_each_entry(addr, &ipvlan->addrs, anode)
- ipvlan_ht_addr_del(addr, !dev->dismantle);
- }
+ list_for_each_entry(addr, &ipvlan->addrs, anode)
+ ipvlan_ht_addr_del(addr);
+
return 0;
}
@@ -471,8 +469,6 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
ipvlan->port = port;
ipvlan->sfeatures = IPVLAN_FEATURES;
INIT_LIST_HEAD(&ipvlan->addrs);
- ipvlan->ipv4cnt = 0;
- ipvlan->ipv6cnt = 0;
/* TODO Probably put random address here to be presented to the
* world but keep using the physical-dev address for the outgoing
@@ -508,12 +504,12 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_addr *addr, *next;
- if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
- list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
- ipvlan_ht_addr_del(addr, !dev->dismantle);
- list_del(&addr->anode);
- }
+ list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
+ ipvlan_ht_addr_del(addr);
+ list_del(&addr->anode);
+ kfree_rcu(addr, rcu);
}
+
list_del_rcu(&ipvlan->pnode);
unregister_netdevice_queue(dev, head);
netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
@@ -627,7 +623,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
addr->atype = IPVL_IPV6;
list_add_tail(&addr->anode, &ipvlan->addrs);
- ipvlan->ipv6cnt++;
+
/* If the interface is not up, the address will be added to the hash
* list by ipvlan_open.
*/
@@ -645,10 +641,8 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
if (!addr)
return;
- ipvlan_ht_addr_del(addr, true);
+ ipvlan_ht_addr_del(addr);
list_del(&addr->anode);
- ipvlan->ipv6cnt--;
- WARN_ON(ipvlan->ipv6cnt < 0);
kfree_rcu(addr, rcu);
return;
@@ -661,6 +655,10 @@ static int ipvlan_addr6_event(struct notifier_block *unused,
struct net_device *dev = (struct net_device *)if6->idev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
+ /* FIXME IPv6 autoconf calls us from bh without RTNL */
+ if (in_softirq())
+ return NOTIFY_DONE;
+
if (!netif_is_ipvlan(dev))
return NOTIFY_DONE;
@@ -699,7 +697,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
addr->atype = IPVL_IPV4;
list_add_tail(&addr->anode, &ipvlan->addrs);
- ipvlan->ipv4cnt++;
+
/* If the interface is not up, the address will be added to the hash
* list by ipvlan_open.
*/
@@ -717,10 +715,8 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
if (!addr)
return;
- ipvlan_ht_addr_del(addr, true);
+ ipvlan_ht_addr_del(addr);
list_del(&addr->anode);
- ipvlan->ipv4cnt--;
- WARN_ON(ipvlan->ipv4cnt < 0);
kfree_rcu(addr, rcu);
return;
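The ipvlan cleanup above drops the per-address synchronize_rcu() in favour of hlist_del_init_rcu() plus kfree_rcu(), so the free is deferred past a grace period without blocking the caller. A small sketch of that removal pattern with a hypothetical struct:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical address entry living on an RCU-protected hash list. */
struct my_addr {
	struct hlist_node hlnode;
	struct rcu_head rcu;
};

static void my_addr_remove(struct my_addr *addr)
{
	/* Unlink; concurrent RCU readers may still see the entry. */
	hlist_del_init_rcu(&addr->hlnode);
	/* Defer the free past a grace period without blocking here. */
	kfree_rcu(addr, rcu);
}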
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index f8370808a018..edd77342773a 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -719,6 +719,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
struct virtio_net_hdr vnet_hdr = { 0 };
int vnet_hdr_len = 0;
int copylen = 0;
+ int depth;
bool zerocopy = false;
size_t linear;
ssize_t n;
@@ -804,6 +805,12 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
skb_probe_transport_header(skb, ETH_HLEN);
+ /* Move network header to the right position for VLAN tagged packets */
+ if ((skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD)) &&
+ __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
+ skb_set_network_header(skb, depth);
+
rcu_read_lock();
vlan = rcu_dereference(q->vlan);
/* copy skb_ubuf_info for callback when skb has no error */
@@ -1355,6 +1362,7 @@ static void macvtap_exit(void)
class_unregister(macvtap_class);
cdev_del(&macvtap_cdev);
unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
+ idr_destroy(&minor_idr);
}
module_exit(macvtap_exit);
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 3cc316cb7e6b..d8757bf9ad75 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -102,6 +102,12 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
+ if (len < 0) {
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_length_errors++;
+ goto enqueue_again;
+ }
+
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, ndev);
skb->ip_summed = CHECKSUM_NONE;
@@ -121,6 +127,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
return;
}
+enqueue_again:
rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
if (rc) {
dev_kfree_skb(skb);
@@ -184,7 +191,7 @@ static int ntb_netdev_open(struct net_device *ndev)
rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
ndev->mtu + ETH_HLEN);
- if (rc == -EINVAL) {
+ if (rc) {
dev_kfree_skb(skb);
goto err;
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index cf18940f4e84..cb86d7a01542 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -191,7 +191,7 @@ config MDIO_BUS_MUX_GPIO
config MDIO_BUS_MUX_MMIOREG
tristate "Support for MMIO device-controlled MDIO bus multiplexers"
- depends on OF_MDIO
+ depends on OF_MDIO && HAS_IOMEM
select MDIO_BUS_MUX
help
This module provides a driver for MDIO bus multiplexers that
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index c7a12e2e07b7..8a3bf5469892 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -164,7 +164,7 @@ static int dp83867_config_init(struct phy_device *phydev)
return ret;
}
- if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) ||
+ if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
(phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) {
val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL,
DP83867_DEVADDR, phydev->addr);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 095ef3fe369a..46a14cbb0215 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -421,6 +421,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
{
struct phy_device *phydev = to_phy_device(dev);
struct phy_driver *phydrv = to_phy_driver(drv);
+ const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
+ int i;
if (of_driver_match_device(dev, drv))
return 1;
@@ -428,8 +430,21 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
if (phydrv->match_phy_device)
return phydrv->match_phy_device(phydev);
- return (phydrv->phy_id & phydrv->phy_id_mask) ==
- (phydev->phy_id & phydrv->phy_id_mask);
+ if (phydev->is_c45) {
+ for (i = 1; i < num_ids; i++) {
+ if (!(phydev->c45_ids.devices_in_package & (1 << i)))
+ continue;
+
+ if ((phydrv->phy_id & phydrv->phy_id_mask) ==
+ (phydev->c45_ids.device_ids[i] &
+ phydrv->phy_id_mask))
+ return 1;
+ }
+ return 0;
+ } else {
+ return (phydrv->phy_id & phydrv->phy_id_mask) ==
+ (phydev->phy_id & phydrv->phy_id_mask);
+ }
}
#ifdef CONFIG_PM
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index b2197b506acb..1e1fbb049ec6 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -811,6 +811,7 @@ void phy_state_machine(struct work_struct *work)
bool needs_aneg = false, do_suspend = false;
enum phy_state old_state;
int err = 0;
+ int old_link;
mutex_lock(&phydev->lock);
@@ -896,11 +897,18 @@ void phy_state_machine(struct work_struct *work)
phydev->adjust_link(phydev->attached_dev);
break;
case PHY_RUNNING:
- /* Only register a CHANGE if we are
- * polling or ignoring interrupts
+ /* Only register a CHANGE if we are polling or ignoring
+ * interrupts and link changed since latest checking.
*/
- if (!phy_interrupt_is_valid(phydev))
- phydev->state = PHY_CHANGELINK;
+ if (!phy_interrupt_is_valid(phydev)) {
+ old_link = phydev->link;
+ err = phy_read_status(phydev);
+ if (err)
+ break;
+
+ if (old_link != phydev->link)
+ phydev->state = PHY_CHANGELINK;
+ }
break;
case PHY_CHANGELINK:
err = phy_read_status(phydev);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index c0f6479e19d4..70b08958763a 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -91,19 +91,18 @@ static int lan911x_config_init(struct phy_device *phydev)
}
/*
- * The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each
- * other in order to set the ENERGYON bit and exit EDPD mode. If a link partner
- * does send the pulses within this interval, the PHY will remained powered
- * down.
- *
- * This workaround will manually toggle the PHY on/off upon calls to read_status
- * in order to generate link test pulses if the link is down. If a link partner
- * is present, it will respond to the pulses, which will cause the ENERGYON bit
- * to be set and will cause the EDPD mode to be exited.
+ * The LAN87xx occasionally fails to set the ENERGYON bit when an Ethernet cable
+ * is plugged in while the PHY is in Energy Detect Power-Down (EDPD) mode, which
+ * makes cable-plug detection unreliable.
+ * This workaround disables EDPD and waits for a response to the link test
+ * pulses to detect whether a cable is actually present.
+ * EDPD is re-enabled at the end of the procedure to save approximately 220 mW
+ * of power while the cable is unplugged.
*/
static int lan87xx_read_status(struct phy_device *phydev)
{
int err = genphy_read_status(phydev);
+ int i;
if (!phydev->link) {
/* Disable EDPD to wake up PHY */
@@ -116,8 +115,16 @@ static int lan87xx_read_status(struct phy_device *phydev)
if (rc < 0)
return rc;
- /* Sleep 64 ms to allow ~5 link test pulses to be sent */
- msleep(64);
+ /* Wait max 640 ms to detect energy */
+ for (i = 0; i < 64; i++) {
+ /* Sleep to allow link test pulses to be sent */
+ msleep(10);
+ rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+ if (rc < 0)
+ return rc;
+ if (rc & MII_LAN83C185_ENERGYON)
+ break;
+ }
/* Re-enable EDPD */
rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
@@ -191,7 +198,7 @@ static struct phy_driver smsc_phy_driver[] = {
/* basic functions */
.config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
+ .read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 9d15566521a7..fa8f5046afe9 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -269,9 +269,9 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
-static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+ struct file *file, int *retp);
static void init_ppp_file(struct ppp_file *pf, int kind);
-static void ppp_shutdown_interface(struct ppp *ppp);
static void ppp_destroy_interface(struct ppp *ppp);
static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
@@ -392,8 +392,10 @@ static int ppp_release(struct inode *unused, struct file *file)
file->private_data = NULL;
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
+ rtnl_lock();
if (file == ppp->owner)
- ppp_shutdown_interface(ppp);
+ unregister_netdevice(ppp->dev);
+ rtnl_unlock();
}
if (atomic_dec_and_test(&pf->refcnt)) {
switch (pf->kind) {
@@ -593,8 +595,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
mutex_lock(&ppp_mutex);
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
+ rtnl_lock();
if (file == ppp->owner)
- ppp_shutdown_interface(ppp);
+ unregister_netdevice(ppp->dev);
+ rtnl_unlock();
}
if (atomic_long_read(&file->f_count) < 2) {
ppp_release(NULL, file);
@@ -838,11 +842,10 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
/* Create a new ppp unit */
if (get_user(unit, p))
break;
- ppp = ppp_create_interface(net, unit, &err);
+ ppp = ppp_create_interface(net, unit, file, &err);
if (!ppp)
break;
file->private_data = &ppp->file;
- ppp->owner = file;
err = -EFAULT;
if (put_user(ppp->file.index, p))
break;
@@ -916,6 +919,16 @@ static __net_init int ppp_init_net(struct net *net)
static __net_exit void ppp_exit_net(struct net *net)
{
struct ppp_net *pn = net_generic(net, ppp_net_id);
+ struct ppp *ppp;
+ LIST_HEAD(list);
+ int id;
+
+ rtnl_lock();
+ idr_for_each_entry(&pn->units_idr, ppp, id)
+ unregister_netdevice_queue(ppp->dev, &list);
+
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
idr_destroy(&pn->units_idr);
}
@@ -1088,8 +1101,28 @@ static int ppp_dev_init(struct net_device *dev)
return 0;
}
+static void ppp_dev_uninit(struct net_device *dev)
+{
+ struct ppp *ppp = netdev_priv(dev);
+ struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
+
+ ppp_lock(ppp);
+ ppp->closing = 1;
+ ppp_unlock(ppp);
+
+ mutex_lock(&pn->all_ppp_mutex);
+ unit_put(&pn->units_idr, ppp->file.index);
+ mutex_unlock(&pn->all_ppp_mutex);
+
+ ppp->owner = NULL;
+
+ ppp->file.dead = 1;
+ wake_up_interruptible(&ppp->file.rwait);
+}
+
static const struct net_device_ops ppp_netdev_ops = {
.ndo_init = ppp_dev_init,
+ .ndo_uninit = ppp_dev_uninit,
.ndo_start_xmit = ppp_start_xmit,
.ndo_do_ioctl = ppp_net_ioctl,
.ndo_get_stats64 = ppp_get_stats64,
@@ -2667,8 +2700,8 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
* or if there is already a unit with the requested number.
* unit == -1 means allocate a new number.
*/
-static struct ppp *
-ppp_create_interface(struct net *net, int unit, int *retp)
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+ struct file *file, int *retp)
{
struct ppp *ppp;
struct ppp_net *pn;
@@ -2688,6 +2721,7 @@ ppp_create_interface(struct net *net, int unit, int *retp)
ppp->mru = PPP_MRU;
init_ppp_file(&ppp->file, INTERFACE);
ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
+ ppp->owner = file;
for (i = 0; i < NUM_NP; ++i)
ppp->npmode[i] = NPMODE_PASS;
INIT_LIST_HEAD(&ppp->channels);
@@ -2776,34 +2810,6 @@ init_ppp_file(struct ppp_file *pf, int kind)
}
/*
- * Take down a ppp interface unit - called when the owning file
- * (the one that created the unit) is closed or detached.
- */
-static void ppp_shutdown_interface(struct ppp *ppp)
-{
- struct ppp_net *pn;
-
- pn = ppp_pernet(ppp->ppp_net);
- mutex_lock(&pn->all_ppp_mutex);
-
- /* This will call dev_close() for us. */
- ppp_lock(ppp);
- if (!ppp->closing) {
- ppp->closing = 1;
- ppp_unlock(ppp);
- unregister_netdev(ppp->dev);
- unit_put(&pn->units_idr, ppp->file.index);
- } else
- ppp_unlock(ppp);
-
- ppp->file.dead = 1;
- ppp->owner = NULL;
- wake_up_interruptible(&ppp->file.rwait);
-
- mutex_unlock(&pn->all_ppp_mutex);
-}
-
-/*
* Free the memory used by a ppp unit. This is only called once
* there are no channels connected to the unit and no file structs
* that reference the unit.
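The ppp_generic changes above fold the old ppp_shutdown_interface() logic into an ndo_uninit hook so that every unregister path (owner close, ioctl, netns exit) shares one teardown routine. A minimal sketch of that init/uninit-ops pattern follows, with hypothetical types standing in for the netdev API.

#include <stdio.h>

/* Hypothetical device-ops pattern: teardown lives in one uninit hook so
 * every unregister path reuses it instead of duplicating shutdown logic. */
struct fake_dev;

struct fake_ops {
	int  (*init)(struct fake_dev *dev);
	void (*uninit)(struct fake_dev *dev);
};

struct fake_dev {
	const struct fake_ops *ops;
	int registered;
	int dead;
};

static int fake_init(struct fake_dev *dev)    { dev->registered = 1; return 0; }
static void fake_uninit(struct fake_dev *dev) { dev->dead = 1; }

static const struct fake_ops ops = { .init = fake_init, .uninit = fake_uninit };

/* Single unregister helper; callers no longer carry their own teardown. */
static void fake_unregister(struct fake_dev *dev)
{
	if (dev->registered) {
		dev->ops->uninit(dev);
		dev->registered = 0;
	}
}

int main(void)
{
	struct fake_dev dev = { .ops = &ops };

	dev.ops->init(&dev);
	fake_unregister(&dev);		/* close path */
	fake_unregister(&dev);		/* second call is a no-op */
	printf("dead=%d registered=%d\n", dev.dead, dev.registered);
	return 0;
}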
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 4545e78840b0..35a2bffe848a 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -523,6 +523,7 @@ static const struct driver_info wwan_info = {
#define REALTEK_VENDOR_ID 0x0bda
#define SAMSUNG_VENDOR_ID 0x04e8
#define LENOVO_VENDOR_ID 0x17ef
+#define NVIDIA_VENDOR_ID 0x0955
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@@ -710,6 +711,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index e4b7a47a825c..efc18e05af0a 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -158,7 +158,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
goto err;
- ret = cdc_ncm_bind_common(dev, intf, data_altsetting);
+ ret = cdc_ncm_bind_common(dev, intf, data_altsetting, 0);
if (ret)
goto err;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 8067b8fbb0ee..db40175b1a0b 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -6,7 +6,7 @@
* Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
*
* USB Host Driver for Network Control Model (NCM)
- * http://www.usb.org/developers/devclass_docs/NCM10.zip
+ * http://www.usb.org/developers/docs/devclass_docs/NCM10_012011.zip
*
* The NCM encoding, decoding and initialization logic
* derives from FreeBSD 8.x. if_cdce.c and if_cdcereg.h
@@ -684,10 +684,12 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
ctx->tx_curr_skb = NULL;
}
+ kfree(ctx->delayed_ndp16);
+
kfree(ctx);
}
-int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting)
+int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags)
{
const struct usb_cdc_union_desc *union_desc = NULL;
struct cdc_ncm_ctx *ctx;
@@ -855,6 +857,17 @@ advance:
/* finish setting up the device specific data */
cdc_ncm_setup(dev);
+ /* Device-specific flags */
+ ctx->drvflags = drvflags;
+
+ /* Allocate the delayed NDP if needed. */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
+ ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
+ if (!ctx->delayed_ndp16)
+ goto error2;
+ dev_info(&intf->dev, "NDP will be placed at end of frame for this device.");
+ }
+
/* override ethtool_ops */
dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
@@ -954,8 +967,11 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM)
return -ENODEV;
- /* The NCM data altsetting is fixed */
- ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM);
+ /* The NCM data altsetting is fixed, so we hard-code it here.
+ * Additionally, generic NCM devices are assumed to accept an arbitrarily
+ * placed NDP.
+ */
+ ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0);
/*
* We should get an event when network connection is "connected" or
@@ -986,6 +1002,14 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data;
size_t ndpoffset = le16_to_cpu(nth16->wNdpIndex);
+ /* If the NDP should be moved to the end of the NCM package, we can't follow
+ * the NTH16 header as we normally would: the NDP hasn't been written to the
+ * SKB yet, so the wNdpIndex field in the header doesn't point at it until the
+ * frame is finalized.
+ */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
+ if (ctx->delayed_ndp16->dwSignature == sign)
+ return ctx->delayed_ndp16;
+
/* follow the chain of NDPs, looking for a match */
while (ndpoffset) {
ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
@@ -995,7 +1019,8 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
}
/* align new NDP */
- cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
+ if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
+ cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
/* verify that there is room for the NDP and the datagram (reserve) */
if ((ctx->tx_max - skb->len - reserve) < ctx->max_ndp_size)
@@ -1008,7 +1033,11 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
nth16->wNdpIndex = cpu_to_le16(skb->len);
/* push a new empty NDP */
- ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size);
+ if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
+ ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size);
+ else
+ ndp16 = ctx->delayed_ndp16;
+
ndp16->dwSignature = sign;
ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16));
return ndp16;
@@ -1023,6 +1052,15 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
struct sk_buff *skb_out;
u16 n = 0, index, ndplen;
u8 ready2send = 0;
+ u32 delayed_ndp_size;
+
+ /* When the NDP is written in cdc_ncm_ndp(), skb_out->len already accounts
+ * for it. Otherwise (delayed NDP), its size must be reserved here.
+ */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
+ delayed_ndp_size = ctx->max_ndp_size;
+ else
+ delayed_ndp_size = 0;
/* if there is a remaining skb, it gets priority */
if (skb != NULL) {
@@ -1077,7 +1115,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_max);
/* check if we had enough room left for both NDP and frame */
- if (!ndp16 || skb_out->len + skb->len > ctx->tx_max) {
+ if (!ndp16 || skb_out->len + skb->len + delayed_ndp_size > ctx->tx_max) {
if (n == 0) {
/* won't fit, MTU problem? */
dev_kfree_skb_any(skb);
@@ -1150,6 +1188,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
/* variables will be reset at next call */
}
+ /* If requested, put NDP at end of frame. */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
+ nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+ cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_max);
+ nth16->wNdpIndex = cpu_to_le16(skb_out->len);
+ memcpy(skb_put(skb_out, ctx->max_ndp_size), ctx->delayed_ndp16, ctx->max_ndp_size);
+
+ /* Zero out delayed NDP - signature checking will naturally fail. */
+ ndp16 = memset(ctx->delayed_ndp16, 0, ctx->max_ndp_size);
+ }
+
/* If collected data size is less or equal ctx->min_tx_pkt
* bytes, we send buffers as it is. If we get more data, it
* would be more efficient for USB HS mobile device with DMA
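The cdc_ncm changes above reserve room for a delayed NDP, fill datagrams first, and only append the NDP (and patch wNdpIndex) when the frame is finalized. A simplified standalone sketch of that "directory at the end of the frame" layout follows, using a plain byte buffer and hypothetical field names instead of the real NCM structures.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical frame layout: [header][datagrams...][directory at end]. */
#define FRAME_MAX	64
#define DIR_SIZE	8

struct frame {
	uint8_t  buf[FRAME_MAX];
	uint16_t len;		/* bytes used so far */
	uint16_t dir_index;	/* offset of the trailing directory */
};

/* Append payload only if it still leaves room for the delayed directory. */
static int frame_add(struct frame *f, const void *data, uint16_t n)
{
	if (f->len + n + DIR_SIZE > FRAME_MAX)
		return -1;			/* would squeeze out the directory */
	memcpy(f->buf + f->len, data, n);
	f->len += n;
	return 0;
}

/* Place the directory last and record its offset in the "header". */
static void frame_finish(struct frame *f, const uint8_t dir[DIR_SIZE])
{
	f->dir_index = f->len;
	memcpy(f->buf + f->len, dir, DIR_SIZE);
	f->len += DIR_SIZE;
}

int main(void)
{
	struct frame f = { .len = 4 };		/* pretend 4-byte header */
	uint8_t dir[DIR_SIZE] = { 0 };

	frame_add(&f, "abcd", 4);
	frame_add(&f, "efgh", 4);
	frame_finish(&f, dir);
	printf("len=%d dir_index=%d\n", f.len, f.dir_index);
	return 0;
}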
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 735f7dadb9a0..2680a65cd5e4 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -73,11 +73,14 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
struct usb_driver *subdriver = ERR_PTR(-ENODEV);
int ret = -ENODEV;
struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
+ int drvflags = 0;
/* altsetting should always be 1 for NCM devices - so we hard-coded
- * it here
+ * it here. Some Huawei devices need the NDP part of the NCM package to
+ * be at the end of the frame.
*/
- ret = cdc_ncm_bind_common(usbnet_dev, intf, 1);
+ drvflags |= CDC_NCM_FLAG_NDP_TO_END;
+ ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags);
if (ret)
goto err;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index f603f362504b..64a60afbe50c 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -757,6 +757,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
{QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
{QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
+ {QMI_FIXED_INTF(0x1199, 0x9041, 10)}, /* Sierra Wireless MC7305/MC7355 */
{QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
{QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
@@ -784,6 +785,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
/* 4. Gobi 1000 devices */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index aafa1a1898e4..ad8cbc6c9ee7 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -27,7 +27,7 @@
#include <linux/usb/cdc.h>
/* Version Information */
-#define DRIVER_VERSION "v1.08.0 (2015/01/13)"
+#define DRIVER_VERSION "v1.08.1 (2015/07/28)"
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
#define MODULENAME "r8152"
@@ -494,6 +494,7 @@ enum rtl8152_flags {
#define VENDOR_ID_REALTEK 0x0bda
#define VENDOR_ID_SAMSUNG 0x04e8
#define VENDOR_ID_LENOVO 0x17ef
+#define VENDOR_ID_NVIDIA 0x0955
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
@@ -1901,11 +1902,10 @@ static void rtl_drop_queued_tx(struct r8152 *tp)
static void rtl8152_tx_timeout(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
- int i;
netif_warn(tp, tx_err, netdev, "Tx timeout\n");
- for (i = 0; i < RTL8152_MAX_TX; i++)
- usb_unlink_urb(tp->tx_info[i].urb);
+
+ usb_queue_reset_device(tp->intf);
}
static void rtl8152_set_rx_mode(struct net_device *netdev)
@@ -2074,7 +2074,6 @@ static int rtl_start_rx(struct r8152 *tp)
{
int i, ret = 0;
- napi_disable(&tp->napi);
INIT_LIST_HEAD(&tp->rx_done);
for (i = 0; i < RTL8152_MAX_RX; i++) {
INIT_LIST_HEAD(&tp->rx_info[i].list);
@@ -2082,7 +2081,6 @@ static int rtl_start_rx(struct r8152 *tp)
if (ret)
break;
}
- napi_enable(&tp->napi);
if (ret && ++i < RTL8152_MAX_RX) {
struct list_head rx_queue;
@@ -2165,6 +2163,7 @@ static int rtl8153_enable(struct r8152 *tp)
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return -ENODEV;
+ usb_disable_lpm(tp->udev);
set_tx_qlen(tp);
rtl_set_eee_plus(tp);
r8153_set_rx_early_timeout(tp);
@@ -2336,11 +2335,61 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
device_set_wakeup_enable(&tp->udev->dev, false);
}
+static void r8153_u1u2en(struct r8152 *tp, bool enable)
+{
+ u8 u1u2[8];
+
+ if (enable)
+ memset(u1u2, 0xff, sizeof(u1u2));
+ else
+ memset(u1u2, 0x00, sizeof(u1u2));
+
+ usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
+}
+
+static void r8153_u2p3en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
+ if (enable && tp->version != RTL_VER_03 && tp->version != RTL_VER_04)
+ ocp_data |= U2P3_ENABLE;
+ else
+ ocp_data &= ~U2P3_ENABLE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
+}
+
+static void r8153_power_cut_en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
+ if (enable)
+ ocp_data |= PWR_EN | PHASE2_EN;
+ else
+ ocp_data &= ~(PWR_EN | PHASE2_EN);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+ ocp_data &= ~PCUT_STATUS;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
+}
+
+static bool rtl_can_wakeup(struct r8152 *tp)
+{
+ struct usb_device *udev = tp->udev;
+
+ return (udev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_WAKEUP);
+}
+
static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
{
if (enable) {
u32 ocp_data;
+ r8153_u1u2en(tp, false);
+ r8153_u2p3en(tp, false);
+
__rtl_set_wol(tp, WAKE_ANY);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@@ -2352,6 +2401,8 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
} else {
__rtl_set_wol(tp, tp->saved_wolopts);
+ r8153_u2p3en(tp, true);
+ r8153_u1u2en(tp, true);
}
}
@@ -2598,46 +2649,6 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
set_bit(PHY_RESET, &tp->flags);
}
-static void r8153_u1u2en(struct r8152 *tp, bool enable)
-{
- u8 u1u2[8];
-
- if (enable)
- memset(u1u2, 0xff, sizeof(u1u2));
- else
- memset(u1u2, 0x00, sizeof(u1u2));
-
- usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
-}
-
-static void r8153_u2p3en(struct r8152 *tp, bool enable)
-{
- u32 ocp_data;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
- if (enable)
- ocp_data |= U2P3_ENABLE;
- else
- ocp_data &= ~U2P3_ENABLE;
- ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
-}
-
-static void r8153_power_cut_en(struct r8152 *tp, bool enable)
-{
- u32 ocp_data;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
- if (enable)
- ocp_data |= PWR_EN | PHASE2_EN;
- else
- ocp_data &= ~(PWR_EN | PHASE2_EN);
- ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
- ocp_data &= ~PCUT_STATUS;
- ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
-}
-
static void r8153_first_init(struct r8152 *tp)
{
u32 ocp_data;
@@ -2780,6 +2791,7 @@ static void rtl8153_disable(struct r8152 *tp)
r8153_disable_aldps(tp);
rtl_disable(tp);
r8153_enable_aldps(tp);
+ usb_enable_lpm(tp->udev);
}
static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
@@ -2900,9 +2912,13 @@ static void rtl8153_up(struct r8152 *tp)
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
+ r8153_u1u2en(tp, false);
r8153_disable_aldps(tp);
r8153_first_init(tp);
r8153_enable_aldps(tp);
+ r8153_u2p3en(tp, true);
+ r8153_u1u2en(tp, true);
+ usb_enable_lpm(tp->udev);
}
static void rtl8153_down(struct r8152 *tp)
@@ -2913,6 +2929,7 @@ static void rtl8153_down(struct r8152 *tp)
}
r8153_u1u2en(tp, false);
+ r8153_u2p3en(tp, false);
r8153_power_cut_en(tp, false);
r8153_disable_aldps(tp);
r8153_enter_oob(tp);
@@ -2931,8 +2948,10 @@ static void set_carrier(struct r8152 *tp)
if (!netif_carrier_ok(netdev)) {
tp->rtl_ops.enable(tp);
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+ napi_disable(&tp->napi);
netif_carrier_on(netdev);
rtl_start_rx(tp);
+ napi_enable(&tp->napi);
}
} else {
if (netif_carrier_ok(netdev)) {
@@ -3251,6 +3270,7 @@ static void r8153_init(struct r8152 *tp)
msleep(20);
}
+ usb_disable_lpm(tp->udev);
r8153_u2p3en(tp, false);
if (tp->version == RTL_VER_04) {
@@ -3318,6 +3338,59 @@ static void r8153_init(struct r8152 *tp)
r8153_enable_aldps(tp);
r8152b_enable_fc(tp);
rtl_tally_reset(tp);
+ r8153_u2p3en(tp, true);
+}
+
+static int rtl8152_pre_reset(struct usb_interface *intf)
+{
+ struct r8152 *tp = usb_get_intfdata(intf);
+ struct net_device *netdev;
+
+ if (!tp)
+ return 0;
+
+ netdev = tp->netdev;
+ if (!netif_running(netdev))
+ return 0;
+
+ napi_disable(&tp->napi);
+ clear_bit(WORK_ENABLE, &tp->flags);
+ usb_kill_urb(tp->intr_urb);
+ cancel_delayed_work_sync(&tp->schedule);
+ if (netif_carrier_ok(netdev)) {
+ netif_stop_queue(netdev);
+ mutex_lock(&tp->control);
+ tp->rtl_ops.disable(tp);
+ mutex_unlock(&tp->control);
+ }
+
+ return 0;
+}
+
+static int rtl8152_post_reset(struct usb_interface *intf)
+{
+ struct r8152 *tp = usb_get_intfdata(intf);
+ struct net_device *netdev;
+
+ if (!tp)
+ return 0;
+
+ netdev = tp->netdev;
+ if (!netif_running(netdev))
+ return 0;
+
+ set_bit(WORK_ENABLE, &tp->flags);
+ if (netif_carrier_ok(netdev)) {
+ mutex_lock(&tp->control);
+ tp->rtl_ops.enable(tp);
+ rtl8152_set_rx_mode(netdev);
+ mutex_unlock(&tp->control);
+ netif_wake_queue(netdev);
+ }
+
+ napi_enable(&tp->napi);
+
+ return 0;
}
static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
@@ -3373,9 +3446,11 @@ static int rtl8152_resume(struct usb_interface *intf)
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
rtl_runtime_suspend_enable(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ napi_disable(&tp->napi);
set_bit(WORK_ENABLE, &tp->flags);
if (netif_carrier_ok(tp->netdev))
rtl_start_rx(tp);
+ napi_enable(&tp->napi);
} else {
tp->rtl_ops.up(tp);
rtl8152_set_speed(tp, AUTONEG_ENABLE,
@@ -3402,12 +3477,15 @@ static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (usb_autopm_get_interface(tp->intf) < 0)
return;
- mutex_lock(&tp->control);
-
- wol->supported = WAKE_ANY;
- wol->wolopts = __rtl_get_wol(tp);
-
- mutex_unlock(&tp->control);
+ if (!rtl_can_wakeup(tp)) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ } else {
+ mutex_lock(&tp->control);
+ wol->supported = WAKE_ANY;
+ wol->wolopts = __rtl_get_wol(tp);
+ mutex_unlock(&tp->control);
+ }
usb_autopm_put_interface(tp->intf);
}
@@ -3417,6 +3495,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct r8152 *tp = netdev_priv(dev);
int ret;
+ if (!rtl_can_wakeup(tp))
+ return -EOPNOTSUPP;
+
ret = usb_autopm_get_interface(tp->intf);
if (ret < 0)
goto out_set_wol;
@@ -4058,6 +4139,9 @@ static int rtl8152_probe(struct usb_interface *intf,
goto out1;
}
+ if (!rtl_can_wakeup(tp))
+ __rtl_set_wol(tp, 0);
+
tp->saved_wolopts = __rtl_get_wol(tp);
if (tp->saved_wolopts)
device_set_wakeup_enable(&udev->dev, true);
@@ -4117,6 +4201,7 @@ static struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
{}
};
@@ -4130,6 +4215,8 @@ static struct usb_driver rtl8152_driver = {
.suspend = rtl8152_suspend,
.resume = rtl8152_resume,
.reset_resume = rtl8152_resume,
+ .pre_reset = rtl8152_pre_reset,
+ .post_reset = rtl8152_post_reset,
.supports_autosuspend = 1,
.disable_hub_initiated_lpm = 1,
};
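The new rtl8152_pre_reset()/rtl8152_post_reset() callbacks quiesce the device around a USB reset: polling and the TX queue are stopped beforehand and restarted afterwards. A small standalone sketch of that quiesce/resume pairing follows, with hypothetical flags standing in for the NAPI and netif calls.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical quiesce/resume pair mirroring pre_reset/post_reset. */
struct fake_nic {
	bool running;
	bool carrier;
	bool polling;
	bool queue_stopped;
};

static int pre_reset(struct fake_nic *nic)
{
	if (!nic->running)
		return 0;
	nic->polling = false;			/* napi_disable() analogue */
	if (nic->carrier)
		nic->queue_stopped = true;	/* netif_stop_queue() analogue */
	return 0;
}

static int post_reset(struct fake_nic *nic)
{
	if (!nic->running)
		return 0;
	if (nic->carrier)
		nic->queue_stopped = false;	/* netif_wake_queue() analogue */
	nic->polling = true;			/* napi_enable() analogue */
	return 0;
}

int main(void)
{
	struct fake_nic nic = { .running = true, .carrier = true, .polling = true };

	pre_reset(&nic);
	/* ... device reset would happen here ... */
	post_reset(&nic);
	printf("polling=%d stopped=%d\n", nic.polling, nic.queue_stopped);
	return 0;
}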
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 63c7810e1545..237f8e5e493d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1756,9 +1756,9 @@ static int virtnet_probe(struct virtio_device *vdev)
/* Do we support "hardware" checksums? */
if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
/* This opens up the world of extra features. */
- dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+ dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
if (csum)
- dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+ dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
@@ -1828,7 +1828,8 @@ static int virtnet_probe(struct virtio_device *vdev)
else
vi->hdr_len = sizeof(struct virtio_net_hdr);
- if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
+ if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
+ virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
vi->any_header_sg = true;
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index da11bb5e9c7f..46f4caddccbe 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1216,7 +1216,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
static const u32 rxprod_reg[2] = {
VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
};
- u32 num_rxd = 0;
+ u32 num_pkts = 0;
bool skip_page_frags = false;
struct Vmxnet3_RxCompDesc *rcd;
struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
@@ -1235,13 +1235,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
struct Vmxnet3_RxDesc *rxd;
u32 idx, ring_idx;
struct vmxnet3_cmd_ring *ring = NULL;
- if (num_rxd >= quota) {
+ if (num_pkts >= quota) {
/* we may stop even before we see the EOP desc of
* the current pkt
*/
break;
}
- num_rxd++;
BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
idx = rcd->rxdIdx;
ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
@@ -1413,6 +1412,7 @@ not_lro:
napi_gro_receive(&rq->napi, skb);
ctx->skb = NULL;
+ num_pkts++;
}
rcd_done:
@@ -1443,7 +1443,7 @@ rcd_done:
&rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
}
- return num_rxd;
+ return num_pkts;
}
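The vmxnet3 change above counts completed packets, not RX descriptors, against the NAPI quota, since one packet can span several descriptors. A tiny standalone sketch of budgeting by end-of-packet completions follows (the descriptor layout is hypothetical).

#include <stdio.h>

/* Hypothetical completion stream: one packet may span several descriptors;
 * only end-of-packet entries count against the poll quota. */
struct desc { int eop; };

static int poll_ring(const struct desc *ring, int n, int quota)
{
	int num_pkts = 0;

	for (int i = 0; i < n; i++) {
		if (num_pkts >= quota)
			break;			/* budget is in packets, not descriptors */
		if (ring[i].eop)
			num_pkts++;		/* count only completed packets */
	}
	return num_pkts;
}

int main(void)
{
	/* Two packets spread over five descriptors. */
	struct desc ring[] = { {0}, {0}, {1}, {0}, {1} };

	printf("packets=%d\n", poll_ring(ring, 5, 64));
	return 0;
}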
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 7193b7304fdd..848ea6a399f2 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -589,7 +589,8 @@ static int cosa_probe(int base, int irq, int dma)
chan->netdev->base_addr = chan->cosa->datareg;
chan->netdev->irq = chan->cosa->irq;
chan->netdev->dma = chan->cosa->dma;
- if (register_hdlc_device(chan->netdev)) {
+ err = register_hdlc_device(chan->netdev);
+ if (err) {
netdev_warn(chan->netdev,
"register_hdlc_device() failed\n");
free_netdev(chan->netdev);
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index feacc3b994b7..2f0bd6955f33 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -1044,7 +1044,7 @@ EXPORT_SYMBOL(z8530_sync_dma_close);
* @dev: The network device to attach
* @c: The Z8530 channel to configure in sync DMA mode.
*
- * Set up a Z85x30 device for synchronous DMA tranmission. One
+ * Set up a Z85x30 device for synchronous DMA transmission. One
* ISA DMA channel must be available for this to work. The receive
* side is run in PIO mode, but then it has the bigger FIFO.
*/
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 5e15e8e10ed3..a31a6804dc34 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -279,6 +279,7 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
return;
case AR9300_DEVID_QCA956X:
ah->hw_version.macVersion = AR_SREV_VERSION_9561;
+ return;
}
val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
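The ath9k hunk adds an early return so the QCA956X case does not fall through to the generic AR_SREV register read below the switch. A minimal standalone sketch of why that early return matters follows (IDs and helper are hypothetical).

#include <stdio.h>

/* Hypothetical ID decode: without the early return, the known-ID case would
 * fall through to the generic fallback path below the switch. */
static int decode(int devid, int *out)
{
	switch (devid) {
	case 0x9561:
		*out = 9561;
		return 0;	/* the fix above adds exactly this early return */
	}
	*out = -1;		/* generic fallback path */
	return 1;
}

int main(void)
{
	int v;

	decode(0x9561, &v);
	printf("version=%d\n", v);
	return 0;
}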
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 25d1cbd34306..b2f0d245bcf3 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -3728,7 +3728,7 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
switch (phy->rev) {
case 6:
case 5:
- if (sprom->fem.ghz5.extpa_gain == 3)
+ if (sprom->fem.ghz2.extpa_gain == 3)
return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g;
/* fall through */
case 4:
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index d56064861a9c..d45dc021cda2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -438,6 +438,12 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define RX_QUEUE_MASK 255
#define RX_QUEUE_SIZE_LOG 8
+/*
+ * RX related structures and functions
+ */
+#define RX_FREE_BUFFERS 64
+#define RX_LOW_WATERMARK 8
+
/**
* struct iwl_rb_status - reserve buffer status
* host memory mapped FH registers
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 80fefe7d7b8c..3b8e85e51002 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -540,13 +540,11 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
hw_addr = (const u8 *)(mac_override +
MAC_ADDRESS_OVERRIDE_FAMILY_8000);
- /* The byte order is little endian 16 bit, meaning 214365 */
- data->hw_addr[0] = hw_addr[1];
- data->hw_addr[1] = hw_addr[0];
- data->hw_addr[2] = hw_addr[3];
- data->hw_addr[3] = hw_addr[2];
- data->hw_addr[4] = hw_addr[5];
- data->hw_addr[5] = hw_addr[4];
+ /*
+ * Store the MAC address from MAO section.
+ * No byte swapping is required in MAO section
+ */
+ memcpy(data->hw_addr, hw_addr, ETH_ALEN);
/*
* Force the use of the OTP MAC address in case of reserved MAC
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 5e4cbdb44c60..737774a01c74 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -660,7 +660,8 @@ struct iwl_scan_config {
* iwl_umac_scan_flags
*@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
* can be preempted by other scan requests with higher priority.
- * The low priority scan is aborted.
+ * The low priority scan will be resumed when the higher priority scan is
+ * completed.
*@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
* when scan starts.
*/
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 5de144968723..5514ad6d4e54 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -1023,7 +1023,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cmd->scan_priority =
iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
- if (iwl_mvm_scan_total_iterations(params) == 0)
+ if (iwl_mvm_scan_total_iterations(params) == 1)
cmd->ooc_priority =
iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
else
@@ -1109,6 +1109,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->uid = cpu_to_le32(uid);
cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
+ if (type == IWL_MVM_SCAN_SCHED)
+ cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
+
if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index d68dc697a4a0..26f076e82149 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -1401,6 +1401,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
u8 sta_id;
int ret;
+ static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
lockdep_assert_held(&mvm->mutex);
@@ -1467,7 +1468,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
end:
IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
keyconf->cipher, keyconf->keylen, keyconf->keyidx,
- sta->addr, ret);
+ sta ? sta->addr : zero_addr, ret);
return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index d24b6a83e68c..e472729e5f14 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -86,7 +86,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
{
lockdep_assert_held(&mvm->time_event_lock);
- if (te_data->id == TE_MAX)
+ if (!te_data->vif)
return;
list_del(&te_data->list);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 7ba7a118ff5c..89116864d2a0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -252,7 +252,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
if (info->band == IEEE80211_BAND_2GHZ &&
!iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
- rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS;
+ rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
else
rate_flags =
BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 2ed1e4d2774d..9f65c1cff1b1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -368,12 +368,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* 3165 Series */
{IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
/* 7265 Series */
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -426,9 +428,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 31f72a61cc3f..376b84e54ad7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -44,15 +44,6 @@
#include "iwl-io.h"
#include "iwl-op-mode.h"
-/*
- * RX related structures and functions
- */
-#define RX_NUM_QUEUES 1
-#define RX_POST_REQ_ALLOC 2
-#define RX_CLAIM_REQ_ALLOC 8
-#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
-#define RX_LOW_WATERMARK 8
-
struct iwl_host_cmd;
/*This file includes the declaration that are internal to the
@@ -86,29 +77,29 @@ struct isr_statistics {
* struct iwl_rxq - Rx queue
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @pool: initial pool of iwl_rx_mem_buffer for the queue
+ * @queue: actual rx queue
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
* @free_count: Number of pre-allocated buffers in rx_free
- * @used_count: Number of RBDs handled to allocator to use for allocation
* @write_actual:
- * @rx_free: list of RBDs with allocated RB ready for use
- * @rx_used: list of RBDs with no RB attached
+ * @rx_free: list of free SKBs for use
+ * @rx_used: list of Rx buffers with no SKB
* @need_update: flag to indicate we need to update read/write index
* @rb_stts: driver's pointer to receive buffer status
* @rb_stts_dma: bus address of receive buffer status
* @lock:
- * @pool: initial pool of iwl_rx_mem_buffer for the queue
- * @queue: actual rx queue
*
* NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
*/
struct iwl_rxq {
__le32 *bd;
dma_addr_t bd_dma;
+ struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+ struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
u32 read;
u32 write;
u32 free_count;
- u32 used_count;
u32 write_actual;
struct list_head rx_free;
struct list_head rx_used;
@@ -116,32 +107,6 @@ struct iwl_rxq {
struct iwl_rb_status *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
- struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
- struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
-};
-
-/**
- * struct iwl_rb_allocator - Rx allocator
- * @pool: initial pool of allocator
- * @req_pending: number of requests the allcator had not processed yet
- * @req_ready: number of requests honored and ready for claiming
- * @rbd_allocated: RBDs with pages allocated and ready to be handled to
- * the queue. This is a list of &struct iwl_rx_mem_buffer
- * @rbd_empty: RBDs with no page attached for allocator use. This is a list
- * of &struct iwl_rx_mem_buffer
- * @lock: protects the rbd_allocated and rbd_empty lists
- * @alloc_wq: work queue for background calls
- * @rx_alloc: work struct for background calls
- */
-struct iwl_rb_allocator {
- struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
- atomic_t req_pending;
- atomic_t req_ready;
- struct list_head rbd_allocated;
- struct list_head rbd_empty;
- spinlock_t lock;
- struct workqueue_struct *alloc_wq;
- struct work_struct rx_alloc;
};
struct iwl_dma_ptr {
@@ -285,7 +250,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
- * @rba: allocator for RX replenishing
+ * @rx_replenish: work that will be called when buffers need to be allocated
* @drv - pointer to iwl_drv
* @trans: pointer to the generic transport area
* @scd_base_addr: scheduler sram base address in SRAM
@@ -308,7 +273,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
*/
struct iwl_trans_pcie {
struct iwl_rxq rxq;
- struct iwl_rb_allocator rba;
+ struct work_struct rx_replenish;
struct iwl_trans *trans;
struct iwl_drv *drv;
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index a3fbaa0ef5e0..adad8d0fae7f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,7 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -74,29 +74,16 @@
* resets the Rx queue buffers with new memory.
*
* The management in the driver is as follows:
- * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
- * When the interrupt handler is called, the request is processed.
- * The page is either stolen - transferred to the upper layer
- * or reused - added immediately to the iwl->rxq->rx_free list.
- * + When the page is stolen - the driver updates the matching queue's used
- * count, detaches the RBD and transfers it to the queue used list.
- * When there are two used RBDs - they are transferred to the allocator empty
- * list. Work is then scheduled for the allocator to start allocating
- * eight buffers.
- * When there are another 6 used RBDs - they are transferred to the allocator
- * empty list and the driver tries to claim the pre-allocated buffers and
- * add them to iwl->rxq->rx_free. If it fails - it continues to claim them
- * until ready.
- * When there are 8+ buffers in the free list - either from allocation or from
- * 8 reused unstolen pages - restock is called to update the FW and indexes.
- * + In order to make sure the allocator always has RBDs to use for allocation
- * the allocator has initial pool in the size of num_queues*(8-2) - the
- * maximum missing RBDs per allocation request (request posted with 2
- * empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
- * The queues supplies the recycle of the rest of the RBDs.
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
+ * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ * to replenish the iwl->rxq->rx_free.
+ * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * iwl->rxq is replenished and the READ INDEX is updated (updating the
+ * 'processed' and 'read' driver indexes as well)
* + A received packet is processed and handed to the kernel network stack,
* detached from the iwl->rxq. The driver 'processed' index is updated.
- * + If there are no allocated buffers in iwl->rxq->rx_free,
+ * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
+ * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
* the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
* If there were enough free buffers and RX_STALLED is set it is cleared.
*
@@ -105,32 +92,18 @@
*
* iwl_rxq_alloc() Allocates rx_free
* iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl_pcie_rxq_restock.
- * Used only during initialization.
+ * iwl_pcie_rxq_restock
* iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
* queue, updates firmware pointers, and updates
- * the WRITE index.
- * iwl_pcie_rx_allocator() Background work for allocating pages.
+ * the WRITE index. If insufficient rx_free buffers
+ * are available, schedules iwl_pcie_rx_replenish
*
* -- enable interrupts --
* ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
* READ INDEX, detaching the SKB from the pool.
* Moves the packet buffer from queue to rx_used.
- * Posts and claims requests to the allocator.
* Calls iwl_pcie_rxq_restock to refill any empty
* slots.
- *
- * RBD life-cycle:
- *
- * Init:
- * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
- *
- * Regular Receive interrupt:
- * Page Stolen:
- * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
- * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
- * Page not Stolen:
- * rxq.queue -> rxq.rx_free -> rxq.queue
* ...
*
*/
@@ -267,6 +240,10 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
rxq->free_count--;
}
spin_unlock(&rxq->lock);
+ /* If the pre-allocated buffer pool is dropping low, schedule to
+ * refill it */
+ if (rxq->free_count <= RX_LOW_WATERMARK)
+ schedule_work(&trans_pcie->rx_replenish);
/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
@@ -278,44 +255,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
}
/*
- * iwl_pcie_rx_alloc_page - allocates and returns a page.
- *
- */
-static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rxq *rxq = &trans_pcie->rxq;
- struct page *page;
- gfp_t gfp_mask = GFP_KERNEL;
-
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- if (trans_pcie->rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
- /* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
- trans_pcie->rx_page_order);
- /* Issue an error if the hardware has consumed more than half
- * of its free buffer list and we don't have enough
- * pre-allocated buffers.
-` */
- if (rxq->free_count <= RX_LOW_WATERMARK &&
- iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
- net_ratelimit())
- IWL_CRIT(trans,
- "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
- rxq->free_count);
- return NULL;
- }
- return page;
-}
-
-/*
* iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
*
* A used RBD is an Rx buffer that has been given to the stack. To use it again
@@ -324,12 +263,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
* iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
* allocated buffers.
*/
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
struct page *page;
+ gfp_t gfp_mask = priority;
while (1) {
spin_lock(&rxq->lock);
@@ -339,10 +279,32 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
}
spin_unlock(&rxq->lock);
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (trans_pcie->rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
/* Alloc a new receive buffer */
- page = iwl_pcie_rx_alloc_page(trans);
- if (!page)
+ page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ IWL_DEBUG_INFO(trans, "alloc_pages failed, "
+ "order: %d\n",
+ trans_pcie->rx_page_order);
+
+ if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+ net_ratelimit())
+ IWL_CRIT(trans, "Failed to alloc_pages with %s."
+ "Only %u free buffers remaining.\n",
+ priority == GFP_ATOMIC ?
+ "GFP_ATOMIC" : "GFP_KERNEL",
+ rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
return;
+ }
spin_lock(&rxq->lock);
@@ -393,7 +355,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
lockdep_assert_held(&rxq->lock);
- for (i = 0; i < RX_QUEUE_SIZE; i++) {
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
if (!rxq->pool[i].page)
continue;
dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -410,144 +372,32 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
* When moving to rx_free an page is allocated for the slot.
*
* Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called only during initialization
+ * This is called as a scheduled work item (except for during initialization)
*/
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
{
- iwl_pcie_rxq_alloc_rbs(trans);
+ iwl_pcie_rxq_alloc_rbs(trans, gfp);
iwl_pcie_rxq_restock(trans);
}
-/*
- * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
- *
- * Allocates for each received request 8 pages
- * Called as a scheduled work item.
- */
-static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
- while (atomic_read(&rba->req_pending)) {
- int i;
- struct list_head local_empty;
- struct list_head local_allocated;
-
- INIT_LIST_HEAD(&local_allocated);
- spin_lock(&rba->lock);
- /* swap out the entire rba->rbd_empty to a local list */
- list_replace_init(&rba->rbd_empty, &local_empty);
- spin_unlock(&rba->lock);
-
- for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
- struct iwl_rx_mem_buffer *rxb;
- struct page *page;
-
- /* List should never be empty - each reused RBD is
- * returned to the list, and initial pool covers any
- * possible gap between the time the page is allocated
- * to the time the RBD is added.
- */
- BUG_ON(list_empty(&local_empty));
- /* Get the first rxb from the rbd list */
- rxb = list_first_entry(&local_empty,
- struct iwl_rx_mem_buffer, list);
- BUG_ON(rxb->page);
-
- /* Alloc a new receive buffer */
- page = iwl_pcie_rx_alloc_page(trans);
- if (!page)
- continue;
- rxb->page = page;
-
- /* Get physical address of the RB */
- rxb->page_dma = dma_map_page(trans->dev, page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(trans->dev, rxb->page_dma)) {
- rxb->page = NULL;
- __free_pages(page, trans_pcie->rx_page_order);
- continue;
- }
- /* dma address must be no more than 36 bits */
- BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
- /* and also 256 byte aligned! */
- BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
- /* move the allocated entry to the out list */
- list_move(&rxb->list, &local_allocated);
- i++;
- }
-
- spin_lock(&rba->lock);
- /* add the allocated rbds to the allocator allocated list */
- list_splice_tail(&local_allocated, &rba->rbd_allocated);
- /* add the unused rbds back to the allocator empty list */
- list_splice_tail(&local_empty, &rba->rbd_empty);
- spin_unlock(&rba->lock);
-
- atomic_dec(&rba->req_pending);
- atomic_inc(&rba->req_ready);
- }
-}
-
-/*
- * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
-.*
-.* Called by queue when the queue posted allocation request and
- * has freed 8 RBDs in order to restock itself.
- */
-static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
- struct iwl_rx_mem_buffer
- *out[RX_CLAIM_REQ_ALLOC])
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
- int i;
-
- if (atomic_dec_return(&rba->req_ready) < 0) {
- atomic_inc(&rba->req_ready);
- IWL_DEBUG_RX(trans,
- "Allocation request not ready, pending requests = %d\n",
- atomic_read(&rba->req_pending));
- return -ENOMEM;
- }
-
- spin_lock(&rba->lock);
- for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
- /* Get next free Rx buffer, remove it from free list */
- out[i] = list_first_entry(&rba->rbd_allocated,
- struct iwl_rx_mem_buffer, list);
- list_del(&out[i]->list);
- }
- spin_unlock(&rba->lock);
-
- return 0;
-}
-
-static void iwl_pcie_rx_allocator_work(struct work_struct *data)
+static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
- struct iwl_rb_allocator *rba_p =
- container_of(data, struct iwl_rb_allocator, rx_alloc);
struct iwl_trans_pcie *trans_pcie =
- container_of(rba_p, struct iwl_trans_pcie, rba);
+ container_of(data, struct iwl_trans_pcie, rx_replenish);
- iwl_pcie_rx_allocator(trans_pcie->trans);
+ iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
}
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
struct device *dev = trans->dev;
memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
spin_lock_init(&rxq->lock);
- spin_lock_init(&rba->lock);
if (WARN_ON(rxq->bd || rxq->rb_stts))
return -EINVAL;
@@ -637,49 +487,15 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
INIT_LIST_HEAD(&rxq->rx_free);
INIT_LIST_HEAD(&rxq->rx_used);
rxq->free_count = 0;
- rxq->used_count = 0;
- for (i = 0; i < RX_QUEUE_SIZE; i++)
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
list_add(&rxq->pool[i].list, &rxq->rx_used);
}
-static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
-{
- int i;
-
- lockdep_assert_held(&rba->lock);
-
- INIT_LIST_HEAD(&rba->rbd_allocated);
- INIT_LIST_HEAD(&rba->rbd_empty);
-
- for (i = 0; i < RX_POOL_SIZE; i++)
- list_add(&rba->pool[i].list, &rba->rbd_empty);
-}
-
-static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
- int i;
-
- lockdep_assert_held(&rba->lock);
-
- for (i = 0; i < RX_POOL_SIZE; i++) {
- if (!rba->pool[i].page)
- continue;
- dma_unmap_page(trans->dev, rba->pool[i].page_dma,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
- __free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
- rba->pool[i].page = NULL;
- }
-}
-
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i, err;
if (!rxq->bd) {
@@ -687,21 +503,11 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
if (err)
return err;
}
- if (!rba->alloc_wq)
- rba->alloc_wq = alloc_workqueue("rb_allocator",
- WQ_HIGHPRI | WQ_UNBOUND, 1);
- INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
-
- spin_lock(&rba->lock);
- atomic_set(&rba->req_pending, 0);
- atomic_set(&rba->req_ready, 0);
- /* free all first - we might be reconfigured for a different size */
- iwl_pcie_rx_free_rba(trans);
- iwl_pcie_rx_init_rba(rba);
- spin_unlock(&rba->lock);
spin_lock(&rxq->lock);
+ INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
+
/* free all first - we might be reconfigured for a different size */
iwl_pcie_rxq_free_rbs(trans);
iwl_pcie_rx_init_rxb_lists(rxq);
@@ -716,7 +522,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
spin_unlock(&rxq->lock);
- iwl_pcie_rx_replenish(trans);
+ iwl_pcie_rx_replenish(trans, GFP_KERNEL);
iwl_pcie_rx_hw_init(trans, rxq);
@@ -731,7 +537,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
/*if rxq->bd is NULL, it means that nothing has been allocated,
* exit now */
@@ -740,15 +545,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
return;
}
- cancel_work_sync(&rba->rx_alloc);
- if (rba->alloc_wq) {
- destroy_workqueue(rba->alloc_wq);
- rba->alloc_wq = NULL;
- }
-
- spin_lock(&rba->lock);
- iwl_pcie_rx_free_rba(trans);
- spin_unlock(&rba->lock);
+ cancel_work_sync(&trans_pcie->rx_replenish);
spin_lock(&rxq->lock);
iwl_pcie_rxq_free_rbs(trans);
@@ -769,43 +566,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
rxq->rb_stts = NULL;
}
-/*
- * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
- *
- * Called when a RBD can be reused. The RBD is transferred to the allocator.
- * When there are 2 empty RBDs - a request for allocation is posted
- */
-static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
- struct iwl_rx_mem_buffer *rxb,
- struct iwl_rxq *rxq)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
- /* Count the used RBDs */
- rxq->used_count++;
-
- /* Move the RBD to the used list, will be moved to allocator in batches
- * before claiming or posting a request*/
- list_add_tail(&rxb->list, &rxq->rx_used);
-
- /* If we have RX_POST_REQ_ALLOC new released rx buffers -
- * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
- * used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
- * after but we still need to post another request.
- */
- if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
- /* Move the 2 RBDs to the allocator ownership.
- Allocator has another 6 from pool for the request completion*/
- spin_lock(&rba->lock);
- list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
- spin_unlock(&rba->lock);
-
- atomic_inc(&rba->req_pending);
- queue_work(rba->alloc_wq, &rba->rx_alloc);
- }
-}
-
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
struct iwl_rx_mem_buffer *rxb)
{
@@ -928,13 +688,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
*/
__free_pages(rxb->page, trans_pcie->rx_page_order);
rxb->page = NULL;
- iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
+ list_add_tail(&rxb->list, &rxq->rx_used);
} else {
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
}
} else
- iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
+ list_add_tail(&rxb->list, &rxq->rx_used);
}
/*
@@ -944,7 +704,10 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
- u32 r, i, j;
+ u32 r, i;
+ u8 fill_rx = 0;
+ u32 count = 8;
+ int total_empty;
restart:
spin_lock(&rxq->lock);
@@ -957,6 +720,14 @@ restart:
if (i == r)
IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
+ /* calculate total frames need to be restock after handling RX */
+ total_empty = r - rxq->write_actual;
+ if (total_empty < 0)
+ total_empty += RX_QUEUE_SIZE;
+
+ if (total_empty > (RX_QUEUE_SIZE / 2))
+ fill_rx = 1;
+
while (i != r) {
struct iwl_rx_mem_buffer *rxb;
@@ -968,48 +739,29 @@ restart:
iwl_pcie_rx_handle_rb(trans, rxb);
i = (i + 1) & RX_QUEUE_MASK;
-
- /* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
- * try to claim the pre-allocated buffers from the allocator */
- if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
- struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
-
- /* Add the remaining 6 empty RBDs for allocator use */
- spin_lock(&rba->lock);
- list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
- spin_unlock(&rba->lock);
-
- /* If not ready - continue, will try to reclaim later.
- * No need to reschedule work - allocator exits only on
- * success */
- if (!iwl_pcie_rx_allocator_get(trans, out)) {
- /* If success - then RX_CLAIM_REQ_ALLOC
- * buffers were retrieved and should be added
- * to free list */
- rxq->used_count -= RX_CLAIM_REQ_ALLOC;
- for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
- list_add_tail(&out[j]->list,
- &rxq->rx_free);
- rxq->free_count++;
- }
+ /* If there are a lot of unused frames,
+ * restock the Rx queue so the ucode won't assert. */
+ if (fill_rx) {
+ count++;
+ if (count >= 8) {
+ rxq->read = i;
+ spin_unlock(&rxq->lock);
+ iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
+ count = 0;
+ goto restart;
}
}
- /* handle restock for two cases:
- * - we just pulled buffers from the allocator
- * - we have 8+ unstolen pages accumulated */
- if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
- rxq->read = i;
- spin_unlock(&rxq->lock);
- iwl_pcie_rxq_restock(trans);
- goto restart;
- }
}
/* Backtrack one entry */
rxq->read = i;
spin_unlock(&rxq->lock);
+ if (fill_rx)
+ iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
+ else
+ iwl_pcie_rxq_restock(trans);
+
if (trans_pcie->napi.poll)
napi_gro_flush(&trans_pcie->napi, false);
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 43ae658af6ec..9e144e71da0b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -182,7 +182,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
- if (!trans->cfg->apmg_not_supported)
+ if (trans->cfg->apmg_not_supported)
return;
if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -478,10 +478,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_WAKE_ME);
- else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_PREPARE |
CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+ mdelay(1);
+ iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ }
mdelay(5);
}
@@ -575,6 +581,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
if (ret >= 0)
return 0;
+ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ msleep(1);
+
for (iter = 0; iter < 10; iter++) {
/* If HW is not ready, prepare the conditions to check again */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -582,8 +592,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
do {
ret = iwl_pcie_set_hw_ready(trans);
- if (ret >= 0)
- return 0;
+ if (ret >= 0) {
+ ret = 0;
+ goto out;
+ }
usleep_range(200, 1000);
t += 200;
@@ -593,6 +605,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
IWL_ERR(trans, "Couldn't prepare the card\n");
+out:
+ iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+
return ret;
}
@@ -2459,7 +2475,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
struct iwl_trans_pcie *trans_pcie;
struct iwl_trans *trans;
u16 pci_cmd;
- int err;
+ int ret;
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
&pdev->dev, cfg, &trans_ops_pcie, 0);
@@ -2474,8 +2490,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
spin_lock_init(&trans_pcie->ref_lock);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
- err = pci_enable_device(pdev);
- if (err)
+ ret = pci_enable_device(pdev);
+ if (ret)
goto out_no_pci;
if (!cfg->base_params->pcie_l1_allowed) {
@@ -2491,23 +2507,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
pci_set_master(pdev);
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
- if (err) {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev,
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (!ret)
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (ret) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!ret)
+ ret = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(32));
/* both attempts failed: */
- if (err) {
+ if (ret) {
dev_err(&pdev->dev, "No suitable DMA available\n");
goto out_pci_disable_device;
}
}
- err = pci_request_regions(pdev, DRV_NAME);
- if (err) {
+ ret = pci_request_regions(pdev, DRV_NAME);
+ if (ret) {
dev_err(&pdev->dev, "pci_request_regions failed\n");
goto out_pci_disable_device;
}
@@ -2515,7 +2531,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
if (!trans_pcie->hw_base) {
dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
- err = -ENODEV;
+ ret = -ENODEV;
goto out_pci_release_regions;
}
@@ -2527,9 +2543,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->pci_dev = pdev;
iwl_disable_interrupts(trans);
- err = pci_enable_msi(pdev);
- if (err) {
- dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
+ ret = pci_enable_msi(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
/* enable rfkill interrupt: hw bug w/a */
pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
@@ -2547,11 +2563,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
*/
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
unsigned long flags;
- int ret;
trans->hw_rev = (trans->hw_rev & 0xfff0) |
(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
+ ret = iwl_pcie_prepare_card_hw(trans);
+ if (ret) {
+ IWL_WARN(trans, "Exit HW not ready\n");
+ goto out_pci_disable_msi;
+ }
+
/*
* in-order to recognize C step driver should read chip version
* id located at the AUX bus MISC address space.
@@ -2591,13 +2612,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans_pcie->wait_command_queue);
- if (iwl_pcie_alloc_ict(trans))
+ ret = iwl_pcie_alloc_ict(trans);
+ if (ret)
goto out_pci_disable_msi;
- err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
+ ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
iwl_pcie_irq_handler,
IRQF_SHARED, DRV_NAME, trans);
- if (err) {
+ if (ret) {
IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
goto out_free_ict;
}
@@ -2617,5 +2639,5 @@ out_pci_disable_device:
pci_disable_device(pdev);
out_no_pci:
iwl_trans_free(trans);
- return ERR_PTR(err);
+ return ERR_PTR(ret);
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 2b86c2135de3..607acb53c847 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1875,8 +1875,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* start timer if queue currently empty */
if (q->read_ptr == q->write_ptr) {
- if (txq->wd_timeout)
- mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+ if (txq->wd_timeout) {
+ /*
+			 * If the TXQ is active, arm the timer now; if it is
+			 * frozen, store the timeout in the remainder so the
+			 * timer is armed with the right value when the
+			 * station wakes up.
+ */
+ if (!txq->frozen)
+ mod_timer(&txq->stuck_timer,
+ jiffies + txq->wd_timeout);
+ else
+ txq->frozen_expiry_remainder = txq->wd_timeout;
+ }
IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
iwl_trans_pcie_ref(trans);
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index b6cc9ff47fc2..1c6788aecc62 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -172,6 +172,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
(struct rsi_91x_sdiodev *)adapter->rsi_dev;
u32 len;
u32 num_blocks;
+ const u8 *fw;
const struct firmware *fw_entry = NULL;
u32 block_size = dev->tx_blk_size;
int status = 0;
@@ -200,6 +201,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
return status;
}
+ /* Copy firmware into DMA-accessible memory */
+ fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+ if (!fw)
+ return -ENOMEM;
len = fw_entry->size;
if (len % 4)
@@ -210,7 +215,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
- status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks);
+ status = rsi_copy_to_card(common, fw, len, num_blocks);
+ kfree(fw);
release_firmware(fw_entry);
return status;
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
index 1106ce76707e..30c2cf7fa93b 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
@@ -146,7 +146,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
return status;
}
+ /* Copy firmware into DMA-accessible memory */
fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+ if (!fw)
+ return -ENOMEM;
len = fw_entry->size;
if (len % 4)
@@ -158,6 +161,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
status = rsi_copy_to_card(common, fw, len, num_blocks);
+ kfree(fw);
release_firmware(fw_entry);
return status;
}
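
Both rsi hunks above apply the same fix: the buffer returned by request_firmware() is not guaranteed to be safe to hand to the SDIO/USB DMA path, so the data is first duplicated into kmalloc'd memory with kmemdup() and freed once the transfer is done. A minimal sketch of that pattern, assuming a hypothetical do_dma_write() callback standing in for rsi_copy_to_card() (the function and firmware names below are illustrative, not rsi APIs):

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/slab.h>

static int load_blob_for_dma(struct device *dev,
			     int (*do_dma_write)(const u8 *buf, u32 len))
{
	const struct firmware *fw_entry;
	const u8 *fw;
	int status;

	status = request_firmware(&fw_entry, "example_fw.bin", dev);
	if (status)
		return status;

	/* fw_entry->data may live in read-only/vmalloc'd memory; duplicate
	 * it into kmalloc'd (DMA-capable) memory before the transfer.
	 */
	fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
	if (!fw) {
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	status = do_dma_write(fw, fw_entry->size);

	kfree(fw);
	release_firmware(fw_entry);
	return status;
}

Note that the sketch releases fw_entry on the kmemdup() failure path, whereas the hunks above return -ENOMEM directly at that point.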
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 3b3a88b53b11..585d0883c7e5 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1015,9 +1015,12 @@ static void send_beacon_frame(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+ struct rtl_tcb_desc tcb_desc;
- if (skb)
- rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL);
+ if (skb) {
+ memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+ rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
+ }
}
static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
index 1017f02d7bf7..7bf88d9dcdc3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
bool, 0444);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 1a83e190fc15..28577a31549d 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -61,6 +61,12 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
atomic_dec(&queue->inflight_packets);
+
+ /* Wake the dealloc thread _after_ decrementing inflight_packets so
+ * that if kthread_stop() has already been called, the dealloc thread
+ * does not wait forever with nothing to wake it.
+ */
+ wake_up(&queue->dealloc_wq);
}
int xenvif_schedulable(struct xenvif *vif)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 880d0d63e872..3f44b522b831 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -810,23 +810,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
struct sk_buff *skb,
struct xen_netif_tx_request *txp,
- struct gnttab_map_grant_ref *gop)
+ struct gnttab_map_grant_ref *gop,
+ unsigned int frag_overflow,
+ struct sk_buff *nskb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
int start;
pending_ring_idx_t index;
- unsigned int nr_slots, frag_overflow = 0;
+ unsigned int nr_slots;
- /* At this point shinfo->nr_frags is in fact the number of
- * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
- */
- if (shinfo->nr_frags > MAX_SKB_FRAGS) {
- frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
- BUG_ON(frag_overflow > MAX_SKB_FRAGS);
- shinfo->nr_frags = MAX_SKB_FRAGS;
- }
nr_slots = shinfo->nr_frags;
/* Skip first skb fragment if it is on same page as header fragment. */
@@ -841,13 +835,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
}
if (frag_overflow) {
- struct sk_buff *nskb = xenvif_alloc_skb(0);
- if (unlikely(nskb == NULL)) {
- if (net_ratelimit())
- netdev_err(queue->vif->dev,
- "Can't allocate the frag_list skb.\n");
- return NULL;
- }
shinfo = skb_shinfo(nskb);
frags = shinfo->frags;
@@ -1175,9 +1162,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
unsigned *copy_ops,
unsigned *map_ops)
{
- struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
- struct sk_buff *skb;
+ struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
+ struct sk_buff *skb, *nskb;
int ret;
+ unsigned int frag_overflow;
while (skb_queue_len(&queue->tx_queue) < budget) {
struct xen_netif_tx_request txreq;
@@ -1265,6 +1253,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
break;
}
+ skb_shinfo(skb)->nr_frags = ret;
+ if (data_len < txreq.size)
+ skb_shinfo(skb)->nr_frags++;
+ /* At this point shinfo->nr_frags is in fact the number of
+ * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
+ */
+ frag_overflow = 0;
+ nskb = NULL;
+ if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
+ frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
+ BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+ skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
+ nskb = xenvif_alloc_skb(0);
+ if (unlikely(nskb == NULL)) {
+ kfree_skb(skb);
+ xenvif_tx_err(queue, &txreq, idx);
+ if (net_ratelimit())
+ netdev_err(queue->vif->dev,
+ "Can't allocate the frag_list skb.\n");
+ break;
+ }
+ }
+
if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1272,6 +1283,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
/* Failure in xenvif_set_skb_gso is fatal. */
kfree_skb(skb);
+ kfree_skb(nskb);
break;
}
}
@@ -1294,9 +1306,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
(*copy_ops)++;
- skb_shinfo(skb)->nr_frags = ret;
if (data_len < txreq.size) {
- skb_shinfo(skb)->nr_frags++;
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
@@ -1310,13 +1320,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
queue->pending_cons++;
- request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
- if (request_gop == NULL) {
- kfree_skb(skb);
- xenvif_tx_err(queue, &txreq, idx);
- break;
- }
- gop = request_gop;
+ gop = xenvif_get_requests(queue, skb, txfrags, gop,
+ frag_overflow, nskb);
__skb_queue_tail(&queue->tx_queue, skb);
@@ -1536,7 +1541,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
smp_wmb();
queue->dealloc_prod++;
} while (ubuf);
- wake_up(&queue->dealloc_wq);
spin_unlock_irqrestore(&queue->callback_lock, flags);
if (likely(zerocopy_success))
@@ -1566,13 +1570,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
smp_rmb();
while (dc != dp) {
- BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
+ BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
pending_idx =
queue->dealloc_ring[pending_index(dc++)];
- pending_idx_release[gop-queue->tx_unmap_ops] =
+ pending_idx_release[gop - queue->tx_unmap_ops] =
pending_idx;
- queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
+ queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
queue->mmap_pages[pending_idx];
gnttab_set_unmap_op(gop,
idx_to_kaddr(queue, pending_idx),
diff --git a/drivers/ntb/ntb.c b/drivers/ntb/ntb.c
index 23435f2a5486..2e2530743831 100644
--- a/drivers/ntb/ntb.c
+++ b/drivers/ntb/ntb.c
@@ -114,7 +114,7 @@ int ntb_register_device(struct ntb_dev *ntb)
ntb->dev.bus = &ntb_bus;
ntb->dev.parent = &ntb->pdev->dev;
ntb->dev.release = ntb_dev_release;
- dev_set_name(&ntb->dev, pci_name(ntb->pdev));
+ dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev));
ntb->ctx = NULL;
ntb->ctx_ops = NULL;
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index efe3ad4122f2..1c6386d5f79c 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -142,10 +142,11 @@ struct ntb_transport_qp {
void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
void *data, int len);
+ struct list_head rx_post_q;
struct list_head rx_pend_q;
struct list_head rx_free_q;
- spinlock_t ntb_rx_pend_q_lock;
- spinlock_t ntb_rx_free_q_lock;
+ /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
+ spinlock_t ntb_rx_q_lock;
void *rx_buff;
unsigned int rx_index;
unsigned int rx_max_entry;
@@ -211,6 +212,8 @@ struct ntb_transport_ctx {
bool link_is_up;
struct delayed_work link_work;
struct work_struct link_cleanup;
+
+ struct dentry *debugfs_node_dir;
};
enum {
@@ -436,13 +439,17 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
char *buf;
ssize_t ret, out_offset, out_count;
+ qp = filp->private_data;
+
+ if (!qp || !qp->link_is_up)
+ return 0;
+
out_count = 1000;
buf = kmalloc(out_count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- qp = filp->private_data;
out_offset = 0;
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"NTB QP stats\n");
@@ -534,6 +541,27 @@ out:
return entry;
}
+static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
+ struct list_head *list,
+ struct list_head *to_list)
+{
+ struct ntb_queue_entry *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+
+ if (list_empty(list)) {
+ entry = NULL;
+ } else {
+ entry = list_first_entry(list, struct ntb_queue_entry, entry);
+ list_move_tail(&entry->entry, to_list);
+ }
+
+ spin_unlock_irqrestore(lock, flags);
+
+ return entry;
+}
+
static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
unsigned int qp_num)
{
@@ -601,13 +629,16 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
}
static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
- unsigned int size)
+ resource_size_t size)
{
struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
struct pci_dev *pdev = nt->ndev->pdev;
- unsigned int xlat_size, buff_size;
+ size_t xlat_size, buff_size;
int rc;
+ if (!size)
+ return -EINVAL;
+
xlat_size = round_up(size, mw->xlat_align_size);
buff_size = round_up(size, mw->xlat_align);
@@ -627,7 +658,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
if (!mw->virt_addr) {
mw->xlat_size = 0;
mw->buff_size = 0;
- dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n",
+ dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
buff_size);
return -ENOMEM;
}
@@ -867,6 +898,8 @@ static void ntb_qp_link_work(struct work_struct *work)
if (qp->event_handler)
qp->event_handler(qp->cb_data, qp->link_is_up);
+
+ tasklet_schedule(&qp->rxc_db_work);
} else if (nt->link_is_up)
schedule_delayed_work(&qp->link_work,
msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
@@ -923,12 +956,12 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
qp->tx_max_frame = min(transport_mtu, tx_size / 2);
qp->tx_max_entry = tx_size / qp->tx_max_frame;
- if (nt_debugfs_dir) {
+ if (nt->debugfs_node_dir) {
char debugfs_name[4];
snprintf(debugfs_name, 4, "qp%d", qp_num);
qp->debugfs_dir = debugfs_create_dir(debugfs_name,
- nt_debugfs_dir);
+ nt->debugfs_node_dir);
qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
qp->debugfs_dir, qp,
@@ -941,10 +974,10 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
- spin_lock_init(&qp->ntb_rx_pend_q_lock);
- spin_lock_init(&qp->ntb_rx_free_q_lock);
+ spin_lock_init(&qp->ntb_rx_q_lock);
spin_lock_init(&qp->ntb_tx_free_q_lock);
+ INIT_LIST_HEAD(&qp->rx_post_q);
INIT_LIST_HEAD(&qp->rx_pend_q);
INIT_LIST_HEAD(&qp->rx_free_q);
INIT_LIST_HEAD(&qp->tx_free_q);
@@ -1031,6 +1064,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
goto err2;
}
+ if (nt_debugfs_dir) {
+ nt->debugfs_node_dir =
+ debugfs_create_dir(pci_name(ndev->pdev),
+ nt_debugfs_dir);
+ }
+
for (i = 0; i < qp_count; i++) {
rc = ntb_transport_init_queue(nt, i);
if (rc)
@@ -1107,22 +1146,47 @@ static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
kfree(nt);
}
-static void ntb_rx_copy_callback(void *data)
+static void ntb_complete_rxc(struct ntb_transport_qp *qp)
{
- struct ntb_queue_entry *entry = data;
- struct ntb_transport_qp *qp = entry->qp;
- void *cb_data = entry->cb_data;
- unsigned int len = entry->len;
- struct ntb_payload_header *hdr = entry->rx_hdr;
+ struct ntb_queue_entry *entry;
+ void *cb_data;
+ unsigned int len;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+
+ while (!list_empty(&qp->rx_post_q)) {
+ entry = list_first_entry(&qp->rx_post_q,
+ struct ntb_queue_entry, entry);
+ if (!(entry->flags & DESC_DONE_FLAG))
+ break;
+
+ entry->rx_hdr->flags = 0;
+ iowrite32(entry->index, &qp->rx_info->entry);
- hdr->flags = 0;
+ cb_data = entry->cb_data;
+ len = entry->len;
- iowrite32(entry->index, &qp->rx_info->entry);
+ list_move_tail(&entry->entry, &qp->rx_free_q);
- ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+ spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
- if (qp->rx_handler && qp->client_ready)
- qp->rx_handler(qp, qp->cb_data, cb_data, len);
+ if (qp->rx_handler && qp->client_ready)
+ qp->rx_handler(qp, qp->cb_data, cb_data, len);
+
+ spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+ }
+
+ spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
+}
+
+static void ntb_rx_copy_callback(void *data)
+{
+ struct ntb_queue_entry *entry = data;
+
+ entry->flags |= DESC_DONE_FLAG;
+
+ ntb_complete_rxc(entry->qp);
}
static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
@@ -1138,19 +1202,18 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
ntb_rx_copy_callback(entry);
}
-static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
- size_t len)
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
{
struct dma_async_tx_descriptor *txd;
struct ntb_transport_qp *qp = entry->qp;
struct dma_chan *chan = qp->dma_chan;
struct dma_device *device;
- size_t pay_off, buff_off;
+ size_t pay_off, buff_off, len;
struct dmaengine_unmap_data *unmap;
dma_cookie_t cookie;
void *buf = entry->buf;
- entry->len = len;
+ len = entry->len;
if (!chan)
goto err;
@@ -1226,7 +1289,6 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
struct ntb_payload_header *hdr;
struct ntb_queue_entry *entry;
void *offset;
- int rc;
offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
@@ -1255,65 +1317,43 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
return -EIO;
}
- entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+ entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
if (!entry) {
dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
qp->rx_err_no_buf++;
-
- rc = -ENOMEM;
- goto err;
+ return -EAGAIN;
}
+ entry->rx_hdr = hdr;
+ entry->index = qp->rx_index;
+
if (hdr->len > entry->len) {
dev_dbg(&qp->ndev->pdev->dev,
"receive buffer overflow! Wanted %d got %d\n",
hdr->len, entry->len);
qp->rx_err_oflow++;
- rc = -EIO;
- goto err;
- }
+ entry->len = -EIO;
+ entry->flags |= DESC_DONE_FLAG;
- dev_dbg(&qp->ndev->pdev->dev,
- "RX OK index %u ver %u size %d into buf size %d\n",
- qp->rx_index, hdr->ver, hdr->len, entry->len);
+ ntb_complete_rxc(qp);
+ } else {
+ dev_dbg(&qp->ndev->pdev->dev,
+ "RX OK index %u ver %u size %d into buf size %d\n",
+ qp->rx_index, hdr->ver, hdr->len, entry->len);
- qp->rx_bytes += hdr->len;
- qp->rx_pkts++;
+ qp->rx_bytes += hdr->len;
+ qp->rx_pkts++;
- entry->index = qp->rx_index;
- entry->rx_hdr = hdr;
+ entry->len = hdr->len;
- ntb_async_rx(entry, offset, hdr->len);
+ ntb_async_rx(entry, offset);
+ }
qp->rx_index++;
qp->rx_index %= qp->rx_max_entry;
return 0;
-
-err:
- /* FIXME: if this syncrhonous update of the rx_index gets ahead of
- * asyncrhonous ntb_rx_copy_callback of previous entry, there are three
- * scenarios:
- *
- * 1) The peer might miss this update, but observe the update
- * from the memcpy completion callback. In this case, the buffer will
- * not be freed on the peer to be reused for a different packet. The
- * successful rx of a later packet would clear the condition, but the
- * condition could persist if several rx fail in a row.
- *
- * 2) The peer may observe this update before the asyncrhonous copy of
- * prior packets is completed. The peer may overwrite the buffers of
- * the prior packets before they are copied.
- *
- * 3) Both: the peer may observe the update, and then observe the index
- * decrement by the asynchronous completion callback. Who knows what
- * badness that will cause.
- */
- hdr->flags = 0;
- iowrite32(qp->rx_index, &qp->rx_info->entry);
-
- return rc;
}
static void ntb_transport_rxc_db(unsigned long data)
@@ -1333,7 +1373,7 @@ static void ntb_transport_rxc_db(unsigned long data)
break;
}
- if (qp->dma_chan)
+ if (i && qp->dma_chan)
dma_async_issue_pending(qp->dma_chan);
if (i == qp->rx_max_entry) {
@@ -1609,7 +1649,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
goto err1;
entry->qp = qp;
- ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
+ ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
&qp->rx_free_q);
}
@@ -1634,7 +1674,7 @@ err2:
while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
kfree(entry);
err1:
- while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+ while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
kfree(entry);
if (qp->dma_chan)
dma_release_channel(qp->dma_chan);
@@ -1652,7 +1692,6 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
*/
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
- struct ntb_transport_ctx *nt = qp->transport;
struct pci_dev *pdev;
struct ntb_queue_entry *entry;
u64 qp_bit;
@@ -1689,18 +1728,23 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
qp->tx_handler = NULL;
qp->event_handler = NULL;
- while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+ while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
kfree(entry);
- while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
- dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
+ while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
+ dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
+ kfree(entry);
+ }
+
+ while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
+ dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
kfree(entry);
}
while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
kfree(entry);
- nt->qp_bitmap_free |= qp_bit;
+ qp->transport->qp_bitmap_free |= qp_bit;
dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
@@ -1724,14 +1768,14 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
if (!qp || qp->client_ready)
return NULL;
- entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+ entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
if (!entry)
return NULL;
buf = entry->cb_data;
*len = entry->len;
- ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+ ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
return buf;
}
@@ -1757,15 +1801,18 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
if (!qp)
return -EINVAL;
- entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
+ entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
if (!entry)
return -ENOMEM;
entry->cb_data = cb;
entry->buf = data;
entry->len = len;
+ entry->flags = 0;
+
+ ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
- ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
+ tasklet_schedule(&qp->rxc_db_work);
return 0;
}
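
The rx-path rework above replaces the separate pend/free locks with a single ntb_rx_q_lock and adds an rx_post_q so receive completions are delivered strictly in the order the buffers were posted: ntb_list_mv() moves an entry from rx_pend_q to rx_post_q when a frame is claimed, the (possibly asynchronous DMA) copy sets DESC_DONE_FLAG, and ntb_complete_rxc() drains rx_post_q but stops at the first entry that is not yet done. A minimal stand-alone sketch of that ordered-completion scheme, using hypothetical demo_* names rather than the driver's own structures:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_entry {
	struct list_head entry;
	bool done;			/* set by the copy completion */
};

struct demo_queue {
	spinlock_t lock;
	struct list_head pend_q;	/* buffers waiting for data */
	struct list_head post_q;	/* buffers filled, completing in order */
};

/* Claim the oldest pending buffer and move it to the posted list,
 * as ntb_list_mv() does for rx_pend_q -> rx_post_q.
 */
static struct demo_entry *demo_claim(struct demo_queue *q)
{
	struct demo_entry *e = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (!list_empty(&q->pend_q)) {
		e = list_first_entry(&q->pend_q, struct demo_entry, entry);
		list_move_tail(&e->entry, &q->post_q);
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return e;
}

/* Complete posted buffers in order, stopping at the first one whose copy
 * has not finished -- the rule ntb_complete_rxc() enforces via DESC_DONE_FLAG.
 */
static void demo_complete(struct demo_queue *q)
{
	struct demo_entry *e;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	while (!list_empty(&q->post_q)) {
		e = list_first_entry(&q->post_q, struct demo_entry, entry);
		if (!e->done)
			break;
		list_del(&e->entry);
		/* the driver drops the lock here before invoking the client's
		 * rx_handler
		 */
	}
	spin_unlock_irqrestore(&q->lock, flags);
}

In the driver itself the completion step also clears the payload header flags and writes the consumed index back to the peer (iowrite32() into rx_info->entry) before handing the buffer to the client callback.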
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index a5233422f9dc..7384455792bf 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -458,10 +458,15 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
nvdimm_bus_unlock(dev);
}
if (is_nd_btt(dev) && probe) {
+ struct nd_btt *nd_btt = to_nd_btt(dev);
+
nd_region = to_nd_region(dev->parent);
nvdimm_bus_lock(dev);
if (nd_region->btt_seed == dev)
nd_region_create_btt_seed(nd_region);
+ if (nd_region->ns_seed == &nd_btt->ndns->dev &&
+ is_nd_blk(dev->parent))
+ nd_region_create_blk_seed(nd_region);
nvdimm_bus_unlock(dev);
}
}
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 8df1b1777745..59bb8556e43a 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -47,7 +47,7 @@ config OF_DYNAMIC
config OF_ADDRESS
def_bool y
- depends on !SPARC
+ depends on !SPARC && HAS_IOMEM
select OF_ADDRESS_PCI if PCI
config OF_ADDRESS_PCI
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 18016341d5a9..9f71770b6226 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -979,7 +979,6 @@ static struct platform_driver unittest_driver = {
.remove = unittest_remove,
.driver = {
.name = "unittest",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(unittest_match),
},
};
@@ -1666,7 +1665,6 @@ static const struct i2c_device_id unittest_i2c_dev_id[] = {
static struct i2c_driver unittest_i2c_dev_driver = {
.driver = {
.name = "unittest-i2c-dev",
- .owner = THIS_MODULE,
},
.probe = unittest_i2c_dev_probe,
.remove = unittest_i2c_dev_remove,
@@ -1761,7 +1759,6 @@ static const struct i2c_device_id unittest_i2c_mux_id[] = {
static struct i2c_driver unittest_i2c_mux_driver = {
.driver = {
.name = "unittest-i2c-mux",
- .owner = THIS_MODULE,
},
.probe = unittest_i2c_mux_probe,
.remove = unittest_i2c_mux_remove,
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 8067f54ce050..5ce5ef211bdb 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -891,8 +891,10 @@ parport_register_dev_model(struct parport *port, const char *name,
par_dev->dev.release = free_pardevice;
par_dev->devmodel = true;
ret = device_register(&par_dev->dev);
- if (ret)
- goto err_put_dev;
+ if (ret) {
+ put_device(&par_dev->dev);
+ goto err_put_port;
+ }
/* Chain this onto the list */
par_dev->prev = NULL;
@@ -907,7 +909,8 @@ parport_register_dev_model(struct parport *port, const char *name,
spin_unlock(&port->physport->pardevice_lock);
pr_debug("%s: cannot grant exclusive access for device %s\n",
port->name, name);
- goto err_put_dev;
+ device_unregister(&par_dev->dev);
+ goto err_put_port;
}
port->flags |= PARPORT_FLAG_EXCL;
}
@@ -938,8 +941,6 @@ parport_register_dev_model(struct parport *port, const char *name,
return par_dev;
-err_put_dev:
- put_device(&par_dev->dev);
err_free_devname:
kfree(devname);
err_free_par_dev:
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 73de4efcbe6e..944f50015ed0 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -2,7 +2,7 @@
# PCI configuration
#
config PCI_BUS_ADDR_T_64BIT
- def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
+ def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC))
depends on PCI
config PCI_MSI
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index cefd636681b6..b978bbfe044c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -997,7 +997,12 @@ void set_pcie_port_type(struct pci_dev *pdev)
else if (type == PCI_EXP_TYPE_UPSTREAM ||
type == PCI_EXP_TYPE_DOWNSTREAM) {
parent = pci_upstream_bridge(pdev);
- if (!parent->has_secondary_link)
+
+ /*
+ * Usually there's an upstream device (Root Port or Switch
+ * Downstream Port), but we can't assume one exists.
+ */
+ if (parent && !parent->has_secondary_link)
pdev->has_secondary_link = 1;
}
}
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index c0e6ede3e27d..6b8dd162f644 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -56,6 +56,7 @@ config PHY_EXYNOS_MIPI_VIDEO
config PHY_PXA_28NM_HSIC
tristate "Marvell USB HSIC 28nm PHY Driver"
+ depends on HAS_IOMEM
select GENERIC_PHY
help
Enable this to support Marvell USB HSIC PHY driver for Marvell
@@ -66,6 +67,7 @@ config PHY_PXA_28NM_HSIC
config PHY_PXA_28NM_USB2
tristate "Marvell USB 2.0 28nm PHY Driver"
+ depends on HAS_IOMEM
select GENERIC_PHY
help
Enable this to support Marvell USB 2.0 PHY driver for Marvell
diff --git a/drivers/phy/phy-berlin-usb.c b/drivers/phy/phy-berlin-usb.c
index c6fc95b53083..335e06d66ed9 100644
--- a/drivers/phy/phy-berlin-usb.c
+++ b/drivers/phy/phy-berlin-usb.c
@@ -105,9 +105,9 @@
static const u32 phy_berlin_pll_dividers[] = {
/* Berlin 2 */
- CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
- /* Berlin 2CD */
CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55),
+ /* Berlin 2CD/Q */
+ CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
};
struct phy_berlin_usb_priv {
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index e17c539e4f6f..2dad7e820ff0 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -212,6 +212,7 @@ void sun4i_usb_phy_set_squelch_detect(struct phy *_phy, bool enabled)
sun4i_usb_phy_write(phy, PHY_SQUELCH_DETECT, enabled ? 0 : 2, 2);
}
+EXPORT_SYMBOL_GPL(sun4i_usb_phy_set_squelch_detect);
static struct phy_ops sun4i_usb_phy_ops = {
.init = sun4i_usb_phy_init,
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 53f295c1bab1..08020dc2c7c8 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -28,7 +28,8 @@
#include <linux/delay.h>
#include <linux/phy/omap_control_phy.h>
#include <linux/of_platform.h>
-#include <linux/spinlock.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#define PLL_STATUS 0x00000004
#define PLL_GO 0x00000008
@@ -53,6 +54,8 @@
#define PLL_LOCK 0x2
#define PLL_IDLE 0x1
+#define SATA_PLL_SOFT_RESET BIT(18)
+
/*
* This is an Empirical value that works, need to confirm the actual
* value required for the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status
@@ -83,10 +86,9 @@ struct ti_pipe3 {
struct clk *refclk;
struct clk *div_clk;
struct pipe3_dpll_map *dpll_map;
- bool enabled;
- spinlock_t lock; /* serialize clock enable/disable */
- /* the below flag is needed specifically for SATA */
- bool refclk_enabled;
+	struct regmap *dpll_reset_syscon; /* ctrl. reg. access */
+ unsigned int dpll_reset_reg; /* reg. index within syscon */
+ bool sata_refclk_enabled;
};
static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -137,6 +139,9 @@ static struct pipe3_dpll_params *ti_pipe3_get_dpll_params(struct ti_pipe3 *phy)
return NULL;
}
+static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy);
+static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy);
+
static int ti_pipe3_power_off(struct phy *x)
{
struct ti_pipe3 *phy = phy_get_drvdata(x);
@@ -217,6 +222,7 @@ static int ti_pipe3_init(struct phy *x)
u32 val;
int ret = 0;
+ ti_pipe3_enable_clocks(phy);
/*
* Set pcie_pcs register to 0x96 for proper functioning of phy
* as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table
@@ -250,33 +256,46 @@ static int ti_pipe3_exit(struct phy *x)
u32 val;
unsigned long timeout;
- /* SATA DPLL can't be powered down due to Errata i783 and PCIe
- * does not have internal DPLL
+	/* If dpll_reset_syscon is not present, we won't power down the SATA DPLL
+ * due to Errata i783
*/
- if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") ||
- of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie"))
+ if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") &&
+ !phy->dpll_reset_syscon)
return 0;
- /* Put DPLL in IDLE mode */
- val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
- val |= PLL_IDLE;
- ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
+ /* PCIe doesn't have internal DPLL */
+ if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) {
+ /* Put DPLL in IDLE mode */
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
+ val |= PLL_IDLE;
+ ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
- /* wait for LDO and Oscillator to power down */
- timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME);
- do {
- cpu_relax();
- val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
- if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN))
- break;
- } while (!time_after(jiffies, timeout));
+ /* wait for LDO and Oscillator to power down */
+ timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME);
+ do {
+ cpu_relax();
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
+ if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN))
+ break;
+ } while (!time_after(jiffies, timeout));
+
+ if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) {
+ dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n",
+ val);
+ return -EBUSY;
+ }
+ }
- if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) {
- dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n",
- val);
- return -EBUSY;
+ /* i783: SATA needs control bit toggle after PLL unlock */
+ if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) {
+ regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
+ SATA_PLL_SOFT_RESET, SATA_PLL_SOFT_RESET);
+ regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
+ SATA_PLL_SOFT_RESET, 0);
}
+ ti_pipe3_disable_clocks(phy);
+
return 0;
}
static struct phy_ops ops = {
@@ -306,7 +325,6 @@ static int ti_pipe3_probe(struct platform_device *pdev)
return -ENOMEM;
phy->dev = &pdev->dev;
- spin_lock_init(&phy->lock);
if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
match = of_match_device(ti_pipe3_id_table, &pdev->dev);
@@ -350,6 +368,21 @@ static int ti_pipe3_probe(struct platform_device *pdev)
}
} else {
phy->wkupclk = ERR_PTR(-ENODEV);
+ phy->dpll_reset_syscon = syscon_regmap_lookup_by_phandle(node,
+ "syscon-pllreset");
+ if (IS_ERR(phy->dpll_reset_syscon)) {
+ dev_info(&pdev->dev,
+ "can't get syscon-pllreset, sata dpll won't idle\n");
+ phy->dpll_reset_syscon = NULL;
+ } else {
+ if (of_property_read_u32_index(node,
+ "syscon-pllreset", 1,
+ &phy->dpll_reset_reg)) {
+ dev_err(&pdev->dev,
+ "couldn't get pllreset reg. offset\n");
+ return -EINVAL;
+ }
+ }
}
if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
@@ -403,6 +436,16 @@ static int ti_pipe3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, phy);
pm_runtime_enable(phy->dev);
+ /*
+ * Prevent auto-disable of refclk for SATA PHY due to Errata i783
+ */
+ if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) {
+ if (!IS_ERR(phy->refclk)) {
+ clk_prepare_enable(phy->refclk);
+ phy->sata_refclk_enabled = true;
+ }
+ }
+
generic_phy = devm_phy_create(phy->dev, NULL, &ops);
if (IS_ERR(generic_phy))
return PTR_ERR(generic_phy);
@@ -413,63 +456,33 @@ static int ti_pipe3_probe(struct platform_device *pdev)
if (IS_ERR(phy_provider))
return PTR_ERR(phy_provider);
- pm_runtime_get(&pdev->dev);
-
return 0;
}
static int ti_pipe3_remove(struct platform_device *pdev)
{
- if (!pm_runtime_suspended(&pdev->dev))
- pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
-#ifdef CONFIG_PM
-static int ti_pipe3_enable_refclk(struct ti_pipe3 *phy)
+static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
{
- if (!IS_ERR(phy->refclk) && !phy->refclk_enabled) {
- int ret;
+ int ret = 0;
+ if (!IS_ERR(phy->refclk)) {
ret = clk_prepare_enable(phy->refclk);
if (ret) {
dev_err(phy->dev, "Failed to enable refclk %d\n", ret);
return ret;
}
- phy->refclk_enabled = true;
}
- return 0;
-}
-
-static void ti_pipe3_disable_refclk(struct ti_pipe3 *phy)
-{
- if (!IS_ERR(phy->refclk))
- clk_disable_unprepare(phy->refclk);
-
- phy->refclk_enabled = false;
-}
-
-static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
-{
- int ret = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&phy->lock, flags);
- if (phy->enabled)
- goto err1;
-
- ret = ti_pipe3_enable_refclk(phy);
- if (ret)
- goto err1;
-
if (!IS_ERR(phy->wkupclk)) {
ret = clk_prepare_enable(phy->wkupclk);
if (ret) {
dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
- goto err2;
+ goto disable_refclk;
}
}
@@ -477,96 +490,43 @@ static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
ret = clk_prepare_enable(phy->div_clk);
if (ret) {
dev_err(phy->dev, "Failed to enable div_clk %d\n", ret);
- goto err3;
+ goto disable_wkupclk;
}
}
- phy->enabled = true;
- spin_unlock_irqrestore(&phy->lock, flags);
return 0;
-err3:
+disable_wkupclk:
if (!IS_ERR(phy->wkupclk))
clk_disable_unprepare(phy->wkupclk);
-err2:
+disable_refclk:
if (!IS_ERR(phy->refclk))
clk_disable_unprepare(phy->refclk);
- ti_pipe3_disable_refclk(phy);
-err1:
- spin_unlock_irqrestore(&phy->lock, flags);
return ret;
}
static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
{
- unsigned long flags;
-
- spin_lock_irqsave(&phy->lock, flags);
- if (!phy->enabled) {
- spin_unlock_irqrestore(&phy->lock, flags);
- return;
- }
-
if (!IS_ERR(phy->wkupclk))
clk_disable_unprepare(phy->wkupclk);
- /* Don't disable refclk for SATA PHY due to Errata i783 */
- if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata"))
- ti_pipe3_disable_refclk(phy);
+ if (!IS_ERR(phy->refclk)) {
+ clk_disable_unprepare(phy->refclk);
+ /*
+ * SATA refclk needs an additional disable as we left it
+ * on in probe to avoid Errata i783
+ */
+ if (phy->sata_refclk_enabled) {
+ clk_disable_unprepare(phy->refclk);
+ phy->sata_refclk_enabled = false;
+ }
+ }
+
if (!IS_ERR(phy->div_clk))
clk_disable_unprepare(phy->div_clk);
- phy->enabled = false;
- spin_unlock_irqrestore(&phy->lock, flags);
}
-static int ti_pipe3_runtime_suspend(struct device *dev)
-{
- struct ti_pipe3 *phy = dev_get_drvdata(dev);
-
- ti_pipe3_disable_clocks(phy);
- return 0;
-}
-
-static int ti_pipe3_runtime_resume(struct device *dev)
-{
- struct ti_pipe3 *phy = dev_get_drvdata(dev);
- int ret = 0;
-
- ret = ti_pipe3_enable_clocks(phy);
- return ret;
-}
-
-static int ti_pipe3_suspend(struct device *dev)
-{
- struct ti_pipe3 *phy = dev_get_drvdata(dev);
-
- ti_pipe3_disable_clocks(phy);
- return 0;
-}
-
-static int ti_pipe3_resume(struct device *dev)
-{
- struct ti_pipe3 *phy = dev_get_drvdata(dev);
- int ret;
-
- ret = ti_pipe3_enable_clocks(phy);
- if (ret)
- return ret;
-
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops ti_pipe3_pm_ops = {
- SET_RUNTIME_PM_OPS(ti_pipe3_runtime_suspend,
- ti_pipe3_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(ti_pipe3_suspend, ti_pipe3_resume)
-};
-
static const struct of_device_id ti_pipe3_id_table[] = {
{
.compatible = "ti,phy-usb3",
@@ -592,7 +552,6 @@ static struct platform_driver ti_pipe3_driver = {
.remove = ti_pipe3_remove,
.driver = {
.name = "ti-pipe3",
- .pm = &ti_pipe3_pm_ops,
.of_match_table = ti_pipe3_id_table,
},
};
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index efcf2a2b3975..6177315ab74e 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -473,6 +473,8 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data)
spin_lock_irqsave(&pc->irq_lock[bank], flags);
bcm2835_gpio_irq_config(pc, gpio, false);
+ /* Clear events that were latched prior to clearing event sources */
+ bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
clear_bit(offset, &pc->enabled_irq_map[bank]);
spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index 5fd4437cee15..88a7fac11bd4 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -403,14 +403,13 @@ static int imx1_pinconf_set(struct pinctrl_dev *pctldev,
unsigned num_configs)
{
struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx1_pinctrl_soc_info *info = ipctl->info;
int i;
for (i = 0; i != num_configs; ++i) {
imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN);
dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n",
- info->pins[pin_id].name);
+ pin_desc_get(pctldev, pin_id)->name);
}
return 0;
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 557d0f2a3031..97681fac082e 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -787,7 +787,6 @@ static const struct pinmux_ops abx500_pinmux_ops = {
.set_mux = abx500_pmx_set,
.gpio_request_enable = abx500_gpio_request_enable,
.gpio_disable_free = abx500_gpio_disable_free,
- .strict = true,
};
static int abx500_get_groups_cnt(struct pinctrl_dev *pctldev)
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index ef0b697639a7..347c763a6a78 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -823,7 +823,7 @@ static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
- if (param)
+ if (param_val)
*reg &= ~(LPC18XX_SCU_I2C0_ZIF << shift);
else
*reg |= (LPC18XX_SCU_I2C0_ZIF << shift);
@@ -876,7 +876,7 @@ static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
- if (param)
+ if (param_val)
*reg &= ~LPC18XX_SCU_PIN_ZIF;
else
*reg |= LPC18XX_SCU_PIN_ZIF;
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index b2de09d3b1a0..0b8d480171a3 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1760,7 +1760,8 @@ static int pcs_irq_init_chained_handler(struct pcs_device *pcs,
int res;
res = request_irq(pcs_soc->irq, pcs_irq_handler,
- IRQF_SHARED | IRQF_NO_SUSPEND,
+ IRQF_SHARED | IRQF_NO_SUSPEND |
+ IRQF_NO_THREAD,
name, pcs_soc);
if (res) {
pcs_soc->irq = -1;
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 3dd5a3b2ac62..c760bf43d116 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -33,11 +33,6 @@
#include "../core.h"
#include "pinctrl-samsung.h"
-#define GROUP_SUFFIX "-grp"
-#define GSUFFIX_LEN sizeof(GROUP_SUFFIX)
-#define FUNCTION_SUFFIX "-mux"
-#define FSUFFIX_LEN sizeof(FUNCTION_SUFFIX)
-
/* list of all possible config options supported */
static struct pin_config {
const char *property;
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index c7508d5f6886..0874cfee6889 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -224,7 +224,7 @@ struct sh_pfc_soc_info {
/* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */
#define _GP_GPIO(bank, _pin, _name, sfx) \
- [(bank * 32) + _pin] = { \
+ { \
.pin = (bank * 32) + _pin, \
.name = __stringify(_name), \
.enum_id = _name##_DATA, \
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index f87a5eaf75da..0afaf79a4e51 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* Inspired from:
* - U300 Pinctl drivers
diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h
index dc8bf85ecb2a..27c2cc8d83ad 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.h
+++ b/drivers/pinctrl/spear/pinctrl-spear.h
@@ -2,7 +2,7 @@
* Driver header file for the ST Microelectronics SPEAr pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
index a7bdc537efa7..92611bb757ac 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1310.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr1310 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -2730,7 +2730,7 @@ static void __exit spear1310_pinctrl_exit(void)
}
module_exit(spear1310_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
index f43ec85a0328..f842e9dc40d0 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1340.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1340.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr1340 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -2046,7 +2046,7 @@ static void __exit spear1340_pinctrl_exit(void)
}
module_exit(spear1340_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c
index da8990a8eeef..d998a2ccff48 100644
--- a/drivers/pinctrl/spear/pinctrl-spear300.c
+++ b/drivers/pinctrl/spear/pinctrl-spear300.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr300 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -703,7 +703,7 @@ static void __exit spear300_pinctrl_exit(void)
}
module_exit(spear300_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c
index 31ede51e819b..609b18aceb16 100644
--- a/drivers/pinctrl/spear/pinctrl-spear310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear310.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr310 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -426,7 +426,7 @@ static void __exit spear310_pinctrl_exit(void)
}
module_exit(spear310_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c
index 506e40b641e0..c07114431bd4 100644
--- a/drivers/pinctrl/spear/pinctrl-spear320.c
+++ b/drivers/pinctrl/spear/pinctrl-spear320.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr320 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -3467,7 +3467,7 @@ static void __exit spear320_pinctrl_exit(void)
}
module_exit(spear320_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.c b/drivers/pinctrl/spear/pinctrl-spear3xx.c
index 12ee21af766b..d3119aafe709 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.c
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr3xx pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.h b/drivers/pinctrl/spear/pinctrl-spear3xx.h
index 7860b36053c4..ce19dcf8f08b 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.h
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.h
@@ -2,7 +2,7 @@
* Header file for the ST Microelectronics SPEAr3xx pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index cb1329919527..3271cd1abe7c 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -4,7 +4,6 @@
menuconfig CHROME_PLATFORMS
bool "Platform support for Chrome hardware"
- depends on X86 || ARM
---help---
Say Y here to get to see options for platform support for
various Chromebooks and Chromeboxes. This option alone does
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index ed317ccac4a2..aaeeae81e3a9 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -309,12 +309,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
static struct calling_interface_buffer *buffer;
static DEFINE_MUTEX(buffer_mutex);
-static int hwswitch_state;
+static void clear_buffer(void)
+{
+ memset(buffer, 0, sizeof(struct calling_interface_buffer));
+}
static void get_buffer(void)
{
mutex_lock(&buffer_mutex);
- memset(buffer, 0, sizeof(struct calling_interface_buffer));
+ clear_buffer();
}
static void release_buffer(void)
@@ -548,21 +551,41 @@ static int dell_rfkill_set(void *data, bool blocked)
int disable = blocked ? 1 : 0;
unsigned long radio = (unsigned long)data;
int hwswitch_bit = (unsigned long)data - 1;
+ int hwswitch;
+ int status;
+ int ret;
get_buffer();
+
+ dell_send_request(buffer, 17, 11);
+ ret = buffer->output[0];
+ status = buffer->output[1];
+
+ if (ret != 0)
+ goto out;
+
+ clear_buffer();
+
+ buffer->input[0] = 0x2;
dell_send_request(buffer, 17, 11);
+ ret = buffer->output[0];
+ hwswitch = buffer->output[1];
/* If the hardware switch controls this radio, and the hardware
switch is disabled, always disable the radio */
- if ((hwswitch_state & BIT(hwswitch_bit)) &&
- !(buffer->output[1] & BIT(16)))
+ if (ret == 0 && (hwswitch & BIT(hwswitch_bit)) &&
+ (status & BIT(0)) && !(status & BIT(16)))
disable = 1;
+ clear_buffer();
+
buffer->input[0] = (1 | (radio<<8) | (disable << 16));
dell_send_request(buffer, 17, 11);
+ ret = buffer->output[0];
+ out:
release_buffer();
- return 0;
+ return dell_smi_error(ret);
}
/* Must be called with the buffer held */
@@ -572,6 +595,7 @@ static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
if (status & BIT(0)) {
/* Has hw-switch, sync sw_state to BIOS */
int block = rfkill_blocked(rfkill);
+ clear_buffer();
buffer->input[0] = (1 | (radio << 8) | (block << 16));
dell_send_request(buffer, 17, 11);
} else {
@@ -581,23 +605,43 @@ static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
}
static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio,
- int status)
+ int status, int hwswitch)
{
- if (hwswitch_state & (BIT(radio - 1)))
+ if (hwswitch & (BIT(radio - 1)))
rfkill_set_hw_state(rfkill, !(status & BIT(16)));
}
static void dell_rfkill_query(struct rfkill *rfkill, void *data)
{
+ int radio = ((unsigned long)data & 0xF);
+ int hwswitch;
int status;
+ int ret;
get_buffer();
+
dell_send_request(buffer, 17, 11);
+ ret = buffer->output[0];
status = buffer->output[1];
- dell_rfkill_update_hw_state(rfkill, (unsigned long)data, status);
+ if (ret != 0 || !(status & BIT(0))) {
+ release_buffer();
+ return;
+ }
+
+ clear_buffer();
+
+ buffer->input[0] = 0x2;
+ dell_send_request(buffer, 17, 11);
+ ret = buffer->output[0];
+ hwswitch = buffer->output[1];
release_buffer();
+
+ if (ret != 0)
+ return;
+
+ dell_rfkill_update_hw_state(rfkill, radio, status, hwswitch);
}
static const struct rfkill_ops dell_rfkill_ops = {
@@ -609,13 +653,27 @@ static struct dentry *dell_laptop_dir;
static int dell_debugfs_show(struct seq_file *s, void *data)
{
+ int hwswitch_state;
+ int hwswitch_ret;
int status;
+ int ret;
get_buffer();
+
dell_send_request(buffer, 17, 11);
+ ret = buffer->output[0];
status = buffer->output[1];
+
+ clear_buffer();
+
+ buffer->input[0] = 0x2;
+ dell_send_request(buffer, 17, 11);
+ hwswitch_ret = buffer->output[0];
+ hwswitch_state = buffer->output[1];
+
release_buffer();
+ seq_printf(s, "return:\t%d\n", ret);
seq_printf(s, "status:\t0x%X\n", status);
seq_printf(s, "Bit 0 : Hardware switch supported: %lu\n",
status & BIT(0));
@@ -657,7 +715,8 @@ static int dell_debugfs_show(struct seq_file *s, void *data)
seq_printf(s, "Bit 21: WiGig is blocked: %lu\n",
(status & BIT(21)) >> 21);
- seq_printf(s, "\nhwswitch_state:\t0x%X\n", hwswitch_state);
+ seq_printf(s, "\nhwswitch_return:\t%d\n", hwswitch_ret);
+ seq_printf(s, "hwswitch_state:\t0x%X\n", hwswitch_state);
seq_printf(s, "Bit 0 : Wifi controlled by switch: %lu\n",
hwswitch_state & BIT(0));
seq_printf(s, "Bit 1 : Bluetooth controlled by switch: %lu\n",
@@ -693,25 +752,43 @@ static const struct file_operations dell_debugfs_fops = {
static void dell_update_rfkill(struct work_struct *ignored)
{
+ int hwswitch = 0;
int status;
+ int ret;
get_buffer();
+
dell_send_request(buffer, 17, 11);
+ ret = buffer->output[0];
status = buffer->output[1];
+ if (ret != 0)
+ goto out;
+
+ clear_buffer();
+
+ buffer->input[0] = 0x2;
+ dell_send_request(buffer, 17, 11);
+ ret = buffer->output[0];
+
+ if (ret == 0 && (status & BIT(0)))
+ hwswitch = buffer->output[1];
+
if (wifi_rfkill) {
- dell_rfkill_update_hw_state(wifi_rfkill, 1, status);
+ dell_rfkill_update_hw_state(wifi_rfkill, 1, status, hwswitch);
dell_rfkill_update_sw_state(wifi_rfkill, 1, status);
}
if (bluetooth_rfkill) {
- dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status);
+ dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status,
+ hwswitch);
dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status);
}
if (wwan_rfkill) {
- dell_rfkill_update_hw_state(wwan_rfkill, 3, status);
+ dell_rfkill_update_hw_state(wwan_rfkill, 3, status, hwswitch);
dell_rfkill_update_sw_state(wwan_rfkill, 3, status);
}
+ out:
release_buffer();
}
static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
@@ -773,21 +850,17 @@ static int __init dell_setup_rfkill(void)
get_buffer();
dell_send_request(buffer, 17, 11);
+ ret = buffer->output[0];
status = buffer->output[1];
- buffer->input[0] = 0x2;
- dell_send_request(buffer, 17, 11);
- hwswitch_state = buffer->output[1];
release_buffer();
- if (!(status & BIT(0))) {
- if (force_rfkill) {
- /* No hwsitch, clear all hw-controlled bits */
- hwswitch_state &= ~7;
- } else {
- /* rfkill is only tested on laptops with a hwswitch */
- return 0;
- }
- }
+ /* dell wireless info smbios call is not supported */
+ if (ret != 0)
+ return 0;
+
+ /* rfkill is only tested on laptops with a hwswitch */
+ if (!(status & BIT(0)) && !force_rfkill)
+ return 0;
if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev,
@@ -932,47 +1005,50 @@ static void dell_cleanup_rfkill(void)
static int dell_send_intensity(struct backlight_device *bd)
{
- int ret = 0;
+ int token;
+ int ret;
+
+ token = find_token_location(BRIGHTNESS_TOKEN);
+ if (token == -1)
+ return -ENODEV;
get_buffer();
- buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN);
+ buffer->input[0] = token;
buffer->input[1] = bd->props.brightness;
- if (buffer->input[0] == -1) {
- ret = -ENODEV;
- goto out;
- }
-
if (power_supply_is_system_supplied() > 0)
dell_send_request(buffer, 1, 2);
else
dell_send_request(buffer, 1, 1);
- out:
+ ret = dell_smi_error(buffer->output[0]);
+
release_buffer();
return ret;
}
static int dell_get_intensity(struct backlight_device *bd)
{
- int ret = 0;
+ int token;
+ int ret;
- get_buffer();
- buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN);
+ token = find_token_location(BRIGHTNESS_TOKEN);
+ if (token == -1)
+ return -ENODEV;
- if (buffer->input[0] == -1) {
- ret = -ENODEV;
- goto out;
- }
+ get_buffer();
+ buffer->input[0] = token;
if (power_supply_is_system_supplied() > 0)
dell_send_request(buffer, 0, 2);
else
dell_send_request(buffer, 0, 1);
- ret = buffer->output[1];
+ if (buffer->output[0])
+ ret = dell_smi_error(buffer->output[0]);
+ else
+ ret = buffer->output[1];
- out:
release_buffer();
return ret;
}
@@ -2036,6 +2112,7 @@ static void kbd_led_exit(void)
static int __init dell_init(void)
{
int max_intensity = 0;
+ int token;
int ret;
if (!dmi_check_system(dell_device_table))
@@ -2094,13 +2171,15 @@ static int __init dell_init(void)
if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
return 0;
- get_buffer();
- buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN);
- if (buffer->input[0] != -1) {
+ token = find_token_location(BRIGHTNESS_TOKEN);
+ if (token != -1) {
+ get_buffer();
+ buffer->input[0] = token;
dell_send_request(buffer, 0, 2);
- max_intensity = buffer->output[3];
+ if (buffer->output[0] == 0)
+ max_intensity = buffer->output[3];
+ release_buffer();
}
- release_buffer();
if (max_intensity) {
struct backlight_properties props;
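The dell-laptop hunks above drop the cached hwswitch_state in favour of querying the wireless SMBIOS call (class 17, select 11) on every path, and they check buffer->output[0] before trusting output[1]. Below is a minimal, compilable sketch of that query-then-validate flow; send_request(), the buffer layout, and the bit values are stand-ins, not the driver's real interface.

#include <stdio.h>
#include <string.h>

/* Stand-in for the SMBIOS calling-interface buffer; the real layout
 * lives in dell-laptop.c and is only sketched here. */
struct calling_buffer {
	int input[4];
	int output[4];
};

/* Hypothetical stub: a real driver would trap into firmware here. */
static void send_request(struct calling_buffer *buf, int cls, int select)
{
	(void)cls;
	(void)select;
	buf->output[0] = 0;                 /* pretend the call succeeded      */
	buf->output[1] = 0x1 | (1 << 16);   /* illustrative status bits only   */
}

static int query_hwswitch(struct calling_buffer *buf, int *hwswitch)
{
	memset(buf, 0, sizeof(*buf));
	buf->input[0] = 0x2;                /* sub-command: read hwswitch state */
	send_request(buf, 17, 11);
	if (buf->output[0] != 0)            /* validate the return code first   */
		return -1;
	*hwswitch = buf->output[1];
	return 0;
}

int main(void)
{
	struct calling_buffer buf;
	int hwswitch;

	if (query_hwswitch(&buf, &hwswitch) == 0)
		printf("hwswitch state: 0x%x\n", hwswitch);
	return 0;
}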
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index d734763dab69..105cfffe82c6 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -96,18 +96,18 @@ static struct intel_pmc_ipc_dev {
struct completion cmd_complete;
/* The following PMC BARs share the same ACPI device with the IPC */
- void *acpi_io_base;
+ resource_size_t acpi_io_base;
int acpi_io_size;
struct platform_device *tco_dev;
/* gcr */
- void *gcr_base;
+ resource_size_t gcr_base;
int gcr_size;
/* punit */
- void *punit_base;
+ resource_size_t punit_base;
int punit_size;
- void *punit_base2;
+ resource_size_t punit_base2;
int punit_size2;
struct platform_device *punit_dev;
} ipcdev;
@@ -210,10 +210,15 @@ static int intel_pmc_ipc_check_status(void)
return ret;
}
-/*
- * intel_pmc_ipc_simple_command
- * @cmd: command
- * @sub: sub type
+/**
+ * intel_pmc_ipc_simple_command() - Simple IPC command
+ * @cmd: IPC command code.
+ * @sub: IPC command sub type.
+ *
+ * Send a simple IPC command to the PMC when there is no need to specify
+ * input/output data or source/dest pointers.
+ *
+ * Return: an IPC error code or 0 on success.
*/
int intel_pmc_ipc_simple_command(int cmd, int sub)
{
@@ -232,16 +237,20 @@ int intel_pmc_ipc_simple_command(int cmd, int sub)
}
EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command);
-/*
- * intel_pmc_ipc_raw_cmd
- * @cmd: command
- * @sub: sub type
- * @in: input data
- * @inlen: input length in bytes
- * @out: output data
- * @outlen: output length in dwords
- * @sptr: data writing to SPTR register
- * @dptr: data writing to DPTR register
+/**
+ * intel_pmc_ipc_raw_cmd() - IPC command with data and pointers
+ * @cmd: IPC command code.
+ * @sub: IPC command sub type.
+ * @in: input data of this IPC command.
+ * @inlen: input data length in bytes.
+ * @out: output data of this IPC command.
+ * @outlen: output data length in dwords.
+ * @sptr: data writing to SPTR register.
+ * @dptr: data writing to DPTR register.
+ *
+ * Send an IPC command to PMC with input/output data and source/dest pointers.
+ *
+ * Return: an IPC error code or 0 on success.
*/
int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
u32 outlen, u32 dptr, u32 sptr)
@@ -278,14 +287,18 @@ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
}
EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd);
-/*
- * intel_pmc_ipc_command
- * @cmd: command
- * @sub: sub type
- * @in: input data
- * @inlen: input length in bytes
- * @out: output data
- * @outlen: output length in dwords
+/**
+ * intel_pmc_ipc_command() - IPC command with input/output data
+ * @cmd: IPC command code.
+ * @sub: IPC command sub type.
+ * @in: input data of this IPC command.
+ * @inlen: input data length in bytes.
+ * @out: output data of this IPC command.
+ * @outlen: output data length in dwords.
+ *
+ * Send an IPC command to PMC with input/output data.
+ *
+ * Return: an IPC error code or 0 on success.
*/
int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
u32 *out, u32 outlen)
@@ -480,11 +493,11 @@ static int ipc_create_punit_device(void)
pdev->dev.parent = ipcdev.dev;
res = punit_res;
- res->start = (resource_size_t)ipcdev.punit_base;
+ res->start = ipcdev.punit_base;
res->end = res->start + ipcdev.punit_size - 1;
res = punit_res + PUNIT_RESOURCE_INTER;
- res->start = (resource_size_t)ipcdev.punit_base2;
+ res->start = ipcdev.punit_base2;
res->end = res->start + ipcdev.punit_size2 - 1;
ret = platform_device_add_resources(pdev, punit_res,
@@ -522,15 +535,15 @@ static int ipc_create_tco_device(void)
pdev->dev.parent = ipcdev.dev;
res = tco_res + TCO_RESOURCE_ACPI_IO;
- res->start = (resource_size_t)ipcdev.acpi_io_base + TCO_BASE_OFFSET;
+ res->start = ipcdev.acpi_io_base + TCO_BASE_OFFSET;
res->end = res->start + TCO_REGS_SIZE - 1;
res = tco_res + TCO_RESOURCE_SMI_EN_IO;
- res->start = (resource_size_t)ipcdev.acpi_io_base + SMI_EN_OFFSET;
+ res->start = ipcdev.acpi_io_base + SMI_EN_OFFSET;
res->end = res->start + SMI_EN_SIZE - 1;
res = tco_res + TCO_RESOURCE_GCR_MEM;
- res->start = (resource_size_t)ipcdev.gcr_base;
+ res->start = ipcdev.gcr_base;
res->end = res->start + ipcdev.gcr_size - 1;
ret = platform_device_add_resources(pdev, tco_res, ARRAY_SIZE(tco_res));
@@ -589,7 +602,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
return -ENXIO;
}
size = resource_size(res);
- ipcdev.acpi_io_base = (void *)res->start;
+ ipcdev.acpi_io_base = res->start;
ipcdev.acpi_io_size = size;
dev_info(&pdev->dev, "io res: %llx %x\n",
(long long)res->start, (int)resource_size(res));
@@ -601,7 +614,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
return -ENXIO;
}
size = resource_size(res);
- ipcdev.punit_base = (void *)res->start;
+ ipcdev.punit_base = res->start;
ipcdev.punit_size = size;
dev_info(&pdev->dev, "punit data res: %llx %x\n",
(long long)res->start, (int)resource_size(res));
@@ -613,7 +626,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
return -ENXIO;
}
size = resource_size(res);
- ipcdev.punit_base2 = (void *)res->start;
+ ipcdev.punit_base2 = res->start;
ipcdev.punit_size2 = size;
dev_info(&pdev->dev, "punit interface res: %llx %x\n",
(long long)res->start, (int)resource_size(res));
@@ -637,7 +650,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
}
ipcdev.ipc_base = addr;
- ipcdev.gcr_base = (void *)(res->start + size);
+ ipcdev.gcr_base = res->start + size;
ipcdev.gcr_size = PLAT_RESOURCE_GCR_SIZE;
dev_info(&pdev->dev, "ipc res: %llx %x\n",
(long long)res->start, (int)resource_size(res));
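The intel_pmc_ipc.c hunks above switch the cached PMC BARs from void * to resource_size_t, so struct resource fields can be filled with plain integer arithmetic instead of pointer casts. A small sketch of that idea, assuming a simplified resource struct and illustrative offsets rather than the kernel definitions:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t resource_size_t;

/* Simplified stand-in for the kernel's struct resource. */
struct resource {
	resource_size_t start;
	resource_size_t end;
};

#define TCO_BASE_OFFSET 0x60   /* illustrative offset */
#define TCO_REGS_SIZE   32     /* illustrative size   */

int main(void)
{
	resource_size_t acpi_io_base = 0x400;   /* as read from a platform resource */
	struct resource tco;

	/* No void * round-trips needed: plain integer math on the base. */
	tco.start = acpi_io_base + TCO_BASE_OFFSET;
	tco.end   = tco.start + TCO_REGS_SIZE - 1;

	printf("tco: [0x%llx-0x%llx]\n",
	       (unsigned long long)tco.start, (unsigned long long)tco.end);
	return 0;
}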
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 001b199a8c33..187d1086d15c 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -216,13 +216,13 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
int nc;
u32 offset = 0;
int err;
- u8 cbuf[IPC_WWBUF_SIZE] = { };
+ u8 cbuf[IPC_WWBUF_SIZE];
u32 *wbuf = (u32 *)&cbuf;
- mutex_lock(&ipclock);
-
memset(cbuf, 0, sizeof(cbuf));
+ mutex_lock(&ipclock);
+
if (ipcdev.pdev == NULL) {
mutex_unlock(&ipclock);
return -ENODEV;
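The intel_scu_ipc hunk above moves the buffer memset ahead of mutex_lock(), so purely local initialisation stays outside the critical section. A tiny userspace sketch of that ordering, with a pthread mutex standing in for ipclock and a flag standing in for the device-present check:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t ipclock = PTHREAD_MUTEX_INITIALIZER;
static int device_present = 1;   /* stands in for ipcdev.pdev != NULL */

static int pwr_reg_rdwr_sketch(void)
{
	unsigned char cbuf[16];

	/* Cheap, purely local work happens before the lock is taken. */
	memset(cbuf, 0, sizeof(cbuf));

	pthread_mutex_lock(&ipclock);
	if (!device_present) {
		pthread_mutex_unlock(&ipclock);
		return -1;
	}
	/* ... issue the command using cbuf while holding the lock ... */
	pthread_mutex_unlock(&ipclock);
	return 0;
}

int main(void)
{
	printf("result: %d\n", pwr_reg_rdwr_sketch());
	return 0;
}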
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c
index 832932bdc977..a62a89674fb5 100644
--- a/drivers/regulator/88pm800.c
+++ b/drivers/regulator/88pm800.c
@@ -78,7 +78,6 @@ struct pm800_regulator_info {
};
struct pm800_regulators {
- struct regulator_dev *regulators[PM800_ID_RG_MAX];
struct pm80x_chip *chip;
struct regmap *map;
};
@@ -92,14 +91,16 @@ struct pm800_regulators {
* not the constant voltage table.
* n_volt - Number of available selectors
*/
-#define PM800_BUCK(vreg, ereg, ebit, amax, volt_ranges, n_volt) \
+#define PM800_BUCK(match, vreg, ereg, ebit, amax, volt_ranges, n_volt) \
{ \
.desc = { \
- .name = #vreg, \
- .ops = &pm800_volt_range_ops, \
- .type = REGULATOR_VOLTAGE, \
- .id = PM800_ID_##vreg, \
- .owner = THIS_MODULE, \
+ .name = #vreg, \
+ .of_match = of_match_ptr(#match), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .ops = &pm800_volt_range_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = PM800_ID_##vreg, \
+ .owner = THIS_MODULE, \
.n_voltages = n_volt, \
.linear_ranges = volt_ranges, \
.n_linear_ranges = ARRAY_SIZE(volt_ranges), \
@@ -108,7 +109,7 @@ struct pm800_regulators {
.enable_reg = PM800_##ereg, \
.enable_mask = 1 << (ebit), \
}, \
- .max_ua = (amax), \
+ .max_ua = (amax), \
}
/*
@@ -120,22 +121,24 @@ struct pm800_regulators {
* For all the LDOes, there are too many ranges. Using volt_table will be
* simpler and faster.
*/
-#define PM800_LDO(vreg, ereg, ebit, amax, ldo_volt_table) \
+#define PM800_LDO(match, vreg, ereg, ebit, amax, ldo_volt_table) \
{ \
.desc = { \
- .name = #vreg, \
- .ops = &pm800_volt_table_ops, \
- .type = REGULATOR_VOLTAGE, \
- .id = PM800_ID_##vreg, \
- .owner = THIS_MODULE, \
- .n_voltages = ARRAY_SIZE(ldo_volt_table), \
- .vsel_reg = PM800_##vreg##_VOUT, \
- .vsel_mask = 0x1f, \
- .enable_reg = PM800_##ereg, \
- .enable_mask = 1 << (ebit), \
- .volt_table = ldo_volt_table, \
+ .name = #vreg, \
+ .of_match = of_match_ptr(#match), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .ops = &pm800_volt_table_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = PM800_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(ldo_volt_table), \
+ .vsel_reg = PM800_##vreg##_VOUT, \
+ .vsel_mask = 0xf, \
+ .enable_reg = PM800_##ereg, \
+ .enable_mask = 1 << (ebit), \
+ .volt_table = ldo_volt_table, \
}, \
- .max_ua = (amax), \
+ .max_ua = (amax), \
}
/* Ranges are sorted in ascending order. */
@@ -178,122 +181,66 @@ static int pm800_get_current_limit(struct regulator_dev *rdev)
}
static struct regulator_ops pm800_volt_range_ops = {
- .list_voltage = regulator_list_voltage_linear_range,
- .map_voltage = regulator_map_voltage_linear_range,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .get_current_limit = pm800_get_current_limit,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_current_limit = pm800_get_current_limit,
};
static struct regulator_ops pm800_volt_table_ops = {
- .list_voltage = regulator_list_voltage_table,
- .map_voltage = regulator_map_voltage_iterate,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .get_current_limit = pm800_get_current_limit,
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_current_limit = pm800_get_current_limit,
};
/* The array is indexed by id(PM800_ID_XXX) */
static struct pm800_regulator_info pm800_regulator_info[] = {
- PM800_BUCK(BUCK1, BUCK_ENA, 0, 3000000, buck1_volt_range, 0x55),
- PM800_BUCK(BUCK2, BUCK_ENA, 1, 1200000, buck2_5_volt_range, 0x73),
- PM800_BUCK(BUCK3, BUCK_ENA, 2, 1200000, buck2_5_volt_range, 0x73),
- PM800_BUCK(BUCK4, BUCK_ENA, 3, 1200000, buck2_5_volt_range, 0x73),
- PM800_BUCK(BUCK5, BUCK_ENA, 4, 1200000, buck2_5_volt_range, 0x73),
-
- PM800_LDO(LDO1, LDO_ENA1_1, 0, 200000, ldo1_volt_table),
- PM800_LDO(LDO2, LDO_ENA1_1, 1, 10000, ldo2_volt_table),
- PM800_LDO(LDO3, LDO_ENA1_1, 2, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO4, LDO_ENA1_1, 3, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO5, LDO_ENA1_1, 4, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO6, LDO_ENA1_1, 5, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO7, LDO_ENA1_1, 6, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO8, LDO_ENA1_1, 7, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO9, LDO_ENA1_2, 0, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO10, LDO_ENA1_2, 1, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO11, LDO_ENA1_2, 2, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO12, LDO_ENA1_2, 3, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO13, LDO_ENA1_2, 4, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO14, LDO_ENA1_2, 5, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO15, LDO_ENA1_2, 6, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO16, LDO_ENA1_2, 7, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO17, LDO_ENA1_3, 0, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO18, LDO_ENA1_3, 1, 200000, ldo18_19_volt_table),
- PM800_LDO(LDO19, LDO_ENA1_3, 2, 200000, ldo18_19_volt_table),
-};
-
-#define PM800_REGULATOR_OF_MATCH(_name, _id) \
- [PM800_ID_##_id] = { \
- .name = #_name, \
- .driver_data = &pm800_regulator_info[PM800_ID_##_id], \
- }
-
-static struct of_regulator_match pm800_regulator_matches[] = {
- PM800_REGULATOR_OF_MATCH(buck1, BUCK1),
- PM800_REGULATOR_OF_MATCH(buck2, BUCK2),
- PM800_REGULATOR_OF_MATCH(buck3, BUCK3),
- PM800_REGULATOR_OF_MATCH(buck4, BUCK4),
- PM800_REGULATOR_OF_MATCH(buck5, BUCK5),
- PM800_REGULATOR_OF_MATCH(ldo1, LDO1),
- PM800_REGULATOR_OF_MATCH(ldo2, LDO2),
- PM800_REGULATOR_OF_MATCH(ldo3, LDO3),
- PM800_REGULATOR_OF_MATCH(ldo4, LDO4),
- PM800_REGULATOR_OF_MATCH(ldo5, LDO5),
- PM800_REGULATOR_OF_MATCH(ldo6, LDO6),
- PM800_REGULATOR_OF_MATCH(ldo7, LDO7),
- PM800_REGULATOR_OF_MATCH(ldo8, LDO8),
- PM800_REGULATOR_OF_MATCH(ldo9, LDO9),
- PM800_REGULATOR_OF_MATCH(ldo10, LDO10),
- PM800_REGULATOR_OF_MATCH(ldo11, LDO11),
- PM800_REGULATOR_OF_MATCH(ldo12, LDO12),
- PM800_REGULATOR_OF_MATCH(ldo13, LDO13),
- PM800_REGULATOR_OF_MATCH(ldo14, LDO14),
- PM800_REGULATOR_OF_MATCH(ldo15, LDO15),
- PM800_REGULATOR_OF_MATCH(ldo16, LDO16),
- PM800_REGULATOR_OF_MATCH(ldo17, LDO17),
- PM800_REGULATOR_OF_MATCH(ldo18, LDO18),
- PM800_REGULATOR_OF_MATCH(ldo19, LDO19),
+ PM800_BUCK(buck1, BUCK1, BUCK_ENA, 0, 3000000, buck1_volt_range, 0x55),
+ PM800_BUCK(buck2, BUCK2, BUCK_ENA, 1, 1200000, buck2_5_volt_range, 0x73),
+ PM800_BUCK(buck3, BUCK3, BUCK_ENA, 2, 1200000, buck2_5_volt_range, 0x73),
+ PM800_BUCK(buck4, BUCK4, BUCK_ENA, 3, 1200000, buck2_5_volt_range, 0x73),
+ PM800_BUCK(buck5, BUCK5, BUCK_ENA, 4, 1200000, buck2_5_volt_range, 0x73),
+
+ PM800_LDO(ldo1, LDO1, LDO_ENA1_1, 0, 200000, ldo1_volt_table),
+ PM800_LDO(ldo2, LDO2, LDO_ENA1_1, 1, 10000, ldo2_volt_table),
+ PM800_LDO(ldo3, LDO3, LDO_ENA1_1, 2, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo4, LDO4, LDO_ENA1_1, 3, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo5, LDO5, LDO_ENA1_1, 4, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo6, LDO6, LDO_ENA1_1, 5, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo7, LDO7, LDO_ENA1_1, 6, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo8, LDO8, LDO_ENA1_1, 7, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo9, LDO9, LDO_ENA1_2, 0, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo10, LDO10, LDO_ENA1_2, 1, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo11, LDO11, LDO_ENA1_2, 2, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo12, LDO12, LDO_ENA1_2, 3, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo13, LDO13, LDO_ENA1_2, 4, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo14, LDO14, LDO_ENA1_2, 5, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo15, LDO15, LDO_ENA1_2, 6, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo16, LDO16, LDO_ENA1_2, 7, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo17, LDO17, LDO_ENA1_3, 0, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo18, LDO18, LDO_ENA1_3, 1, 200000, ldo18_19_volt_table),
+ PM800_LDO(ldo19, LDO19, LDO_ENA1_3, 2, 200000, ldo18_19_volt_table),
};
-static int pm800_regulator_dt_init(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- int ret;
-
- ret = of_regulator_match(&pdev->dev, np,
- pm800_regulator_matches,
- ARRAY_SIZE(pm800_regulator_matches));
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
static int pm800_regulator_probe(struct platform_device *pdev)
{
struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct pm80x_platform_data *pdata = dev_get_platdata(pdev->dev.parent);
struct pm800_regulators *pm800_data;
- struct pm800_regulator_info *info;
struct regulator_config config = { };
struct regulator_init_data *init_data;
int i, ret;
- if (!pdata || pdata->num_regulators == 0) {
- if (IS_ENABLED(CONFIG_OF)) {
- ret = pm800_regulator_dt_init(pdev);
- if (ret)
- return ret;
- } else {
- return -ENODEV;
- }
- } else if (pdata->num_regulators) {
+ if (pdata && pdata->num_regulators) {
unsigned int count = 0;
/* Check whether num_regulator is valid. */
@@ -303,8 +250,6 @@ static int pm800_regulator_probe(struct platform_device *pdev)
}
if (count != pdata->num_regulators)
return -EINVAL;
- } else {
- return -EINVAL;
}
pm800_data = devm_kzalloc(&pdev->dev, sizeof(*pm800_data),
@@ -317,30 +262,27 @@ static int pm800_regulator_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pm800_data);
+ config.dev = chip->dev;
+ config.regmap = pm800_data->map;
for (i = 0; i < PM800_ID_RG_MAX; i++) {
- if (!pdata || pdata->num_regulators == 0)
- init_data = pm800_regulator_matches[i].init_data;
- else
+ struct regulator_dev *regulator;
+
+ if (pdata && pdata->num_regulators) {
init_data = pdata->regulators[i];
- if (!init_data)
- continue;
- info = pm800_regulator_matches[i].driver_data;
- config.dev = &pdev->dev;
- config.init_data = init_data;
- config.driver_data = info;
- config.regmap = pm800_data->map;
- config.of_node = pm800_regulator_matches[i].of_node;
-
- pm800_data->regulators[i] =
- regulator_register(&info->desc, &config);
- if (IS_ERR(pm800_data->regulators[i])) {
- ret = PTR_ERR(pm800_data->regulators[i]);
- dev_err(&pdev->dev, "Failed to register %s\n",
- info->desc.name);
+ if (!init_data)
+ continue;
- while (--i >= 0)
- regulator_unregister(pm800_data->regulators[i]);
+ config.init_data = init_data;
+ }
+
+ config.driver_data = &pm800_regulator_info[i];
+ regulator = devm_regulator_register(&pdev->dev,
+ &pm800_regulator_info[i].desc, &config);
+ if (IS_ERR(regulator)) {
+ ret = PTR_ERR(regulator);
+ dev_err(&pdev->dev, "Failed to register %s\n",
+ pm800_regulator_info[i].desc.name);
return ret;
}
}
@@ -348,23 +290,11 @@ static int pm800_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int pm800_regulator_remove(struct platform_device *pdev)
-{
- struct pm800_regulators *pm800_data = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < PM800_ID_RG_MAX; i++)
- regulator_unregister(pm800_data->regulators[i]);
-
- return 0;
-}
-
static struct platform_driver pm800_regulator_driver = {
.driver = {
.name = "88pm80x-regulator",
},
.probe = pm800_regulator_probe,
- .remove = pm800_regulator_remove,
};
module_platform_driver(pm800_regulator_driver);
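The 88pm800 rework above folds the separate of_regulator_match table into the descriptors themselves: the PM800_BUCK()/PM800_LDO() macros now stringize both the DT match name and the regulator name, and the core resolves DT nodes via of_match/regulators_node. A compilable sketch of that stringizing-macro table pattern, using a cut-down descriptor struct rather than the real regulator_desc:

#include <stdio.h>

/* Cut-down stand-in for struct regulator_desc. */
struct reg_desc {
	const char *name;      /* driver-visible name   */
	const char *of_match;  /* DT node name to match */
	int id;
};

/* Stringize both the DT match name and the C identifier, as the
 * PM800_BUCK()/PM800_LDO() macros do. */
#define REG_DESC(match, vreg, _id) \
	{ .name = #vreg, .of_match = #match, .id = (_id) }

static const struct reg_desc descs[] = {
	REG_DESC(buck1, BUCK1, 0),
	REG_DESC(ldo1,  LDO1,  5),
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(descs) / sizeof(descs[0]); i++)
		printf("%s matches DT node \"%s\" (id %d)\n",
		       descs[i].name, descs[i].of_match, descs[i].id);
	return 0;
}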
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index 48016a050d5f..ea50a886ba63 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -275,4 +275,3 @@ module_exit(ad5398_exit);
MODULE_DESCRIPTION("AD5398 and AD5821 current regulator driver");
MODULE_AUTHOR("Sonic Zhang");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("i2c:ad5398-regulator");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index c9f72019bd68..584cbbd38d47 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -109,6 +109,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
static struct regulator *create_regulator(struct regulator_dev *rdev,
struct device *dev,
const char *supply_name);
+static void _regulator_put(struct regulator *regulator);
static const char *rdev_get_name(struct regulator_dev *rdev)
{
@@ -295,7 +296,7 @@ static int regulator_check_drms(struct regulator_dev *rdev)
return -ENODEV;
}
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
- rdev_err(rdev, "operation not allowed\n");
+ rdev_dbg(rdev, "operation not allowed\n");
return -EPERM;
}
return 0;
@@ -640,6 +641,8 @@ static int drms_uA_update(struct regulator_dev *rdev)
int current_uA = 0, output_uV, input_uV, err;
unsigned int mode;
+ lockdep_assert_held_once(&rdev->mutex);
+
/*
* first check to see if we can set modes at all, otherwise just
* tell the consumer everything is OK.
@@ -760,6 +763,8 @@ static int suspend_set_state(struct regulator_dev *rdev,
/* locks held by caller */
static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
{
+ lockdep_assert_held_once(&rdev->mutex);
+
if (!rdev->constraints)
return -EINVAL;
@@ -1105,6 +1110,9 @@ static int set_supply(struct regulator_dev *rdev,
rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));
+ if (!try_module_get(supply_rdev->owner))
+ return -ENODEV;
+
rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
if (rdev->supply == NULL) {
err = -ENOMEM;
@@ -1381,9 +1389,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
}
if (!r) {
- dev_err(dev, "Failed to resolve %s-supply for %s\n",
- rdev->supply_name, rdev->desc->name);
- return -EPROBE_DEFER;
+ if (have_full_constraints()) {
+ r = dummy_regulator_rdev;
+ } else {
+ dev_err(dev, "Failed to resolve %s-supply for %s\n",
+ rdev->supply_name, rdev->desc->name);
+ return -EPROBE_DEFER;
+ }
}
/* Recursively resolve the supply of the supply */
@@ -1398,8 +1410,11 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
/* Cascade always-on state to supply */
if (_regulator_is_enabled(rdev)) {
ret = regulator_enable(rdev->supply);
- if (ret < 0)
+ if (ret < 0) {
+ if (rdev->supply)
+ _regulator_put(rdev->supply);
return ret;
+ }
}
return 0;
@@ -1584,9 +1599,11 @@ static void _regulator_put(struct regulator *regulator)
{
struct regulator_dev *rdev;
- if (regulator == NULL || IS_ERR(regulator))
+ if (IS_ERR_OR_NULL(regulator))
return;
+ lockdep_assert_held_once(&regulator_list_mutex);
+
rdev = regulator->rdev;
debugfs_remove_recursive(regulator->debugfs);
@@ -1965,6 +1982,8 @@ static int _regulator_enable(struct regulator_dev *rdev)
{
int ret;
+ lockdep_assert_held_once(&rdev->mutex);
+
/* check voltage and requested load before enabling */
if (rdev->constraints &&
(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS))
@@ -2065,6 +2084,8 @@ static int _regulator_disable(struct regulator_dev *rdev)
{
int ret = 0;
+ lockdep_assert_held_once(&rdev->mutex);
+
if (WARN(rdev->use_count <= 0,
"unbalanced disables for %s\n", rdev_get_name(rdev)))
return -EIO;
@@ -2143,6 +2164,8 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
{
int ret = 0;
+ lockdep_assert_held_once(&rdev->mutex);
+
ret = _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
REGULATOR_EVENT_PRE_DISABLE, NULL);
if (ret & NOTIFY_STOP_MASK)
@@ -2711,7 +2734,7 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
goto out;
/* If we're trying to set a range that overlaps the current voltage,
- * return succesfully even though the regulator does not support
+ * return successfully even though the regulator does not support
* changing the voltage.
*/
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
@@ -3439,6 +3462,8 @@ EXPORT_SYMBOL_GPL(regulator_bulk_free);
int regulator_notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data)
{
+ lockdep_assert_held_once(&rdev->mutex);
+
_notifier_call_chain(rdev, event, data);
return NOTIFY_DONE;
@@ -3813,11 +3838,11 @@ void regulator_unregister(struct regulator_dev *rdev)
WARN_ON(rdev->open_count);
unset_regulator_supplies(rdev);
list_del(&rdev->list);
+ mutex_unlock(&regulator_list_mutex);
kfree(rdev->constraints);
regulator_ena_gpio_free(rdev);
of_node_put(rdev->dev.of_node);
device_unregister(&rdev->dev);
- mutex_unlock(&regulator_list_mutex);
}
EXPORT_SYMBOL_GPL(regulator_unregister);
diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c
index dd76da09b3c7..5638fe8d759d 100644
--- a/drivers/regulator/da9062-regulator.c
+++ b/drivers/regulator/da9062-regulator.c
@@ -818,7 +818,6 @@ static int da9062_regulator_probe(struct platform_device *pdev)
static struct platform_driver da9062_regulator_driver = {
.driver = {
.name = "da9062-regulators",
- .owner = THIS_MODULE,
},
.probe = da9062_regulator_probe,
};
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 6f2bdad8b4d8..e94ddcf97722 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -450,7 +450,7 @@ static struct max8973_regulator_platform_data *max8973_parse_dt(
pdata->control_flags |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE;
if (of_property_read_bool(np, "maxim,enable-bias-control"))
- pdata->control_flags |= MAX8973_BIAS_ENABLE;
+ pdata->control_flags |= MAX8973_CONTROL_BIAS_ENABLE;
return pdata;
}
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index bd2b75c0d1d1..4fa7bcaf454e 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -30,6 +30,7 @@
struct pbias_reg_info {
u32 enable;
u32 enable_mask;
+ u32 disable_val;
u32 vmode;
unsigned int enable_time;
char *name;
@@ -62,6 +63,7 @@ static const struct pbias_reg_info pbias_mmc_omap2430 = {
.enable = BIT(1),
.enable_mask = BIT(1),
.vmode = BIT(0),
+ .disable_val = 0,
.enable_time = 100,
.name = "pbias_mmc_omap2430"
};
@@ -77,6 +79,7 @@ static const struct pbias_reg_info pbias_sim_omap3 = {
static const struct pbias_reg_info pbias_mmc_omap4 = {
.enable = BIT(26) | BIT(22),
.enable_mask = BIT(26) | BIT(25) | BIT(22),
+ .disable_val = BIT(25),
.vmode = BIT(21),
.enable_time = 100,
.name = "pbias_mmc_omap4"
@@ -85,6 +88,7 @@ static const struct pbias_reg_info pbias_mmc_omap4 = {
static const struct pbias_reg_info pbias_mmc_omap5 = {
.enable = BIT(27) | BIT(26),
.enable_mask = BIT(27) | BIT(25) | BIT(26),
+ .disable_val = BIT(25),
.vmode = BIT(21),
.enable_time = 100,
.name = "pbias_mmc_omap5"
@@ -159,6 +163,7 @@ static int pbias_regulator_probe(struct platform_device *pdev)
drvdata[data_idx].desc.enable_reg = res->start;
drvdata[data_idx].desc.enable_mask = info->enable_mask;
drvdata[data_idx].desc.enable_val = info->enable;
+ drvdata[data_idx].desc.disable_val = info->disable_val;
cfg.init_data = pbias_matches[idx].init_data;
cfg.driver_data = &drvdata[data_idx];
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 326ffb553371..72fc3c32db49 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -34,6 +34,8 @@
#include <linux/mfd/samsung/s2mps14.h>
#include <linux/mfd/samsung/s2mpu02.h>
+/* The highest number of possible regulators for supported devices. */
+#define S2MPS_REGULATOR_MAX S2MPS13_REGULATOR_MAX
struct s2mps11_info {
unsigned int rdev_num;
int ramp_delay2;
@@ -49,7 +51,7 @@ struct s2mps11_info {
* One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether
* the suspend mode was enabled.
*/
- unsigned long long s2mps14_suspend_state:50;
+ DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX);
/* Array of size rdev_num with GPIO-s for external sleep control */
int *ext_control_gpio;
@@ -500,7 +502,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
switch (s2mps11->dev_type) {
case S2MPS13X:
case S2MPS14X:
- if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
+ if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
val = S2MPS14_ENABLE_SUSPEND;
else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)]))
val = S2MPS14_ENABLE_EXT_CONTROL;
@@ -508,7 +510,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
val = rdev->desc->enable_mask;
break;
case S2MPU02:
- if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
+ if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
val = S2MPU02_ENABLE_SUSPEND;
else
val = rdev->desc->enable_mask;
@@ -562,7 +564,7 @@ static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev)
if (ret < 0)
return ret;
- s2mps11->s2mps14_suspend_state |= (1 << rdev_get_id(rdev));
+ set_bit(rdev_get_id(rdev), s2mps11->suspend_state);
/*
* Don't enable suspend mode if regulator is already disabled because
* this would effectively for a short time turn on the regulator after
@@ -960,18 +962,22 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
case S2MPS11X:
s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators);
regulators = s2mps11_regulators;
+ BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
break;
case S2MPS13X:
s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators);
regulators = s2mps13_regulators;
+ BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
break;
case S2MPS14X:
s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators);
regulators = s2mps14_regulators;
+ BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
break;
case S2MPU02:
s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators);
regulators = s2mpu02_regulators;
+ BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
break;
default:
dev_err(&pdev->dev, "Invalid device type: %u\n",
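The s2mps11 change above replaces the 50-bit-wide s2mps14_suspend_state bitfield with a DECLARE_BITMAP sized by S2MPS_REGULATOR_MAX, accessed through set_bit()/test_bit() and guarded by BUILD_BUG_ON in probe. A userspace sketch of the same bitmap idiom; the helpers below mimic, but are not, the kernel primitives:

#include <limits.h>
#include <stdio.h>

#define REGULATOR_MAX 50   /* illustrative; stands in for S2MPS_REGULATOR_MAX */

#define BITS_PER_LONG    (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

static void set_bit_(unsigned int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit_(unsigned int nr, const unsigned long *map)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	DECLARE_BITMAP(suspend_state, REGULATOR_MAX) = { 0 };

	set_bit_(42, suspend_state);   /* works past bit 31 without overflow tricks */
	printf("regulator 42 suspended: %d\n", test_bit_(42, suspend_state));
	printf("regulator 7  suspended: %d\n", test_bit_(7, suspend_state));
	return 0;
}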
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 4b62d1a875e4..2b08cac62f07 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -88,7 +88,7 @@ static int armada38x_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct armada38x_rtc *rtc = dev_get_drvdata(dev);
int ret = 0;
- unsigned long time, flags;
+ unsigned long time;
ret = rtc_tm_to_time(tm, &time);
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index c0090b698ff3..eab230be5a54 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -343,6 +343,8 @@ static int mtk_rtc_probe(struct platform_device *pdev)
goto out_dispose_irq;
}
+ device_init_wakeup(&pdev->dev, 1);
+
rtc->rtc_dev = rtc_device_register("mt6397-rtc", &pdev->dev,
&mtk_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc->rtc_dev)) {
@@ -351,8 +353,6 @@ static int mtk_rtc_probe(struct platform_device *pdev)
goto out_free_irq;
}
- device_init_wakeup(&pdev->dev, 1);
-
return 0;
out_free_irq:
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index 95bccfd3f169..e5225ad9c5b1 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -2,7 +2,7 @@
# Makefile for the S/390 specific device drivers
#
-obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/
+obj-y += cio/ block/ char/ crypto/ net/ scsi/ virtio/
drivers-y += drivers/s390/built-in.o
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 1aec8ff0b587..f73d2f579a7e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1863,6 +1863,33 @@ static void __dasd_device_check_expire(struct dasd_device *device)
}
/*
+ * Return 1 when the device is not eligible for IO.
+ */
+static int __dasd_device_is_unusable(struct dasd_device *device,
+ struct dasd_ccw_req *cqr)
+{
+ int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);
+
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ /* dasd is being set offline. */
+ return 1;
+ }
+ if (device->stopped) {
+ if (device->stopped & mask) {
+ /* stopped and CQR will not change that. */
+ return 1;
+ }
+ if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+ /* CQR is not able to change device to
+ * operational. */
+ return 1;
+ }
+ /* CQR required to get device operational. */
+ }
+ return 0;
+}
+
+/*
* Take a look at the first request on the ccw queue and check
* if it needs to be started.
*/
@@ -1876,13 +1903,8 @@ static void __dasd_device_start_head(struct dasd_device *device)
cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
if (cqr->status != DASD_CQR_QUEUED)
return;
- /* when device is stopped, return request to previous layer
- * exception: only the disconnect or unresumed bits are set and the
- * cqr is a path verification request
- */
- if (device->stopped &&
- !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
- && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
+ /* if device is not usable return request to upper layer */
+ if (__dasd_device_is_unusable(device, cqr)) {
cqr->intrc = -EAGAIN;
cqr->status = DASD_CQR_CLEARED;
dasd_schedule_device_bh(device);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index a2597e683e79..ee3a6faae22a 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -699,7 +699,8 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
struct dasd_device, alias_list);
spin_unlock_irqrestore(&lcu->lock, flags);
alias_priv = (struct dasd_eckd_private *) alias_device->private;
- if ((alias_priv->count < private->count) && !alias_device->stopped)
+ if ((alias_priv->count < private->count) && !alias_device->stopped &&
+ !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
return alias_device;
else
return NULL;
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index aeed7969fd79..7bc6df3100ef 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -7,6 +7,7 @@
#define KMSG_COMPONENT "sclp_early"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/errno.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/ipl.h>
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 08f1830cbfc4..01bf1f5cf2e9 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -54,6 +54,10 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
+static int zcrypt_hwrng_seed = 1;
+module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP);
+MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");
+
static DEFINE_SPINLOCK(zcrypt_device_lock);
static LIST_HEAD(zcrypt_device_list);
static int zcrypt_device_count = 0;
@@ -1373,6 +1377,7 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
static struct hwrng zcrypt_rng_dev = {
.name = "zcrypt",
.data_read = zcrypt_rng_data_read,
+ .quality = 990,
};
static int zcrypt_rng_device_add(void)
@@ -1387,6 +1392,8 @@ static int zcrypt_rng_device_add(void)
goto out;
}
zcrypt_rng_buffer_index = 0;
+ if (!zcrypt_hwrng_seed)
+ zcrypt_rng_dev.quality = 0;
rc = hwrng_register(&zcrypt_rng_dev);
if (rc)
goto out_free;
diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/virtio/Makefile
index 241891a57caf..241891a57caf 100644
--- a/drivers/s390/kvm/Makefile
+++ b/drivers/s390/virtio/Makefile
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c
index 53fb975c404b..53fb975c404b 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/virtio/kvm_virtio.c
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index f8d8fdb26b72..f8d8fdb26b72 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 26270c351624..ce129e595b55 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.17"
+#define DRV_VERSION "1.6.0.17a"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 155b286f1a9d..25436cd2860c 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -425,6 +425,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
unsigned long ptr;
struct fc_rport_priv *rdata;
spinlock_t *io_lock = NULL;
+ int io_lock_acquired = 0;
if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
return SCSI_MLQUEUE_HOST_BUSY;
@@ -518,6 +519,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
spin_lock_irqsave(io_lock, flags);
/* initialize rest of io_req */
+ io_lock_acquired = 1;
io_req->port_id = rport->port_id;
io_req->start_time = jiffies;
CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
@@ -571,7 +573,7 @@ out:
(((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
/* if only we issued IO, will we have the io lock */
- if (CMD_FLAGS(sc) & FNIC_IO_INITIALIZED)
+ if (io_lock_acquired)
spin_unlock_irqrestore(io_lock, flags);
atomic_dec(&fnic->in_flight);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 882744852aac..a9aa38903efe 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -599,9 +599,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
{
struct ipr_trace_entry *trace_entry;
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ unsigned int trace_index;
- trace_entry = &ioa_cfg->trace[atomic_add_return
- (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
+ trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
+ trace_entry = &ioa_cfg->trace[trace_index];
trace_entry->time = jiffies;
trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
trace_entry->type = type;
@@ -1051,10 +1052,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
+ unsigned int hrrq;
+
if (ioa_cfg->hrrq_num == 1)
- return 0;
- else
- return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
+ hrrq = 0;
+ else {
+ hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
+ hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
+ }
+ return hrrq;
}
/**
@@ -6263,21 +6269,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
- unsigned long hrrq_flags;
+ unsigned long lock_flags;
scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
scsi_dma_unmap(scsi_cmd);
- spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+ spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
- spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
+ spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
} else {
- spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ spin_lock(&ipr_cmd->hrrq->_lock);
ipr_erp_start(ioa_cfg, ipr_cmd);
- spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
}
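The ipr trace hunks above rely on IPR_NUM_TRACE_ENTRIES being a power of two (1 << 8): the atomically incremented index is wrapped with IPR_TRACE_INDEX_MASK instead of a modulo over the raw counter. A small C11 sketch of that wrap-with-mask idiom, with illustrative names:

#include <stdatomic.h>
#include <stdio.h>

#define NUM_TRACE_INDEX_BITS 8
#define NUM_TRACE_ENTRIES    (1u << NUM_TRACE_INDEX_BITS)
#define TRACE_INDEX_MASK     (NUM_TRACE_ENTRIES - 1)   /* valid only for powers of two */

static atomic_uint trace_index;

/* Returns the ring slot to use for the next trace entry. */
static unsigned int next_trace_slot(void)
{
	/* fetch_add returns the previous value; adding 1 mirrors the
	 * kernel's atomic_add_return() semantics before masking. */
	return (atomic_fetch_add(&trace_index, 1) + 1) & TRACE_INDEX_MASK;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("slot %u\n", next_trace_slot());
	return 0;
}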
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 73790a1d0969..6b97ee45c7b4 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1486,6 +1486,7 @@ struct ipr_ioa_cfg {
#define IPR_NUM_TRACE_INDEX_BITS 8
#define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS)
+#define IPR_TRACE_INDEX_MASK (IPR_NUM_TRACE_ENTRIES - 1)
#define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
char trace_start[8];
#define IPR_TRACE_START_LABEL "trace"
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 1b3a09473452..30f9ef0c0d4f 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
if (resp) {
resp(sp, fp, arg);
res = true;
- } else if (!IS_ERR(fp)) {
- fc_frame_free(fp);
}
spin_lock_bh(&ep->ex_lock);
@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
* If new exch resp handler is valid then call that
* first.
*/
- fc_invoke_resp(ep, sp, fp);
+ if (!fc_invoke_resp(ep, sp, fp))
+ fc_frame_free(fp);
fc_exch_release(ep);
return;
@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
fc_exch_hold(ep);
if (!rc)
fc_exch_delete(ep);
- fc_invoke_resp(ep, sp, fp);
+ if (!fc_invoke_resp(ep, sp, fp))
+ fc_frame_free(fp);
if (has_rec)
fc_exch_timer_set(ep, ep->r_a_tov);
fc_exch_release(ep);
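The fc_exch.c hunks above move frame ownership out of fc_invoke_resp(): the helper only reports whether a response handler ran, and the callers free the frame when it did not. A tiny sketch of that "free only if not consumed" rule, with the frame and handler types stubbed out:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct frame { int id; };

typedef void (*resp_handler)(struct frame *fp, void *arg);

/* Returns true when a handler took ownership of the frame. */
static bool invoke_resp(resp_handler resp, struct frame *fp, void *arg)
{
	if (!resp)
		return false;
	resp(fp, arg);
	return true;
}

static void consume(struct frame *fp, void *arg)
{
	(void)arg;
	printf("handled frame %d\n", fp->id);
	free(fp);                             /* handler owns and releases it */
}

int main(void)
{
	struct frame *a = malloc(sizeof(*a));
	struct frame *b = malloc(sizeof(*b));

	a->id = 1;
	b->id = 2;

	if (!invoke_resp(consume, a, NULL))   /* handler ran: frame already freed */
		free(a);
	if (!invoke_resp(NULL, b, NULL))      /* no handler: the caller frees it  */
		free(b);
	return 0;
}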
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index c6795941b45d..2d5909c4685c 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1039,11 +1039,26 @@ restart:
fc_fcp_pkt_hold(fsp);
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
- if (!fc_fcp_lock_pkt(fsp)) {
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ if (!(fsp->state & FC_SRB_COMPL)) {
+ fsp->state |= FC_SRB_COMPL;
+ /*
+	 * TODO: dropping scsi_pkt_lock and then reacquiring it
+	 * around fc_fcp_cleanup_cmd() is required, since
+	 * fc_fcp_cleanup_cmd() calls into fc_seq_set_resp(),
+	 * which can sleep (it calls schedule). Perhaps the
+	 * sleeping code should be removed instead of unlocking
+	 * here, to avoid a scheduling-while-atomic bug.
+ */
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+
fc_fcp_cleanup_cmd(fsp, error);
+
+ spin_lock_bh(&fsp->scsi_pkt_lock);
fc_io_compl(fsp);
- fc_fcp_unlock_pkt(fsp);
}
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
fc_fcp_pkt_release(fsp);
spin_lock_irqsave(&si->scsi_queue_lock, flags);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 8053f24f0349..98d9bb6ff725 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2941,10 +2941,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- unsigned long flags;
del_timer_sync(&conn->transport_timer);
+ mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->frwd_lock);
conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
if (session->leadconn == conn) {
@@ -2956,28 +2956,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
}
spin_unlock_bh(&session->frwd_lock);
- /*
- * Block until all in-progress commands for this connection
- * time out or fail.
- */
- for (;;) {
- spin_lock_irqsave(session->host->host_lock, flags);
- if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
- spin_unlock_irqrestore(session->host->host_lock, flags);
- break;
- }
- spin_unlock_irqrestore(session->host->host_lock, flags);
- msleep_interruptible(500);
- iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
- "host_busy %d host_failed %d\n",
- atomic_read(&session->host->host_busy),
- session->host->host_failed);
- /*
- * force eh_abort() to unblock
- */
- wake_up(&conn->ehwait);
- }
-
/* flush queued up work because we free the connection below */
iscsi_suspend_tx(conn);
@@ -2994,6 +2972,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
if (session->leadconn == conn)
session->leadconn = NULL;
spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
iscsi_destroy_conn(cls_conn);
}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 82b92c414a9c..437254e1c4de 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -738,7 +738,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
ql_log(ql_log_info, vha, 0x706f,
"Issuing MPI reset.\n");
- if (IS_QLA83XX(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
uint32_t idc_control;
qla83xx_idc_lock(vha, 0);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 0e6ee3ca30e6..8b011aef12bd 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -67,10 +67,10 @@
* | | | 0xd031-0xd0ff |
* | | | 0xd101-0xd1fe |
* | | | 0xd214-0xd2fe |
- * | Target Mode | 0xe079 | |
- * | Target Mode Management | 0xf072 | 0xf002 |
+ * | Target Mode | 0xe080 | |
+ * | Target Mode Management | 0xf096 | 0xf002 |
* | | | 0xf046-0xf049 |
- * | Target Mode Task Management | 0x1000b | |
+ * | Target Mode Task Management | 0x1000d | |
* ----------------------------------------------------------------------
*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e86201d3b8c6..9ad819edcd67 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -274,6 +274,7 @@
#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
struct req_que;
+struct qla_tgt_sess;
/*
* (sd.h is not exported, hence local inclusion)
@@ -2026,6 +2027,7 @@ typedef struct fc_port {
uint16_t port_id;
unsigned long retry_delay_timestamp;
+ struct qla_tgt_sess *tgt_session;
} fc_port_t;
#include "qla_mr.h"
@@ -3154,13 +3156,13 @@ struct qla_hw_data {
/* Bit 21 of fw_attributes decides the MCTP capabilities */
#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \
((ha)->fw_attributes_ext[0] & BIT_0))
-#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha))
-#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha))
+#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0)
-#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha))
+#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
(((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
-#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))
+#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha))
#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
@@ -3579,6 +3581,16 @@ typedef struct scsi_qla_host {
uint16_t fcoe_fcf_idx;
uint8_t fcoe_vn_port_mac[6];
+ /* list of commands waiting on workqueue */
+ struct list_head qla_cmd_list;
+ struct list_head qla_sess_op_cmd_list;
+ spinlock_t cmd_list_lock;
+
+ /* Counter to detect races between ELS and RSCN events */
+ atomic_t generation_tick;
+ /* Time when global fcport update has been scheduled */
+ int total_fcport_update_gen;
+
uint32_t vp_abort_cnt;
struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 664013115c9d..11f2f3279eab 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -115,6 +115,8 @@ qla2x00_async_iocb_timeout(void *data)
QLA_LOGIO_LOGIN_RETRIED : 0;
qla2x00_post_async_login_done_work(fcport->vha, fcport,
lio->u.logio.data);
+ } else if (sp->type == SRB_LOGOUT_CMD) {
+ qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
}
}
@@ -497,7 +499,10 @@ void
qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
+ /* Don't re-login in target mode */
+ if (!fcport->tgt_session)
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
+ qlt_logo_completion_handler(fcport, data[0]);
return;
}
@@ -1538,7 +1543,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
mem_size = (ha->fw_memory_size - 0x11000 + 1) *
sizeof(uint16_t);
} else if (IS_FWI2_CAPABLE(ha)) {
- if (IS_QLA83XX(ha))
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
else if (IS_QLA81XX(ha))
fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
@@ -1550,7 +1555,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
mem_size = (ha->fw_memory_size - 0x100000 + 1) *
sizeof(uint32_t);
if (ha->mqenable) {
- if (!IS_QLA83XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
mq_size = sizeof(struct qla2xxx_mq_chain);
/*
* Allocate maximum buffer size for all queues.
@@ -2922,21 +2927,14 @@ qla2x00_rport_del(void *data)
{
fc_port_t *fcport = data;
struct fc_rport *rport;
- scsi_qla_host_t *vha = fcport->vha;
unsigned long flags;
spin_lock_irqsave(fcport->vha->host->host_lock, flags);
rport = fcport->drport ? fcport->drport: fcport->rport;
fcport->drport = NULL;
spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
- if (rport) {
+ if (rport)
fc_remote_port_delete(rport);
- /*
- * Release the target mode FC NEXUS in qla_target.c code
- * if target mod is enabled.
- */
- qlt_fc_port_deleted(vha, fcport);
- }
}
/**
@@ -3303,6 +3301,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
* Create target mode FC NEXUS in qla_target.c if target mode is
* enabled..
*/
+
qlt_fc_port_added(vha, fcport);
spin_lock_irqsave(fcport->vha->host->host_lock, flags);
@@ -3341,8 +3340,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
if (IS_QLAFX00(vha->hw)) {
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
- qla2x00_reg_remote_port(vha, fcport);
- return;
+ goto reg_port;
}
fcport->login_retry = 0;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
@@ -3350,7 +3348,16 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
qla2x00_iidma_fcport(vha, fcport);
qla24xx_update_fcport_fcp_prio(vha, fcport);
- qla2x00_reg_remote_port(vha, fcport);
+
+reg_port:
+ if (qla_ini_mode_enabled(vha))
+ qla2x00_reg_remote_port(vha, fcport);
+ else {
+ /*
+ * Create target mode FC NEXUS in qla_target.c
+ */
+ qlt_fc_port_added(vha, fcport);
+ }
}
/*
@@ -3375,6 +3382,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
LIST_HEAD(new_fcports);
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ int discovery_gen;
/* If FL port exists, then SNS is present */
if (IS_FWI2_CAPABLE(ha))
@@ -3445,6 +3453,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
fcport->scan_state = QLA_FCPORT_SCAN;
}
+		/* Mark the time right before querying FW for connected ports.
+		 * This process is long and asynchronous, and by the time it is
+		 * done the collected information might no longer be accurate.
+		 * E.g. a disconnected port might have re-connected and a brand
+		 * new session might have been created. In that case the
+		 * session's generation will be newer than discovery_gen. */
+ qlt_do_generation_tick(vha, &discovery_gen);
+
rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
if (rval != QLA_SUCCESS)
break;
@@ -3460,20 +3476,44 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
continue;
- if (fcport->scan_state == QLA_FCPORT_SCAN &&
- atomic_read(&fcport->state) == FCS_ONLINE) {
- qla2x00_mark_device_lost(vha, fcport,
- ql2xplogiabsentdevice, 0);
- if (fcport->loop_id != FC_NO_LOOP_ID &&
- (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
- fcport->port_type != FCT_INITIATOR &&
- fcport->port_type != FCT_BROADCAST) {
- ha->isp_ops->fabric_logout(vha,
- fcport->loop_id,
- fcport->d_id.b.domain,
- fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
- qla2x00_clear_loop_id(fcport);
+ if (fcport->scan_state == QLA_FCPORT_SCAN) {
+ if (qla_ini_mode_enabled(base_vha) &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
+ qla2x00_mark_device_lost(vha, fcport,
+ ql2xplogiabsentdevice, 0);
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
+ (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+ fcport->port_type != FCT_INITIATOR &&
+ fcport->port_type != FCT_BROADCAST) {
+ ha->isp_ops->fabric_logout(vha,
+ fcport->loop_id,
+ fcport->d_id.b.domain,
+ fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
+ qla2x00_clear_loop_id(fcport);
+ }
+ } else if (!qla_ini_mode_enabled(base_vha)) {
+ /*
+ * In target mode, explicitly kill
+ * sessions and log out of devices
+ * that are gone, so that we don't
+ * end up with an initiator using the
+ * wrong ACL (if the fabric recycles
+ * an FC address and we have a stale
+ * session around) and so that we don't
+ * report initiators that are no longer
+ * on the fabric.
+ */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077,
+ "port gone, logging out/killing session: "
+ "%8phC state 0x%x flags 0x%x fc4_type 0x%x "
+ "scan_state %d\n",
+ fcport->port_name,
+ atomic_read(&fcport->state),
+ fcport->flags, fcport->fc4_type,
+ fcport->scan_state);
+ qlt_fc_port_deleted(vha, fcport,
+ discovery_gen);
}
}
}
@@ -3494,6 +3534,28 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
(fcport->flags & FCF_LOGIN_NEEDED) == 0)
continue;
+ /*
+ * If we're not an initiator, skip looking for devices
+ * and logging in. There's no reason for us to do it,
+ * and it seems to actively cause problems in target
+ * mode if we race with the initiator logging into us
+ * (we might get the "port ID used" status back from
+ * our login command and log out the initiator, which
+ * seems to cause havoc).
+ */
+ if (!qla_ini_mode_enabled(base_vha)) {
+ if (fcport->scan_state == QLA_FCPORT_FOUND) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078,
+ "port %8phC state 0x%x flags 0x%x fc4_type 0x%x "
+ "scan_state %d (initiator mode disabled; skipping "
+ "login)\n", fcport->port_name,
+ atomic_read(&fcport->state),
+ fcport->flags, fcport->fc4_type,
+ fcport->scan_state);
+ }
+ continue;
+ }
+
if (fcport->loop_id == FC_NO_LOOP_ID) {
fcport->loop_id = next_loopid;
rval = qla2x00_find_new_loop_id(
@@ -3520,16 +3582,38 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
- /* Find a new loop ID to use. */
- fcport->loop_id = next_loopid;
- rval = qla2x00_find_new_loop_id(base_vha, fcport);
- if (rval != QLA_SUCCESS) {
- /* Ran out of IDs to use */
- break;
- }
+ /*
+ * If we're not an initiator, skip looking for devices
+ * and logging in. There's no reason for us to do it,
+ * and it seems to actively cause problems in target
+ * mode if we race with the initiator logging into us
+ * (we might get the "port ID used" status back from
+ * our login command and log out the initiator, which
+ * seems to cause havoc).
+ */
+ if (qla_ini_mode_enabled(base_vha)) {
+ /* Find a new loop ID to use. */
+ fcport->loop_id = next_loopid;
+ rval = qla2x00_find_new_loop_id(base_vha,
+ fcport);
+ if (rval != QLA_SUCCESS) {
+ /* Ran out of IDs to use */
+ break;
+ }
- /* Login and update database */
- qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+ /* Login and update database */
+ qla2x00_fabric_dev_login(vha, fcport,
+ &next_loopid);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079,
+ "new port %8phC state 0x%x flags 0x%x fc4_type "
+ "0x%x scan_state %d (initiator mode disabled; "
+ "skipping login)\n",
+ fcport->port_name,
+ atomic_read(&fcport->state),
+ fcport->flags, fcport->fc4_type,
+ fcport->scan_state);
+ }
list_move_tail(&fcport->list, &vha->vp_fcports);
}
@@ -3725,11 +3809,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
fcport->fp_speed = new_fcport->fp_speed;
/*
- * If address the same and state FCS_ONLINE, nothing
- * changed.
+ * If address the same and state FCS_ONLINE
+ * (or in target mode), nothing changed.
*/
if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
- atomic_read(&fcport->state) == FCS_ONLINE) {
+ (atomic_read(&fcport->state) == FCS_ONLINE ||
+ !qla_ini_mode_enabled(base_vha))) {
break;
}
@@ -3749,6 +3834,22 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
* Log it out if still logged in and mark it for
* relogin later.
*/
+ if (!qla_ini_mode_enabled(base_vha)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
+ "port changed FC ID, %8phC"
+ " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
+ fcport->port_name,
+ fcport->d_id.b.domain,
+ fcport->d_id.b.area,
+ fcport->d_id.b.al_pa,
+ fcport->loop_id,
+ new_fcport->d_id.b.domain,
+ new_fcport->d_id.b.area,
+ new_fcport->d_id.b.al_pa);
+ fcport->d_id.b24 = new_fcport->d_id.b24;
+ break;
+ }
+
fcport->d_id.b24 = new_fcport->d_id.b24;
fcport->flags |= FCF_LOGIN_NEEDED;
if (fcport->loop_id != FC_NO_LOOP_ID &&
@@ -3768,6 +3869,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
if (found)
continue;
/* If device was not in our fcports list, then add it. */
+ new_fcport->scan_state = QLA_FCPORT_FOUND;
list_add_tail(&new_fcport->list, new_fcports);
/* Allocate a new replacement fcport. */
@@ -4188,6 +4290,14 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
spin_unlock_irqrestore(&ha->vport_slock, flags);
qla2x00_rport_del(fcport);
+
+ /*
+ * Release the target mode FC NEXUS in
+ * qla_target.c, if target mode is enabled.
+ */
+ qlt_fc_port_deleted(vha, fcport,
+ base_vha->total_fcport_update_gen);
+
spin_lock_irqsave(&ha->vport_slock, flags);
}
}
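
The qla_init.c hunks above lean on one ordering rule: take a generation snapshot before the long, asynchronous fabric scan, and later ignore any deletion decision that is older than the session it would delete. Below is a minimal userspace sketch of that pattern, under the assumption that a plain atomic counter stands in for the per-host tick; generation_snapshot() and deletion_request_stale() are illustrative names, not driver functions.

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative stand-in for the per-host generation counter. */
static atomic_int generation_tick;

struct session {
        int generation;         /* tick recorded when the session was (re)created */
};

/* Bump and publish the counter, as the scan path does before querying FW. */
static int generation_snapshot(void)
{
        return atomic_fetch_add(&generation_tick, 1) + 1;
}

/*
 * A deletion computed against discovery_gen is stale if the session was
 * re-created after the snapshot, i.e. its generation is newer.  This mirrors
 * the "max_gen - sess->generation < 0" check in qlt_fc_port_deleted().
 */
static bool deletion_request_stale(const struct session *sess, int discovery_gen)
{
        return discovery_gen - sess->generation < 0;
}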
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 36fbd4c7af8f..6f02b26a35cf 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1943,6 +1943,9 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags =
cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
+ if (!sp->fcport->tgt_session ||
+ !sp->fcport->tgt_session->keep_nport_handle)
+ logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
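
The qla_iocb.c hunk above makes freeing the firmware nport handle conditional: LCF_FREE_NPORT is set on the explicit LOGO only when no target session asked to keep the handle (keep_nport_handle is set by the PLOGI path in qla_target.c further down, when a new login is waiting to reuse it). A reduced sketch of that decision follows; the helper name is illustrative and the flag values are placeholders, not the real bit layout from qla_def.h.

#include <stdbool.h>
#include <stdint.h>

#define LCF_COMMAND_LOGO        0x0008  /* placeholder values for this sketch only */
#define LCF_IMPL_LOGO           0x0080
#define LCF_FREE_NPORT          0x0010

struct tgt_session_sketch {
        bool keep_nport_handle;
};

/* Build the LOGO control flags: free the handle unless a pending PLOGI wants it. */
static uint16_t logo_control_flags(const struct tgt_session_sketch *tgt_sess)
{
        uint16_t flags = LCF_COMMAND_LOGO | LCF_IMPL_LOGO;

        if (!tgt_sess || !tgt_sess->keep_nport_handle)
                flags |= LCF_FREE_NPORT;
        return flags;
}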
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 02b1c1c5355b..b2f713ad9034 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2415,7 +2415,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
*orig_iocb_cnt = mcp->mb[10];
if (vha->hw->flags.npiv_supported && max_npiv_vports)
*max_npiv_vports = mcp->mb[11];
- if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs)
+ if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) ||
+ IS_QLA27XX(vha->hw)) && max_fcfs)
*max_fcfs = mcp->mb[12];
}
@@ -3898,7 +3899,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(rsp->options & BIT_0)) {
WRT_REG_DWORD(rsp->rsp_q_out, 0);
- if (!IS_QLA83XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
WRT_REG_DWORD(rsp->rsp_q_in, 0);
}
@@ -5345,7 +5346,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA83XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index a28815b8276f..8a5cac8448c7 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2504,6 +2504,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
ha->gid_list_info_size = 8;
@@ -3229,11 +3230,15 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
spin_lock_irqsave(vha->host->host_lock, flags);
fcport->drport = rport;
spin_unlock_irqrestore(vha->host->host_lock, flags);
+ qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
qla2xxx_wake_dpc(base_vha);
} else {
- fc_remote_port_delete(rport);
- qlt_fc_port_deleted(vha, fcport);
+ int now;
+ if (rport)
+ fc_remote_port_delete(rport);
+ qlt_do_generation_tick(vha, &now);
+ qlt_fc_port_deleted(vha, fcport, now);
}
}
@@ -3763,8 +3768,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->vp_fcports);
INIT_LIST_HEAD(&vha->work_list);
INIT_LIST_HEAD(&vha->list);
+ INIT_LIST_HEAD(&vha->qla_cmd_list);
+ INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
spin_lock_init(&vha->work_lock);
+ spin_lock_init(&vha->cmd_list_lock);
sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
ql_dbg(ql_dbg_init, vha, 0x0041,
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 028e8c8a7de9..2feb5f38edcd 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1697,7 +1697,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha)
{
uint32_t led_select_value = 0;
- if (!IS_QLA83XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
goto out;
if (ha->port_no == 0)
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index b749026aa592..58651ecbd88c 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -113,6 +113,11 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
+static void qlt_clear_tgt_db(struct qla_tgt *tgt);
+static void qlt_send_notify_ack(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *ntfy,
+ uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
+ uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
/*
* Global Variables
*/
@@ -122,6 +127,16 @@ static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);
+/* This API intentionally takes dest as a parameter rather than returning
+ * an int value, to avoid the caller forgetting to issue wmb() after the store */
+void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
+{
+ scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
+ *dest = atomic_inc_return(&base_vha->generation_tick);
+ /* memory barrier */
+ wmb();
+}
+
/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
static struct qla_tgt_sess *qlt_find_sess_by_port_name(
struct qla_tgt *tgt,
@@ -381,14 +396,73 @@ static void qlt_free_session_done(struct work_struct *work)
struct qla_tgt *tgt = sess->tgt;
struct scsi_qla_host *vha = sess->vha;
struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+ bool logout_started = false;
+ fc_port_t fcport;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
+ "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
+ " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n",
+ __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
+ sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
+ sess->logout_on_delete, sess->keep_nport_handle,
+ sess->plogi_ack_needed);
BUG_ON(!tgt);
+
+ if (sess->logout_on_delete) {
+ int rc;
+
+ memset(&fcport, 0, sizeof(fcport));
+ fcport.loop_id = sess->loop_id;
+ fcport.d_id = sess->s_id;
+ memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
+ fcport.vha = vha;
+ fcport.tgt_session = sess;
+
+ rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
+ if (rc != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0xf085,
+ "Schedule logo failed sess %p rc %d\n",
+ sess, rc);
+ else
+ logout_started = true;
+ }
+
/*
* Release the target session for FC Nexus from fabric module code.
*/
if (sess->se_sess != NULL)
ha->tgt.tgt_ops->free_session(sess);
+ if (logout_started) {
+ bool traced = false;
+
+ while (!ACCESS_ONCE(sess->logout_completed)) {
+ if (!traced) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
+ "%s: waiting for sess %p logout\n",
+ __func__, sess);
+ traced = true;
+ }
+ msleep(100);
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
+ "%s: sess %p logout completed\n",
+ __func__, sess);
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (sess->plogi_ack_needed)
+ qlt_send_notify_ack(vha, &sess->tm_iocb,
+ 0, 0, 0, 0, 0, 0);
+
+ list_del(&sess->sess_list_entry);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
"Unregistration of sess %p finished\n", sess);
@@ -409,9 +483,9 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
- list_del(&sess->sess_list_entry);
- if (sess->deleted)
- list_del(&sess->del_list_entry);
+ if (!list_empty(&sess->del_list_entry))
+ list_del_init(&sess->del_list_entry);
+ sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
INIT_WORK(&sess->free_work, qlt_free_session_done);
schedule_work(&sess->free_work);
@@ -431,10 +505,10 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
loop_id = le16_to_cpu(n->u.isp24.nport_handle);
if (loop_id == 0xFFFF) {
-#if 0 /* FIXME: Re-enable Global event handling.. */
/* Global event */
- atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
- qlt_clear_tgt_db(ha->tgt.qla_tgt);
+ atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
+ qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
+#if 0 /* FIXME: do we need to choose a session here? */
if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
typeof(*sess), sess_list_entry);
@@ -489,27 +563,38 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
struct qla_tgt *tgt = sess->tgt;
uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
- if (sess->deleted)
- return;
+ if (sess->deleted) {
+ /* Upgrade to unconditional deletion in case it was temporary */
+ if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
+ list_del(&sess->del_list_entry);
+ else
+ return;
+ }
ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
"Scheduling sess %p for deletion\n", sess);
- list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
- sess->deleted = 1;
- if (immediate)
+ if (immediate) {
dev_loss_tmo = 0;
+ sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+ list_add(&sess->del_list_entry, &tgt->del_sess_list);
+ } else {
+ sess->deleted = QLA_SESS_DELETION_PENDING;
+ list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
+ }
sess->expires = jiffies + dev_loss_tmo * HZ;
ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
- "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
- "deletion in %u secs (expires: %lu) immed: %d\n",
- sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
- sess->expires, immediate);
+ "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
+ " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
+ sess->vha->vp_idx, sess->port_name, sess->loop_id,
+ sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
+ dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
+ sess->generation);
if (immediate)
- schedule_delayed_work(&tgt->sess_del_work, 0);
+ mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
else
schedule_delayed_work(&tgt->sess_del_work,
sess->expires - jiffies);
@@ -578,9 +663,9 @@ out_free_id_list:
/* ha->hardware_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
- BUG_ON(!sess->deleted);
+ BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
- list_del(&sess->del_list_entry);
+ list_del_init(&sess->del_list_entry);
sess->deleted = 0;
}
@@ -599,7 +684,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
del_list_entry);
elapsed = jiffies;
if (time_after_eq(elapsed, sess->expires)) {
- qlt_undelete_sess(sess);
+ /* No turning back */
+ list_del_init(&sess->del_list_entry);
+ sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
"Timeout: sess %p about to be deleted\n",
@@ -643,6 +730,13 @@ static struct qla_tgt_sess *qlt_create_sess(
fcport->d_id.b.al_pa, fcport->d_id.b.area,
fcport->loop_id);
+ /* Cannot undelete at this point */
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
+ return NULL;
+ }
+
if (sess->deleted)
qlt_undelete_sess(sess);
@@ -652,6 +746,9 @@ static struct qla_tgt_sess *qlt_create_sess(
if (sess->local && !local)
sess->local = 0;
+
+ qlt_do_generation_tick(vha, &sess->generation);
+
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return sess;
@@ -673,6 +770,14 @@ static struct qla_tgt_sess *qlt_create_sess(
sess->s_id = fcport->d_id;
sess->loop_id = fcport->loop_id;
sess->local = local;
+ INIT_LIST_HEAD(&sess->del_list_entry);
+
+ /* Under normal circumstances we want to log out from firmware when the
+ * session eventually ends and release the corresponding nport handle.
+ * In the exceptional cases (e.g. when a new PLOGI is waiting) the
+ * corresponding code will adjust these flags as necessary. */
+ sess->logout_on_delete = 1;
+ sess->keep_nport_handle = 0;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
"Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
@@ -705,6 +810,7 @@ static struct qla_tgt_sess *qlt_create_sess(
spin_lock_irqsave(&ha->hardware_lock, flags);
list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
vha->vha_tgt.qla_tgt->sess_count++;
+ qlt_do_generation_tick(vha, &sess->generation);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
@@ -718,7 +824,7 @@ static struct qla_tgt_sess *qlt_create_sess(
}
/*
- * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
+ * Called from qla2x00_reg_remote_port()
*/
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
@@ -750,6 +856,10 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
mutex_unlock(&vha->vha_tgt.tgt_mutex);
spin_lock_irqsave(&ha->hardware_lock, flags);
+ } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ /* Point of no return */
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
} else {
kref_get(&sess->se_sess->sess_kref);
@@ -780,27 +890,36 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
-void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
+/*
+ * max_gen - specifies the maximum session generation
+ * at which this deletion request is still valid
+ */
+void
+qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
- struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_sess *sess;
- unsigned long flags;
if (!vha->hw->tgt.tgt_ops)
return;
- if (!tgt || (fcport->port_type != FCT_INITIATOR))
+ if (!tgt)
return;
- spin_lock_irqsave(&ha->hardware_lock, flags);
if (tgt->tgt_stop) {
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
return;
}
sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
if (!sess) {
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+ }
+
+ if (max_gen - sess->generation < 0) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
+ "Ignoring stale deletion request for se_sess %p / sess %p"
+ " for port %8phC, req_gen %d, sess_gen %d\n",
+ sess->se_sess, sess, sess->port_name, max_gen,
+ sess->generation);
return;
}
@@ -808,7 +927,6 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
sess->local = 1;
qlt_schedule_sess_for_deletion(sess, false);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
@@ -1175,6 +1293,70 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
FCP_TMF_CMPL, true);
}
+static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
+{
+ struct qla_tgt_sess_op *op;
+ struct qla_tgt_cmd *cmd;
+
+ spin_lock(&vha->cmd_list_lock);
+
+ list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+ if (tag == op->atio.u.isp24.exchange_addr) {
+ op->aborted = true;
+ spin_unlock(&vha->cmd_list_lock);
+ return 1;
+ }
+ }
+
+ list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+ if (tag == cmd->atio.u.isp24.exchange_addr) {
+ cmd->state = QLA_TGT_STATE_ABORTED;
+ spin_unlock(&vha->cmd_list_lock);
+ return 1;
+ }
+ }
+
+ spin_unlock(&vha->cmd_list_lock);
+ return 0;
+}
+
+/* drop cmds for the given lun
+ * XXX only looks for cmds on the port through which the lun reset was received
+ * XXX does not go through the lists of other ports (which may have cmds
+ * for the same lun)
+ */
+static void abort_cmds_for_lun(struct scsi_qla_host *vha,
+ uint32_t lun, uint8_t *s_id)
+{
+ struct qla_tgt_sess_op *op;
+ struct qla_tgt_cmd *cmd;
+ uint32_t key;
+
+ key = sid_to_key(s_id);
+ spin_lock(&vha->cmd_list_lock);
+ list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+ uint32_t op_key;
+ uint32_t op_lun;
+
+ op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+ op_lun = scsilun_to_int(
+ (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
+ if (op_key == key && op_lun == lun)
+ op->aborted = true;
+ }
+ list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+ uint32_t cmd_key;
+ uint32_t cmd_lun;
+
+ cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
+ cmd_lun = scsilun_to_int(
+ (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
+ if (cmd_key == key && cmd_lun == lun)
+ cmd->state = QLA_TGT_STATE_ABORTED;
+ }
+ spin_unlock(&vha->cmd_list_lock);
+}
+
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
@@ -1199,8 +1381,19 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
}
spin_unlock(&se_sess->sess_cmd_lock);
- if (!found_lun)
- return -ENOENT;
+ /* cmd not in LIO lists, look in qla list */
+ if (!found_lun) {
+ if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
+ /* send TASK_ABORT response immediately */
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
+ return 0;
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
+ "unable to find cmd in driver or LIO for tag 0x%x\n",
+ abts->exchange_addr_to_abort);
+ return -ENOENT;
+ }
+ }
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
"qla_target(%d): task abort (tag=%d)\n",
@@ -1284,6 +1477,11 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
return;
}
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ return;
+ }
+
rc = __qlt_24xx_handle_abts(vha, abts, sess);
if (rc != 0) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
@@ -1726,20 +1924,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
struct qla_hw_data *ha = vha->hw;
struct se_cmd *se_cmd = &cmd->se_cmd;
- if (unlikely(cmd->aborted)) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
- "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%lld)",
- vha->vp_idx, cmd, se_cmd, se_cmd->tag);
-
- cmd->state = QLA_TGT_STATE_ABORTED;
- cmd->cmd_flags |= BIT_6;
-
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
-
- /* !! At this point cmd could be already freed !! */
- return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
- }
-
prm->cmd = cmd;
prm->tgt = tgt;
prm->rq_result = scsi_status;
@@ -2301,6 +2485,19 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
unsigned long flags = 0;
int res;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ cmd->state = QLA_TGT_STATE_PROCESSED;
+ if (cmd->sess->logout_completed)
+ /* no need to terminate. FW already freed exchange. */
+ qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+ else
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
memset(&prm, 0, sizeof(prm));
qlt_check_srr_debug(cmd, &xmit_type);
@@ -2313,9 +2510,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
&full_req_cnt);
if (unlikely(res != 0)) {
- if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
- return 0;
-
return res;
}
@@ -2345,9 +2539,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
res = qlt_build_ctio_crc2_pkt(&prm, vha);
else
res = qlt_24xx_build_ctio_pkt(&prm, vha);
- if (unlikely(res != 0))
+ if (unlikely(res != 0)) {
+ vha->req->cnt += full_req_cnt;
goto out_unmap_unlock;
-
+ }
pkt = (struct ctio7_to_24xx *)prm.pkt;
@@ -2461,7 +2656,8 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
+ if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) ||
+ (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
/*
* Either a chip reset is active or this request was from
* previous life, just abort the processing.
@@ -2485,8 +2681,11 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
else
res = qlt_24xx_build_ctio_pkt(&prm, vha);
- if (unlikely(res != 0))
+ if (unlikely(res != 0)) {
+ vha->req->cnt += prm.req_cnt;
goto out_unlock_free_unmap;
+ }
+
pkt = (struct ctio7_to_24xx *)prm.pkt;
pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
CTIO7_FLAGS_STATUS_MODE_0);
@@ -2651,6 +2850,89 @@ out:
/* If hardware_lock held on entry, might drop it, then reacquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
+static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *ntfy)
+{
+ struct nack_to_isp *nack;
+ struct qla_hw_data *ha = vha->hw;
+ request_t *pkt;
+ int ret = 0;
+
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
+ "Sending TERM ELS CTIO (ha=%p)\n", ha);
+
+ pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
+ if (pkt == NULL) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe080,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet\n", vha->vp_idx, __func__);
+ return -ENOMEM;
+ }
+
+ pkt->entry_type = NOTIFY_ACK_TYPE;
+ pkt->entry_count = 1;
+ pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
+ nack = (struct nack_to_isp *)pkt;
+ nack->ox_id = ntfy->ox_id;
+
+ nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
+ if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
+ nack->u.isp24.flags = ntfy->u.isp24.flags &
+ __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ }
+
+ /* terminate */
+ nack->u.isp24.flags |=
+ __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
+
+ nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
+ nack->u.isp24.status = ntfy->u.isp24.status;
+ nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+ nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
+ nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
+ nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
+ nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
+ nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+
+ qla2x00_start_iocbs(vha, vha->req);
+ return ret;
+}
+
+static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *imm, int ha_locked)
+{
+ unsigned long flags = 0;
+ int rc;
+
+ if (qlt_issue_marker(vha, ha_locked) < 0)
+ return;
+
+ if (ha_locked) {
+ rc = __qlt_send_term_imm_notif(vha, imm);
+
+#if 0 /* Todo */
+ if (rc == -ENOMEM)
+ qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#endif
+ goto done;
+ }
+
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ rc = __qlt_send_term_imm_notif(vha, imm);
+
+#if 0 /* Todo */
+ if (rc == -ENOMEM)
+ qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#endif
+
+done:
+ if (!ha_locked)
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
+/* If hardware_lock held on entry, might drop it, then reacquire */
+/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
struct qla_tgt_cmd *cmd,
struct atio_from_isp *atio)
@@ -2715,7 +2997,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
static void qlt_send_term_exchange(struct scsi_qla_host *vha,
struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
{
- unsigned long flags;
+ unsigned long flags = 0;
int rc;
if (qlt_issue_marker(vha, ha_locked) < 0)
@@ -2731,17 +3013,18 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
rc = __qlt_send_term_exchange(vha, cmd, atio);
if (rc == -ENOMEM)
qlt_alloc_qfull_cmd(vha, atio, 0, 0);
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
done:
if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
!cmd->cmd_sent_to_fw)) {
- if (!ha_locked && !in_interrupt())
- msleep(250); /* just in case */
-
- qlt_unmap_sg(vha, cmd);
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
vha->hw->tgt.tgt_ops->free_cmd(cmd);
}
+
+ if (!ha_locked)
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
return;
}
@@ -2792,6 +3075,24 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
}
+void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
+{
+ struct qla_tgt *tgt = cmd->tgt;
+ struct scsi_qla_host *vha = tgt->vha;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
+ "qla_target(%d): terminating exchange for aborted cmd=%p "
+ "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
+ se_cmd->tag);
+
+ cmd->state = QLA_TGT_STATE_ABORTED;
+ cmd->cmd_flags |= BIT_6;
+
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
+}
+EXPORT_SYMBOL(qlt_abort_cmd);
+
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
struct qla_tgt_sess *sess = cmd->sess;
@@ -3015,7 +3316,7 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
dump_stack();
}
- cmd->cmd_flags |= BIT_12;
+ cmd->cmd_flags |= BIT_17;
ha->tgt.tgt_ops->free_cmd(cmd);
}
@@ -3177,7 +3478,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
skip_term:
if (cmd->state == QLA_TGT_STATE_PROCESSED) {
- ;
+ cmd->cmd_flags |= BIT_12;
} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
int rx_status = 0;
@@ -3191,9 +3492,11 @@ skip_term:
ha->tgt.tgt_ops->handle_data(cmd);
return;
} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
+ cmd->cmd_flags |= BIT_18;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
"Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
} else {
+ cmd->cmd_flags |= BIT_19;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
"qla_target(%d): A command in state (%d) should "
"not return a CTIO complete\n", vha->vp_idx, cmd->state);
@@ -3205,7 +3508,6 @@ skip_term:
dump_stack();
}
-
ha->tgt.tgt_ops->free_cmd(cmd);
}
@@ -3263,6 +3565,13 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
if (tgt->tgt_stop)
goto out_term;
+ if (cmd->state == QLA_TGT_STATE_ABORTED) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
+ "cmd with tag %u is aborted\n",
+ cmd->atio.u.isp24.exchange_addr);
+ goto out_term;
+ }
+
cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
cmd->unpacked_lun = scsilun_to_int(
@@ -3316,6 +3625,12 @@ out_term:
static void qlt_do_work(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ scsi_qla_host_t *vha = cmd->vha;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vha->cmd_list_lock, flags);
+ list_del(&cmd->cmd_list);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
__qlt_do_work(cmd);
}
@@ -3345,6 +3660,11 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
cmd->loop_id = sess->loop_id;
cmd->conf_compl_supported = sess->conf_compl_supported;
+ cmd->cmd_flags = 0;
+ cmd->jiffies_at_alloc = get_jiffies_64();
+
+ cmd->reset_count = vha->hw->chip_reset;
+
return cmd;
}
@@ -3362,14 +3682,25 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
unsigned long flags;
uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
+ spin_lock_irqsave(&vha->cmd_list_lock, flags);
+ list_del(&op->cmd_list);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+
+ if (op->aborted) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
+ "sess_op with tag %u is aborted\n",
+ op->atio.u.isp24.exchange_addr);
+ goto out_term;
+ }
+
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
- "qla_target(%d): Unable to find wwn login"
- " (s_id %x:%x:%x), trying to create it manually\n",
- vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+ "qla_target(%d): Unable to find wwn login"
+ " (s_id %x:%x:%x), trying to create it manually\n",
+ vha->vp_idx, s_id[0], s_id[1], s_id[2]);
if (op->atio.u.raw.entry_count > 1) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
- "Dropping multy entry atio %p\n", &op->atio);
+ "Dropping multy entry atio %p\n", &op->atio);
goto out_term;
}
@@ -3434,10 +3765,25 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
memcpy(&op->atio, atio, sizeof(*atio));
op->vha = vha;
+
+ spin_lock(&vha->cmd_list_lock);
+ list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
+ spin_unlock(&vha->cmd_list_lock);
+
INIT_WORK(&op->work, qlt_create_sess_from_atio);
queue_work(qla_tgt_wq, &op->work);
return 0;
}
+
+ /* Another WWN used to have our s_id. Our PLOGI scheduled its
+ * session deletion, but it's still in sess_del_work wq */
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ ql_dbg(ql_dbg_io, vha, 0x3061,
+ "New command while old session %p is being deleted\n",
+ sess);
+ return -EFAULT;
+ }
+
/*
* Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
*/
@@ -3451,13 +3797,13 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
return -ENOMEM;
}
- cmd->cmd_flags = 0;
- cmd->jiffies_at_alloc = get_jiffies_64();
-
- cmd->reset_count = vha->hw->chip_reset;
-
cmd->cmd_in_wq = 1;
cmd->cmd_flags |= BIT_0;
+
+ spin_lock(&vha->cmd_list_lock);
+ list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
+ spin_unlock(&vha->cmd_list_lock);
+
INIT_WORK(&cmd->work, qlt_do_work);
queue_work(qla_tgt_wq, &cmd->work);
return 0;
@@ -3471,6 +3817,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
struct scsi_qla_host *vha = sess->vha;
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_mgmt_cmd *mcmd;
+ struct atio_from_isp *a = (struct atio_from_isp *)iocb;
int res;
uint8_t tmr_func;
@@ -3511,6 +3858,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
"qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
tmr_func = TMR_LUN_RESET;
+ abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
break;
case QLA_TGT_CLEAR_TS:
@@ -3599,6 +3947,9 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
sizeof(struct atio_from_isp));
}
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
+ return -EFAULT;
+
return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}
@@ -3664,22 +4015,280 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
return __qlt_abort_task(vha, iocb, sess);
}
+void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
+{
+ if (fcport->tgt_session) {
+ if (rc != MBS_COMMAND_COMPLETE) {
+ ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
+ "%s: se_sess %p / sess %p from"
+ " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
+ " LOGO failed: %#x\n",
+ __func__,
+ fcport->tgt_session->se_sess,
+ fcport->tgt_session,
+ fcport->port_name, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, rc);
+ }
+
+ fcport->tgt_session->logout_completed = 1;
+ }
+}
+
+static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
+ struct imm_ntfy_from_isp *b)
+{
+ struct imm_ntfy_from_isp tmp;
+ memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp));
+ memcpy(a, b, sizeof(struct imm_ntfy_from_isp));
+ memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp));
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
+ *
+ * Schedules sessions with matching port_id/loop_id but different wwn for
+ * deletion. Returns the existing session with matching wwn if present,
+ * NULL otherwise.
+ */
+static struct qla_tgt_sess *
+qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
+ port_id_t port_id, uint16_t loop_id)
+{
+ struct qla_tgt_sess *sess = NULL, *other_sess;
+ uint64_t other_wwn;
+
+ list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
+
+ other_wwn = wwn_to_u64(other_sess->port_name);
+
+ if (wwn == other_wwn) {
+ WARN_ON(sess);
+ sess = other_sess;
+ continue;
+ }
+
+ /* find other sess with nport_id collision */
+ if (port_id.b24 == other_sess->s_id.b24) {
+ if (loop_id != other_sess->loop_id) {
+ ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
+ "Invalidating sess %p loop_id %d wwn %llx.\n",
+ other_sess, other_sess->loop_id, other_wwn);
+
+ /*
+ * logout_on_delete is set by default, but another
+ * session that has the same s_id/loop_id combo
+ * might have cleared it when it requested this
+ * session's deletion, so don't touch it
+ */
+ qlt_schedule_sess_for_deletion(other_sess, true);
+ } else {
+ /*
+ * Another wwn used to have our s_id/loop_id
+ * combo - kill the session, but don't log out
+ */
+ sess->logout_on_delete = 0;
+ qlt_schedule_sess_for_deletion(other_sess,
+ true);
+ }
+ continue;
+ }
+
+ /* find other sess with nport handle collision */
+ if (loop_id == other_sess->loop_id) {
+ ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
+ "Invalidating sess %p loop_id %d wwn %llx.\n",
+ other_sess, other_sess->loop_id, other_wwn);
+
+ /* Same loop_id but different s_id
+ * Ok to kill and logout */
+ qlt_schedule_sess_for_deletion(other_sess, true);
+ }
+ }
+
+ return sess;
+}
+
+/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
+static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
+{
+ struct qla_tgt_sess_op *op;
+ struct qla_tgt_cmd *cmd;
+ uint32_t key;
+ int count = 0;
+
+ key = (((u32)s_id->b.domain << 16) |
+ ((u32)s_id->b.area << 8) |
+ ((u32)s_id->b.al_pa));
+
+ spin_lock(&vha->cmd_list_lock);
+ list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+ uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+ if (op_key == key) {
+ op->aborted = true;
+ count++;
+ }
+ }
+ list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+ uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
+ if (cmd_key == key) {
+ cmd->state = QLA_TGT_STATE_ABORTED;
+ count++;
+ }
+ }
+ spin_unlock(&vha->cmd_list_lock);
+
+ return count;
+}
+
/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
struct imm_ntfy_from_isp *iocb)
{
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess = NULL;
+ uint64_t wwn;
+ port_id_t port_id;
+ uint16_t loop_id;
+ uint16_t wd3_lo;
int res = 0;
+ wwn = wwn_to_u64(iocb->u.isp24.port_name);
+
+ port_id.b.domain = iocb->u.isp24.port_id[2];
+ port_id.b.area = iocb->u.isp24.port_id[1];
+ port_id.b.al_pa = iocb->u.isp24.port_id[0];
+ port_id.b.rsvd_1 = 0;
+
+ loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
+
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
"qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
+ /* res = 1 means ack at the end of the thread
+ * res = 0 means ack async/later.
+ */
switch (iocb->u.isp24.status_subcode) {
case ELS_PLOGI:
- case ELS_FLOGI:
+
+ /* Mark all stale commands in qla_tgt_wq for deletion */
+ abort_cmds_for_s_id(vha, &port_id);
+
+ if (wwn)
+ sess = qlt_find_sess_invalidate_other(tgt, wwn,
+ port_id, loop_id);
+
+ if (!sess || IS_SW_RESV_ADDR(sess->s_id)) {
+ res = 1;
+ break;
+ }
+
+ if (sess->plogi_ack_needed) {
+ /*
+ * Initiator sent another PLOGI before the last PLOGI could
+ * finish. Swap the plogi iocbs and terminate the old one
+ * without acking; the new one will get acked when session
+ * deletion completes.
+ */
+ ql_log(ql_log_warn, sess->vha, 0xf094,
+ "sess %p received double plogi.\n", sess);
+
+ qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb);
+
+ qlt_send_term_imm_notif(vha, iocb, 1);
+
+ res = 0;
+ break;
+ }
+
+ res = 0;
+
+ /*
+ * Save immediate Notif IOCB for Ack when sess is done
+ * and being deleted.
+ */
+ memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb));
+ sess->plogi_ack_needed = 1;
+
+ /*
+ * Under normal circumstances we want to release the nport handle
+ * during the LOGO process to avoid nport handle leaks inside FW.
+ * The exception is when LOGO is done while another PLOGI with
+ * the same nport handle is waiting, as might be the case here.
+ * Note: there is always a possibility of a race where a session
+ * deletion has already started for other reasons (e.g. ACL
+ * removal) and now PLOGI arrives:
+ * 1. if PLOGI arrived in FW after nport handle has been freed,
+ * FW must have assigned this PLOGI a new/same handle and we
+ * can proceed ACK'ing it as usual when session deletion
+ * completes.
+ * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
+ * bit reached it, the handle has now been released. We'll
+ * get an error when we ACK this PLOGI. Nothing will be sent
+ * back to the initiator. The initiator should eventually retry
+ * PLOGI and the situation will correct itself.
+ */
+ sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
+ (sess->s_id.b24 == port_id.b24));
+ qlt_schedule_sess_for_deletion(sess, true);
+ break;
+
case ELS_PRLI:
+ wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
+
+ if (wwn)
+ sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
+ loop_id);
+
+ if (sess != NULL) {
+ if (sess->deleted) {
+ /*
+ * Impatient initiator sent PRLI before the last
+ * PLOGI could finish. Force it to retry while
+ * the last one finishes.
+ */
+ ql_log(ql_log_warn, sess->vha, 0xf095,
+ "sess %p PRLI received, before plogi ack.\n",
+ sess);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ res = 0;
+ break;
+ }
+
+ /*
+ * This shouldn't happen under normal circumstances,
+ * since we have deleted the old session during PLOGI
+ */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
+ "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
+ sess->loop_id, sess, iocb->u.isp24.nport_handle);
+
+ sess->local = 0;
+ sess->loop_id = loop_id;
+ sess->s_id = port_id;
+
+ if (wd3_lo & BIT_7)
+ sess->conf_compl_supported = 1;
+
+ }
+ res = 1; /* send notify ack */
+
+ /* Make session global (not used in fabric mode) */
+ if (ha->current_topology != ISP_CFG_F) {
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else {
+ /* todo: else - create sess here. */
+ res = 1; /* send notify ack */
+ }
+
+ break;
+
case ELS_LOGO:
case ELS_PRLO:
res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
@@ -3697,6 +4306,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
break;
}
+ case ELS_FLOGI: /* should never happen */
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
"qla_target(%d): Unsupported ELS command %x "
@@ -5012,6 +5622,11 @@ static void qlt_abort_work(struct qla_tgt *tgt,
if (!sess)
goto out_term;
} else {
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ sess = NULL;
+ goto out_term;
+ }
+
kref_get(&sess->se_sess->sess_kref);
}
@@ -5066,6 +5681,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
if (!sess)
goto out_term;
} else {
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ sess = NULL;
+ goto out_term;
+ }
+
kref_get(&sess->se_sess->sess_kref);
}
@@ -5552,6 +6172,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
/* Adjust ring index */
WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
+ RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
}
void
@@ -5793,7 +6414,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
if (!QLA_TGT_MODE_ENABLED())
return;
- if (ha->mqenable || IS_QLA83XX(ha)) {
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
} else {
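
Several of the qla_target.c hunks above replace the old boolean "deleted" flag with a three-state model (see the QLA_SESS_DELETION_* enum in the header hunk below): a PENDING deletion can still be undone if the initiator comes back, while IN_PROGRESS is a point of no return. A hedged sketch of that state machine follows; the names are illustrative stand-ins, not the driver's API.

#include <stdbool.h>

/* Illustrative mirror of QLA_SESS_DELETION_NONE/PENDING/IN_PROGRESS. */
enum del_state { DEL_NONE, DEL_PENDING, DEL_IN_PROGRESS };

struct sess_sketch {
        enum del_state deleted;
};

/* immediate == true upgrades a timed (PENDING) deletion to an unconditional one. */
static void schedule_deletion(struct sess_sketch *s, bool immediate)
{
        if (s->deleted == DEL_IN_PROGRESS)
                return;                 /* already past the point of no return */
        s->deleted = immediate ? DEL_IN_PROGRESS : DEL_PENDING;
}

/* Only a PENDING deletion may be cancelled when the initiator returns. */
static bool try_undelete(struct sess_sketch *s)
{
        if (s->deleted != DEL_PENDING)
                return false;           /* caller must create a fresh session instead */
        s->deleted = DEL_NONE;
        return true;
}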
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 985d76dd706b..bca584ae45b7 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -167,7 +167,24 @@ struct imm_ntfy_from_isp {
uint32_t srr_rel_offs;
uint16_t srr_ui;
uint16_t srr_ox_id;
- uint8_t reserved_4[19];
+ union {
+ struct {
+ uint8_t node_name[8];
+ } plogi; /* PLOGI/ADISC/PDISC */
+ struct {
+ /* PRLI word 3 bit 0-15 */
+ uint16_t wd3_lo;
+ uint8_t resv0[6];
+ } prli;
+ struct {
+ uint8_t port_id[3];
+ uint8_t resv1;
+ uint16_t nport_handle;
+ uint16_t resv2;
+ } req_els;
+ } u;
+ uint8_t port_name[8];
+ uint8_t resv3[3];
uint8_t vp_index;
uint32_t reserved_5;
uint8_t port_id[3];
@@ -234,6 +251,7 @@ struct nack_to_isp {
uint8_t reserved[2];
uint16_t ox_id;
} __packed;
+#define NOTIFY_ACK_FLAGS_TERMINATE BIT_3
#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
#define NOTIFY_ACK_SRR_FLAGS_REJECT 1
@@ -790,13 +808,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
#define FC_TM_REJECT 4
#define FC_TM_FAILED 5
-/*
- * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was
- * terminated, so no more actions is needed and success should be returned
- * to target.
- */
-#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717
-
#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
#define pci_dma_lo32(a) (a & 0xffffffff)
#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
@@ -874,6 +885,15 @@ struct qla_tgt_sess_op {
struct scsi_qla_host *vha;
struct atio_from_isp atio;
struct work_struct work;
+ struct list_head cmd_list;
+ bool aborted;
+};
+
+enum qla_sess_deletion {
+ QLA_SESS_DELETION_NONE = 0,
+ QLA_SESS_DELETION_PENDING = 1, /* hopefully we can get rid of
+ * this one */
+ QLA_SESS_DELETION_IN_PROGRESS = 2,
};
/*
@@ -884,8 +904,15 @@ struct qla_tgt_sess {
port_id_t s_id;
unsigned int conf_compl_supported:1;
- unsigned int deleted:1;
+ unsigned int deleted:2;
unsigned int local:1;
+ unsigned int logout_on_delete:1;
+ unsigned int plogi_ack_needed:1;
+ unsigned int keep_nport_handle:1;
+
+ unsigned char logout_completed;
+
+ int generation;
struct se_session *se_sess;
struct scsi_qla_host *vha;
@@ -897,6 +924,10 @@ struct qla_tgt_sess {
uint8_t port_name[WWN_SIZE];
struct work_struct free_work;
+
+ union {
+ struct imm_ntfy_from_isp tm_iocb;
+ };
};
struct qla_tgt_cmd {
@@ -912,7 +943,6 @@ struct qla_tgt_cmd {
unsigned int conf_compl_supported:1;
unsigned int sg_mapped:1;
unsigned int free_sg:1;
- unsigned int aborted:1; /* Needed in case of SRR */
unsigned int write_data_transferred:1;
unsigned int ctx_dsd_alloced:1;
unsigned int q_full:1;
@@ -961,6 +991,9 @@ struct qla_tgt_cmd {
* BIT_14 - Back end data received/sent.
* BIT_15 - SRR prepare ctio
* BIT_16 - complete free
+ * BIT_17 - flush - qlt_abort_cmd_on_host_reset
+ * BIT_18 - completion w/abort status
+ * BIT_19 - completion w/unknown status
*/
uint32_t cmd_flags;
};
@@ -1026,6 +1059,10 @@ struct qla_tgt_srr_ctio {
struct qla_tgt_cmd *cmd;
};
+/* Check for Switch reserved address */
+#define IS_SW_RESV_ADDR(_s_id) \
+ ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc))
+
#define QLA_TGT_XMIT_DATA 1
#define QLA_TGT_XMIT_STATUS 2
#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
@@ -1043,7 +1080,7 @@ extern int qlt_lport_register(void *, u64, u64, u64,
extern void qlt_lport_deregister(struct scsi_qla_host *);
extern void qlt_unreg_sess(struct qla_tgt_sess *);
extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
-extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
+extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
extern int __init qlt_init(void);
extern void qlt_exit(void);
extern void qlt_update_vp_map(struct scsi_qla_host *, int);
@@ -1073,12 +1110,23 @@ static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
ha->host->active_mode |= MODE_INITIATOR;
}
+static inline uint32_t sid_to_key(const uint8_t *s_id)
+{
+ uint32_t key;
+
+ key = (((unsigned long)s_id[0] << 16) |
+ ((unsigned long)s_id[1] << 8) |
+ (unsigned long)s_id[2]);
+ return key;
+}
+
/*
* Exported symbols from qla_target.c LLD logic used by qla2xxx code..
*/
extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
+extern void qlt_abort_cmd(struct qla_tgt_cmd *);
extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
@@ -1109,5 +1157,7 @@ extern void qlt_stop_phase2(struct qla_tgt *);
extern irqreturn_t qla83xx_msix_atio_q(int, void *);
extern void qlt_83xx_iospace_config(struct qla_hw_data *);
extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
+extern void qlt_logo_completion_handler(fc_port_t *, int);
+extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
#endif /* __QLA_TARGET_H */
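
The new sid_to_key() helper above (and its callers in tcm_qla2xxx.c below) simply packs the 3-byte FC S_ID into a 24-bit lookup key. A self-contained sketch with a worked value follows; sid_to_key_sketch() is an illustrative standalone copy, not the in-tree inline.

#include <stdint.h>
#include <stdio.h>

/* Same packing as sid_to_key(): domain, area, al_pa -> single 24-bit key. */
static uint32_t sid_to_key_sketch(const uint8_t *s_id)
{
        return ((uint32_t)s_id[0] << 16) |
               ((uint32_t)s_id[1] << 8) |
                (uint32_t)s_id[2];
}

int main(void)
{
        uint8_t s_id[3] = { 0x01, 0x02, 0x03 };         /* domain:area:al_pa */

        /* Prints 0x010203 - the key used for btree/command-list lookups. */
        printf("0x%06x\n", (unsigned int)sid_to_key_sketch(s_id));
        return 0;
}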
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index d9a8c6084346..9224a06646e6 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -374,7 +374,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
-
+ cmd->cmd_flags |= BIT_3;
cmd->bufflen = se_cmd->data_length;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -405,7 +405,7 @@ static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
- 3000);
+ 3 * HZ);
return 0;
}
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -541,12 +541,10 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
cmd->cmd_flags |= BIT_4;
cmd->bufflen = se_cmd->data_length;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
- cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
cmd->sg_cnt = se_cmd->t_data_nents;
cmd->sg = se_cmd->t_data_sg;
cmd->offset = 0;
- cmd->cmd_flags |= BIT_3;
cmd->prot_sg_cnt = se_cmd->t_prot_nents;
cmd->prot_sg = se_cmd->t_prot_sg;
@@ -571,7 +569,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
cmd->sg_cnt = 0;
cmd->offset = 0;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
- cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
if (cmd->cmd_flags & BIT_5) {
pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
dump_stack();
@@ -636,14 +633,7 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
- struct scsi_qla_host *vha = cmd->vha;
- struct qla_hw_data *ha = vha->hw;
-
- if (!cmd->sg_mapped)
- return;
-
- pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
- cmd->sg_mapped = 0;
+ qlt_abort_cmd(cmd);
}
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
@@ -1149,9 +1139,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
return NULL;
}
- key = (((unsigned long)s_id[0] << 16) |
- ((unsigned long)s_id[1] << 8) |
- (unsigned long)s_id[2]);
+ key = sid_to_key(s_id);
pr_debug("find_sess_by_s_id: 0x%06x\n", key);
se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
@@ -1186,9 +1174,7 @@ static void tcm_qla2xxx_set_sess_by_s_id(
void *slot;
int rc;
- key = (((unsigned long)s_id[0] << 16) |
- ((unsigned long)s_id[1] << 8) |
- (unsigned long)s_id[2]);
+ key = sid_to_key(s_id);
pr_debug("set_sess_by_s_id: %06x\n", key);
slot = btree_lookup32(&lport->lport_fcport_map, key);
@@ -1544,6 +1530,10 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
}
sess->conf_compl_supported = conf_compl_supported;
+
+ /* Reset logout parameters to default */
+ sess->logout_on_delete = 1;
+ sess->keep_nport_handle = 0;
}
/*
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 106884a5444e..6457a8a0db9c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -26,7 +26,6 @@
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
-#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -944,7 +943,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
scmd->sdb.length);
scmd->sdb.table.sgl = &ses->sense_sgl;
scmd->sc_data_direction = DMA_FROM_DEVICE;
- scmd->sdb.table.nents = 1;
+ scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1;
scmd->cmnd[0] = REQUEST_SENSE;
scmd->cmnd[4] = scmd->sdb.length;
scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
@@ -2523,33 +2522,3 @@ void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
}
}
EXPORT_SYMBOL(scsi_build_sense_buffer);
-
-/**
- * scsi_set_sense_information - set the information field in a
- * formatted sense data buffer
- * @buf: Where to build sense data
- * @info: 64-bit information value to be set
- *
- **/
-void scsi_set_sense_information(u8 *buf, u64 info)
-{
- if ((buf[0] & 0x7f) == 0x72) {
- u8 *ucp, len;
-
- len = buf[7];
- ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
- if (!ucp) {
- buf[7] = len + 0xa;
- ucp = buf + 8 + len;
- }
- ucp[0] = 0;
- ucp[1] = 0xa;
- ucp[2] = 0x80; /* Valid bit */
- ucp[3] = 0;
- put_unaligned_be64(info, &ucp[4]);
- } else if ((buf[0] & 0x7f) == 0x70) {
- buf[0] |= 0x80;
- put_unaligned_be64(info, &buf[3]);
- }
-}
-EXPORT_SYMBOL(scsi_set_sense_information);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b1a263137a23..448ebdaa3d69 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -583,7 +583,7 @@ static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
{
- if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS)
+ if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS)
return;
__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
}
@@ -597,8 +597,8 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
if (mq) {
if (nents <= SCSI_MAX_SG_SEGMENTS) {
- sdb->table.nents = nents;
- sg_init_table(sdb->table.sgl, sdb->table.nents);
+ sdb->table.nents = sdb->table.orig_nents = nents;
+ sg_init_table(sdb->table.sgl, nents);
return 0;
}
first_chunk = sdb->table.sgl;
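
The scsi_lib.c hunk above, together with the matching scsi_error.c change, keeps nents and orig_nents in step when a table is populated by hand instead of via sg_alloc_table(), since the free path sizes its work from orig_nents. Below is a hedged kernel-style sketch of that invariant; init_inline_sg() is an illustrative helper, not an existing API.

#include <linux/scatterlist.h>

/*
 * When a driver fills a pre-allocated first chunk itself (bypassing
 * sg_alloc_table()), it must set orig_nents as well as nents, otherwise
 * __sg_free_table() and similar consumers see a bogus table size.
 */
static void init_inline_sg(struct sg_table *table, struct scatterlist *sgl,
                           unsigned int nents)
{
        table->sgl = sgl;
        table->nents = table->orig_nents = nents;
        sg_init_table(sgl, nents);
}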
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 9e43ae1d2163..e4b799837948 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -217,15 +217,15 @@ static int sdev_runtime_suspend(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
struct scsi_device *sdev = to_scsi_device(dev);
- int err;
+ int err = 0;
- err = blk_pre_runtime_suspend(sdev->request_queue);
- if (err)
- return err;
- if (pm && pm->runtime_suspend)
+ if (pm && pm->runtime_suspend) {
+ err = blk_pre_runtime_suspend(sdev->request_queue);
+ if (err)
+ return err;
err = pm->runtime_suspend(dev);
- blk_post_runtime_suspend(sdev->request_queue, err);
-
+ blk_post_runtime_suspend(sdev->request_queue, err);
+ }
return err;
}
@@ -248,11 +248,11 @@ static int sdev_runtime_resume(struct device *dev)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int err = 0;
- blk_pre_runtime_resume(sdev->request_queue);
- if (pm && pm->runtime_resume)
+ if (pm && pm->runtime_resume) {
+ blk_pre_runtime_resume(sdev->request_queue);
err = pm->runtime_resume(dev);
- blk_post_runtime_resume(sdev->request_queue, err);
-
+ blk_post_runtime_resume(sdev->request_queue, err);
+ }
return err;
}
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 1ac38e73df7e..9ad41168d26d 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -859,7 +859,7 @@ sdev_store_queue_depth(struct device *dev, struct device_attribute *attr,
depth = simple_strtoul(buf, NULL, 0);
- if (depth < 1 || depth > sht->can_queue)
+ if (depth < 1 || depth > sdev->host->can_queue)
return -EINVAL;
retval = sht->change_queue_depth(sdev, depth);
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index a85292b1d09d..e3cd3ece4412 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -203,7 +203,7 @@ static ssize_t srp_show_tmo(char *buf, int tmo)
return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n");
}
-static int srp_parse_tmo(int *tmo, const char *buf)
+int srp_parse_tmo(int *tmo, const char *buf)
{
int res = 0;
@@ -214,6 +214,7 @@ static int srp_parse_tmo(int *tmo, const char *buf)
return res;
}
+EXPORT_SYMBOL(srp_parse_tmo);
static ssize_t show_reconnect_delay(struct device *dev,
struct device_attribute *attr, char *buf)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3b2fcb4fada0..a20da8c25b4f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2770,9 +2770,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
max_xfer = sdkp->max_xfer_blocks;
max_xfer <<= ilog2(sdp->sector_size) - 9;
- max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
- max_xfer);
- blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
+ sdkp->disk->queue->limits.max_sectors =
+ min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
+
set_capacity(disk, sdkp->capacity);
sd_config_write_same(sdkp);
kfree(buffer);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 3f25b8fa921d..871f3553987d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1329,9 +1329,9 @@ static int st_open(struct inode *inode, struct file *filp)
spin_lock(&st_use_lock);
STp->in_use = 0;
spin_unlock(&st_use_lock);
- scsi_tape_put(STp);
if (resumed)
scsi_autopm_put_device(STp->device);
+ scsi_tape_put(STp);
return retval;
}
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 285f77544c36..7dbbb29d24c6 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -949,7 +949,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
{
struct Scsi_Host *shost;
struct virtio_scsi *vscsi;
- int err, host_prot;
+ int err;
u32 sg_elems, num_targets;
u32 cmd_per_lun;
u32 num_queues;
@@ -1009,6 +1009,8 @@ static int virtscsi_probe(struct virtio_device *vdev)
#ifdef CONFIG_BLK_DEV_INTEGRITY
if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
+ int host_prot;
+
host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 0cae1694014d..b0f30fb68914 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -612,7 +612,7 @@ config SPI_XTENSA_XTFPGA
config SPI_ZYNQMP_GQSPI
tristate "Xilinx ZynqMP GQSPI controller"
- depends on SPI_MASTER
+ depends on SPI_MASTER && HAS_DMA
help
Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 788e2b176a4f..acce90ac7371 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -40,6 +40,7 @@
#define SPFI_CONTROL_SOFT_RESET BIT(11)
#define SPFI_CONTROL_SEND_DMA BIT(10)
#define SPFI_CONTROL_GET_DMA BIT(9)
+#define SPFI_CONTROL_SE BIT(8)
#define SPFI_CONTROL_TMODE_SHIFT 5
#define SPFI_CONTROL_TMODE_MASK 0x7
#define SPFI_CONTROL_TMODE_SINGLE 0
@@ -491,6 +492,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
xfer->rx_nbits == SPI_NBITS_QUAD)
val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
+ val |= SPFI_CONTROL_SE;
spfi_writel(spfi, val, SPFI_CONTROL);
}
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index eb7d3a6fb14c..f9deb84e4e55 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -201,8 +201,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
- if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml)
- && (transfer->len > spi_imx->tx_wml))
+ if (spi_imx->dma_is_inited
+ && transfer->len > spi_imx->rx_wml * sizeof(u32)
+ && transfer->len > spi_imx->tx_wml * sizeof(u32))
return true;
return false;
}
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 87b20a511a6b..f23f36ebaf3d 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -214,6 +214,7 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
case GQSPI_SELECT_FLASH_CS_BOTH:
instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER |
GQSPI_GENFIFO_CS_UPPER;
+ break;
case GQSPI_SELECT_FLASH_CS_UPPER:
instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER;
break;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index dd616ff0ffc5..c7de64171c45 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -693,6 +693,7 @@ static struct class *spidev_class;
#ifdef CONFIG_OF
static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "rohm,dh2228fv" },
+ { .compatible = "lineartechnology,ltc2488" },
{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
diff --git a/drivers/staging/board/Kconfig b/drivers/staging/board/Kconfig
index b8ee81840666..3f287c48e082 100644
--- a/drivers/staging/board/Kconfig
+++ b/drivers/staging/board/Kconfig
@@ -1,6 +1,6 @@
config STAGING_BOARD
bool "Staging Board Support"
- depends on OF_ADDRESS
+ depends on OF_ADDRESS && OF_IRQ && CLKDEV_LOOKUP
help
Select to enable per-board staging support code.
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index bfa42620a3f6..940781183fac 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -1266,6 +1266,7 @@ static const struct das1800_board *das1800_probe(struct comedi_device *dev)
if (index == das1801hc || index == das1802hc)
return board;
index = das1801hc;
+ break;
default:
dev_err(dev->class_dev,
"Board model: probe returned 0x%x (unknown, please report)\n",
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index 7125eb955ae5..8a9d4a0de129 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -31,7 +31,6 @@
#define DEBUG_PORTAL_ALLOC
#define DEBUG_SUBSYSTEM S_LND
-#include <asm/irq.h>
#include <linux/crc32.h>
#include <linux/errno.h>
#include <linux/if.h>
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
index 9c934e6d2ea1..c61add46b426 100644
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -40,7 +40,7 @@
#define DEBUG_SUBSYSTEM D_OTHER
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>
#include "../include/obd_support.h"
#include "../include/lustre_debug.h"
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index ed040fbb7df8..69bdc8f29b59 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1418,7 +1418,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
priv->current_aid = conf->aid;
- if (changed & BSS_CHANGED_BSSID) {
+ if (changed & BSS_CHANGED_BSSID && conf->bssid) {
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
@@ -1483,8 +1483,9 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
}
}
- if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) {
- if (conf->assoc) {
+ if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
+ priv->op_mode != NL80211_IFTYPE_AP) {
+ if (conf->assoc && conf->beacon_rate) {
CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
conf->sync_tsf);
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index f97323f19acf..af572d718135 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -701,7 +701,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
priv->current_aid = conf->aid;
- if (changed & BSS_CHANGED_BSSID)
+ if (changed & BSS_CHANGED_BSSID && conf->bssid)
vnt_mac_set_bssid_addr(priv, (u8 *)conf->bssid);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 4e68b62193ed..fd092909a457 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -968,9 +968,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
- if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+ if (hdr->flags & ISCSI_FLAG_CMD_READ)
cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
- } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
+ else
cmd->targ_xfer_tag = 0xFFFFFFFF;
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
@@ -3998,7 +3998,13 @@ get_immediate:
}
transport_err:
- iscsit_take_action_for_connection_exit(conn);
+ /*
+ * Avoid the normal connection failure code-path if this connection
+ * is still within LOGIN mode, and iscsi_np process context is
+ * responsible for cleaning up the early connection failure.
+ */
+ if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
+ iscsit_take_action_for_connection_exit(conn);
out:
return 0;
}
@@ -4082,7 +4088,7 @@ reject:
int iscsi_target_rx_thread(void *arg)
{
- int ret;
+ int ret, rc;
u8 buffer[ISCSI_HDR_LEN], opcode;
u32 checksum = 0, digest = 0;
struct iscsi_conn *conn = arg;
@@ -4092,10 +4098,16 @@ int iscsi_target_rx_thread(void *arg)
* connection recovery / failure event can be triggered externally.
*/
allow_signal(SIGINT);
+ /*
+ * Wait for iscsi_post_login_handler() to complete before allowing
+ * incoming iscsi/tcp socket I/O, and/or failing the connection.
+ */
+ rc = wait_for_completion_interruptible(&conn->rx_login_comp);
+ if (rc < 0)
+ return 0;
if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
struct completion comp;
- int rc;
init_completion(&comp);
rc = wait_for_completion_interruptible(&comp);
@@ -4532,7 +4544,18 @@ static void iscsit_logout_post_handler_closesession(
struct iscsi_conn *conn)
{
struct iscsi_session *sess = conn->sess;
- int sleep = cmpxchg(&conn->tx_thread_active, true, false);
+ int sleep = 1;
+ /*
+ * Traditional iscsi/tcp will invoke this logic from TX thread
+ * context during session logout, so clear tx_thread_active and
+ * sleep if iscsit_close_connection() has not already occurred.
+ *
+ * Since iser-target invokes this logic from its own workqueue,
+ * always sleep waiting for RX/TX thread shutdown to complete
+ * within iscsit_close_connection().
+ */
+ if (conn->conn_transport->transport_type == ISCSI_TCP)
+ sleep = cmpxchg(&conn->tx_thread_active, true, false);
atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);
@@ -4546,7 +4569,10 @@ static void iscsit_logout_post_handler_closesession(
static void iscsit_logout_post_handler_samecid(
struct iscsi_conn *conn)
{
- int sleep = cmpxchg(&conn->tx_thread_active, true, false);
+ int sleep = 1;
+
+ if (conn->conn_transport->transport_type == ISCSI_TCP)
+ sleep = cmpxchg(&conn->tx_thread_active, true, false);
atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);
@@ -4765,6 +4791,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
struct iscsi_session *sess;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
struct se_session *se_sess, *se_sess_tmp;
+ LIST_HEAD(free_list);
int session_count = 0;
spin_lock_bh(&se_tpg->session_lock);
@@ -4786,14 +4813,17 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
}
atomic_set(&sess->session_reinstatement, 1);
spin_unlock(&sess->conn_lock);
- spin_unlock_bh(&se_tpg->session_lock);
- iscsit_free_session(sess);
- spin_lock_bh(&se_tpg->session_lock);
+ list_move_tail(&se_sess->sess_list, &free_list);
+ }
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ iscsit_free_session(sess);
session_count++;
}
- spin_unlock_bh(&se_tpg->session_lock);
pr_debug("Released %d iSCSI Session(s) from Target Portal"
" Group: %hu\n", session_count, tpg->tpgt);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 3d0fe4ff5590..7e8f65e5448f 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -82,6 +82,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
init_completion(&conn->conn_logout_comp);
init_completion(&conn->rx_half_close_comp);
init_completion(&conn->tx_half_close_comp);
+ init_completion(&conn->rx_login_comp);
spin_lock_init(&conn->cmd_lock);
spin_lock_init(&conn->conn_usage_lock);
spin_lock_init(&conn->immed_queue_lock);
@@ -644,7 +645,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
iscsit_start_nopin_timer(conn);
}
-static int iscsit_start_kthreads(struct iscsi_conn *conn)
+int iscsit_start_kthreads(struct iscsi_conn *conn)
{
int ret = 0;
@@ -679,6 +680,7 @@ static int iscsit_start_kthreads(struct iscsi_conn *conn)
return 0;
out_tx:
+ send_sig(SIGINT, conn->tx_thread, 1);
kthread_stop(conn->tx_thread);
conn->tx_thread_active = false;
out_bitmap:
@@ -689,7 +691,7 @@ out_bitmap:
return ret;
}
-int iscsi_post_login_handler(
+void iscsi_post_login_handler(
struct iscsi_np *np,
struct iscsi_conn *conn,
u8 zero_tsih)
@@ -699,7 +701,6 @@ int iscsi_post_login_handler(
struct se_session *se_sess = sess->se_sess;
struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
- int rc;
iscsit_inc_conn_usage_count(conn);
@@ -739,10 +740,6 @@ int iscsi_post_login_handler(
sess->sess_ops->InitiatorName);
spin_unlock_bh(&sess->conn_lock);
- rc = iscsit_start_kthreads(conn);
- if (rc)
- return rc;
-
iscsi_post_login_start_timers(conn);
/*
* Determine CPU mask to ensure connection's RX and TX kthreads
@@ -751,15 +748,20 @@ int iscsi_post_login_handler(
iscsit_thread_get_cpumask(conn);
conn->conn_rx_reset_cpumask = 1;
conn->conn_tx_reset_cpumask = 1;
-
+ /*
+ * Wake up the sleeping iscsi_target_rx_thread() now that
+ * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
+ */
+ complete(&conn->rx_login_comp);
iscsit_dec_conn_usage_count(conn);
+
if (stop_timer) {
spin_lock_bh(&se_tpg->session_lock);
iscsit_stop_time2retain_timer(sess);
spin_unlock_bh(&se_tpg->session_lock);
}
iscsit_dec_session_usage_count(sess);
- return 0;
+ return;
}
iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
@@ -800,10 +802,6 @@ int iscsi_post_login_handler(
" iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
spin_unlock_bh(&se_tpg->session_lock);
- rc = iscsit_start_kthreads(conn);
- if (rc)
- return rc;
-
iscsi_post_login_start_timers(conn);
/*
* Determine CPU mask to ensure connection's RX and TX kthreads
@@ -812,10 +810,12 @@ int iscsi_post_login_handler(
iscsit_thread_get_cpumask(conn);
conn->conn_rx_reset_cpumask = 1;
conn->conn_tx_reset_cpumask = 1;
-
+ /*
+ * Wake up the sleeping iscsi_target_rx_thread() now that
+ * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
+ */
+ complete(&conn->rx_login_comp);
iscsit_dec_conn_usage_count(conn);
-
- return 0;
}
static void iscsi_handle_login_thread_timeout(unsigned long data)
@@ -1380,23 +1380,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
if (ret < 0)
goto new_sess_out;
- if (!conn->sess) {
- pr_err("struct iscsi_conn session pointer is NULL!\n");
- goto new_sess_out;
- }
-
iscsi_stop_login_thread_timer(np);
- if (signal_pending(current))
- goto new_sess_out;
-
if (ret == 1) {
tpg_np = conn->tpg_np;
- ret = iscsi_post_login_handler(np, conn, zero_tsih);
- if (ret < 0)
- goto new_sess_out;
-
+ iscsi_post_login_handler(np, conn, zero_tsih);
iscsit_deaccess_np(np, tpg, tpg_np);
}
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 1c7358081533..57aa0d0fd820 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -12,7 +12,8 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
-extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
+extern int iscsit_start_kthreads(struct iscsi_conn *);
+extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
bool, bool);
extern int iscsi_target_login_thread(void *);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 8c02fa34716f..f9cde9141836 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -17,6 +17,7 @@
******************************************************************************/
#include <linux/ctype.h>
+#include <linux/kthread.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
@@ -361,10 +362,24 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
ntohl(login_rsp->statsn), login->rsp_length);
padding = ((-login->rsp_length) & 3);
+ /*
+ * Before sending the last login response containing the transition
+ * bit for full-feature-phase, go ahead and start up TX/RX threads
+ * now to avoid potential resource allocation failures after the
+ * final login response has been sent.
+ */
+ if (login->login_complete) {
+ int rc = iscsit_start_kthreads(conn);
+ if (rc) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ }
if (conn->conn_transport->iscsit_put_login_tx(conn, login,
login->rsp_length + padding) < 0)
- return -1;
+ goto err;
login->rsp_length = 0;
mutex_lock(&sess->cmdsn_mutex);
@@ -373,6 +388,23 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
mutex_unlock(&sess->cmdsn_mutex);
return 0;
+
+err:
+ if (login->login_complete) {
+ if (conn->rx_thread && conn->rx_thread_active) {
+ send_sig(SIGINT, conn->rx_thread, 1);
+ kthread_stop(conn->rx_thread);
+ }
+ if (conn->tx_thread && conn->tx_thread_active) {
+ send_sig(SIGINT, conn->tx_thread, 1);
+ kthread_stop(conn->tx_thread);
+ }
+ spin_lock(&iscsit_global->ts_bitmap_lock);
+ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+ get_order(1));
+ spin_unlock(&iscsit_global->ts_bitmap_lock);
+ }
+ return -1;
}
static void iscsi_target_sk_data_ready(struct sock *sk)
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 0b0de3647478..860e84046177 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -457,8 +457,15 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
if (!strcmp(t->tf_ops->name, fo->name)) {
BUG_ON(atomic_read(&t->tf_access_cnt));
list_del(&t->tf_list);
+ mutex_unlock(&g_tf_lock);
+ /*
+ * Wait for any outstanding fabric se_deve_entry->rcu_head
+ * callbacks to complete post kfree_rcu(), before allowing
+ * fabric driver unload of TFO->module to proceed.
+ */
+ rcu_barrier();
kfree(t);
- break;
+ return;
}
}
mutex_unlock(&g_tf_lock);
@@ -747,7 +754,7 @@ static ssize_t store_pi_prot_type(struct se_dev_attrib *da,
if (!dev->transport->init_prot || !dev->transport->free_prot) {
/* 0 is only allowed value for non-supporting backends */
if (flag == 0)
- return 0;
+ return count;
pr_err("DIF protection not supported by backend: %s\n",
dev->transport->name);
@@ -1590,9 +1597,9 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
u8 type = 0;
if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
- return 0;
+ return count;
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
- return 0;
+ return count;
if (dev->export_count) {
pr_debug("Unable to process APTPL metadata while"
@@ -1658,22 +1665,32 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
* PR APTPL Metadata for Reservation
*/
case Opt_res_holder:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
res_holder = arg;
break;
case Opt_res_type:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
type = (u8)arg;
break;
case Opt_res_scope:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
break;
case Opt_res_all_tg_pt:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
all_tg_pt = (int)arg;
break;
case Opt_mapped_lun:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
mapped_lun = (u64)arg;
break;
/*
@@ -1701,14 +1718,20 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
}
break;
case Opt_tpgt:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
tpgt = (u16)arg;
break;
case Opt_port_rtpi:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
break;
case Opt_target_lun:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
target_lun = (u64)arg;
break;
default:
@@ -1985,7 +2008,7 @@ static ssize_t target_core_store_alua_lu_gp(
lu_gp_mem = dev->dev_alua_lu_gp_mem;
if (!lu_gp_mem)
- return 0;
+ return count;
if (count > LU_GROUP_NAME_BUF) {
pr_err("ALUA LU Group Alias too large!\n");
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 62ea4e8e70a8..be9cefc07407 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -84,8 +84,16 @@ void target_backend_unregister(const struct target_backend_ops *ops)
list_for_each_entry(tb, &backend_list, list) {
if (tb->ops == ops) {
list_del(&tb->list);
+ mutex_unlock(&backend_mutex);
+ /*
+ * Wait for any outstanding backend driver ->rcu_head
+ * callbacks to complete post TBO->free_device() ->
+ * call_rcu(), before allowing backend driver module
+ * unload of target_backend_ops->owner to proceed.
+ */
+ rcu_barrier();
kfree(tb);
- break;
+ return;
}
}
mutex_unlock(&backend_mutex);
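
Both this hunk and the target_unregister_template() change above follow the same shape: drop the entry from the list, release the lock, wait for outstanding call_rcu()/kfree_rcu() callbacks with rcu_barrier(), and only then free the entry and return. A condensed kernel-style sketch of that ordering, with hypothetical names (my_entry, my_list, my_lock):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_entry {
	struct list_head list;
	const void *ops;
};

static LIST_HEAD(my_list);
static DEFINE_MUTEX(my_lock);

static void my_unregister(const void *ops)
{
	struct my_entry *e;

	mutex_lock(&my_lock);
	list_for_each_entry(e, &my_list, list) {
		if (e->ops == ops) {
			list_del(&e->list);
			mutex_unlock(&my_lock);
			/* Let pending RCU callbacks that may still reference
			 * the owning module finish before freeing and before
			 * module unload can proceed. */
			rcu_barrier();
			kfree(e);
			return;
		}
	}
	mutex_unlock(&my_lock);
}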
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 0fdbe43b7dad..5ab7100de17e 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1474,7 +1474,7 @@ core_scsi3_decode_spec_i_port(
LIST_HEAD(tid_dest_list);
struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
unsigned char *buf, *ptr, proto_ident;
- const unsigned char *i_str;
+ const unsigned char *i_str = NULL;
char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
sense_reason_t ret;
u32 tpdl, tid_len = 0;
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 4703f403f31c..384cf8894411 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -333,6 +333,7 @@ static int rd_configure_device(struct se_device *dev)
dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
dev->dev_attrib.hw_max_sectors = UINT_MAX;
dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
+ dev->dev_attrib.is_nonrot = 1;
rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index b0744433315a..f87d4cef6d39 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -454,10 +454,17 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
buf[4] = 0x5;
else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
- cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
+ cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
buf[4] = 0x4;
}
+ /* logical unit supports type 1 and type 3 protection */
+ if ((dev->transport->get_device_type(dev) == TYPE_DISK) &&
+ (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) &&
+ (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) {
+ buf[4] |= (0x3 << 3);
+ }
+
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
@@ -1196,17 +1203,13 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
+ struct scsi_lun slun;
unsigned char *buf;
u32 lun_count = 0, offset = 8;
-
- if (cmd->data_length < 16) {
- pr_warn("REPORT LUNS allocation length %u too small\n",
- cmd->data_length);
- return TCM_INVALID_CDB_FIELD;
- }
+ __be32 len;
buf = transport_kmap_data_sg(cmd);
- if (!buf)
+ if (cmd->data_length && !buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
@@ -1214,11 +1217,9 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
* coming via a target_core_mod PASSTHROUGH op, and not through
* a $FABRIC_MOD. In that case, report LUN=0 only.
*/
- if (!sess) {
- int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
- lun_count = 1;
+ if (!sess)
goto done;
- }
+
nacl = sess->se_node_acl;
rcu_read_lock();
@@ -1229,10 +1230,12 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
* See SPC2-R20 7.19.
*/
lun_count++;
- if ((offset + 8) > cmd->data_length)
+ if (offset >= cmd->data_length)
continue;
- int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+ int_to_scsilun(deve->mapped_lun, &slun);
+ memcpy(buf + offset, &slun,
+ min(8u, cmd->data_length - offset));
offset += 8;
}
rcu_read_unlock();
@@ -1241,12 +1244,22 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
* See SPC3 r07, page 159.
*/
done:
- lun_count *= 8;
- buf[0] = ((lun_count >> 24) & 0xff);
- buf[1] = ((lun_count >> 16) & 0xff);
- buf[2] = ((lun_count >> 8) & 0xff);
- buf[3] = (lun_count & 0xff);
- transport_kunmap_data_sg(cmd);
+ /*
+ * If no LUNs are accessible, report virtual LUN 0.
+ */
+ if (lun_count == 0) {
+ int_to_scsilun(0, &slun);
+ if (cmd->data_length > 8)
+ memcpy(buf + offset, &slun,
+ min(8u, cmd->data_length - offset));
+ lun_count = 1;
+ }
+
+ if (buf) {
+ len = cpu_to_be32(lun_count * 8);
+ memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
+ transport_kunmap_data_sg(cmd);
+ }
target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
return 0;
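
The reworked spc_emulate_report_luns() above counts every mapped LUN but copies an entry only while it still fits inside the initiator's allocation length, and writes the 4-byte LUN LIST LENGTH header only when a buffer was actually mapped. A small userspace sketch of that clamping arithmetic (a plain memcpy into a caller-sized buffer, not the SGL-mapped kernel path; the helper name is hypothetical):

#include <stdint.h>
#include <string.h>

/* Fill 'buf' (data_length bytes) with a REPORT LUNS payload built from
 * 'lun_count' pre-formatted 8-byte LUN fields.  Entries that do not fit
 * are counted but not copied, matching "clamp the copy, not the count". */
static uint32_t fill_report_luns(uint8_t *buf, uint32_t data_length,
				 const uint8_t (*luns)[8], uint32_t lun_count)
{
	uint32_t list_len = lun_count * 8;
	uint32_t offset = 8, i;
	uint8_t hdr[4] = {
		(uint8_t)(list_len >> 24), (uint8_t)(list_len >> 16),
		(uint8_t)(list_len >> 8),  (uint8_t)list_len
	};

	memset(buf, 0, data_length);

	for (i = 0; i < lun_count; i++) {
		if (offset >= data_length)
			break;
		memcpy(buf + offset, luns[i],
		       data_length - offset < 8 ? data_length - offset : 8);
		offset += 8;
	}

	/* LUN LIST LENGTH header, also clamped to the allocation length. */
	memcpy(buf, hdr, data_length < 4 ? data_length : 4);

	return 8 + list_len;	/* full length is reported regardless of buffer size */
}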
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 6509c61b9648..620dcd405ff6 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -68,7 +68,7 @@ struct power_table {
* registered cooling device.
* @cpufreq_state: integer value representing the current state of cpufreq
* cooling devices.
- * @cpufreq_val: integer value representing the absolute value of the clipped
+ * @clipped_freq: integer value representing the absolute value of the clipped
* frequency.
* @max_level: maximum cooling level. One less than total number of valid
* cpufreq frequencies.
@@ -91,7 +91,7 @@ struct cpufreq_cooling_device {
int id;
struct thermal_cooling_device *cool_dev;
unsigned int cpufreq_state;
- unsigned int cpufreq_val;
+ unsigned int clipped_freq;
unsigned int max_level;
unsigned int *freq_table; /* In descending order */
struct cpumask allowed_cpus;
@@ -107,6 +107,9 @@ struct cpufreq_cooling_device {
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);
+static unsigned int cpufreq_dev_count;
+
+static DEFINE_MUTEX(cooling_list_lock);
static LIST_HEAD(cpufreq_dev_list);
/**
@@ -185,14 +188,14 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
{
struct cpufreq_cooling_device *cpufreq_dev;
- mutex_lock(&cooling_cpufreq_lock);
+ mutex_lock(&cooling_list_lock);
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
- mutex_unlock(&cooling_cpufreq_lock);
+ mutex_unlock(&cooling_list_lock);
return get_level(cpufreq_dev, freq);
}
}
- mutex_unlock(&cooling_cpufreq_lock);
+ mutex_unlock(&cooling_list_lock);
pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
return THERMAL_CSTATE_INVALID;
@@ -215,29 +218,35 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
unsigned long event, void *data)
{
struct cpufreq_policy *policy = data;
- unsigned long max_freq = 0;
+ unsigned long clipped_freq;
struct cpufreq_cooling_device *cpufreq_dev;
- switch (event) {
+ if (event != CPUFREQ_ADJUST)
+ return NOTIFY_DONE;
- case CPUFREQ_ADJUST:
- mutex_lock(&cooling_cpufreq_lock);
- list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
- if (!cpumask_test_cpu(policy->cpu,
- &cpufreq_dev->allowed_cpus))
- continue;
+ mutex_lock(&cooling_list_lock);
+ list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+ if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
+ continue;
- max_freq = cpufreq_dev->cpufreq_val;
+ /*
+ * policy->max is the maximum allowed frequency defined by the user
+ * and clipped_freq is the maximum that thermal constraints
+ * allow.
+ *
+ * If clipped_freq is lower than policy->max, then we need to
+ * readjust policy->max.
+ *
+ * But if clipped_freq is greater than policy->max, we don't
+ * need to do anything.
+ */
+ clipped_freq = cpufreq_dev->clipped_freq;
- if (policy->max != max_freq)
- cpufreq_verify_within_limits(policy, 0,
- max_freq);
- }
- mutex_unlock(&cooling_cpufreq_lock);
+ if (policy->max > clipped_freq)
+ cpufreq_verify_within_limits(policy, 0, clipped_freq);
break;
- default:
- return NOTIFY_DONE;
}
+ mutex_unlock(&cooling_list_lock);
return NOTIFY_OK;
}
@@ -519,7 +528,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
clip_freq = cpufreq_device->freq_table[state];
cpufreq_device->cpufreq_state = state;
- cpufreq_device->cpufreq_val = clip_freq;
+ cpufreq_device->clipped_freq = clip_freq;
cpufreq_update_policy(cpu);
@@ -861,17 +870,19 @@ __cpufreq_cooling_register(struct device_node *np,
pr_debug("%s: freq:%u KHz\n", __func__, freq);
}
- cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
+ cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
cpufreq_dev->cool_dev = cool_dev;
mutex_lock(&cooling_cpufreq_lock);
+ mutex_lock(&cooling_list_lock);
+ list_add(&cpufreq_dev->node, &cpufreq_dev_list);
+ mutex_unlock(&cooling_list_lock);
+
/* Register the notifier for first cpufreq cooling device */
- if (list_empty(&cpufreq_dev_list))
+ if (!cpufreq_dev_count++)
cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
- list_add(&cpufreq_dev->node, &cpufreq_dev_list);
-
mutex_unlock(&cooling_cpufreq_lock);
return cool_dev;
@@ -1013,13 +1024,17 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
return;
cpufreq_dev = cdev->devdata;
- mutex_lock(&cooling_cpufreq_lock);
- list_del(&cpufreq_dev->node);
/* Unregister the notifier for the last cpufreq cooling device */
- if (list_empty(&cpufreq_dev_list))
+ mutex_lock(&cooling_cpufreq_lock);
+ if (!--cpufreq_dev_count)
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
+
+ mutex_lock(&cooling_list_lock);
+ list_del(&cpufreq_dev->node);
+ mutex_unlock(&cooling_list_lock);
+
mutex_unlock(&cooling_cpufreq_lock);
thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
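
The notifier rework above replaces the switch with an early return and calls cpufreq_verify_within_limits() only when the thermally clipped frequency is actually below policy->max, while the device-list walk moves under the new cooling_list_lock. A stripped-down sketch of that clamp decision in plain C, with hypothetical policy/cool_dev structs standing in for the cpufreq types:

#include <stdio.h>

struct policy   { unsigned int max; };
struct cool_dev { unsigned int clipped_freq; };

/* Apply the thermal cap only when it is tighter than the user limit;
 * the real notifier does this via cpufreq_verify_within_limits(policy,
 * 0, clipped_freq). */
static unsigned int apply_thermal_clip(struct policy *p, const struct cool_dev *d)
{
	if (p->max > d->clipped_freq)
		p->max = d->clipped_freq;
	return p->max;
}

int main(void)
{
	struct policy p = { .max = 2000000 };
	struct cool_dev d = { .clipped_freq = 1200000 };

	printf("capped to %u kHz\n", apply_thermal_clip(&p, &d)); /* 1200000 */

	p.max = 2000000;
	d.clipped_freq = 2400000;
	printf("left at  %u kHz\n", apply_thermal_clip(&p, &d));  /* 2000000 */
	return 0;
}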
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index d5dd357ba57c..b49f97c734d0 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -405,7 +405,6 @@ static SIMPLE_DEV_PM_OPS(hisi_thermal_pm_ops,
static struct platform_driver hisi_thermal_driver = {
.driver = {
.name = "hisi_thermal",
- .owner = THIS_MODULE,
.pm = &hisi_thermal_pm_ops,
.of_match_table = of_hisi_thermal_match,
},
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 4672250b329f..7006860f2f36 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -229,7 +229,8 @@ static int allocate_power(struct thermal_zone_device *tz,
struct thermal_instance *instance;
struct power_allocator_params *params = tz->governor_data;
u32 *req_power, *max_power, *granted_power, *extra_actor_power;
- u32 total_req_power, max_allocatable_power;
+ u32 *weighted_req_power;
+ u32 total_req_power, max_allocatable_power, total_weighted_req_power;
u32 total_granted_power, power_range;
int i, num_actors, total_weight, ret = 0;
int trip_max_desired_temperature = params->trip_max_desired_temperature;
@@ -247,16 +248,17 @@ static int allocate_power(struct thermal_zone_device *tz,
}
/*
- * We need to allocate three arrays of the same size:
- * req_power, max_power and granted_power. They are going to
- * be needed until this function returns. Allocate them all
- * in one go to simplify the allocation and deallocation
- * logic.
+ * We need to allocate five arrays of the same size:
+ * req_power, max_power, granted_power, extra_actor_power and
+ * weighted_req_power. They are going to be needed until this
+ * function returns. Allocate them all in one go to simplify
+ * the allocation and deallocation logic.
*/
BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power));
BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
- req_power = devm_kcalloc(&tz->device, num_actors * 4,
+ BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
+ req_power = devm_kcalloc(&tz->device, num_actors * 5,
sizeof(*req_power), GFP_KERNEL);
if (!req_power) {
ret = -ENOMEM;
@@ -266,8 +268,10 @@ static int allocate_power(struct thermal_zone_device *tz,
max_power = &req_power[num_actors];
granted_power = &req_power[2 * num_actors];
extra_actor_power = &req_power[3 * num_actors];
+ weighted_req_power = &req_power[4 * num_actors];
i = 0;
+ total_weighted_req_power = 0;
total_req_power = 0;
max_allocatable_power = 0;
@@ -289,13 +293,14 @@ static int allocate_power(struct thermal_zone_device *tz,
else
weight = instance->weight;
- req_power[i] = frac_to_int(weight * req_power[i]);
+ weighted_req_power[i] = frac_to_int(weight * req_power[i]);
if (power_actor_get_max_power(cdev, tz, &max_power[i]))
continue;
total_req_power += req_power[i];
max_allocatable_power += max_power[i];
+ total_weighted_req_power += weighted_req_power[i];
i++;
}
@@ -303,8 +308,9 @@ static int allocate_power(struct thermal_zone_device *tz,
power_range = pid_controller(tz, current_temp, control_temp,
max_allocatable_power);
- divvy_up_power(req_power, max_power, num_actors, total_req_power,
- power_range, granted_power, extra_actor_power);
+ divvy_up_power(weighted_req_power, max_power, num_actors,
+ total_weighted_req_power, power_range, granted_power,
+ extra_actor_power);
total_granted_power = 0;
i = 0;
@@ -328,7 +334,7 @@ static int allocate_power(struct thermal_zone_device *tz,
max_allocatable_power, current_temp,
(s32)control_temp - (s32)current_temp);
- devm_kfree(&tz->device, req_power);
+ kfree(req_power);
unlock:
mutex_unlock(&tz->lock);
@@ -420,7 +426,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
return -EINVAL;
}
- params = devm_kzalloc(&tz->device, sizeof(*params), GFP_KERNEL);
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
@@ -462,14 +468,14 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
return 0;
free:
- devm_kfree(&tz->device, params);
+ kfree(params);
return ret;
}
static void power_allocator_unbind(struct thermal_zone_device *tz)
{
dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
- devm_kfree(&tz->device, tz->governor_data);
+ kfree(tz->governor_data);
tz->governor_data = NULL;
}
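
allocate_power() now keeps the raw req_power[] untouched and feeds a separate weighted_req_power[] (frac_to_int() of weight * request) plus its own total into divvy_up_power(). A tiny userspace sketch of proportional division by weighted request under a fixed power budget; the 10-bit fixed-point weight and the helper name are assumptions mirroring the governor's fractional weights, and the extra_actor_power redistribution step is omitted:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 10	/* assumption: 10-bit fixed-point weights */

static uint32_t frac_to_int(uint64_t frac)
{
	return (uint32_t)(frac >> FRAC_BITS);
}

/* Grant each actor a share of 'power_range' proportional to its
 * weighted request (sketch only: at most 16 actors). */
static void divvy_by_weighted_request(const uint32_t *req, const uint32_t *weight,
				      uint32_t *granted, int n, uint32_t power_range)
{
	uint64_t weighted[16], total = 0;
	int i;

	for (i = 0; i < n; i++) {
		weighted[i] = frac_to_int((uint64_t)weight[i] * req[i]);
		total += weighted[i];
	}
	for (i = 0; i < n; i++)
		granted[i] = total ? (uint32_t)(weighted[i] * power_range / total) : 0;
}

int main(void)
{
	uint32_t req[2] = { 3000, 1000 };			/* mW requested */
	uint32_t weight[2] = { 1 << FRAC_BITS, 2 << FRAC_BITS };/* 1.0 and 2.0 */
	uint32_t granted[2];

	divvy_by_weighted_request(req, weight, granted, 2, 2000);
	printf("granted: %u mW, %u mW\n", granted[0], granted[1]); /* 1200, 800 */
	return 0;
}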
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index c8e35c1a43dc..e0da3865e060 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -1,6 +1,6 @@
config EXYNOS_THERMAL
tristate "Exynos thermal management unit driver"
- depends on OF
+ depends on THERMAL_OF
help
If you say yes here you get support for the TMU (Thermal Management
Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 531f4b179871..c96ff10b869e 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -1296,7 +1296,6 @@ static struct thermal_zone_of_device_ops exynos_sensor_ops = {
static int exynos_tmu_probe(struct platform_device *pdev)
{
- struct exynos_tmu_platform_data *pdata;
struct exynos_tmu_data *data;
int ret;
@@ -1318,8 +1317,6 @@ static int exynos_tmu_probe(struct platform_device *pdev)
if (ret)
goto err_sensor;
- pdata = data->pdata;
-
INIT_WORK(&data->irq_work, exynos_tmu_work);
data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
@@ -1392,6 +1389,8 @@ err_clk_sec:
if (!IS_ERR(data->clk_sec))
clk_unprepare(data->clk_sec);
err_sensor:
+ if (!IS_ERR_OR_NULL(data->regulator))
+ regulator_disable(data->regulator);
thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
return ret;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 04659bfb888b..4ca211be4c0f 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1333,6 +1333,7 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
return -ENODEV;
unbind:
+ device_remove_file(&tz->device, &pos->weight_attr);
device_remove_file(&tz->device, &pos->attr);
sysfs_remove_link(&tz->device.kobj, pos->name);
release_idr(&tz->idr, &tz->lock, pos->id);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index c9c27f69e101..ee8bfacf2071 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1108,19 +1108,29 @@ static void eraser(unsigned char c, struct tty_struct *tty)
* Locking: ctrl_lock
*/
-static void isig(int sig, struct tty_struct *tty)
+static void __isig(int sig, struct tty_struct *tty)
{
- struct n_tty_data *ldata = tty->disc_data;
struct pid *tty_pgrp = tty_get_pgrp(tty);
if (tty_pgrp) {
kill_pgrp(tty_pgrp, sig, 1);
put_pid(tty_pgrp);
}
+}
- if (!L_NOFLSH(tty)) {
+static void isig(int sig, struct tty_struct *tty)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+
+ if (L_NOFLSH(tty)) {
+ /* signal only */
+ __isig(sig, tty);
+
+ } else { /* signal and flush */
up_read(&tty->termios_rwsem);
down_write(&tty->termios_rwsem);
+ __isig(sig, tty);
+
/* clear echo buffer */
mutex_lock(&ldata->output_lock);
ldata->echo_head = ldata->echo_tail = 0;
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 76e65b714471..15b4079a335e 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1185,7 +1185,7 @@ config SERIAL_SC16IS7XX_CORE
config SERIAL_SC16IS7XX
tristate "SC16IS7xx serial support"
select SERIAL_CORE
- depends on I2C || SPI_MASTER
+ depends on (SPI_MASTER && !I2C) || I2C
help
This selects support for SC16IS7xx serial ports.
Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752,
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 50cf5b10ceed..fd27e986b1dd 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2310,8 +2310,8 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
void __iomem *base;
base = devm_ioremap_resource(dev, mmiobase);
- if (!base)
- return -ENOMEM;
+ if (IS_ERR(base))
+ return PTR_ERR(base);
index = pl011_probe_dt_alias(index, dev);
diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c
index a57301a6fe42..679709f51fd4 100644
--- a/drivers/tty/serial/etraxfs-uart.c
+++ b/drivers/tty/serial/etraxfs-uart.c
@@ -950,7 +950,7 @@ static int etraxfs_uart_remove(struct platform_device *pdev)
port = platform_get_drvdata(pdev);
uart_remove_one_port(&etraxfs_uart_driver, port);
- etraxfs_uart_ports[pdev->id] = NULL;
+ etraxfs_uart_ports[port->line] = NULL;
return 0;
}
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 2c90dc31bfaa..54fdc7866ea1 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1121,11 +1121,6 @@ static int imx_startup(struct uart_port *port)
writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
- /* Can we enable the DMA support? */
- if (is_imx6q_uart(sport) && !uart_console(port) &&
- !sport->dma_is_inited)
- imx_uart_dma_init(sport);
-
spin_lock_irqsave(&sport->port.lock, flags);
/* Reset fifo's and state machines */
i = 100;
@@ -1143,9 +1138,6 @@ static int imx_startup(struct uart_port *port)
writel(USR1_RTSD, sport->port.membase + USR1);
writel(USR2_ORE, sport->port.membase + USR2);
- if (sport->dma_is_inited && !sport->dma_is_enabled)
- imx_enable_dma(sport);
-
temp = readl(sport->port.membase + UCR1);
temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
@@ -1316,6 +1308,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
} else {
ucr2 |= UCR2_CTSC;
}
+
+ /* Can we enable the DMA support? */
+ if (is_imx6q_uart(sport) && !uart_console(port)
+ && !sport->dma_is_inited)
+ imx_uart_dma_init(sport);
} else {
termios->c_cflag &= ~CRTSCTS;
}
@@ -1432,6 +1429,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
imx_enable_ms(&sport->port);
+ if (sport->dma_is_inited && !sport->dma_is_enabled)
+ imx_enable_dma(sport);
spin_unlock_irqrestore(&sport->port.lock, flags);
}
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 9e6576004a42..5ccc698cbbfa 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -354,6 +354,26 @@ static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val)
(reg << SC16IS7XX_REG_SHIFT) | port->line, val);
}
+static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen)
+{
+ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+ u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | port->line;
+
+ regcache_cache_bypass(s->regmap, true);
+ regmap_raw_read(s->regmap, addr, s->buf, rxlen);
+ regcache_cache_bypass(s->regmap, false);
+}
+
+static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
+{
+ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+ u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | port->line;
+
+ regcache_cache_bypass(s->regmap, true);
+ regmap_raw_write(s->regmap, addr, s->buf, to_send);
+ regcache_cache_bypass(s->regmap, false);
+}
+
static void sc16is7xx_port_update(struct uart_port *port, u8 reg,
u8 mask, u8 val)
{
@@ -508,10 +528,7 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG);
bytes_read = 1;
} else {
- regcache_cache_bypass(s->regmap, true);
- regmap_raw_read(s->regmap, SC16IS7XX_RHR_REG,
- s->buf, rxlen);
- regcache_cache_bypass(s->regmap, false);
+ sc16is7xx_fifo_read(port, rxlen);
bytes_read = rxlen;
}
@@ -591,9 +608,8 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
s->buf[i] = xmit->buf[xmit->tail];
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
}
- regcache_cache_bypass(s->regmap, true);
- regmap_raw_write(s->regmap, SC16IS7XX_THR_REG, s->buf, to_send);
- regcache_cache_bypass(s->regmap, false);
+
+ sc16is7xx_fifo_write(port, to_send);
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 7ae1592f7ec9..f36852067f20 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1418,7 +1418,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
mutex_lock(&port->mutex);
uart_shutdown(tty, state);
tty_port_tty_set(port, NULL);
- tty->closing = 0;
+
spin_lock_irqsave(&port->lock, flags);
if (port->blocked_open) {
@@ -1444,6 +1444,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
mutex_unlock(&port->mutex);
tty_ldisc_flush(tty);
+ tty->closing = 0;
}
static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index ea27804d87af..381a2b13682c 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -356,6 +356,7 @@ int paste_selection(struct tty_struct *tty)
schedule();
continue;
}
+ __set_current_state(TASK_RUNNING);
count = sel_buffer_lth - pasted;
count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL,
count);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 8fe52989b380..4462d167900c 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -742,6 +742,8 @@ static void visual_init(struct vc_data *vc, int num, int init)
__module_get(vc->vc_sw->owner);
vc->vc_num = num;
vc->vc_display_fg = &master_display_fg;
+ if (vc->vc_uni_pagedir_loc)
+ con_free_unimap(vc);
vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir;
vc->vc_uni_pagedir = NULL;
vc->vc_hi_font_mask = 0;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 74fea4fa41b1..3ad48e1c0c57 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1024,7 +1024,18 @@ static struct platform_driver ci_hdrc_driver = {
},
};
-module_platform_driver(ci_hdrc_driver);
+static int __init ci_hdrc_platform_register(void)
+{
+ ci_hdrc_host_driver_init();
+ return platform_driver_register(&ci_hdrc_driver);
+}
+module_init(ci_hdrc_platform_register);
+
+static void __exit ci_hdrc_platform_unregister(void)
+{
+ platform_driver_unregister(&ci_hdrc_driver);
+}
+module_exit(ci_hdrc_platform_unregister);
MODULE_ALIAS("platform:ci_hdrc");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 6cf87b8b13a8..7161439def19 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -249,9 +249,12 @@ int ci_hdrc_host_init(struct ci_hdrc *ci)
rdrv->name = "host";
ci->roles[CI_ROLE_HOST] = rdrv;
+ return 0;
+}
+
+void ci_hdrc_host_driver_init(void)
+{
ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides);
orig_bus_suspend = ci_ehci_hc_driver.bus_suspend;
ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend;
-
- return 0;
}
diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h
index 5707bf379bfb..0f12f131bdd3 100644
--- a/drivers/usb/chipidea/host.h
+++ b/drivers/usb/chipidea/host.h
@@ -5,6 +5,7 @@
int ci_hdrc_host_init(struct ci_hdrc *ci);
void ci_hdrc_host_destroy(struct ci_hdrc *ci);
+void ci_hdrc_host_driver_init(void);
#else
@@ -18,6 +19,11 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
}
+static inline void ci_hdrc_host_driver_init(void)
+{
+
+}
+
#endif
#endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 519a77ba214c..b30e7423549b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1944,6 +1944,7 @@ static void __exit acm_exit(void)
usb_deregister(&acm_driver);
tty_unregister_driver(acm_tty_driver);
put_tty_driver(acm_tty_driver);
+ idr_destroy(&acm_minors);
}
module_init(acm_init);
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 0e6f968e93fe..01c0c0477a9e 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -242,7 +242,7 @@ static int __init ulpi_init(void)
{
return bus_register(&ulpi_bus);
}
-module_init(ulpi_init);
+subsys_initcall(ulpi_init);
static void __exit ulpi_exit(void)
{
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index be5b2074f906..cbcd0920fb51 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1022,9 +1022,12 @@ static int register_root_hub(struct usb_hcd *hcd)
dev_name(&usb_dev->dev), retval);
return (retval < 0) ? retval : -EMSGSIZE;
}
- if (usb_dev->speed == USB_SPEED_SUPER) {
+
+ if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) {
retval = usb_get_bos_descriptor(usb_dev);
- if (retval < 0) {
+ if (!retval) {
+ usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
+ } else if (usb_dev->speed == USB_SPEED_SUPER) {
mutex_unlock(&usb_bus_list_lock);
dev_dbg(parent_dev, "can't read %s bos descriptor %d\n",
dev_name(&usb_dev->dev), retval);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 43cb2f2e3b43..73dfa194160b 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -122,7 +122,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
return usb_get_intfdata(hdev->actconfig->interface[0]);
}
-static int usb_device_supports_lpm(struct usb_device *udev)
+int usb_device_supports_lpm(struct usb_device *udev)
{
/* USB 2.1 (and greater) devices indicate LPM support through
* their USB 2.0 Extended Capabilities BOS descriptor.
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 7eb1e26798e5..457255a3306a 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -65,6 +65,7 @@ extern int usb_hub_init(void);
extern void usb_hub_cleanup(void);
extern int usb_major_init(void);
extern void usb_major_cleanup(void);
+extern int usb_device_supports_lpm(struct usb_device *udev);
#ifdef CONFIG_PM
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index e5b546f1152e..c3cc1a78d1e2 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -72,17 +72,7 @@ static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "%s\n", __func__);
/* Backup Host regs */
- hr = hsotg->hr_backup;
- if (!hr) {
- hr = devm_kzalloc(hsotg->dev, sizeof(*hr), GFP_KERNEL);
- if (!hr) {
- dev_err(hsotg->dev, "%s: can't allocate host regs\n",
- __func__);
- return -ENOMEM;
- }
-
- hsotg->hr_backup = hr;
- }
+ hr = &hsotg->hr_backup;
hr->hcfg = readl(hsotg->regs + HCFG);
hr->haintmsk = readl(hsotg->regs + HAINTMSK);
for (i = 0; i < hsotg->core_params->host_channels; ++i)
@@ -90,6 +80,7 @@ static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
hr->hprt0 = readl(hsotg->regs + HPRT0);
hr->hfir = readl(hsotg->regs + HFIR);
+ hr->valid = true;
return 0;
}
@@ -109,12 +100,13 @@ static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "%s\n", __func__);
/* Restore host regs */
- hr = hsotg->hr_backup;
- if (!hr) {
+ hr = &hsotg->hr_backup;
+ if (!hr->valid) {
dev_err(hsotg->dev, "%s: no host registers to restore\n",
__func__);
return -EINVAL;
}
+ hr->valid = false;
writel(hr->hcfg, hsotg->regs + HCFG);
writel(hr->haintmsk, hsotg->regs + HAINTMSK);
@@ -152,17 +144,7 @@ static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "%s\n", __func__);
/* Backup dev regs */
- dr = hsotg->dr_backup;
- if (!dr) {
- dr = devm_kzalloc(hsotg->dev, sizeof(*dr), GFP_KERNEL);
- if (!dr) {
- dev_err(hsotg->dev, "%s: can't allocate device regs\n",
- __func__);
- return -ENOMEM;
- }
-
- hsotg->dr_backup = dr;
- }
+ dr = &hsotg->dr_backup;
dr->dcfg = readl(hsotg->regs + DCFG);
dr->dctl = readl(hsotg->regs + DCTL);
@@ -195,7 +177,7 @@ static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
dr->doeptsiz[i] = readl(hsotg->regs + DOEPTSIZ(i));
dr->doepdma[i] = readl(hsotg->regs + DOEPDMA(i));
}
-
+ dr->valid = true;
return 0;
}
@@ -215,12 +197,13 @@ static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "%s\n", __func__);
/* Restore dev regs */
- dr = hsotg->dr_backup;
- if (!dr) {
+ dr = &hsotg->dr_backup;
+ if (!dr->valid) {
dev_err(hsotg->dev, "%s: no device registers to restore\n",
__func__);
return -EINVAL;
}
+ dr->valid = false;
writel(dr->dcfg, hsotg->regs + DCFG);
writel(dr->dctl, hsotg->regs + DCTL);
@@ -268,17 +251,7 @@ static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
int i;
/* Backup global regs */
- gr = hsotg->gr_backup;
- if (!gr) {
- gr = devm_kzalloc(hsotg->dev, sizeof(*gr), GFP_KERNEL);
- if (!gr) {
- dev_err(hsotg->dev, "%s: can't allocate global regs\n",
- __func__);
- return -ENOMEM;
- }
-
- hsotg->gr_backup = gr;
- }
+ gr = &hsotg->gr_backup;
gr->gotgctl = readl(hsotg->regs + GOTGCTL);
gr->gintmsk = readl(hsotg->regs + GINTMSK);
@@ -291,6 +264,7 @@ static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
for (i = 0; i < MAX_EPS_CHANNELS; i++)
gr->dtxfsiz[i] = readl(hsotg->regs + DPTXFSIZN(i));
+ gr->valid = true;
return 0;
}
@@ -309,12 +283,13 @@ static int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "%s\n", __func__);
/* Restore global regs */
- gr = hsotg->gr_backup;
- if (!gr) {
+ gr = &hsotg->gr_backup;
+ if (!gr->valid) {
dev_err(hsotg->dev, "%s: no global registers to restore\n",
__func__);
return -EINVAL;
}
+ gr->valid = false;
writel(0xffffffff, hsotg->regs + GINTSTS);
writel(gr->gotgctl, hsotg->regs + GOTGCTL);
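
The dwc2 change replaces the lazily devm_kzalloc()ed register-backup pointers with structs embedded in dwc2_hsotg plus a valid flag, so a backup can never fail with -ENOMEM and restore can tell whether a backup exists and has not been consumed. A compact userspace sketch of that embed-plus-flag pattern with hypothetical names (regs_backup, dev_state):

#include <stdbool.h>
#include <stdio.h>

struct regs_backup {
	unsigned int cfg;
	bool valid;		/* replaces the "pointer is non-NULL" test */
};

struct dev_state {
	unsigned int cfg;
	struct regs_backup backup;	/* embedded: no allocation to fail */
};

static void backup_regs(struct dev_state *d)
{
	d->backup.cfg = d->cfg;
	d->backup.valid = true;
}

static int restore_regs(struct dev_state *d)
{
	if (!d->backup.valid)
		return -1;		/* nothing to restore, like -EINVAL */
	d->backup.valid = false;	/* a backup is consumed exactly once */
	d->cfg = d->backup.cfg;
	return 0;
}

int main(void)
{
	struct dev_state d = { .cfg = 0xabc };

	backup_regs(&d);
	d.cfg = 0;
	printf("restore: %d, cfg=0x%x\n", restore_regs(&d), d.cfg); /* 0, 0xabc */
	printf("again:   %d\n", restore_regs(&d));                  /* -1 */
	return 0;
}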
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 53b8de03f102..0ed87620941b 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -492,6 +492,7 @@ struct dwc2_gregs_backup {
u32 gdfifocfg;
u32 dtxfsiz[MAX_EPS_CHANNELS];
u32 gpwrdn;
+ bool valid;
};
/**
@@ -521,6 +522,7 @@ struct dwc2_dregs_backup {
u32 doepctl[MAX_EPS_CHANNELS];
u32 doeptsiz[MAX_EPS_CHANNELS];
u32 doepdma[MAX_EPS_CHANNELS];
+ bool valid;
};
/**
@@ -538,6 +540,7 @@ struct dwc2_hregs_backup {
u32 hcintmsk[MAX_EPS_CHANNELS];
u32 hprt0;
u32 hfir;
+ bool valid;
};
/**
@@ -705,9 +708,9 @@ struct dwc2_hsotg {
struct work_struct wf_otg;
struct timer_list wkp_timer;
enum dwc2_lx_state lx_state;
- struct dwc2_gregs_backup *gr_backup;
- struct dwc2_dregs_backup *dr_backup;
- struct dwc2_hregs_backup *hr_backup;
+ struct dwc2_gregs_backup gr_backup;
+ struct dwc2_dregs_backup dr_backup;
+ struct dwc2_hregs_backup hr_backup;
struct dentry *debug_root;
struct debugfs_regset32 *regset;
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index b10377c65064..f845c41fe9e5 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -359,10 +359,9 @@ void dwc2_hcd_stop(struct dwc2_hsotg *hsotg)
/* Caller must hold driver lock */
static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
- struct dwc2_hcd_urb *urb, void **ep_handle,
- gfp_t mem_flags)
+ struct dwc2_hcd_urb *urb, struct dwc2_qh *qh,
+ struct dwc2_qtd *qtd)
{
- struct dwc2_qtd *qtd;
u32 intr_mask;
int retval;
int dev_speed;
@@ -386,18 +385,15 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
return -ENODEV;
}
- qtd = kzalloc(sizeof(*qtd), mem_flags);
if (!qtd)
- return -ENOMEM;
+ return -EINVAL;
dwc2_hcd_qtd_init(qtd, urb);
- retval = dwc2_hcd_qtd_add(hsotg, qtd, (struct dwc2_qh **)ep_handle,
- mem_flags);
+ retval = dwc2_hcd_qtd_add(hsotg, qtd, qh);
if (retval) {
dev_err(hsotg->dev,
"DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n",
retval);
- kfree(qtd);
return retval;
}
@@ -2445,6 +2441,9 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
u32 tflags = 0;
void *buf;
unsigned long flags;
+ struct dwc2_qh *qh;
+ bool qh_allocated = false;
+ struct dwc2_qtd *qtd;
if (dbg_urb(urb)) {
dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n");
@@ -2523,15 +2522,32 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
urb->iso_frame_desc[i].length);
urb->hcpriv = dwc2_urb;
+ qh = (struct dwc2_qh *) ep->hcpriv;
+ /* Create QH for the endpoint if it doesn't exist */
+ if (!qh) {
+ qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, mem_flags);
+ if (!qh) {
+ retval = -ENOMEM;
+ goto fail0;
+ }
+ ep->hcpriv = qh;
+ qh_allocated = true;
+ }
+
+ qtd = kzalloc(sizeof(*qtd), mem_flags);
+ if (!qtd) {
+ retval = -ENOMEM;
+ goto fail1;
+ }
spin_lock_irqsave(&hsotg->lock, flags);
retval = usb_hcd_link_urb_to_ep(hcd, urb);
if (retval)
- goto fail1;
+ goto fail2;
- retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, &ep->hcpriv, mem_flags);
+ retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
if (retval)
- goto fail2;
+ goto fail3;
if (alloc_bandwidth) {
dwc2_allocate_bus_bandwidth(hcd,
@@ -2543,12 +2559,25 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
return 0;
-fail2:
+fail3:
dwc2_urb->priv = NULL;
usb_hcd_unlink_urb_from_ep(hcd, urb);
-fail1:
+fail2:
spin_unlock_irqrestore(&hsotg->lock, flags);
urb->hcpriv = NULL;
+ kfree(qtd);
+fail1:
+ if (qh_allocated) {
+ struct dwc2_qtd *qtd2, *qtd2_tmp;
+
+ ep->hcpriv = NULL;
+ dwc2_hcd_qh_unlink(hsotg, qh);
+ /* Free each QTD in the QH's QTD list */
+ list_for_each_entry_safe(qtd2, qtd2_tmp, &qh->qtd_list,
+ qtd_list_entry)
+ dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh);
+ dwc2_hcd_qh_free(hsotg, qh);
+ }
fail0:
kfree(dwc2_urb);
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 7b5841c40033..fc1054965552 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -463,6 +463,9 @@ extern void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
/* Schedule Queue Functions */
/* Implemented in hcd_queue.c */
extern void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg);
+extern struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
+ struct dwc2_hcd_urb *urb,
+ gfp_t mem_flags);
extern void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
extern int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
extern void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
@@ -471,7 +474,7 @@ extern void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
extern void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb);
extern int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
- struct dwc2_qh **qh, gfp_t mem_flags);
+ struct dwc2_qh *qh);
/* Unlinks and frees a QTD */
static inline void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg,
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 9b5c36256627..3ad63d392e13 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -191,7 +191,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
*
* Return: Pointer to the newly allocated QH, or NULL on error
*/
-static struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
+struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
struct dwc2_hcd_urb *urb,
gfp_t mem_flags)
{
@@ -767,57 +767,32 @@ void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
*
* @hsotg: The DWC HCD structure
* @qtd: The QTD to add
- * @qh: Out parameter to return queue head
- * @atomic_alloc: Flag to do atomic alloc if needed
+ * @qh: Queue head to add qtd to
*
* Return: 0 if successful, negative error code otherwise
*
- * Finds the correct QH to place the QTD into. If it does not find a QH, it
- * will create a new QH. If the QH to which the QTD is added is not currently
- * scheduled, it is placed into the proper schedule based on its EP type.
+ * If the QH to which the QTD is added is not currently scheduled, it is placed
+ * into the proper schedule based on its EP type.
*/
int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
- struct dwc2_qh **qh, gfp_t mem_flags)
+ struct dwc2_qh *qh)
{
- struct dwc2_hcd_urb *urb = qtd->urb;
- int allocated = 0;
int retval;
- /*
- * Get the QH which holds the QTD-list to insert to. Create QH if it
- * doesn't exist.
- */
- if (*qh == NULL) {
- *qh = dwc2_hcd_qh_create(hsotg, urb, mem_flags);
- if (*qh == NULL)
- return -ENOMEM;
- allocated = 1;
+ if (unlikely(!qh)) {
+ dev_err(hsotg->dev, "%s: Invalid QH\n", __func__);
+ retval = -EINVAL;
+ goto fail;
}
- retval = dwc2_hcd_qh_add(hsotg, *qh);
+ retval = dwc2_hcd_qh_add(hsotg, qh);
if (retval)
goto fail;
- qtd->qh = *qh;
- list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list);
+ qtd->qh = qh;
+ list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);
return 0;
-
fail:
- if (allocated) {
- struct dwc2_qtd *qtd2, *qtd2_tmp;
- struct dwc2_qh *qh_tmp = *qh;
-
- *qh = NULL;
- dwc2_hcd_qh_unlink(hsotg, qh_tmp);
-
- /* Free each QTD in the QH's QTD list */
- list_for_each_entry_safe(qtd2, qtd2_tmp, &qh_tmp->qtd_list,
- qtd_list_entry)
- dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh_tmp);
-
- dwc2_hcd_qh_free(hsotg, qh_tmp);
- }
-
return retval;
}
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 5c110d8e293b..ff5773c66b84 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -446,10 +446,12 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
/* Select the HS PHY interface */
switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) {
case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI:
- if (!strncmp(dwc->hsphy_interface, "utmi", 4)) {
+ if (dwc->hsphy_interface &&
+ !strncmp(dwc->hsphy_interface, "utmi", 4)) {
reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI;
break;
- } else if (!strncmp(dwc->hsphy_interface, "ulpi", 4)) {
+ } else if (dwc->hsphy_interface &&
+ !strncmp(dwc->hsphy_interface, "ulpi", 4)) {
reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
} else {
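
Aside (sketch only, not part of the patch): the dwc3_phy_setup hunk above guards strncmp() against a NULL hsphy_interface, which is what the driver sees when neither device tree nor platform data supplies the property. A minimal user-space sketch of the same guard, with a hypothetical helper name:

    #include <string.h>

    /* If the property was never set, the pointer is NULL and strncmp() would
     * dereference it; checking the pointer first simply keeps the default
     * PHY configuration instead of crashing. */
    static int phy_interface_is(const char *hsphy_interface, const char *name)
    {
            return hsphy_interface && strncmp(hsphy_interface, name, 4) == 0;
    }
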
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 2ef3c8d6a9db..69e769c35cf5 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -727,6 +727,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
break;
+ case USB_REQ_SET_INTERFACE:
+ dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
+ dwc->start_config_issued = false;
+ /* Fall through */
default:
dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
ret = dwc3_ep0_delegate_req(dwc, ctrl);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 4e3447bbd097..58b4657fc721 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1758,10 +1758,13 @@ unknown:
* take such requests too, if that's ever needed: to work
* in config 0, etc.
*/
- list_for_each_entry(f, &cdev->config->functions, list)
- if (f->req_match && f->req_match(f, ctrl))
- goto try_fun_setup;
- f = NULL;
+ if (cdev->config) {
+ list_for_each_entry(f, &cdev->config->functions, list)
+ if (f->req_match && f->req_match(f, ctrl))
+ goto try_fun_setup;
+ f = NULL;
+ }
+
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_INTERFACE:
if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 0495c94a23d7..289e20119fea 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -571,7 +571,7 @@ static struct config_group *function_make(
if (IS_ERR(fi))
return ERR_CAST(fi);
- ret = config_item_set_name(&fi->group.cg_item, name);
+ ret = config_item_set_name(&fi->group.cg_item, "%s", name);
if (ret) {
usb_put_function_instance(fi);
return ERR_PTR(ret);
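
Aside (illustrative only, not part of the patch): config_item_set_name() takes a printf-style format, so passing the user-chosen directory name directly would let a name containing '%' conversions be interpreted as a format string; the "%s" wrapper turns it into inert data. A small user-space sketch of the same hazard, with hypothetical helper names:

    #include <stdio.h>
    #include <stddef.h>

    /* BAD: the untrusted name is used as the format string, so "%x%x%n"-style
     * input can read or corrupt memory. */
    static void set_name_unsafe(char *dst, size_t len, const char *user_name)
    {
            snprintf(dst, len, user_name);
    }

    /* OK: the untrusted name is plain data behind a fixed "%s" format. */
    static void set_name_safe(char *dst, size_t len, const char *user_name)
    {
            snprintf(dst, len, "%s", user_name);
    }
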
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 45b8c8b338df..6e7be91e6097 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -924,7 +924,8 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
kiocb->private = p;
- kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
+ if (p->aio)
+ kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
res = ffs_epfile_io(kiocb->ki_filp, p);
if (res == -EIOCBQUEUED)
@@ -968,7 +969,8 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
kiocb->private = p;
- kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
+ if (p->aio)
+ kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
res = ffs_epfile_io(kiocb->ki_filp, p);
if (res == -EIOCBQUEUED)
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index f7f35a36c09a..6df9715a4bcd 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -699,6 +699,10 @@ static inline int hidg_get_minor(void)
int ret;
ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL);
+ if (ret >= HIDG_MINORS) {
+ ida_simple_remove(&hidg_ida, ret);
+ ret = -ENODEV;
+ }
return ret;
}
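
Aside (stand-alone sketch, not part of the patch): ida_simple_get() is called here with an unbounded range, so the caller itself must enforce the HIDG_MINORS limit, handing the ID back and failing with -ENODEV once the minor space is exhausted; the same guard is added for the printer function's minors further down. The pattern in isolation, with a stand-in allocator instead of the ida API:

    #include <errno.h>

    #define MAX_MINORS 4            /* stand-in for HIDG_MINORS / PRINTER_MINORS */

    static int slot_used[64];

    /* Stand-in for ida_simple_get() with an unbounded range: hands out the
     * lowest free slot, or a negative errno if none is left. */
    static int alloc_slot(void)
    {
            int i;

            for (i = 0; i < 64; i++) {
                    if (!slot_used[i]) {
                            slot_used[i] = 1;
                            return i;
                    }
            }
            return -ENOSPC;
    }

    static void free_slot(int i)
    {
            slot_used[i] = 0;
    }

    /* The pattern from the patch: the allocator knows nothing about the
     * device's minor range, so IDs at or beyond MAX_MINORS are returned to
     * the pool and the caller reports -ENODEV. Negative allocator errors
     * pass straight through. */
    static int get_minor(void)
    {
            int ret = alloc_slot();

            if (ret >= MAX_MINORS) {
                    free_slot(ret);
                    ret = -ENODEV;
            }
            return ret;
    }
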
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index d2259c663996..f936268d26c6 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2786,7 +2786,7 @@ int fsg_common_set_nluns(struct fsg_common *common, int nluns)
return -EINVAL;
}
- curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL);
+ curlun = kcalloc(FSG_MAX_LUNS, sizeof(*curlun), GFP_KERNEL);
if (unlikely(!curlun))
return -ENOMEM;
@@ -2796,8 +2796,6 @@ int fsg_common_set_nluns(struct fsg_common *common, int nluns)
common->luns = curlun;
common->nluns = nluns;
- pr_info("Number of LUNs=%d\n", common->nluns);
-
return 0;
}
EXPORT_SYMBOL_GPL(fsg_common_set_nluns);
@@ -3563,14 +3561,26 @@ static struct usb_function *fsg_alloc(struct usb_function_instance *fi)
struct fsg_opts *opts = fsg_opts_from_func_inst(fi);
struct fsg_common *common = opts->common;
struct fsg_dev *fsg;
+ unsigned nluns, i;
fsg = kzalloc(sizeof(*fsg), GFP_KERNEL);
if (unlikely(!fsg))
return ERR_PTR(-ENOMEM);
mutex_lock(&opts->lock);
+ if (!opts->refcnt) {
+ for (nluns = i = 0; i < FSG_MAX_LUNS; ++i)
+ if (common->luns[i])
+ nluns = i + 1;
+ if (!nluns)
+ pr_warn("No LUNS defined, continuing anyway\n");
+ else
+ common->nluns = nluns;
+ pr_info("Number of LUNs=%u\n", common->nluns);
+ }
opts->refcnt++;
mutex_unlock(&opts->lock);
+
fsg->function.name = FSG_DRIVER_DESC;
fsg->function.bind = fsg_bind;
fsg->function.unbind = fsg_unbind;
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 6316aa5b1c49..ad50a67c1465 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1145,7 +1145,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
if (opts->id && !midi->id) {
status = -ENOMEM;
mutex_unlock(&opts->lock);
- goto kstrdup_fail;
+ goto setup_fail;
}
midi->in_ports = opts->in_ports;
midi->out_ports = opts->out_ports;
@@ -1164,8 +1164,6 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
return &midi->func;
-kstrdup_fail:
- f_midi_unregister_card(midi);
setup_fail:
for (--i; i >= 0; i--)
kfree(midi->in_port[i]);
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 44173df27273..357f63f47b42 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1248,7 +1248,15 @@ static struct config_item_type printer_func_type = {
static inline int gprinter_get_minor(void)
{
- return ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
+ int ret;
+
+ ret = ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
+ if (ret >= PRINTER_MINORS) {
+ ida_simple_remove(&printer_ida, ret);
+ ret = -ENODEV;
+ }
+
+ return ret;
}
static inline void gprinter_put_minor(int minor)
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 6d3eb8b00a48..531861547253 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1162,14 +1162,14 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
factor = 1000;
} else {
ep_desc = &hs_epin_desc;
- factor = 125;
+ factor = 8000;
}
/* pre-compute some values for iso_complete() */
uac2->p_framesize = opts->p_ssize *
num_channels(opts->p_chmask);
rate = opts->p_srate * uac2->p_framesize;
- uac2->p_interval = (1 << (ep_desc->bInterval - 1)) * factor;
+ uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1));
uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval,
prm->max_psize);
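
Aside (arithmetic sketch, not part of the patch): with the fix, p_interval is a per-second packet count, frames-per-second divided by 2^(bInterval-1), using 1000 frames/s at full speed and 8000 microframes/s at high speed, so that rate / p_interval in the following lines yields bytes per packet. For example, at high speed with bInterval = 4 that is 8000 / 8 = 1000 packets per second:

    #include <stdio.h>

    /* Packet opportunities per second for an isochronous IN endpoint: full
     * speed counts 1 ms frames (1000/s), high speed counts 125 us microframes
     * (8000/s); bInterval selects every 2^(bInterval-1)-th (micro)frame. */
    static unsigned int iso_packets_per_sec(int high_speed, unsigned int bInterval)
    {
            unsigned int per_sec = high_speed ? 8000 : 1000;

            return per_sec / (1u << (bInterval - 1));
    }

    int main(void)
    {
            printf("%u\n", iso_packets_per_sec(1, 4));      /* prints 1000 */
            return 0;
    }
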
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index b04980cf6dc4..1efa61265d8d 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -779,7 +779,7 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
/* The current hw dequeue pointer */
tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0));
deq_ptr_64 = tmp_32;
- tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(1));
+ tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1(0));
deq_ptr_64 |= ((u64)tmp_32 << 32);
/* we have the dma addr of next bd that will be fetched by hardware */
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index e547ea7f56b1..1137e3384218 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -1171,7 +1171,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
udc_name, fotg210);
if (ret < 0) {
pr_err("request_irq error (%d)\n", ret);
- goto err_irq;
+ goto err_req;
}
ret = usb_add_gadget_udc(&pdev->dev, &fotg210->gadget);
@@ -1183,7 +1183,6 @@ static int fotg210_udc_probe(struct platform_device *pdev)
return 0;
err_add_udc:
-err_irq:
free_irq(ires->start, fotg210);
err_req:
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index d32160d6463f..5da37c957b53 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -2167,7 +2167,7 @@ static int mv_udc_probe(struct platform_device *pdev)
return -ENODEV;
}
- udc->phy_regs = ioremap(r->start, resource_size(r));
+ udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (udc->phy_regs == NULL) {
dev_err(&pdev->dev, "failed to map phy I/O memory\n");
return -EBUSY;
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index d69c35558f68..89ed5e71a199 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -60,13 +60,15 @@ static DEFINE_MUTEX(udc_lock);
int usb_gadget_map_request(struct usb_gadget *gadget,
struct usb_request *req, int is_in)
{
+ struct device *dev = gadget->dev.parent;
+
if (req->length == 0)
return 0;
if (req->num_sgs) {
int mapped;
- mapped = dma_map_sg(&gadget->dev, req->sg, req->num_sgs,
+ mapped = dma_map_sg(dev, req->sg, req->num_sgs,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (mapped == 0) {
dev_err(&gadget->dev, "failed to map SGs\n");
@@ -75,11 +77,11 @@ int usb_gadget_map_request(struct usb_gadget *gadget,
req->num_mapped_sgs = mapped;
} else {
- req->dma = dma_map_single(&gadget->dev, req->buf, req->length,
+ req->dma = dma_map_single(dev, req->buf, req->length,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (dma_mapping_error(&gadget->dev, req->dma)) {
- dev_err(&gadget->dev, "failed to map buffer\n");
+ if (dma_mapping_error(dev, req->dma)) {
+ dev_err(dev, "failed to map buffer\n");
return -EFAULT;
}
}
@@ -95,12 +97,12 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget,
return;
if (req->num_mapped_sgs) {
- dma_unmap_sg(&gadget->dev, req->sg, req->num_mapped_sgs,
+ dma_unmap_sg(gadget->dev.parent, req->sg, req->num_mapped_sgs,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
req->num_mapped_sgs = 0;
} else {
- dma_unmap_single(&gadget->dev, req->dma, req->length,
+ dma_unmap_single(gadget->dev.parent, req->dma, req->length,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}
}
@@ -321,6 +323,7 @@ err4:
err3:
put_device(&udc->dev);
+ device_del(&gadget->dev);
err2:
put_device(&gadget->dev);
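
Aside (sketch only, not part of the patch): the mapping calls now target gadget->dev.parent, the UDC controller that actually sits on a bus and carries the DMA configuration, rather than the gadget's own logical device, which has no DMA capabilities of its own. A stand-alone illustration of "map against the device that owns the DMA capabilities", with stand-in types rather than the kernel's struct device:

    #include <stdio.h>

    /* Stand-in device hierarchy: the logical gadget device has no DMA ops;
     * only its parent (the controller on the bus) does, so that is the
     * device all mapping calls must use. */
    struct device {
            const char *name;
            int has_dma_ops;
            struct device *parent;
    };

    static struct device *dma_capable_dev(struct device *gadget_dev)
    {
            return gadget_dev->parent && gadget_dev->parent->has_dma_ops ?
                   gadget_dev->parent : NULL;
    }

    int main(void)
    {
            struct device controller = { "dwc3.0", 1, NULL };
            struct device gadget = { "gadget", 0, &controller };
            struct device *dev = dma_capable_dev(&gadget);

            printf("map against: %s\n", dev ? dev->name : "(none)");
            return 0;
    }
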
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index f7d561ed3c23..d029bbe9eb36 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -981,10 +981,6 @@ rescan_all:
int completed, modified;
__hc32 *prev;
- /* Is this ED already invisible to the hardware? */
- if (ed->state == ED_IDLE)
- goto ed_idle;
-
/* only take off EDs that the HC isn't using, accounting for
* frame counter wraps and EDs with partially retired TDs
*/
@@ -1012,12 +1008,10 @@ skip_ed:
}
/* ED's now officially unlinked, hc doesn't see */
- ed->state = ED_IDLE;
ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
ed->hwNextED = 0;
wmb();
ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
-ed_idle:
/* reentrancy: if we drop the schedule lock, someone might
* have modified this list. normally it's just prepending
@@ -1088,6 +1082,7 @@ rescan_this:
if (list_empty(&ed->td_list)) {
*last = ed->ed_next;
ed->ed_next = NULL;
+ ed->state = ED_IDLE;
list_del(&ed->in_use_list);
} else if (ohci->rh_state == OHCI_RH_RUNNING) {
*last = ed->ed_next;
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index e9a6eec39142..cfcfadfc94fc 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -58,7 +58,7 @@
#define CCR_PM_CKRNEN 0x0002
#define CCR_PM_USBPW1 0x0004
#define CCR_PM_USBPW2 0x0008
-#define CCR_PM_USBPW3 0x0008
+#define CCR_PM_USBPW3 0x0010
#define CCR_PM_PMEE 0x0100
#define CCR_PM_PMES 0x8000
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index e75c565feb53..78241b5550df 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -484,10 +484,13 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
u32 pls = status_reg & PORT_PLS_MASK;
/* resume state is a xHCI internal state.
- * Do not report it to usb core.
+ * Do not report it to usb core, instead, pretend to be U3,
+ * thus usb core knows it's not ready for transfer
*/
- if (pls == XDEV_RESUME)
+ if (pls == XDEV_RESUME) {
+ *status |= USB_SS_PORT_LS_U3;
return;
+ }
/* When the CAS bit is set then warm reset
* should be performed on port
@@ -588,7 +591,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
status |= USB_PORT_STAT_C_RESET << 16;
/* USB3.0 only */
if (hcd->speed == HCD_USB3) {
- if ((raw_port_status & PORT_PLC))
+ /* A port link change with the port in resume state should not be
+ * reported to usbcore, as this is an internal state to be handled
+ * by the xhci driver. Reporting PLC to usbcore may cause usbcore
+ * to clear PLC first, in which case the port change event irq
+ * won't be generated.
+ */
+ if ((raw_port_status & PORT_PLC) &&
+ (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME)
status |= USB_PORT_STAT_C_LINK_STATE << 16;
if ((raw_port_status & PORT_WRC))
status |= USB_PORT_STAT_C_BH_RESET << 16;
@@ -1120,10 +1130,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
spin_lock_irqsave(&xhci->lock, flags);
if (hcd->self.root_hub->do_remote_wakeup) {
- if (bus_state->resuming_ports) {
+ if (bus_state->resuming_ports || /* USB2 */
+ bus_state->port_remote_wakeup) { /* USB3 */
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg(xhci, "suspend failed because "
- "a port is resuming\n");
+ xhci_dbg(xhci, "suspend failed because a port is resuming\n");
return -EBUSY;
}
}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index f8336408ef07..9a8c936cd42c 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1427,10 +1427,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
/* Attempt to use the ring cache */
if (virt_dev->num_rings_cached == 0)
return -ENOMEM;
+ virt_dev->num_rings_cached--;
virt_dev->eps[ep_index].new_ring =
virt_dev->ring_cache[virt_dev->num_rings_cached];
virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
- virt_dev->num_rings_cached--;
xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
1, type);
}
@@ -1792,7 +1792,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
int size;
int i, j, num_ports;
- del_timer_sync(&xhci->cmd_timer);
+ if (timer_pending(&xhci->cmd_timer))
+ del_timer_sync(&xhci->cmd_timer);
/* Free the Event Ring Segment Table and the actual Event Ring */
size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
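
Aside (illustrative sketch, not part of the patch): the ring-cache hunk moves the num_rings_cached decrement before the array access, because with N cached entries the last valid one lives at index N-1; indexing with the pre-decrement count reads one slot past the live entries. The same pop-from-cache shape in isolation, with stand-in types:

    #include <stddef.h>

    #define CACHE_SLOTS 8

    struct ring;

    struct ring_cache {
            struct ring *slot[CACHE_SLOTS];
            unsigned int count;             /* number of valid entries */
    };

    /* Pop the most recently cached ring. The count is decremented *before*
     * it is used as an index: with count == N the valid entries are
     * slot[0] .. slot[N - 1], so indexing with the pre-decrement value
     * would read one past the live entries. */
    static struct ring *ring_cache_pop(struct ring_cache *c)
    {
            struct ring *r;

            if (c->count == 0)
                    return NULL;
            c->count--;
            r = c->slot[c->count];
            c->slot[c->count] = NULL;
            return r;
    }
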
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 4a4cb1d91ac8..5590eac2b22d 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -23,10 +23,15 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/acpi.h>
#include "xhci.h"
#include "xhci-trace.h"
+#define PORT2_SSIC_CONFIG_REG2 0x883c
+#define PROG_DONE (1 << 30)
+#define SSIC_PORT_UNUSED (1 << 31)
+
/* Device for a quirk */
#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
@@ -176,20 +181,63 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
}
/*
+ * In some Intel xHCI controllers, the SSIC port needs to be marked as
+ * "unused" before putting xHCI into D3, via a vendor-specific SSIC CONFIG
+ * register at offset 0x883c, in order to get D3 working. After D3 exit,
+ * the SSIC port needs to be marked as "used" again.
+ * Without this change, xHCI might not enter the D3 state.
* Make sure PME works on some Intel xHCI controllers by writing 1 to clear
* the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
*/
-static void xhci_pme_quirk(struct xhci_hcd *xhci)
+static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
u32 val;
void __iomem *reg;
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+
+ reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
+
+ /* Notify SSIC that SSIC profile programming is not done */
+ val = readl(reg) & ~PROG_DONE;
+ writel(val, reg);
+
+ /* Mark SSIC port as unused(suspend) or used(resume) */
+ val = readl(reg);
+ if (suspend)
+ val |= SSIC_PORT_UNUSED;
+ else
+ val &= ~SSIC_PORT_UNUSED;
+ writel(val, reg);
+
+ /* Notify SSIC that SSIC profile programming is done */
+ val = readl(reg) | PROG_DONE;
+ writel(val, reg);
+ readl(reg);
+ }
+
reg = (void __iomem *) xhci->cap_regs + 0x80a4;
val = readl(reg);
writel(val | BIT(28), reg);
readl(reg);
}
+#ifdef CONFIG_ACPI
+static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
+{
+ static const u8 intel_dsm_uuid[] = {
+ 0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
+ 0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
+ };
+ acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL);
+}
+#else
+ static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
+#endif /* CONFIG_ACPI */
+
/* called during probe() after chip reset completes */
static int xhci_pci_setup(struct usb_hcd *hcd)
{
@@ -263,6 +311,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
HCC_MAX_PSA(xhci->hcc_params) >= 4)
xhci->shared_hcd->can_do_streams = 1;
+ if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+ xhci_pme_acpi_rtd3_enable(dev);
+
/* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
pm_runtime_put_noidle(&dev->dev);
@@ -307,7 +358,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
pdev->no_d3cold = true;
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
- xhci_pme_quirk(xhci);
+ xhci_pme_quirk(hcd, true);
return xhci_suspend(xhci, do_wakeup);
}
@@ -340,7 +391,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
usb_enable_intel_xhci_ports(pdev);
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
- xhci_pme_quirk(xhci);
+ xhci_pme_quirk(hcd, false);
retval = xhci_resume(xhci, hibernated);
return retval;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 94416ff70810..32f4d564494a 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -82,7 +82,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
return 0;
/* offset in TRBs */
segment_offset = trb - seg->trbs;
- if (segment_offset > TRBS_PER_SEGMENT)
+ if (segment_offset >= TRBS_PER_SEGMENT)
return 0;
return seg->dma + (segment_offset * sizeof(*trb));
}
@@ -1546,6 +1546,9 @@ static void handle_port_status(struct xhci_hcd *xhci,
usb_hcd_resume_root_hub(hcd);
}
+ if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
+ bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
+
if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
xhci_dbg(xhci, "port resume event for port %d\n", port_id);
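
Aside (sketch only, not part of the patch): xhci_trb_virt_to_dma() computes seg->dma + offset * sizeof(trb); an offset equal to TRBS_PER_SEGMENT already points one TRB past the segment, so the valid range is 0 .. TRBS_PER_SEGMENT - 1 and the comparison has to be ">=". The check in isolation, with representative values for the constants:

    #include <stdint.h>

    #define TRBS_PER_SEGMENT 256
    #define TRB_SIZE 16

    /* Valid offsets are 0 .. TRBS_PER_SEGMENT - 1; "== TRBS_PER_SEGMENT"
     * would yield a DMA address one TRB past the end of the segment, so it
     * must be rejected as well. */
    static uint64_t trb_offset_to_dma(uint64_t seg_dma, unsigned int segment_offset)
    {
            if (segment_offset >= TRBS_PER_SEGMENT)
                    return 0;
            return seg_dma + (uint64_t)segment_offset * TRB_SIZE;
    }
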
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 7da0d6043d33..526ebc0c7e72 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3453,6 +3453,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
return -EINVAL;
}
+ if (virt_dev->tt_info)
+ old_active_eps = virt_dev->tt_info->active_eps;
+
if (virt_dev->udev != udev) {
/* If the virt_dev and the udev does not match, this virt_dev
* may belong to another udev.
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 31e46cc55807..ed2ebf647c38 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -285,6 +285,7 @@ struct xhci_op_regs {
#define XDEV_U0 (0x0 << 5)
#define XDEV_U2 (0x2 << 5)
#define XDEV_U3 (0x3 << 5)
+#define XDEV_INACTIVE (0x6 << 5)
#define XDEV_RESUME (0xf << 5)
/* true: port has power (see HCC_PPC) */
#define PORT_POWER (1 << 9)
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 30842bc195f5..92d5f718659b 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -275,9 +275,7 @@ static int musb_has_gadget(struct musb *musb)
#ifdef CONFIG_USB_MUSB_HOST
return 1;
#else
- if (musb->port_mode == MUSB_PORT_MODE_HOST)
- return 1;
- return musb->g.dev.driver != NULL;
+ return musb->port_mode == MUSB_PORT_MODE_HOST;
#endif
}
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 8f7cb068d29b..3fcc0483a081 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -217,6 +217,9 @@ static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy)
{
unsigned int vbus_value;
+ if (!mxs_phy->regmap_anatop)
+ return false;
+
if (mxs_phy->port_id == 0)
regmap_read(mxs_phy->regmap_anatop,
ANADIG_USB1_VBUS_DET_STAT,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index ffd739e31bfc..eac7ccaa3c85 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -187,6 +187,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
{ USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
{ USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
+ { USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
{ USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 4f70df33975a..78b4f64c6b00 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -121,26 +121,26 @@ static DEFINE_SPINLOCK(release_lock);
static const unsigned int dummy; /* for clarity in register access fns */
enum mos_regs {
- THR, /* serial port regs */
- RHR,
- IER,
- FCR,
- ISR,
- LCR,
- MCR,
- LSR,
- MSR,
- SPR,
- DLL,
- DLM,
- DPR, /* parallel port regs */
- DSR,
- DCR,
- ECR,
- SP1_REG, /* device control regs */
- SP2_REG, /* serial port 2 (7720 only) */
- PP_REG,
- SP_CONTROL_REG,
+ MOS7720_THR, /* serial port regs */
+ MOS7720_RHR,
+ MOS7720_IER,
+ MOS7720_FCR,
+ MOS7720_ISR,
+ MOS7720_LCR,
+ MOS7720_MCR,
+ MOS7720_LSR,
+ MOS7720_MSR,
+ MOS7720_SPR,
+ MOS7720_DLL,
+ MOS7720_DLM,
+ MOS7720_DPR, /* parallel port regs */
+ MOS7720_DSR,
+ MOS7720_DCR,
+ MOS7720_ECR,
+ MOS7720_SP1_REG, /* device control regs */
+ MOS7720_SP2_REG, /* serial port 2 (7720 only) */
+ MOS7720_PP_REG,
+ MOS7720_SP_CONTROL_REG,
};
/*
@@ -150,26 +150,26 @@ enum mos_regs {
static inline __u16 get_reg_index(enum mos_regs reg)
{
static const __u16 mos7715_index_lookup_table[] = {
- 0x00, /* THR */
- 0x00, /* RHR */
- 0x01, /* IER */
- 0x02, /* FCR */
- 0x02, /* ISR */
- 0x03, /* LCR */
- 0x04, /* MCR */
- 0x05, /* LSR */
- 0x06, /* MSR */
- 0x07, /* SPR */
- 0x00, /* DLL */
- 0x01, /* DLM */
- 0x00, /* DPR */
- 0x01, /* DSR */
- 0x02, /* DCR */
- 0x0a, /* ECR */
- 0x01, /* SP1_REG */
- 0x02, /* SP2_REG (7720 only) */
- 0x04, /* PP_REG (7715 only) */
- 0x08, /* SP_CONTROL_REG */
+ 0x00, /* MOS7720_THR */
+ 0x00, /* MOS7720_RHR */
+ 0x01, /* MOS7720_IER */
+ 0x02, /* MOS7720_FCR */
+ 0x02, /* MOS7720_ISR */
+ 0x03, /* MOS7720_LCR */
+ 0x04, /* MOS7720_MCR */
+ 0x05, /* MOS7720_LSR */
+ 0x06, /* MOS7720_MSR */
+ 0x07, /* MOS7720_SPR */
+ 0x00, /* MOS7720_DLL */
+ 0x01, /* MOS7720_DLM */
+ 0x00, /* MOS7720_DPR */
+ 0x01, /* MOS7720_DSR */
+ 0x02, /* MOS7720_DCR */
+ 0x0a, /* MOS7720_ECR */
+ 0x01, /* MOS7720_SP1_REG */
+ 0x02, /* MOS7720_SP2_REG (7720 only) */
+ 0x04, /* MOS7720_PP_REG (7715 only) */
+ 0x08, /* MOS7720_SP_CONTROL_REG */
};
return mos7715_index_lookup_table[reg];
}
@@ -181,10 +181,10 @@ static inline __u16 get_reg_index(enum mos_regs reg)
static inline __u16 get_reg_value(enum mos_regs reg,
unsigned int serial_portnum)
{
- if (reg >= SP1_REG) /* control reg */
+ if (reg >= MOS7720_SP1_REG) /* control reg */
return 0x0000;
- else if (reg >= DPR) /* parallel port reg (7715 only) */
+ else if (reg >= MOS7720_DPR) /* parallel port reg (7715 only) */
return 0x0100;
else /* serial port reg */
@@ -252,7 +252,8 @@ static inline int mos7715_change_mode(struct mos7715_parport *mos_parport,
enum mos7715_pp_modes mode)
{
mos_parport->shadowECR = mode;
- write_mos_reg(mos_parport->serial, dummy, ECR, mos_parport->shadowECR);
+ write_mos_reg(mos_parport->serial, dummy, MOS7720_ECR,
+ mos_parport->shadowECR);
return 0;
}
@@ -486,7 +487,7 @@ static void parport_mos7715_write_data(struct parport *pp, unsigned char d)
if (parport_prologue(pp) < 0)
return;
mos7715_change_mode(mos_parport, SPP);
- write_mos_reg(mos_parport->serial, dummy, DPR, (__u8)d);
+ write_mos_reg(mos_parport->serial, dummy, MOS7720_DPR, (__u8)d);
parport_epilogue(pp);
}
@@ -497,7 +498,7 @@ static unsigned char parport_mos7715_read_data(struct parport *pp)
if (parport_prologue(pp) < 0)
return 0;
- read_mos_reg(mos_parport->serial, dummy, DPR, &d);
+ read_mos_reg(mos_parport->serial, dummy, MOS7720_DPR, &d);
parport_epilogue(pp);
return d;
}
@@ -510,7 +511,7 @@ static void parport_mos7715_write_control(struct parport *pp, unsigned char d)
if (parport_prologue(pp) < 0)
return;
data = ((__u8)d & 0x0f) | (mos_parport->shadowDCR & 0xf0);
- write_mos_reg(mos_parport->serial, dummy, DCR, data);
+ write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, data);
mos_parport->shadowDCR = data;
parport_epilogue(pp);
}
@@ -543,7 +544,8 @@ static unsigned char parport_mos7715_frob_control(struct parport *pp,
if (parport_prologue(pp) < 0)
return 0;
mos_parport->shadowDCR = (mos_parport->shadowDCR & (~mask)) ^ val;
- write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR);
+ write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
+ mos_parport->shadowDCR);
dcr = mos_parport->shadowDCR & 0x0f;
parport_epilogue(pp);
return dcr;
@@ -581,7 +583,8 @@ static void parport_mos7715_data_forward(struct parport *pp)
return;
mos7715_change_mode(mos_parport, PS2);
mos_parport->shadowDCR &= ~0x20;
- write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR);
+ write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
+ mos_parport->shadowDCR);
parport_epilogue(pp);
}
@@ -593,7 +596,8 @@ static void parport_mos7715_data_reverse(struct parport *pp)
return;
mos7715_change_mode(mos_parport, PS2);
mos_parport->shadowDCR |= 0x20;
- write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR);
+ write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
+ mos_parport->shadowDCR);
parport_epilogue(pp);
}
@@ -633,8 +637,10 @@ static void parport_mos7715_restore_state(struct parport *pp,
spin_unlock(&release_lock);
return;
}
- write_parport_reg_nonblock(mos_parport, DCR, mos_parport->shadowDCR);
- write_parport_reg_nonblock(mos_parport, ECR, mos_parport->shadowECR);
+ write_parport_reg_nonblock(mos_parport, MOS7720_DCR,
+ mos_parport->shadowDCR);
+ write_parport_reg_nonblock(mos_parport, MOS7720_ECR,
+ mos_parport->shadowECR);
spin_unlock(&release_lock);
}
@@ -714,14 +720,16 @@ static int mos7715_parport_init(struct usb_serial *serial)
init_completion(&mos_parport->syncmsg_compl);
/* cycle parallel port reset bit */
- write_mos_reg(mos_parport->serial, dummy, PP_REG, (__u8)0x80);
- write_mos_reg(mos_parport->serial, dummy, PP_REG, (__u8)0x00);
+ write_mos_reg(mos_parport->serial, dummy, MOS7720_PP_REG, (__u8)0x80);
+ write_mos_reg(mos_parport->serial, dummy, MOS7720_PP_REG, (__u8)0x00);
/* initialize device registers */
mos_parport->shadowDCR = DCR_INIT_VAL;
- write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR);
+ write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
+ mos_parport->shadowDCR);
mos_parport->shadowECR = ECR_INIT_VAL;
- write_mos_reg(mos_parport->serial, dummy, ECR, mos_parport->shadowECR);
+ write_mos_reg(mos_parport->serial, dummy, MOS7720_ECR,
+ mos_parport->shadowECR);
/* register with parport core */
mos_parport->pp = parport_register_port(0, PARPORT_IRQ_NONE,
@@ -1033,45 +1041,49 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
/* Initialize MCS7720 -- Write Init values to corresponding Registers
*
* Register Index
- * 0 : THR/RHR
- * 1 : IER
- * 2 : FCR
- * 3 : LCR
- * 4 : MCR
- * 5 : LSR
- * 6 : MSR
- * 7 : SPR
+ * 0 : MOS7720_THR/MOS7720_RHR
+ * 1 : MOS7720_IER
+ * 2 : MOS7720_FCR
+ * 3 : MOS7720_LCR
+ * 4 : MOS7720_MCR
+ * 5 : MOS7720_LSR
+ * 6 : MOS7720_MSR
+ * 7 : MOS7720_SPR
*
* 0x08 : SP1/2 Control Reg
*/
port_number = port->port_number;
- read_mos_reg(serial, port_number, LSR, &data);
+ read_mos_reg(serial, port_number, MOS7720_LSR, &data);
dev_dbg(&port->dev, "SS::%p LSR:%x\n", mos7720_port, data);
- write_mos_reg(serial, dummy, SP1_REG, 0x02);
- write_mos_reg(serial, dummy, SP2_REG, 0x02);
+ write_mos_reg(serial, dummy, MOS7720_SP1_REG, 0x02);
+ write_mos_reg(serial, dummy, MOS7720_SP2_REG, 0x02);
- write_mos_reg(serial, port_number, IER, 0x00);
- write_mos_reg(serial, port_number, FCR, 0x00);
+ write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
+ write_mos_reg(serial, port_number, MOS7720_FCR, 0x00);
- write_mos_reg(serial, port_number, FCR, 0xcf);
+ write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf);
mos7720_port->shadowLCR = 0x03;
- write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
+ write_mos_reg(serial, port_number, MOS7720_LCR,
+ mos7720_port->shadowLCR);
mos7720_port->shadowMCR = 0x0b;
- write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
+ write_mos_reg(serial, port_number, MOS7720_MCR,
+ mos7720_port->shadowMCR);
- write_mos_reg(serial, port_number, SP_CONTROL_REG, 0x00);
- read_mos_reg(serial, dummy, SP_CONTROL_REG, &data);
+ write_mos_reg(serial, port_number, MOS7720_SP_CONTROL_REG, 0x00);
+ read_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, &data);
data = data | (port->port_number + 1);
- write_mos_reg(serial, dummy, SP_CONTROL_REG, data);
+ write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, data);
mos7720_port->shadowLCR = 0x83;
- write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
- write_mos_reg(serial, port_number, THR, 0x0c);
- write_mos_reg(serial, port_number, IER, 0x00);
+ write_mos_reg(serial, port_number, MOS7720_LCR,
+ mos7720_port->shadowLCR);
+ write_mos_reg(serial, port_number, MOS7720_THR, 0x0c);
+ write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
mos7720_port->shadowLCR = 0x03;
- write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
- write_mos_reg(serial, port_number, IER, 0x0c);
+ write_mos_reg(serial, port_number, MOS7720_LCR,
+ mos7720_port->shadowLCR);
+ write_mos_reg(serial, port_number, MOS7720_IER, 0x0c);
response = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (response)
@@ -1144,8 +1156,8 @@ static void mos7720_close(struct usb_serial_port *port)
usb_kill_urb(port->write_urb);
usb_kill_urb(port->read_urb);
- write_mos_reg(serial, port->port_number, MCR, 0x00);
- write_mos_reg(serial, port->port_number, IER, 0x00);
+ write_mos_reg(serial, port->port_number, MOS7720_MCR, 0x00);
+ write_mos_reg(serial, port->port_number, MOS7720_IER, 0x00);
mos7720_port->open = 0;
}
@@ -1169,7 +1181,8 @@ static void mos7720_break(struct tty_struct *tty, int break_state)
data = mos7720_port->shadowLCR & ~UART_LCR_SBC;
mos7720_port->shadowLCR = data;
- write_mos_reg(serial, port->port_number, LCR, mos7720_port->shadowLCR);
+ write_mos_reg(serial, port->port_number, MOS7720_LCR,
+ mos7720_port->shadowLCR);
}
/*
@@ -1297,7 +1310,7 @@ static void mos7720_throttle(struct tty_struct *tty)
/* if we are implementing RTS/CTS, toggle that line */
if (tty->termios.c_cflag & CRTSCTS) {
mos7720_port->shadowMCR &= ~UART_MCR_RTS;
- write_mos_reg(port->serial, port->port_number, MCR,
+ write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
mos7720_port->shadowMCR);
}
}
@@ -1327,7 +1340,7 @@ static void mos7720_unthrottle(struct tty_struct *tty)
/* if we are implementing RTS/CTS, toggle that line */
if (tty->termios.c_cflag & CRTSCTS) {
mos7720_port->shadowMCR |= UART_MCR_RTS;
- write_mos_reg(port->serial, port->port_number, MCR,
+ write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
mos7720_port->shadowMCR);
}
}
@@ -1352,35 +1365,39 @@ static int set_higher_rates(struct moschip_port *mos7720_port,
dev_dbg(&port->dev, "Sending Setting Commands ..........\n");
port_number = port->port_number;
- write_mos_reg(serial, port_number, IER, 0x00);
- write_mos_reg(serial, port_number, FCR, 0x00);
- write_mos_reg(serial, port_number, FCR, 0xcf);
+ write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
+ write_mos_reg(serial, port_number, MOS7720_FCR, 0x00);
+ write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf);
mos7720_port->shadowMCR = 0x0b;
- write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
- write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x00);
+ write_mos_reg(serial, port_number, MOS7720_MCR,
+ mos7720_port->shadowMCR);
+ write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, 0x00);
/***********************************************
* Set for higher rates *
***********************************************/
	/* writing baud rate verbatim into uart clock field clearly not right */
if (port_number == 0)
- sp_reg = SP1_REG;
+ sp_reg = MOS7720_SP1_REG;
else
- sp_reg = SP2_REG;
+ sp_reg = MOS7720_SP2_REG;
write_mos_reg(serial, dummy, sp_reg, baud * 0x10);
- write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x03);
+ write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, 0x03);
mos7720_port->shadowMCR = 0x2b;
- write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
+ write_mos_reg(serial, port_number, MOS7720_MCR,
+ mos7720_port->shadowMCR);
/***********************************************
* Set DLL/DLM
***********************************************/
mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB;
- write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
- write_mos_reg(serial, port_number, DLL, 0x01);
- write_mos_reg(serial, port_number, DLM, 0x00);
+ write_mos_reg(serial, port_number, MOS7720_LCR,
+ mos7720_port->shadowLCR);
+ write_mos_reg(serial, port_number, MOS7720_DLL, 0x01);
+ write_mos_reg(serial, port_number, MOS7720_DLM, 0x00);
mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB;
- write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
+ write_mos_reg(serial, port_number, MOS7720_LCR,
+ mos7720_port->shadowLCR);
return 0;
}
@@ -1488,15 +1505,16 @@ static int send_cmd_write_baud_rate(struct moschip_port *mos7720_port,
/* Enable access to divisor latch */
mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB;
- write_mos_reg(serial, number, LCR, mos7720_port->shadowLCR);
+ write_mos_reg(serial, number, MOS7720_LCR, mos7720_port->shadowLCR);
/* Write the divisor */
- write_mos_reg(serial, number, DLL, (__u8)(divisor & 0xff));
- write_mos_reg(serial, number, DLM, (__u8)((divisor & 0xff00) >> 8));
+ write_mos_reg(serial, number, MOS7720_DLL, (__u8)(divisor & 0xff));
+ write_mos_reg(serial, number, MOS7720_DLM,
+ (__u8)((divisor & 0xff00) >> 8));
/* Disable access to divisor latch */
mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB;
- write_mos_reg(serial, number, LCR, mos7720_port->shadowLCR);
+ write_mos_reg(serial, number, MOS7720_LCR, mos7720_port->shadowLCR);
return status;
}
@@ -1600,14 +1618,16 @@ static void change_port_settings(struct tty_struct *tty,
/* Disable Interrupts */
- write_mos_reg(serial, port_number, IER, 0x00);
- write_mos_reg(serial, port_number, FCR, 0x00);
- write_mos_reg(serial, port_number, FCR, 0xcf);
+ write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
+ write_mos_reg(serial, port_number, MOS7720_FCR, 0x00);
+ write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf);
/* Send the updated LCR value to the mos7720 */
- write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
+ write_mos_reg(serial, port_number, MOS7720_LCR,
+ mos7720_port->shadowLCR);
mos7720_port->shadowMCR = 0x0b;
- write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
+ write_mos_reg(serial, port_number, MOS7720_MCR,
+ mos7720_port->shadowMCR);
/* set up the MCR register and send it to the mos7720 */
mos7720_port->shadowMCR = UART_MCR_OUT2;
@@ -1619,14 +1639,17 @@ static void change_port_settings(struct tty_struct *tty,
/* To set hardware flow control to the specified *
* serial port, in SP1/2_CONTROL_REG */
if (port_number)
- write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01);
+ write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG,
+ 0x01);
else
- write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02);
+ write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG,
+ 0x02);
} else
mos7720_port->shadowMCR &= ~(UART_MCR_XONANY);
- write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
+ write_mos_reg(serial, port_number, MOS7720_MCR,
+ mos7720_port->shadowMCR);
/* Determine divisor based on baud rate */
baud = tty_get_baud_rate(tty);
@@ -1639,7 +1662,7 @@ static void change_port_settings(struct tty_struct *tty,
if (baud >= 230400) {
set_higher_rates(mos7720_port, baud);
/* Enable Interrupts */
- write_mos_reg(serial, port_number, IER, 0x0c);
+ write_mos_reg(serial, port_number, MOS7720_IER, 0x0c);
return;
}
@@ -1650,7 +1673,7 @@ static void change_port_settings(struct tty_struct *tty,
if (cflag & CBAUD)
tty_encode_baud_rate(tty, baud, baud);
/* Enable Interrupts */
- write_mos_reg(serial, port_number, IER, 0x0c);
+ write_mos_reg(serial, port_number, MOS7720_IER, 0x0c);
if (port->read_urb->status != -EINPROGRESS) {
status = usb_submit_urb(port->read_urb, GFP_KERNEL);
@@ -1725,7 +1748,7 @@ static int get_lsr_info(struct tty_struct *tty,
count = mos7720_chars_in_buffer(tty);
if (count == 0) {
- read_mos_reg(port->serial, port_number, LSR, &data);
+ read_mos_reg(port->serial, port_number, MOS7720_LSR, &data);
if ((data & (UART_LSR_TEMT | UART_LSR_THRE))
== (UART_LSR_TEMT | UART_LSR_THRE)) {
dev_dbg(&port->dev, "%s -- Empty\n", __func__);
@@ -1782,7 +1805,7 @@ static int mos7720_tiocmset(struct tty_struct *tty,
mcr &= ~UART_MCR_LOOP;
mos7720_port->shadowMCR = mcr;
- write_mos_reg(port->serial, port->port_number, MCR,
+ write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
mos7720_port->shadowMCR);
return 0;
@@ -1827,7 +1850,7 @@ static int set_modem_info(struct moschip_port *mos7720_port, unsigned int cmd,
}
mos7720_port->shadowMCR = mcr;
- write_mos_reg(port->serial, port->port_number, MCR,
+ write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
mos7720_port->shadowMCR);
return 0;
@@ -1942,7 +1965,7 @@ static int mos7720_startup(struct usb_serial *serial)
}
#endif
/* LSR For Port 1 */
- read_mos_reg(serial, 0, LSR, &data);
+ read_mos_reg(serial, 0, MOS7720_LSR, &data);
dev_dbg(&dev->dev, "LSR:%x\n", data);
return 0;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index f0c0c53359ad..876423b8892c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1099,6 +1099,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
{ USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
.driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
+ { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff),
+ .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1765,6 +1767,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
{ } /* Terminating entry */
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 9c63897b3a56..d156545728c2 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -145,7 +145,6 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */
{DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */
{DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */
- {DEVICE_SWI(0x1199, 0x9041)}, /* Sierra Wireless MC7305/MC7355 */
{DEVICE_SWI(0x1199, 0x9051)}, /* Netgear AirCard 340U */
{DEVICE_SWI(0x1199, 0x9053)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9054)}, /* Sierra Wireless Modem */
@@ -158,6 +157,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+ {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
/* Huawei devices */
{DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 46179a0828eb..07d1ecd564f7 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
+ { USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
/* AT&T Direct IP LTE modems */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 529066bbc7e8..46f1f13b41f1 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1306,6 +1306,7 @@ static void __exit usb_serial_exit(void)
tty_unregister_driver(usb_serial_tty_driver);
put_tty_driver(usb_serial_tty_driver);
bus_unregister(&usb_serial_bus_type);
+ idr_destroy(&serial_minors);
}
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index caf188800c67..6b2479123de7 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2065,6 +2065,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_READ_DISC_INFO ),
+/* Reported by Oliver Neukum <oneukum@suse.com>
+ * This device morphs spontaneously into another device if the access
+ * pattern of Windows isn't followed. Thus writable media would be dirty
+ * if the initial instance is used. So the device is limited to its
+ * virtual CD.
+ * And yes, the concept that BCD goes up to 9 is not heeded */
+UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff,
+ "ZTE,Incorporated",
+ "ZTE WCDMA Technologies MSM",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_SINGLE_LUN ),
+
/* Reported by Sven Geggus <sven-usbst@geggus.net>
* This encrypted pen drive returns bogus data for the initial READ(10).
*/
@@ -2074,6 +2086,17 @@ UNUSUAL_DEV( 0x1b1c, 0x1ab5, 0x0200, 0x0200,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_INITIAL_READ10 ),
+/* Reported by Hans de Goede <hdegoede@redhat.com>
+ * These are mini projectors using USB for both power and video data transport.
+ * The usb-storage interface is a virtual windows driver CD, which the gm12u320
+ * driver automatically converts into framebuffer & kms dri device nodes.
+ */
+UNUSUAL_DEV( 0x1de1, 0xc102, 0x0000, 0xffff,
+ "Grain-media Technology Corp.",
+ "USB3.0 Device GM12U320",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_DEVICE ),
+
/* Patch by Richard Schütz <r.schtz@t-online.de>
* This external hard drive enclosure uses a JMicron chip which
* needs the US_FL_IGNORE_RESIDUE flag to work properly. */
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 2fb29dfeffbd..563c510f285c 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -689,6 +689,23 @@ struct vfio_device *vfio_device_get_from_dev(struct device *dev)
}
EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
+static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
+ char *buf)
+{
+ struct vfio_device *device;
+
+ mutex_lock(&group->device_lock);
+ list_for_each_entry(device, &group->device_list, group_next) {
+ if (!strcmp(dev_name(device->dev), buf)) {
+ vfio_device_get(device);
+ break;
+ }
+ }
+ mutex_unlock(&group->device_lock);
+
+ return device;
+}
+
/*
* Caller must hold a reference to the vfio_device
*/
@@ -1198,53 +1215,53 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
{
struct vfio_device *device;
struct file *filep;
- int ret = -ENODEV;
+ int ret;
if (0 == atomic_read(&group->container_users) ||
!group->container->iommu_driver || !vfio_group_viable(group))
return -EINVAL;
- mutex_lock(&group->device_lock);
- list_for_each_entry(device, &group->device_list, group_next) {
- if (strcmp(dev_name(device->dev), buf))
- continue;
+ device = vfio_device_get_from_name(group, buf);
+ if (!device)
+ return -ENODEV;
- ret = device->ops->open(device->device_data);
- if (ret)
- break;
- /*
- * We can't use anon_inode_getfd() because we need to modify
- * the f_mode flags directly to allow more than just ioctls
- */
- ret = get_unused_fd_flags(O_CLOEXEC);
- if (ret < 0) {
- device->ops->release(device->device_data);
- break;
- }
+ ret = device->ops->open(device->device_data);
+ if (ret) {
+ vfio_device_put(device);
+ return ret;
+ }
- filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
- device, O_RDWR);
- if (IS_ERR(filep)) {
- put_unused_fd(ret);
- ret = PTR_ERR(filep);
- device->ops->release(device->device_data);
- break;
- }
+ /*
+ * We can't use anon_inode_getfd() because we need to modify
+ * the f_mode flags directly to allow more than just ioctls
+ */
+ ret = get_unused_fd_flags(O_CLOEXEC);
+ if (ret < 0) {
+ device->ops->release(device->device_data);
+ vfio_device_put(device);
+ return ret;
+ }
- /*
- * TODO: add an anon_inode interface to do this.
- * Appears to be missing by lack of need rather than
- * explicitly prevented. Now there's need.
- */
- filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+ filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
+ device, O_RDWR);
+ if (IS_ERR(filep)) {
+ put_unused_fd(ret);
+ ret = PTR_ERR(filep);
+ device->ops->release(device->device_data);
+ vfio_device_put(device);
+ return ret;
+ }
+
+ /*
+ * TODO: add an anon_inode interface to do this.
+ * Appears to be missing by lack of need rather than
+ * explicitly prevented. Now there's need.
+ */
+ filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
- vfio_device_get(device);
- atomic_inc(&group->container_users);
+ atomic_inc(&group->container_users);
- fd_install(ret, filep);
- break;
- }
- mutex_unlock(&group->device_lock);
+ fd_install(ret, filep);
return ret;
}
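
Aside (sketch only, not part of the patch): the rewrite above splits the work into a short lookup under group->device_lock that pins the device with a reference, followed by the slow open/fd setup with the lock dropped, so every failure path can simply release the reference and return. The general shape, with stand-in types instead of the vfio structures:

    #include <pthread.h>
    #include <string.h>

    struct device {
            const char *name;
            int refcount;
            struct device *next;
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct device *device_list;

    /* Hold the list lock only long enough to find the entry and pin it with
     * a reference; anything slow (opening the device, allocating an fd, ...)
     * then happens without the lock, and error paths just drop the
     * reference. */
    static struct device *device_get_from_name(const char *name)
    {
            struct device *d, *found = NULL;

            pthread_mutex_lock(&list_lock);
            for (d = device_list; d; d = d->next) {
                    if (strcmp(d->name, name) == 0) {
                            d->refcount++;
                            found = d;
                            break;
                    }
            }
            pthread_mutex_unlock(&list_lock);

            return found;
    }
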
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9e8e004bb1c3..eec2f11809ff 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -22,14 +22,20 @@
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
+#include <linux/sort.h>
#include "vhost.h"
+static ushort max_mem_regions = 64;
+module_param(max_mem_regions, ushort, 0444);
+MODULE_PARM_DESC(max_mem_regions,
+ "Maximum number of memory regions in memory map. (default: 64)");
+
enum {
- VHOST_MEMORY_MAX_NREGIONS = 64,
VHOST_MEMORY_F_LOG = 0x1,
};
@@ -543,7 +549,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
fput(dev->log_file);
dev->log_file = NULL;
/* No one will access memory at this point */
- kfree(dev->memory);
+ kvfree(dev->memory);
dev->memory = NULL;
WARN_ON(!list_empty(&dev->work_list));
if (dev->worker) {
@@ -663,6 +669,25 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq)
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
+static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
+{
+ const struct vhost_memory_region *r1 = p1, *r2 = p2;
+ if (r1->guest_phys_addr < r2->guest_phys_addr)
+ return 1;
+ if (r1->guest_phys_addr > r2->guest_phys_addr)
+ return -1;
+ return 0;
+}
+
+static void *vhost_kvzalloc(unsigned long size)
+{
+ void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+
+ if (!n)
+ n = vzalloc(size);
+ return n;
+}
+
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
struct vhost_memory mem, *newmem, *oldmem;
@@ -673,21 +698,23 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
return -EFAULT;
if (mem.padding)
return -EOPNOTSUPP;
- if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
+ if (mem.nregions > max_mem_regions)
return -E2BIG;
- newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
+ newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
if (!newmem)
return -ENOMEM;
memcpy(newmem, &mem, size);
if (copy_from_user(newmem->regions, m->regions,
mem.nregions * sizeof *m->regions)) {
- kfree(newmem);
+ kvfree(newmem);
return -EFAULT;
}
+ sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions),
+ vhost_memory_reg_sort_cmp, NULL);
if (!memory_access_ok(d, newmem, 0)) {
- kfree(newmem);
+ kvfree(newmem);
return -EFAULT;
}
oldmem = d->memory;
@@ -699,7 +726,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
d->vqs[i]->memory = newmem;
mutex_unlock(&d->vqs[i]->mutex);
}
- kfree(oldmem);
+ kvfree(oldmem);
return 0;
}
@@ -965,6 +992,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
}
if (eventfp != d->log_file) {
filep = d->log_file;
+ d->log_file = eventfp;
ctx = d->log_ctx;
d->log_ctx = eventfp ?
eventfd_ctx_fileget(eventfp) : NULL;
@@ -992,17 +1020,22 @@ EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
__u64 addr, __u32 len)
{
- struct vhost_memory_region *reg;
- int i;
+ const struct vhost_memory_region *reg;
+ int start = 0, end = mem->nregions;
- /* linear search is not brilliant, but we really have on the order of 6
- * regions in practice */
- for (i = 0; i < mem->nregions; ++i) {
- reg = mem->regions + i;
- if (reg->guest_phys_addr <= addr &&
- reg->guest_phys_addr + reg->memory_size - 1 >= addr)
- return reg;
+ while (start < end) {
+ int slot = start + (end - start) / 2;
+ reg = mem->regions + slot;
+ if (addr >= reg->guest_phys_addr)
+ end = slot;
+ else
+ start = slot + 1;
}
+
+ reg = mem->regions + start;
+ if (addr >= reg->guest_phys_addr &&
+ reg->guest_phys_addr + reg->memory_size > addr)
+ return reg;
return NULL;
}
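
Aside (stand-alone sketch, not part of the patch): vhost_set_memory() now sorts the regions in descending guest_phys_addr order (the comparator returns 1 when r1 < r2), which lets find_region() binary-search for the first region whose base is <= addr and decide the hit with a single range check. The same lookup in plain user-space C, with an explicit bounds guard for the no-match case:

    #include <stdint.h>
    #include <stdlib.h>

    struct mem_region {
            uint64_t guest_phys_addr;
            uint64_t memory_size;
    };

    /* Comparator for qsort(): descending base address, as in the patch. */
    static int region_cmp_desc(const void *p1, const void *p2)
    {
            const struct mem_region *r1 = p1, *r2 = p2;

            if (r1->guest_phys_addr < r2->guest_phys_addr)
                    return 1;
            if (r1->guest_phys_addr > r2->guest_phys_addr)
                    return -1;
            return 0;
    }

    /* Binary search over the descending array: narrow to the first region
     * whose base is <= addr, then check that addr falls inside it. */
    static const struct mem_region *find_region(const struct mem_region *regions,
                                                unsigned int nregions,
                                                uint64_t addr)
    {
            unsigned int start = 0, end = nregions;

            while (start < end) {
                    unsigned int slot = start + (end - start) / 2;

                    if (addr >= regions[slot].guest_phys_addr)
                            end = slot;
                    else
                            start = slot + 1;
            }
            if (start < nregions &&
                addr >= regions[start].guest_phys_addr &&
                addr - regions[start].guest_phys_addr < regions[start].memory_size)
                    return &regions[start];
            return NULL;
    }

The regions would be sorted once at setup time, e.g. qsort(regions, nregions, sizeof(*regions), region_cmp_desc), so each translation is O(log n) instead of the old linear scan.
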
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 658c34bb9076..1aaf89300621 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1306,10 +1306,11 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
int y;
int c = scr_readw((u16 *) vc->vc_pos);
+ ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
+
if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
return;
- ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
if (vc->vc_cursor_type & 0x10)
fbcon_del_cursor_timer(info);
else
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 2d98de535e0f..f888561568d9 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -298,7 +298,7 @@ config FB_ARMCLCD
# Helper logic selected only by the ARM Versatile platform family.
config PLAT_VERSATILE_CLCD
- def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
+ def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR
depends on ARM
depends on FB_ARMCLCD && FB=y
diff --git a/drivers/video/fbdev/omap2/dss/dss-of.c b/drivers/video/fbdev/omap2/dss/dss-of.c
index 928ee639c0c1..bf407b6ba15c 100644
--- a/drivers/video/fbdev/omap2/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/dss/dss-of.c
@@ -60,6 +60,8 @@ omapdss_of_get_next_port(const struct device_node *parent,
}
prev = port;
} while (of_node_cmp(port->name, "port") != 0);
+
+ of_node_put(ports);
}
return port;
@@ -94,7 +96,7 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
if (!port)
return NULL;
- np = of_get_next_parent(port);
+ np = of_get_parent(port);
for (i = 0; i < 2 && np; ++i) {
struct property *prop;
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 86bd457d039d..50bce45e7f3d 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -653,7 +653,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
goto err_free_dma;
}
- ret = clk_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
if (ret < 0) {
dev_err(dev, "failed to enable clock\n");
goto err_misc_deregister;
@@ -685,7 +685,7 @@ err_misc_deregister:
misc_deregister(&priv->misc_dev);
err_disable_clk:
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
return ret;
}
diff --git a/drivers/video/of_videomode.c b/drivers/video/of_videomode.c
index 111c2d1911d3..b5102aa6090d 100644
--- a/drivers/video/of_videomode.c
+++ b/drivers/video/of_videomode.c
@@ -44,11 +44,9 @@ int of_get_videomode(struct device_node *np, struct videomode *vm,
index = disp->native_mode;
ret = videomode_from_timings(disp, vm, index);
- if (ret)
- return ret;
display_timings_release(disp);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(of_get_videomode);
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index 60e2a1677563..c96944b59856 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -313,6 +313,7 @@ err_init_vq:
static void virtinput_remove(struct virtio_device *vdev)
{
struct virtio_input *vi = vdev->priv;
+ void *buf;
unsigned long flags;
spin_lock_irqsave(&vi->lock, flags);
@@ -320,6 +321,9 @@ static void virtinput_remove(struct virtio_device *vdev)
spin_unlock_irqrestore(&vi->lock, flags);
input_unregister_device(vi->idev);
+ vdev->config->reset(vdev);
+ while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL)
+ kfree(buf);
vdev->config->del_vqs(vdev);
kfree(vi);
}
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index c1b03f4235b9..4e7fec36f5c3 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -4,7 +4,7 @@
* Watchdog driver for ARM SP805 watchdog module
*
* Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2 or later. This program is licensed "as is" without any
@@ -303,6 +303,6 @@ static struct amba_driver sp805_wdt_driver = {
module_amba_driver(sp805_wdt_driver);
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
MODULE_DESCRIPTION("ARM SP805 Watchdog Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index fd933695f232..bf4a23c7c591 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -472,7 +472,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
}
/*
- * We avoid multiple worker processes conflicting via the balloon mutex.
+ * As this is a work item it is guaranteed to run as a single instance only.
* We may of course race updates of the target counts (which are protected
* by the balloon lock), or with changes to the Xen hard limit, but we will
* recover from these in time.
@@ -482,9 +482,10 @@ static void balloon_process(struct work_struct *work)
enum bp_state state = BP_DONE;
long credit;
- mutex_lock(&balloon_mutex);
do {
+ mutex_lock(&balloon_mutex);
+
credit = current_credit();
if (credit > 0) {
@@ -499,17 +500,15 @@ static void balloon_process(struct work_struct *work)
state = update_schedule(state);
-#ifndef CONFIG_PREEMPT
- if (need_resched())
- schedule();
-#endif
+ mutex_unlock(&balloon_mutex);
+
+ cond_resched();
+
} while (credit && state == BP_DONE);
/* Schedule more work if there is some still to be done. */
if (state == BP_EAGAIN)
schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
-
- mutex_unlock(&balloon_mutex);
}
/* Resets the Xen limit, sets new target, and kicks off processing. */
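
Aside (rough sketch, not part of the patch): the balloon worker now takes the mutex once per iteration and calls cond_resched() between iterations instead of holding the lock across the whole loop, so long ballooning runs neither block other users of the mutex nor hog the CPU on non-preemptible kernels. A user-space analogue of the loop shape:

    #include <pthread.h>
    #include <sched.h>

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
    static long credit;

    /* One unit of work per lock acquisition, then yield: other threads get a
     * chance at the lock and at the CPU between iterations. */
    static void process(void)
    {
            long c;

            do {
                    pthread_mutex_lock(&state_lock);
                    c = credit;
                    if (c > 0)
                            credit--;       /* stand-in for processing one page */
                    pthread_mutex_unlock(&state_lock);

                    sched_yield();          /* cond_resched() in the kernel */
            } while (c > 0);
    }
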
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 67b9163db718..0dbb222daaf1 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
pr_debug("priv %p\n", priv);
+ mutex_lock(&priv->lock);
while (!list_empty(&priv->maps)) {
map = list_entry(priv->maps.next, struct grant_map, next);
list_del(&map->next);
gntdev_put_map(NULL /* already removed */, map);
}
WARN_ON(!list_empty(&priv->freeable_maps));
+ mutex_unlock(&priv->lock);
if (use_ptemod)
mmu_notifier_unregister(&priv->mn, priv->mm);
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 9ad327238ba9..e30353575d5d 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -814,8 +814,10 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
addrs);
- if (!rv)
+ if (!rv) {
vunmap(vaddr);
+ free_xenballooned_pages(node->nr_handles, node->hvm.pages);
+ }
else
WARN(1, "Leaking %p, size %u page(s)\n", vaddr,
node->nr_handles);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 862fbc206755..564a7de17d99 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -378,7 +378,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
ret = btrfs_kobj_add_device(tgt_device->fs_devices, tgt_device);
if (ret)
- btrfs_error(root->fs_info, ret, "kobj add dev failed");
+ btrfs_err(root->fs_info, "kobj add dev failed %d\n", ret);
printk_in_rcu(KERN_INFO
"BTRFS: dev_replace from %s (devid %llu) to %s started\n",
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a9aadb2ad525..f556c3732c2c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2842,6 +2842,7 @@ int open_ctree(struct super_block *sb,
!extent_buffer_uptodate(chunk_root->node)) {
printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n",
sb->s_id);
+ chunk_root->node = NULL;
goto fail_tree_roots;
}
btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
@@ -2879,7 +2880,7 @@ retry_root_backup:
!extent_buffer_uptodate(tree_root->node)) {
printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
sb->s_id);
-
+ tree_root->node = NULL;
goto recovery_tree_root;
}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 1c2bd1723e40..07204bf601ed 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2296,9 +2296,22 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
static inline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
+ struct btrfs_delayed_ref_node *ref;
+
if (list_empty(&head->ref_list))
return NULL;
+ /*
+ * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
+ * This is to prevent a ref count from going down to zero, which deletes
+ * the extent item from the extent tree, while there are still references
+ * to add; those adds would then fail because they could not find the
+ * extent item.
+ */
+ list_for_each_entry(ref, &head->ref_list, list) {
+ if (ref->action == BTRFS_ADD_DELAYED_REF)
+ return ref;
+ }
+
return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
list);
}
@@ -4214,6 +4227,24 @@ out:
space_info->chunk_alloc = 0;
spin_unlock(&space_info->lock);
mutex_unlock(&fs_info->chunk_mutex);
+ /*
+ * When we allocate a new chunk we reserve space in the chunk block
+ * reserve to make sure we can COW nodes/leaves in the chunk tree or
+ * add new nodes/leaves to it if we end up needing to do it when
+ * inserting the chunk item and updating device items as part of the
+ * second phase of chunk allocation, performed by
+ * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
+ * large number of new block groups to create in our transaction
+ * handle's new_bgs list to avoid exhausting the chunk block reserve
+ * in extreme cases - like having a single transaction create many new
+ * block groups when starting to write out the free space caches of all
+ * the block groups that were made dirty during the lifetime of the
+ * transaction.
+ */
+ if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
+ btrfs_create_pending_block_groups(trans, trans->root);
+ btrfs_trans_release_chunk_metadata(trans);
+ }
return ret;
}
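The select_delayed_ref() hunk near the top of this file prefers BTRFS_ADD_DELAYED_REF entries so a reference count never reaches zero while adds are still queued behind drops. A self-contained sketch of that selection policy over a plain singly linked list; the types and names here are invented for illustration and are not the btrfs structures:

#include <stdio.h>
#include <stddef.h>

enum ref_action { REF_ADD, REF_DROP };        /* stand-ins for the delayed-ref actions */

struct ref {
    enum ref_action action;
    struct ref *next;
};

/* Prefer an ADD ref if any is queued; otherwise fall back to the list head. */
static struct ref *select_ref(struct ref *head)
{
    struct ref *r;

    if (!head)
        return NULL;

    for (r = head; r; r = r->next)
        if (r->action == REF_ADD)
            return r;

    return head;
}

int main(void)
{
    struct ref add = { REF_ADD, NULL };
    struct ref drop = { REF_DROP, &add };     /* list: DROP -> ADD */

    printf("selected action: %d\n", select_ref(&drop)->action);
    return 0;
}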
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index b33c0cf02668..e33dff356460 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4209,7 +4209,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
u64 extent_num_bytes = 0;
u64 extent_offset = 0;
u64 item_end = 0;
- u64 last_size = (u64)-1;
+ u64 last_size = new_size;
u32 found_type = (u8)-1;
int found_extent;
int del_item;
@@ -4493,8 +4493,7 @@ out:
btrfs_abort_transaction(trans, root, ret);
}
error:
- if (last_size != (u64)-1 &&
- root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
+ if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
btrfs_ordered_update_i_size(inode, last_size, NULL);
btrfs_free_path(path);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 5d91776e12a2..0770c91586ca 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3090,7 +3090,7 @@ out_unlock:
static long btrfs_ioctl_file_extent_same(struct file *file,
struct btrfs_ioctl_same_args __user *argp)
{
- struct btrfs_ioctl_same_args *same;
+ struct btrfs_ioctl_same_args *same = NULL;
struct btrfs_ioctl_same_extent_info *info;
struct inode *src = file_inode(file);
u64 off;
@@ -3120,6 +3120,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
if (IS_ERR(same)) {
ret = PTR_ERR(same);
+ same = NULL;
goto out;
}
@@ -3190,6 +3191,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
out:
mnt_drop_write_file(file);
+ kfree(same);
return ret;
}
@@ -3586,6 +3588,20 @@ process_slot:
u64 trim = 0;
u64 aligned_end = 0;
+ /*
+ * Don't copy an inline extent into an offset
+ * greater than zero. Having an inline extent
+ * at such an offset results in chaos as btrfs
+ * isn't prepared for such cases. Just skip
+ * this case for the same reasons as commented
+ * at btrfs_ioctl_clone().
+ */
+ if (last_dest_end > 0) {
+ ret = -EOPNOTSUPP;
+ btrfs_end_transaction(trans, root);
+ goto out;
+ }
+
if (off > key.offset) {
skip = off - key.offset;
new_key.offset += skip;
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index e9ace099162c..8a8202956576 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1651,6 +1651,11 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
/* Exclusive -> exclusive, nothing changed */
}
}
+
+ /* For exclusive extent, free its reserved bytes too */
+ if (nr_old_roots == 0 && nr_new_roots == 1 &&
+ cur_new_count == nr_new_roots)
+ qg->reserved -= num_bytes;
if (dirty)
qgroup_dirty(fs_info, qg);
}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index c0f18e7266b6..f5021fcb154e 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -761,7 +761,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
if (!list_empty(&trans->ordered)) {
spin_lock(&info->trans_lock);
- list_splice(&trans->ordered, &cur_trans->pending_ordered);
+ list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
spin_unlock(&info->trans_lock);
}
@@ -1866,7 +1866,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
}
spin_lock(&root->fs_info->trans_lock);
- list_splice(&trans->ordered, &cur_trans->pending_ordered);
+ list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
spin_unlock(&root->fs_info->trans_lock);
atomic_inc(&cur_trans->use_count);
@@ -2152,7 +2152,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
kmem_cache_free(btrfs_trans_handle_cachep, trans);
- if (current != root->fs_info->transaction_kthread)
+ if (current != root->fs_info->transaction_kthread &&
+ current != root->fs_info->cleaner_kthread)
btrfs_run_delayed_iputs(root);
return ret;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index dc10c9dd36c1..ddd5e9471290 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1506,7 +1506,6 @@ static int __mark_caps_flushing(struct inode *inode,
swap(cf, ci->i_prealloc_cap_flush);
cf->caps = flushing;
- cf->kick = false;
spin_lock(&mdsc->cap_dirty_lock);
list_del_init(&ci->i_dirty_item);
@@ -2123,8 +2122,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
- struct ceph_inode_info *ci,
- bool kick_all)
+ struct ceph_inode_info *ci)
{
struct inode *inode = &ci->vfs_inode;
struct ceph_cap *cap;
@@ -2150,9 +2148,7 @@ static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
cf = rb_entry(n, struct ceph_cap_flush, i_node);
- if (cf->tid < first_tid)
- continue;
- if (kick_all || cf->kick)
+ if (cf->tid >= first_tid)
break;
}
if (!n) {
@@ -2161,7 +2157,6 @@ static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
}
cf = rb_entry(n, struct ceph_cap_flush, i_node);
- cf->kick = false;
first_tid = cf->tid + 1;
@@ -2181,8 +2176,6 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
{
struct ceph_inode_info *ci;
struct ceph_cap *cap;
- struct ceph_cap_flush *cf;
- struct rb_node *n;
dout("early_kick_flushing_caps mds%d\n", session->s_mds);
list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
@@ -2205,16 +2198,11 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
if ((cap->issued & ci->i_flushing_caps) !=
ci->i_flushing_caps) {
spin_unlock(&ci->i_ceph_lock);
- if (!__kick_flushing_caps(mdsc, session, ci, true))
+ if (!__kick_flushing_caps(mdsc, session, ci))
continue;
spin_lock(&ci->i_ceph_lock);
}
- for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
- cf = rb_entry(n, struct ceph_cap_flush, i_node);
- cf->kick = true;
- }
-
spin_unlock(&ci->i_ceph_lock);
}
}
@@ -2228,7 +2216,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
dout("kick_flushing_caps mds%d\n", session->s_mds);
list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
- int delayed = __kick_flushing_caps(mdsc, session, ci, false);
+ int delayed = __kick_flushing_caps(mdsc, session, ci);
if (delayed) {
spin_lock(&ci->i_ceph_lock);
__cap_delay_requeue(mdsc, ci);
@@ -2261,7 +2249,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
spin_unlock(&ci->i_ceph_lock);
- delayed = __kick_flushing_caps(mdsc, session, ci, true);
+ delayed = __kick_flushing_caps(mdsc, session, ci);
if (delayed) {
spin_lock(&ci->i_ceph_lock);
__cap_delay_requeue(mdsc, ci);
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 4347039ecc18..6706bde9ad1b 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -287,7 +287,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
return 0;
spin_lock(&ctx->flc_lock);
- list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
+ list_for_each_entry(lock, &ctx->flc_posix, fl_list) {
++seen_fcntl;
if (seen_fcntl > num_fcntl_locks) {
err = -ENOSPC;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 860cc016e70d..2f2460d23a06 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -189,7 +189,6 @@ static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
struct ceph_cap_flush {
u64 tid;
int caps;
- bool kick;
struct rb_node g_node; // global
union {
struct rb_node i_node; // inode
diff --git a/fs/configfs/item.c b/fs/configfs/item.c
index 4d6a30e76168..b863a09cd2f1 100644
--- a/fs/configfs/item.c
+++ b/fs/configfs/item.c
@@ -115,7 +115,7 @@ void config_item_init_type_name(struct config_item *item,
const char *name,
struct config_item_type *type)
{
- config_item_set_name(item, name);
+ config_item_set_name(item, "%s", name);
item->ci_type = type;
config_item_init(item);
}
@@ -124,7 +124,7 @@ EXPORT_SYMBOL(config_item_init_type_name);
void config_group_init_type_name(struct config_group *group, const char *name,
struct config_item_type *type)
{
- config_item_set_name(&group->cg_item, name);
+ config_item_set_name(&group->cg_item, "%s", name);
group->cg_item.ci_type = type;
config_group_init(group);
}
diff --git a/fs/dax.c b/fs/dax.c
index c3e21ccfc358..a7f77e1fa18c 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -319,6 +319,12 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
* @vma: The virtual memory area where the fault occurred
* @vmf: The description of the fault
* @get_block: The filesystem method used to translate file offsets to blocks
+ * @complete_unwritten: The filesystem method used to convert unwritten blocks
+ * to written so the data written to them is exposed. This is required by
+ * write faults for filesystems that will return unwritten extent mappings
+ * from @get_block, but it is optional for reads as dax_insert_mapping()
+ * will always zero unwritten blocks. If the fs does not support unwritten
+ * extents, it should pass NULL.
*
* When a page fault occurs, filesystems may call this helper in their
* fault handler for DAX files. __dax_fault() assumes the caller has done all
@@ -437,8 +443,12 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
* as for normal BH based IO completions.
*/
error = dax_insert_mapping(inode, &bh, vma, vmf);
- if (buffer_unwritten(&bh))
- complete_unwritten(&bh, !error);
+ if (buffer_unwritten(&bh)) {
+ if (complete_unwritten)
+ complete_unwritten(&bh, !error);
+ else
+ WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
+ }
out:
if (error == -ENOMEM)
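With @complete_unwritten now optional, the fault path only calls it when the filesystem supplied one and otherwise warns on write faults. A hedged standalone sketch of that optional-callback shape, using invented names rather than the DAX interfaces:

#include <stdio.h>
#include <stdbool.h>

typedef void (*complete_cb)(void *bh, bool uptodate);

/* Invoke the completion hook only when the filesystem supplied one; a write
 * fault with no hook is flagged instead of dereferencing NULL. */
static void finish_unwritten(complete_cb cb, void *bh, bool ok, bool is_write)
{
    if (cb)
        cb(bh, ok);
    else if (is_write)
        fprintf(stderr, "write fault on an unwritten block with no completion hook\n");
}

static void mark_written(void *bh, bool uptodate)
{
    (void)bh;
    printf("conversion %s\n", uptodate ? "succeeded" : "failed");
}

int main(void)
{
    finish_unwritten(mark_written, NULL, true, true);   /* fs provided a hook */
    finish_unwritten(NULL, NULL, true, true);           /* write fault, no hook: warn */
    return 0;
}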
diff --git a/fs/dcache.c b/fs/dcache.c
index 5c8ea15e73a5..9b5fe503f6cb 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -3442,22 +3442,15 @@ void __init vfs_caches_init_early(void)
inode_init_early();
}
-void __init vfs_caches_init(unsigned long mempages)
+void __init vfs_caches_init(void)
{
- unsigned long reserve;
-
- /* Base hash sizes on available memory, with a reserve equal to
- 150% of current kernel size */
-
- reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
- mempages -= reserve;
-
names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
dcache_init();
inode_init();
- files_init(mempages);
+ files_init();
+ files_maxfiles_init();
mnt_init();
bdev_cache_init();
chrdev_init();
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 9bedfa8dd3a5..f71e19a9dd3c 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2072,8 +2072,6 @@ static int f2fs_set_data_page_dirty(struct page *page)
return 1;
}
- mark_inode_dirty(inode);
-
if (!PageDirty(page)) {
__set_page_dirty_nobuffers(page);
update_dirty_page(inode, page);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index ada2a3dd701a..b0f38c3b37f4 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1331,12 +1331,13 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
if (ret)
return ret;
- if (f2fs_is_atomic_file(inode))
+ if (f2fs_is_atomic_file(inode)) {
+ clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
commit_inmem_pages(inode, false);
+ }
ret = f2fs_sync_file(filp, 0, LONG_MAX, 0);
mnt_drop_write_file(filp);
- clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
return ret;
}
@@ -1387,8 +1388,8 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
f2fs_balance_fs(F2FS_I_SB(inode));
if (f2fs_is_atomic_file(inode)) {
- commit_inmem_pages(inode, false);
clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
+ commit_inmem_pages(inode, false);
}
if (f2fs_is_volatile_file(inode))
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index e1e73617d13b..22fb5ef37966 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -556,27 +556,39 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
if (!fio.encrypted_page)
goto put_out;
- f2fs_submit_page_bio(&fio);
+ err = f2fs_submit_page_bio(&fio);
+ if (err)
+ goto put_page_out;
+
+ /* write page */
+ lock_page(fio.encrypted_page);
+
+ if (unlikely(!PageUptodate(fio.encrypted_page)))
+ goto put_page_out;
+ if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
+ goto put_page_out;
+
+ set_page_dirty(fio.encrypted_page);
+ f2fs_wait_on_page_writeback(fio.encrypted_page, META);
+ if (clear_page_dirty_for_io(fio.encrypted_page))
+ dec_page_count(fio.sbi, F2FS_DIRTY_META);
+
+ set_page_writeback(fio.encrypted_page);
/* allocate block address */
f2fs_wait_on_page_writeback(dn.node_page, NODE);
-
allocate_data_block(fio.sbi, NULL, fio.blk_addr,
&fio.blk_addr, &sum, CURSEG_COLD_DATA);
- dn.data_blkaddr = fio.blk_addr;
-
- /* write page */
- lock_page(fio.encrypted_page);
- set_page_writeback(fio.encrypted_page);
fio.rw = WRITE_SYNC;
f2fs_submit_page_mbio(&fio);
+ dn.data_blkaddr = fio.blk_addr;
set_data_blkaddr(&dn);
f2fs_update_extent_cache(&dn);
set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
if (page->index == 0)
set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
-
+put_page_out:
f2fs_put_page(fio.encrypted_page, 1);
put_out:
f2fs_put_dnode(&dn);
@@ -605,8 +617,8 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
.page = page,
.encrypted_page = NULL,
};
+ set_page_dirty(page);
f2fs_wait_on_page_writeback(page, DATA);
-
if (clear_page_dirty_for_io(page))
inode_dec_dirty_pages(inode);
set_cold_data(page);
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 38e75fb1e488..a13ffcc32992 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -141,6 +141,8 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
kunmap_atomic(dst_addr);
SetPageUptodate(page);
no_update:
+ set_page_dirty(page);
+
/* clear dirty state */
dirty = clear_page_dirty_for_io(page);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 1eb343768781..61b97f9cb9f6 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -257,6 +257,7 @@ void commit_inmem_pages(struct inode *inode, bool abort)
if (!abort) {
lock_page(cur->page);
if (cur->page->mapping == inode->i_mapping) {
+ set_page_dirty(cur->page);
f2fs_wait_on_page_writeback(cur->page, DATA);
if (clear_page_dirty_for_io(cur->page))
inode_dec_dirty_pages(inode);
diff --git a/fs/file_table.c b/fs/file_table.c
index 7f9d407c7595..ad17e05ebf95 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -25,6 +25,7 @@
#include <linux/hardirq.h>
#include <linux/task_work.h>
#include <linux/ima.h>
+#include <linux/swap.h>
#include <linux/atomic.h>
@@ -308,19 +309,24 @@ void put_filp(struct file *file)
}
}
-void __init files_init(unsigned long mempages)
+void __init files_init(void)
{
- unsigned long n;
-
filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+ percpu_counter_init(&nr_files, 0, GFP_KERNEL);
+}
- /*
- * One file with associated inode and dcache is very roughly 1K.
- * Per default don't use more than 10% of our memory for files.
- */
+/*
+ * One file with associated inode and dcache is very roughly 1K. By default,
+ * do not use more than 10% of our memory for files.
+ */
+void __init files_maxfiles_init(void)
+{
+ unsigned long n;
+ unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2;
+
+ memreserve = min(memreserve, totalram_pages - 1);
+ n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
- n = (mempages * (PAGE_SIZE / 1024)) / 10;
files_stat.max_files = max_t(unsigned long, n, NR_FILE);
- percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}
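files_maxfiles_init() now reads totalram_pages and nr_free_pages() at a point where they are meaningful: it reserves roughly 150% of the pages currently in use, then budgets about 1K of file, inode, and dcache overhead per file from 10% of what remains. A small standalone sketch of that arithmetic with made-up page counts; only the PAGE_SIZE/1024, 3/2, and 10% factors come from the hunk, the rest is illustrative:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define NR_FILE   8192UL      /* floor applied by the kernel's files_stat */

static unsigned long max_files(unsigned long totalram_pages,
                               unsigned long free_pages)
{
    unsigned long used = totalram_pages - free_pages;
    unsigned long reserve = used * 3 / 2;          /* ~150% of current usage */
    unsigned long n;

    if (reserve > totalram_pages - 1)
        reserve = totalram_pages - 1;

    /* ~1K per open file, capped at 10% of the non-reserved memory */
    n = (totalram_pages - reserve) * (PAGE_SIZE / 1024) / 10;
    return n > NR_FILE ? n : NR_FILE;
}

int main(void)
{
    /* e.g. 4 GiB of 4 KiB pages with 3 GiB free */
    printf("max_files ~ %lu\n", max_files(1048576UL, 786432UL));
    return 0;
}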
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index f0520bcf2094..518c6294bf6c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -702,6 +702,7 @@ void wbc_account_io(struct writeback_control *wbc, struct page *page,
else
wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
}
+EXPORT_SYMBOL_GPL(wbc_account_io);
/**
* inode_congested - test whether an inode is congested
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 80cc1b35d460..ebb5e37455a0 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -2246,7 +2246,15 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
err = -EINVAL;
if (old) {
- struct fuse_dev *fud = fuse_get_dev(old);
+ struct fuse_dev *fud = NULL;
+
+ /*
+ * Check against file->f_op because CUSE
+ * uses the same ioctl handler.
+ */
+ if (old->f_op == file->f_op &&
+ old->f_cred->user_ns == file->f_cred->user_ns)
+ fud = fuse_get_dev(old);
if (fud) {
mutex_lock(&fuse_mutex);
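The fuse_dev_ioctl() fix only trusts the passed-in fd as a fuse device when its f_op matches the caller's, so a CUSE fd (or one from another user namespace) is never reinterpreted as a fuse_dev. A generic sketch of that ops-table check before downcasting private data; the structures are invented stand-ins, not the fuse types:

#include <stdio.h>
#include <stddef.h>

struct ops { int dummy; };

struct generic_file {
    const struct ops *f_op;
    void *private_data;
};

struct my_dev { int id; };

static const struct ops my_ops;
static const struct ops other_ops;

/* Only trust private_data as a struct my_dev when the file really belongs
 * to this driver, i.e. it carries this driver's ops table. */
static struct my_dev *get_my_dev(const struct generic_file *file)
{
    if (file->f_op != &my_ops)
        return NULL;
    return file->private_data;
}

int main(void)
{
    struct my_dev dev = { 42 };
    struct generic_file mine  = { &my_ops, &dev };
    struct generic_file other = { &other_ops, &dev };

    printf("mine: %p, other: %p\n",
           (void *)get_my_dev(&mine), (void *)get_my_dev(&other));
    return 0;
}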
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 0cf74df68617..973c24ce59ad 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1010,6 +1010,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
if (!inode)
goto out_dentry;
+ if (creat_flags == HUGETLB_SHMFS_INODE)
+ inode->i_flags |= S_PRIVATE;
file = ERR_PTR(-ENOMEM);
if (hugetlb_reserve_pages(inode, 0,
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index e98d39d75cf4..b9dc23cd04f2 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -76,7 +76,7 @@ static int jfs_open(struct inode *inode, struct file *file)
if (ji->active_ag == -1) {
struct jfs_sb_info *jfs_sb = JFS_SBI(inode->i_sb);
ji->active_ag = BLKTOAG(addressPXD(&ji->ixpxd), jfs_sb);
- atomic_inc( &jfs_sb->bmap->db_active[ji->active_ag]);
+ atomic_inc(&jfs_sb->bmap->db_active[ji->active_ag]);
}
spin_unlock_irq(&ji->ag_lock);
}
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 6f1cb2b5ee28..41aa3ca6a6a4 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -134,11 +134,11 @@ int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
* It has been committed since the last change, but was still
* on the dirty inode list.
*/
- if (!test_cflag(COMMIT_Dirty, inode)) {
+ if (!test_cflag(COMMIT_Dirty, inode)) {
/* Make sure committed changes hit the disk */
jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait);
return 0;
- }
+ }
if (jfs_commit_inode(inode, wait)) {
jfs_err("jfs_write_inode: jfs_commit_inode failed!");
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index e33be921aa41..a5ac97b9a933 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1160,7 +1160,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
rc = dtModify(tid, new_dir, &new_dname, &ino,
old_ip->i_ino, JFS_RENAME);
if (rc)
- goto out4;
+ goto out_tx;
drop_nlink(new_ip);
if (S_ISDIR(new_ip->i_mode)) {
drop_nlink(new_ip);
@@ -1185,7 +1185,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if ((new_size = commitZeroLink(tid, new_ip)) < 0) {
txAbort(tid, 1); /* Marks FS Dirty */
rc = new_size;
- goto out4;
+ goto out_tx;
}
tblk = tid_to_tblock(tid);
tblk->xflag |= COMMIT_DELETE;
@@ -1203,7 +1203,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (rc) {
jfs_err("jfs_rename didn't expect dtSearch to fail "
"w/rc = %d", rc);
- goto out4;
+ goto out_tx;
}
ino = old_ip->i_ino;
@@ -1211,7 +1211,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (rc) {
if (rc == -EIO)
jfs_err("jfs_rename: dtInsert returned -EIO");
- goto out4;
+ goto out_tx;
}
if (S_ISDIR(old_ip->i_mode))
inc_nlink(new_dir);
@@ -1226,7 +1226,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
jfs_err("jfs_rename did not expect dtDelete to return rc = %d",
rc);
txAbort(tid, 1); /* Marks Filesystem dirty */
- goto out4;
+ goto out_tx;
}
if (S_ISDIR(old_ip->i_mode)) {
drop_nlink(old_dir);
@@ -1285,7 +1285,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
rc = txCommit(tid, ipcount, iplist, commit_flag);
- out4:
+ out_tx:
txEnd(tid);
if (new_ip)
mutex_unlock(&JFS_IP(new_ip)->commit_mutex);
@@ -1308,13 +1308,6 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
if (new_ip && (new_ip->i_nlink == 0))
set_cflag(COMMIT_Nolink, new_ip);
- out3:
- free_UCSname(&new_dname);
- out2:
- free_UCSname(&old_dname);
- out1:
- if (new_ip && !S_ISDIR(new_ip->i_mode))
- IWRITE_UNLOCK(new_ip);
/*
* Truncating the directory index table is not guaranteed. It
* may need to be done iteratively
@@ -1325,7 +1318,13 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
clear_cflag(COMMIT_Stale, old_dir);
}
-
+ if (new_ip && !S_ISDIR(new_ip->i_mode))
+ IWRITE_UNLOCK(new_ip);
+ out3:
+ free_UCSname(&new_dname);
+ out2:
+ free_UCSname(&old_dname);
+ out1:
jfs_info("jfs_rename: returning %d", rc);
return rc;
}
diff --git a/fs/locks.c b/fs/locks.c
index 653faabb07f4..d3d558ba4da7 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -862,12 +862,11 @@ static int posix_locks_deadlock(struct file_lock *caller_fl,
* whether or not a lock was successfully freed by testing the return
* value for -ENOENT.
*/
-static int flock_lock_file(struct file *filp, struct file_lock *request)
+static int flock_lock_inode(struct inode *inode, struct file_lock *request)
{
struct file_lock *new_fl = NULL;
struct file_lock *fl;
struct file_lock_context *ctx;
- struct inode *inode = file_inode(filp);
int error = 0;
bool found = false;
LIST_HEAD(dispose);
@@ -890,7 +889,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
goto find_conflict;
list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
- if (filp != fl->fl_file)
+ if (request->fl_file != fl->fl_file)
continue;
if (request->fl_type == fl->fl_type)
goto out;
@@ -1164,20 +1163,19 @@ int posix_lock_file(struct file *filp, struct file_lock *fl,
EXPORT_SYMBOL(posix_lock_file);
/**
- * posix_lock_file_wait - Apply a POSIX-style lock to a file
- * @filp: The file to apply the lock to
+ * posix_lock_inode_wait - Apply a POSIX-style lock to a file
+ * @inode: inode of file to which lock request should be applied
* @fl: The lock to be applied
*
- * Add a POSIX style lock to a file.
- * We merge adjacent & overlapping locks whenever possible.
- * POSIX locks are sorted by owner task, then by starting address
+ * Variant of posix_lock_file_wait that does not take a filp, and so can be
+ * used after the filp has already been torn down.
*/
-int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
int error;
might_sleep ();
for (;;) {
- error = posix_lock_file(filp, fl, NULL);
+ error = __posix_lock_file(inode, fl, NULL);
if (error != FILE_LOCK_DEFERRED)
break;
error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
@@ -1189,7 +1187,7 @@ int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
}
return error;
}
-EXPORT_SYMBOL(posix_lock_file_wait);
+EXPORT_SYMBOL(posix_lock_inode_wait);
/**
* locks_mandatory_locked - Check for an active lock
@@ -1851,18 +1849,18 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
}
/**
- * flock_lock_file_wait - Apply a FLOCK-style lock to a file
- * @filp: The file to apply the lock to
+ * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
+ * @inode: inode of the file to apply to
* @fl: The lock to be applied
*
- * Add a FLOCK style lock to a file.
+ * Apply a FLOCK style lock request to an inode.
*/
-int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
+int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
int error;
might_sleep();
for (;;) {
- error = flock_lock_file(filp, fl);
+ error = flock_lock_inode(inode, fl);
if (error != FILE_LOCK_DEFERRED)
break;
error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
@@ -1874,8 +1872,7 @@ int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
}
return error;
}
-
-EXPORT_SYMBOL(flock_lock_file_wait);
+EXPORT_SYMBOL(flock_lock_inode_wait);
/**
* sys_flock: - flock() system call.
@@ -2401,7 +2398,8 @@ locks_remove_flock(struct file *filp)
.fl_type = F_UNLCK,
.fl_end = OFFSET_MAX,
};
- struct file_lock_context *flctx = file_inode(filp)->i_flctx;
+ struct inode *inode = file_inode(filp);
+ struct file_lock_context *flctx = inode->i_flctx;
if (list_empty(&flctx->flc_flock))
return;
@@ -2409,7 +2407,7 @@ locks_remove_flock(struct file *filp)
if (filp->f_op->flock)
filp->f_op->flock(filp, F_SETLKW, &fl);
else
- flock_lock_file(filp, &fl);
+ flock_lock_inode(inode, &fl);
if (fl.fl_ops && fl.fl_ops->fl_release_private)
fl.fl_ops->fl_release_private(&fl);
diff --git a/fs/namei.c b/fs/namei.c
index ae4e4c18b2ac..1c2105ed20c5 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -879,7 +879,7 @@ static inline int may_follow_link(struct nameidata *nd)
return 0;
/* Allowed if parent directory not sticky and world-writable. */
- parent = nd->path.dentry->d_inode;
+ parent = nd->inode;
if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
return 0;
@@ -1954,8 +1954,13 @@ OK:
continue;
}
}
- if (unlikely(!d_can_lookup(nd->path.dentry)))
+ if (unlikely(!d_can_lookup(nd->path.dentry))) {
+ if (nd->flags & LOOKUP_RCU) {
+ if (unlazy_walk(nd, NULL, 0))
+ return -ECHILD;
+ }
return -ENOTDIR;
+ }
}
}
diff --git a/fs/namespace.c b/fs/namespace.c
index c7cb8a526c05..2b8aa15fd6df 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1361,6 +1361,36 @@ enum umount_tree_flags {
UMOUNT_PROPAGATE = 2,
UMOUNT_CONNECTED = 4,
};
+
+static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
+{
+ /* Leaving mounts connected is only valid for lazy umounts */
+ if (how & UMOUNT_SYNC)
+ return true;
+
+ /* A mount without a parent has nothing to be connected to */
+ if (!mnt_has_parent(mnt))
+ return true;
+
+ /* Because the reference counting rules change when mounts are
+ * unmounted and connected, umounted mounts may not be
+ * connected to mounted mounts.
+ */
+ if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
+ return true;
+
+ /* Has it been requested that the mount remain connected? */
+ if (how & UMOUNT_CONNECTED)
+ return false;
+
+ /* Is the mount locked such that it needs to remain connected? */
+ if (IS_MNT_LOCKED(mnt))
+ return false;
+
+ /* By default disconnect the mount */
+ return true;
+}
+
/*
* mount_lock must be held
* namespace_sem must be held for write
@@ -1398,10 +1428,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
if (how & UMOUNT_SYNC)
p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
- disconnect = !(((how & UMOUNT_CONNECTED) &&
- mnt_has_parent(p) &&
- (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) ||
- IS_MNT_LOCKED_AND_LAZY(p));
+ disconnect = disconnect_mount(p, how);
pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
disconnect ? &unmounted : NULL);
@@ -1538,11 +1565,8 @@ void __detach_mounts(struct dentry *dentry)
while (!hlist_empty(&mp->m_list)) {
mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
- struct mount *p, *tmp;
- list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
- hlist_add_head(&p->mnt_umount.s_list, &unmounted);
- umount_mnt(p);
- }
+ hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
+ umount_mnt(mnt);
}
else umount_tree(mnt, UMOUNT_CONNECTED);
}
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index ecebb406cc1a..4a90c9bb3135 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -775,7 +775,7 @@ static int nfs_init_server(struct nfs_server *server,
server->options = data->options;
server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
- NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME|NFS_CAP_CHANGE_ATTR;
+ NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
if (data->rsize)
server->rsize = nfs_block_size(data->rsize, NULL);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index c12951b9551e..b3289d701eea 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1852,7 +1852,7 @@ ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
struct nfs42_layoutstat_devinfo *devinfo;
int i;
- for (i = 0; i <= FF_LAYOUT_MIRROR_COUNT(pls); i++) {
+ for (i = 0; i < FF_LAYOUT_MIRROR_COUNT(pls); i++) {
if (*dev_count >= dev_limit)
break;
mirror = FF_LAYOUT_COMP(pls, i);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index b77b328a06d7..0adc7d245b3d 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -442,8 +442,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
inode->i_version = fattr->change_attr;
- else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
- nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
+ else
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
+ | NFS_INO_REVAL_PAGECACHE);
if (fattr->valid & NFS_ATTR_FATTR_SIZE)
inode->i_size = nfs_size_to_loff_t(fattr->size);
else
@@ -1244,9 +1245,11 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
cur_size = i_size_read(inode);
new_isize = nfs_size_to_loff_t(fattr->size);
- if (cur_size != new_isize && nfsi->nrequests == 0)
+ if (cur_size != new_isize)
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
}
+ if (nfsi->nrequests != 0)
+ invalid &= ~NFS_INO_REVAL_PAGECACHE;
/* Have any file permissions changed? */
if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
@@ -1684,13 +1687,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
invalid |= NFS_INO_INVALID_ATTR
| NFS_INO_INVALID_DATA
| NFS_INO_INVALID_ACCESS
- | NFS_INO_INVALID_ACL
- | NFS_INO_REVAL_PAGECACHE;
+ | NFS_INO_INVALID_ACL;
if (S_ISDIR(inode->i_mode))
nfs_force_lookup_revalidate(inode);
inode->i_version = fattr->change_attr;
}
- } else if (server->caps & NFS_CAP_CHANGE_ATTR)
+ } else
nfsi->cache_validity |= save_cache_validity;
if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
@@ -1717,7 +1719,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
if ((nfsi->nrequests == 0) || new_isize > cur_isize) {
i_size_write(inode, new_isize);
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
- invalid &= ~NFS_INO_REVAL_PAGECACHE;
}
dprintk("NFS: isize change on server for file %s/%ld "
"(%Ld to %Ld)\n",
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 7e3c4604bea8..9b372b845f6a 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -296,6 +296,22 @@ extern struct rpc_procinfo nfs4_procedures[];
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags);
+static inline struct nfs4_label *
+nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
+{
+ if (!dst || !src)
+ return NULL;
+
+ if (src->len > NFS4_MAXLABELLEN)
+ return NULL;
+
+ dst->lfs = src->lfs;
+ dst->pi = src->pi;
+ dst->len = src->len;
+ memcpy(dst->label, src->label, src->len);
+
+ return dst;
+}
static inline void nfs4_label_free(struct nfs4_label *label)
{
if (label) {
@@ -316,6 +332,11 @@ static inline void nfs4_label_free(void *label) {}
static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
{
}
+static inline struct nfs4_label *
+nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
+{
+ return NULL;
+}
#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
/* proc.c */
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index f486b80f927a..d731bbf974aa 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -135,7 +135,7 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
return err;
}
-loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
+static loff_t _nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
{
struct inode *inode = file_inode(filep);
struct nfs42_seek_args args = {
@@ -171,6 +171,23 @@ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
}
+loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
+{
+ struct nfs_server *server = NFS_SERVER(file_inode(filep));
+ struct nfs4_exception exception = { };
+ int err;
+
+ do {
+ err = _nfs42_proc_llseek(filep, offset, whence);
+ if (err == -ENOTSUPP)
+ return -EOPNOTSUPP;
+ err = nfs4_handle_exception(server, err, &exception);
+ } while (exception.retry);
+
+ return err;
+}
+
+
static void
nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
{
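_nfs42_proc_llseek() is now wrapped by a retry loop that feeds each error through nfs4_handle_exception() until the exception state says to stop. The wrapper pattern in isolation, as a sketch with invented names and a plain errno-style return instead of the NFS exception machinery:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct exception { bool retry; };

/* Stand-in for nfs4_handle_exception(): treat -EAGAIN as transient. */
static int handle_exception(int err, struct exception *exc)
{
    exc->retry = (err == -EAGAIN);
    return exc->retry ? 0 : err;
}

static int attempts;

/* Stand-in for the one-shot helper that performs a single attempt. */
static int do_op_once(void)
{
    return ++attempts < 3 ? -EAGAIN : 0;
}

static int do_op(void)
{
    struct exception exc = { false };
    int err;

    do {
        err = do_op_once();
        if (err == -EOPNOTSUPP)       /* hard error: never worth retrying */
            return err;
        err = handle_exception(err, &exc);
    } while (exc.retry);

    return err;
}

int main(void)
{
    printf("do_op() = %d after %d attempts\n", do_op(), attempts);
    return 0;
}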
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 6f228b5af819..3acb1eb72930 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -467,7 +467,10 @@ static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
- do_renew_lease(server->nfs_client, timestamp);
+ struct nfs_client *clp = server->nfs_client;
+
+ if (!nfs4_has_session(clp))
+ do_renew_lease(clp, timestamp);
}
struct nfs4_call_sync_data {
@@ -616,8 +619,7 @@ int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
clp = session->clp;
do_renew_lease(clp, res->sr_timestamp);
/* Check sequence flags */
- if (res->sr_status_flags != 0)
- nfs4_schedule_lease_recovery(clp);
+ nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
nfs41_update_target_slotid(slot->table, slot, res);
break;
case 1:
@@ -910,6 +912,7 @@ struct nfs4_opendata {
struct nfs_open_confirmres c_res;
struct nfs4_string owner_name;
struct nfs4_string group_name;
+ struct nfs4_label *a_label;
struct nfs_fattr f_attr;
struct nfs4_label *f_label;
struct dentry *dir;
@@ -1013,6 +1016,10 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
if (IS_ERR(p->f_label))
goto err_free_p;
+ p->a_label = nfs4_label_alloc(server, gfp_mask);
+ if (IS_ERR(p->a_label))
+ goto err_free_f;
+
alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
if (IS_ERR(p->o_arg.seqid))
@@ -1041,7 +1048,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
p->o_arg.server = server;
p->o_arg.bitmask = nfs4_bitmask(server, label);
p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
- p->o_arg.label = label;
+ p->o_arg.label = nfs4_label_copy(p->a_label, label);
p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
switch (p->o_arg.claim) {
case NFS4_OPEN_CLAIM_NULL:
@@ -1074,6 +1081,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
return p;
err_free_label:
+ nfs4_label_free(p->a_label);
+err_free_f:
nfs4_label_free(p->f_label);
err_free_p:
kfree(p);
@@ -1093,6 +1102,7 @@ static void nfs4_opendata_free(struct kref *kref)
nfs4_put_open_state(p->state);
nfs4_put_state_owner(p->owner);
+ nfs4_label_free(p->a_label);
nfs4_label_free(p->f_label);
dput(p->dir);
@@ -1198,12 +1208,15 @@ static bool nfs_need_update_open_stateid(struct nfs4_state *state,
static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
{
+ if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
+ return;
if (state->n_wronly)
set_bit(NFS_O_WRONLY_STATE, &state->flags);
if (state->n_rdonly)
set_bit(NFS_O_RDONLY_STATE, &state->flags);
if (state->n_rdwr)
set_bit(NFS_O_RDWR_STATE, &state->flags);
+ set_bit(NFS_OPEN_STATE, &state->flags);
}
static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
@@ -5439,15 +5452,15 @@ static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *
return err;
}
-static int do_vfs_lock(struct file *file, struct file_lock *fl)
+static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
{
int res = 0;
switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
case FL_POSIX:
- res = posix_lock_file_wait(file, fl);
+ res = posix_lock_inode_wait(inode, fl);
break;
case FL_FLOCK:
- res = flock_lock_file_wait(file, fl);
+ res = flock_lock_inode_wait(inode, fl);
break;
default:
BUG();
@@ -5484,7 +5497,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
atomic_inc(&lsp->ls_count);
/* Ensure we don't close file until we're done freeing locks! */
p->ctx = get_nfs_open_context(ctx);
- get_file(fl->fl_file);
memcpy(&p->fl, fl, sizeof(p->fl));
p->server = NFS_SERVER(inode);
return p;
@@ -5496,7 +5508,6 @@ static void nfs4_locku_release_calldata(void *data)
nfs_free_seqid(calldata->arg.seqid);
nfs4_put_lock_state(calldata->lsp);
put_nfs_open_context(calldata->ctx);
- fput(calldata->fl.fl_file);
kfree(calldata);
}
@@ -5509,7 +5520,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
switch (task->tk_status) {
case 0:
renew_lease(calldata->server, calldata->timestamp);
- do_vfs_lock(calldata->fl.fl_file, &calldata->fl);
+ do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl);
if (nfs4_update_lock_stateid(calldata->lsp,
&calldata->res.stateid))
break;
@@ -5617,7 +5628,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
mutex_lock(&sp->so_delegreturn_mutex);
/* Exclude nfs4_reclaim_open_stateid() - note nesting! */
down_read(&nfsi->rwsem);
- if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
+ if (do_vfs_lock(inode, request) == -ENOENT) {
up_read(&nfsi->rwsem);
mutex_unlock(&sp->so_delegreturn_mutex);
goto out;
@@ -5758,7 +5769,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
data->timestamp);
if (data->arg.new_lock) {
data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
- if (do_vfs_lock(data->fl.fl_file, &data->fl) < 0) {
+ if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) {
rpc_restart_call_prepare(task);
break;
}
@@ -6000,7 +6011,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
if (status != 0)
goto out;
request->fl_flags |= FL_ACCESS;
- status = do_vfs_lock(request->fl_file, request);
+ status = do_vfs_lock(state->inode, request);
if (status < 0)
goto out;
down_read(&nfsi->rwsem);
@@ -6008,7 +6019,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
/* Yes: cache locks! */
/* ...but avoid races with delegation recall... */
request->fl_flags = fl_flags & ~FL_SLEEP;
- status = do_vfs_lock(request->fl_file, request);
+ status = do_vfs_lock(state->inode, request);
up_read(&nfsi->rwsem);
goto out;
}
@@ -7573,13 +7584,8 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
goto out;
}
ret = rpc_wait_for_completion_task(task);
- if (!ret) {
- struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
-
- if (task->tk_status == 0)
- nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
+ if (!ret)
ret = task->tk_status;
- }
rpc_put_task(task);
out:
dprintk("<-- %s status=%d\n", __func__, ret);
@@ -7967,16 +7973,17 @@ static void nfs4_layoutreturn_release(void *calldata)
{
struct nfs4_layoutreturn *lrp = calldata;
struct pnfs_layout_hdr *lo = lrp->args.layout;
+ LIST_HEAD(freeme);
dprintk("--> %s\n", __func__);
spin_lock(&lo->plh_inode->i_lock);
if (lrp->res.lrs_present)
pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
+ pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
pnfs_clear_layoutreturn_waitbit(lo);
- clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
- rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
lo->plh_block_lgets--;
spin_unlock(&lo->plh_inode->i_lock);
+ pnfs_free_lseg_list(&freeme);
pnfs_put_layout_hdr(lrp->args.layout);
nfs_iput_and_deactive(lrp->inode);
kfree(calldata);
@@ -8590,7 +8597,6 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
.minor_version = 0,
.init_caps = NFS_CAP_READDIRPLUS
| NFS_CAP_ATOMIC_OPEN
- | NFS_CAP_CHANGE_ATTR
| NFS_CAP_POSIX_LOCK,
.init_client = nfs40_init_client,
.shutdown_client = nfs40_shutdown_client,
@@ -8616,7 +8622,6 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
.minor_version = 1,
.init_caps = NFS_CAP_READDIRPLUS
| NFS_CAP_ATOMIC_OPEN
- | NFS_CAP_CHANGE_ATTR
| NFS_CAP_POSIX_LOCK
| NFS_CAP_STATEID_NFSV41
| NFS_CAP_ATOMIC_OPEN_V1,
@@ -8639,7 +8644,6 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
.minor_version = 2,
.init_caps = NFS_CAP_READDIRPLUS
| NFS_CAP_ATOMIC_OPEN
- | NFS_CAP_CHANGE_ATTR
| NFS_CAP_POSIX_LOCK
| NFS_CAP_STATEID_NFSV41
| NFS_CAP_ATOMIC_OPEN_V1
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 605840dc89cf..f2e2ad894461 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -2191,25 +2191,35 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp)
}
}
-static void nfs41_handle_state_revoked(struct nfs_client *clp)
+static void nfs41_handle_all_state_revoked(struct nfs_client *clp)
{
nfs4_reset_all_state(clp);
dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
}
+static void nfs41_handle_some_state_revoked(struct nfs_client *clp)
+{
+ nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
+ nfs4_schedule_state_manager(clp);
+
+ dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
+}
+
static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
{
- /* This will need to handle layouts too */
- nfs_expire_all_delegations(clp);
+ /* FIXME: For now, we destroy all layouts. */
+ pnfs_destroy_all_layouts(clp);
+ /* FIXME: For now, we test all delegations+open state+locks. */
+ nfs41_handle_some_state_revoked(clp);
dprintk("%s: Recallable state revoked on server %s!\n", __func__,
clp->cl_hostname);
}
static void nfs41_handle_backchannel_fault(struct nfs_client *clp)
{
- nfs_expire_all_delegations(clp);
- if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
- nfs4_schedule_state_manager(clp);
+ set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+ nfs4_schedule_state_manager(clp);
+
dprintk("%s: server %s declared a backchannel fault\n", __func__,
clp->cl_hostname);
}
@@ -2231,10 +2241,11 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
nfs41_handle_server_reboot(clp);
- if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
- SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
+ if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED))
+ nfs41_handle_all_state_revoked(clp);
+ if (flags & (SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
SEQ4_STATUS_ADMIN_STATE_REVOKED))
- nfs41_handle_state_revoked(clp);
+ nfs41_handle_some_state_revoked(clp);
if (flags & SEQ4_STATUS_LEASE_MOVED)
nfs4_schedule_lease_moved_recovery(clp);
if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 1da68d3b1eda..4984bbe55ff1 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -1100,8 +1100,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
mirror->pg_base = 0;
mirror->pg_recoalesce = 0;
- desc->pg_moreio = 0;
-
while (!list_empty(&head)) {
struct nfs_page *req;
@@ -1109,8 +1107,11 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
nfs_list_remove_request(req);
if (__nfs_pageio_add_request(desc, req))
continue;
- if (desc->pg_error < 0)
+ if (desc->pg_error < 0) {
+ list_splice_tail(&head, &mirror->pg_list);
+ mirror->pg_recoalesce = 1;
return 0;
+ }
break;
}
} while (mirror->pg_recoalesce);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 0ba9a02c9566..70bf706b1090 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -352,7 +352,7 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
{
struct pnfs_layout_segment *s;
- if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
+ if (!test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
return false;
list_for_each_entry(s, &lo->plh_segs, pls_list)
@@ -362,6 +362,18 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
return true;
}
+static bool
+pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
+{
+ if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
+ return false;
+ lo->plh_return_iomode = 0;
+ lo->plh_block_lgets++;
+ pnfs_get_layout_hdr(lo);
+ clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
+ return true;
+}
+
static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
struct pnfs_layout_hdr *lo, struct inode *inode)
{
@@ -372,17 +384,16 @@ static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
if (pnfs_layout_need_return(lo, lseg)) {
nfs4_stateid stateid;
enum pnfs_iomode iomode;
+ bool send;
stateid = lo->plh_stateid;
iomode = lo->plh_return_iomode;
- /* decreased in pnfs_send_layoutreturn() */
- lo->plh_block_lgets++;
- lo->plh_return_iomode = 0;
+ send = pnfs_prepare_layoutreturn(lo);
spin_unlock(&inode->i_lock);
- pnfs_get_layout_hdr(lo);
-
- /* Send an async layoutreturn so we dont deadlock */
- pnfs_send_layoutreturn(lo, stateid, iomode, false);
+ if (send) {
+ /* Send an async layoutreturn so we dont deadlock */
+ pnfs_send_layoutreturn(lo, stateid, iomode, false);
+ }
} else
spin_unlock(&inode->i_lock);
}
@@ -411,6 +422,10 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg)
pnfs_layoutreturn_before_put_lseg(lseg, lo, inode);
if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
+ if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
+ spin_unlock(&inode->i_lock);
+ return;
+ }
pnfs_get_layout_hdr(lo);
pnfs_layout_remove_lseg(lo, lseg);
spin_unlock(&inode->i_lock);
@@ -451,6 +466,8 @@ pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
if (atomic_dec_and_test(&lseg->pls_refcount)) {
struct pnfs_layout_hdr *lo = lseg->pls_layout;
+ if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
+ return;
pnfs_get_layout_hdr(lo);
pnfs_layout_remove_lseg(lo, lseg);
pnfs_free_lseg_async(lseg);
@@ -924,6 +941,7 @@ void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
smp_mb__after_atomic();
wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
+ rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
}
static int
@@ -978,6 +996,7 @@ _pnfs_return_layout(struct inode *ino)
LIST_HEAD(tmp_list);
nfs4_stateid stateid;
int status = 0, empty;
+ bool send;
dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);
@@ -1007,17 +1026,18 @@ _pnfs_return_layout(struct inode *ino)
/* Don't send a LAYOUTRETURN if list was initially empty */
if (empty) {
spin_unlock(&ino->i_lock);
- pnfs_put_layout_hdr(lo);
dprintk("NFS: %s no layout segments to return\n", __func__);
- goto out;
+ goto out_put_layout_hdr;
}
set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
- lo->plh_block_lgets++;
+ send = pnfs_prepare_layoutreturn(lo);
spin_unlock(&ino->i_lock);
pnfs_free_lseg_list(&tmp_list);
-
- status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
+ if (send)
+ status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
+out_put_layout_hdr:
+ pnfs_put_layout_hdr(lo);
out:
dprintk("<-- %s status: %d\n", __func__, status);
return status;
@@ -1097,13 +1117,9 @@ bool pnfs_roc(struct inode *ino)
out_noroc:
if (lo) {
stateid = lo->plh_stateid;
- layoutreturn =
- test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
- &lo->plh_flags);
- if (layoutreturn) {
- lo->plh_block_lgets++;
- pnfs_get_layout_hdr(lo);
- }
+ if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+ &lo->plh_flags))
+ layoutreturn = pnfs_prepare_layoutreturn(lo);
}
spin_unlock(&ino->i_lock);
if (layoutreturn) {
@@ -1146,15 +1162,18 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
struct pnfs_layout_segment *lseg;
nfs4_stateid stateid;
u32 current_seqid;
- bool found = false, layoutreturn = false;
+ bool layoutreturn = false;
spin_lock(&ino->i_lock);
- list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
- if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
- rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
- found = true;
- goto out;
- }
+ list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) {
+ if (!test_bit(NFS_LSEG_ROC, &lseg->pls_flags))
+ continue;
+ if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
+ continue;
+ rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
+ spin_unlock(&ino->i_lock);
+ return true;
+ }
lo = nfsi->layout;
current_seqid = be32_to_cpu(lo->plh_stateid.seqid);
@@ -1162,23 +1181,19 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
* a barrier, we choose the worst-case barrier.
*/
*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
-out:
- if (!found) {
- stateid = lo->plh_stateid;
- layoutreturn =
- test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
- &lo->plh_flags);
- if (layoutreturn) {
- lo->plh_block_lgets++;
- pnfs_get_layout_hdr(lo);
- }
- }
+ stateid = lo->plh_stateid;
+ if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+ &lo->plh_flags))
+ layoutreturn = pnfs_prepare_layoutreturn(lo);
+ if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
+ rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
+
spin_unlock(&ino->i_lock);
if (layoutreturn) {
- rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false);
+ return true;
}
- return found;
+ return false;
}
/*
@@ -1695,7 +1710,6 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
spin_lock(&inode->i_lock);
/* set failure bit so that pnfs path will be retried later */
pnfs_layout_set_fail_bit(lo, iomode);
- set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
if (lo->plh_return_iomode == 0)
lo->plh_return_iomode = range.iomode;
else if (lo->plh_return_iomode != range.iomode)
@@ -2207,13 +2221,12 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
if (ld->prepare_layoutcommit) {
status = ld->prepare_layoutcommit(&data->args);
if (status) {
+ put_rpccred(data->cred);
spin_lock(&inode->i_lock);
set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
if (end_pos > nfsi->layout->plh_lwb)
nfsi->layout->plh_lwb = end_pos;
- spin_unlock(&inode->i_lock);
- put_rpccred(data->cred);
- goto clear_layoutcommitting;
+ goto out_unlock;
}
}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 65869ca9c851..75a35a1afa79 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1379,24 +1379,27 @@ static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
{
struct nfs_pgio_args *argp = &hdr->args;
struct nfs_pgio_res *resp = &hdr->res;
+ u64 size = argp->offset + resp->count;
if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
+ fattr->size = size;
+ if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
+ fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
return;
- if (argp->offset + resp->count != fattr->size)
- return;
- if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode))
+ }
+ if (size != fattr->size)
return;
/* Set attribute barrier */
nfs_fattr_set_barrier(fattr);
+ /* ...and update size */
+ fattr->valid |= NFS_ATTR_FATTR_SIZE;
}
void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
{
- struct nfs_fattr *fattr = hdr->res.fattr;
+ struct nfs_fattr *fattr = &hdr->fattr;
struct inode *inode = hdr->inode;
- if (fattr == NULL)
- return;
spin_lock(&inode->i_lock);
nfs_writeback_check_extend(hdr, fattr);
nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 6904213a4363..ebf90e487c75 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -212,6 +212,7 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
BUG_ON(!ls->ls_file);
if (nfsd4_layout_setlease(ls)) {
+ fput(ls->ls_file);
put_nfs4_file(fp);
kmem_cache_free(nfs4_layout_stateid_cache, ls);
return NULL;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 61dfb33f0559..95202719a1fd 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4396,9 +4396,9 @@ laundromat_main(struct work_struct *laundry)
queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}
-static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
+static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
{
- if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
+ if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
return nfserr_bad_stateid;
return nfs_ok;
}
@@ -4601,9 +4601,6 @@ nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
{
__be32 status;
- status = nfs4_check_fh(fhp, ols);
- if (status)
- return status;
status = nfsd4_check_openowner_confirmed(ols);
if (status)
return status;
@@ -4690,6 +4687,9 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
status = nfserr_bad_stateid;
break;
}
+ if (status)
+ goto out;
+ status = nfs4_check_fh(fhp, s);
done:
if (!status && filpp)
@@ -4798,7 +4798,7 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
if (status)
return status;
- return nfs4_check_fh(current_fh, stp);
+ return nfs4_check_fh(current_fh, &stp->st_stid);
}
/*
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 54633858733a..75e0563c09d1 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2143,6 +2143,7 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
#define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
FATTR4_WORD0_RDATTR_ERROR)
#define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
+#define WORD2_ABSENT_FS_ATTRS 0
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
static inline __be32
@@ -2171,7 +2172,7 @@ nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
{ return 0; }
#endif
-static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
+static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32 *rdattr_err)
{
/* As per referral draft: */
if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS ||
@@ -2184,6 +2185,7 @@ static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
}
*bmval0 &= WORD0_ABSENT_FS_ATTRS;
*bmval1 &= WORD1_ABSENT_FS_ATTRS;
+ *bmval2 &= WORD2_ABSENT_FS_ATTRS;
return 0;
}
@@ -2246,8 +2248,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion));
if (exp->ex_fslocs.migrated) {
- BUG_ON(bmval[2]);
- status = fattr_handle_absent_fs(&bmval0, &bmval1, &rdattr_err);
+ status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
if (status)
goto out;
}
@@ -2286,8 +2287,8 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
}
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
- if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) ||
- bmval[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
+ if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) ||
+ bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
err = security_inode_getsecctx(d_inode(dentry),
&context, &contextlen);
contextsupport = (err == 0);
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 92e48c70f0f0..39ddcaf0918f 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -412,16 +412,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
unsigned int flags)
{
struct fsnotify_mark *lmark, *mark;
+ LIST_HEAD(to_free);
+ /*
+ * We have to be really careful here. Any time we drop mark_mutex, e.g.
+ * fsnotify_clear_marks_by_inode() can come in and free marks, even ones
+ * already on our to_free list, so we have to hold mark_mutex whenever we
+ * access that list. And freeing a mark requires us to drop mark_mutex.
+ * So we can reliably free only the first mark in the list. That's why we
+ * first move the marks to be freed onto the to_free list in one go and
+ * then free the marks in to_free one by one.
+ */
mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
- if (mark->flags & flags) {
- fsnotify_get_mark(mark);
- fsnotify_destroy_mark_locked(mark, group);
- fsnotify_put_mark(mark);
- }
+ if (mark->flags & flags)
+ list_move(&mark->g_list, &to_free);
}
mutex_unlock(&group->mark_mutex);
+
+ while (1) {
+ mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
+ if (list_empty(&to_free)) {
+ mutex_unlock(&group->mark_mutex);
+ break;
+ }
+ mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
+ fsnotify_get_mark(mark);
+ fsnotify_destroy_mark_locked(mark, group);
+ mutex_unlock(&group->mark_mutex);
+ fsnotify_put_mark(mark);
+ }
}
/*
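The hunk above is a collect-then-free pattern: matching marks are first moved
onto a private to_free list under mark_mutex, then destroyed one at a time with
the lock retaken for every iteration. A minimal userspace sketch of the same
shape, assuming a pthread mutex and a hand-rolled singly linked list (the names
are illustrative, not the fsnotify API; in the real code even the private list
can be modified by other mark_mutex holders, hence the re-lock per free):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct mark {
        int flags;
        struct mark *next;
};

static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct mark *marks_list;         /* stand-in for group->marks_list */

static void clear_marks_by_flags(int flags)
{
        struct mark *m, **pp, *to_free = NULL;

        /* Pass 1: under the lock, move matching marks to a private list. */
        pthread_mutex_lock(&mark_mutex);
        for (pp = &marks_list; (m = *pp) != NULL; ) {
                if (m->flags & flags) {
                        *pp = m->next;
                        m->next = to_free;
                        to_free = m;
                } else {
                        pp = &m->next;
                }
        }
        pthread_mutex_unlock(&mark_mutex);

        /* Pass 2: free one entry per lock/unlock cycle, mirroring the loop
         * shape above where destruction must happen with the lock dropped. */
        for (;;) {
                pthread_mutex_lock(&mark_mutex);
                m = to_free;
                if (!m) {
                        pthread_mutex_unlock(&mark_mutex);
                        break;
                }
                to_free = m->next;
                pthread_mutex_unlock(&mark_mutex);
                free(m);
        }
}

int main(void)
{
        for (int i = 0; i < 4; i++) {
                struct mark *m = malloc(sizeof(*m));

                m->flags = i & 1;
                m->next = marks_list;
                marks_list = m;
        }
        clear_marks_by_flags(1);
        for (struct mark *m = marks_list; m; m = m->next)
                printf("kept mark with flags %d\n", m->flags);
        return 0;
}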
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 1a35c6139656..0f5fd9db8194 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -685,7 +685,7 @@ static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb,
if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
u64 s = i_size_read(inode);
- sector_t sector = (p_cpos << (osb->s_clustersize_bits - 9)) +
+ sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) +
(do_div(s, osb->s_clustersize) >> 9);
ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector,
@@ -910,7 +910,7 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN));
ret = blkdev_issue_zeroout(osb->sb->s_bdev,
- p_cpos << (osb->s_clustersize_bits - 9),
+ (u64)p_cpos << (osb->s_clustersize_bits - 9),
zero_len_head >> 9, GFP_NOFS, false);
if (ret < 0)
mlog_errno(ret);
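Both ocfs2 hunks above fix the same class of bug: p_cpos is a 32-bit value, so
shifting it left by (s_clustersize_bits - 9) is performed in 32 bits and the
high bits are silently lost before the result is widened to sector_t. A small
standalone demonstration of why the cast has to come before the shift (the
numbers are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t p_cpos = 0x00400000;   /* illustrative 32-bit cluster number */
        unsigned int shift = 11;        /* e.g. clustersize_bits - 9 */

        uint64_t truncated = p_cpos << shift;           /* shift done in 32 bits */
        uint64_t correct = (uint64_t)p_cpos << shift;   /* shift done in 64 bits */

        printf("truncated=%llu correct=%llu\n",
               (unsigned long long)truncated,
               (unsigned long long)correct);
        return 0;
}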
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 8b23aa2f52dd..23157e40dd74 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -4025,9 +4025,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
osb->dc_work_sequence = osb->dc_wake_sequence;
processed = osb->blocked_lock_count;
- while (processed) {
- BUG_ON(list_empty(&osb->blocked_lock_list));
-
+ /*
+ * blocked lock processing in this loop might call iput which can
+ * remove items off osb->blocked_lock_list. Downconvert up to
+ * 'processed' number of locks, but stop short if we had some
+ * removed in ocfs2_mark_lockres_freeing when downconverting.
+ */
+ while (processed && !list_empty(&osb->blocked_lock_list)) {
lockres = list_entry(osb->blocked_lock_list.next,
struct ocfs2_lock_res, l_blocked_list);
list_del_init(&lockres->l_blocked_list);
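The rewritten loop still bounds the work by 'processed' but also re-checks the
list, because downconverting one lock can drop references that remove other
entries from blocked_lock_list. A generic standalone sketch of that loop shape
(plain singly linked list, invented names, nothing ocfs2-specific):

#include <stdio.h>
#include <stdlib.h>

struct node {
        int v;
        struct node *next;
};

/* Handle at most 'budget' entries, but stop early if the list drains,
 * since handling one entry may have removed others. */
static void drain(struct node **head, int budget)
{
        while (budget-- && *head) {
                struct node *n = *head;

                *head = n->next;
                printf("handled %d\n", n->v);
                free(n);
        }
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                n->v = i;
                n->next = head;
                head = n;
        }
        drain(&head, 10);
        return 0;
}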
diff --git a/fs/pnode.h b/fs/pnode.h
index 7114ce6e6b9e..0fcdbe7ca648 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -20,8 +20,6 @@
#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
#define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
-#define IS_MNT_LOCKED_AND_LAZY(m) \
- (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED)
#define CL_EXPIRE 0x01
#define CL_SLAVE 0x02
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index d751fcb637bb..1ade1206bb89 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -75,3 +75,9 @@ config PROC_PAGE_MONITOR
config PROC_CHILDREN
bool "Include /proc/<pid>/task/<tid>/children file"
default n
+ help
+ Provides a fast way to retrieve the first-level children PIDs of a task.
+ See <file:Documentation/filesystems/proc.txt> for more information.
+
+ Say Y if you are running any user-space software which benefits from this
+ interface. For example, rkt is one such piece of software.
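For reference, the interface the new help text describes can be consumed from
user space like this (a minimal sketch; error handling kept to the bare
minimum, and the file only exists when CONFIG_PROC_CHILDREN is enabled):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char path[64], buf[4096];
        FILE *f;

        /* First-level children of the current thread group leader. */
        snprintf(path, sizeof(path), "/proc/%d/task/%d/children",
                 (int)getpid(), (int)getpid());
        f = fopen(path, "r");
        if (!f) {
                perror(path);
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("child pids: %s\n", buf);
        fclose(f);
        return 0;
}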
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 87782e874b6a..aa50d1ac28fc 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -243,6 +243,11 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
len1 = arg_end - arg_start;
len2 = env_end - env_start;
+ /* Empty ARGV. */
+ if (len1 == 0) {
+ rv = 0;
+ goto out_free_page;
+ }
/*
* Inherently racy -- command line shares address space
* with code and data.
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 91a4e6426321..92e6726f6e37 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -92,7 +92,7 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
roundup(sizeof(CORE_STR), 4)) +
roundup(sizeof(struct elf_prstatus), 4) +
roundup(sizeof(struct elf_prpsinfo), 4) +
- roundup(sizeof(struct task_struct), 4);
+ roundup(arch_task_struct_size, 4);
*elf_buflen = PAGE_ALIGN(*elf_buflen);
return size + *elf_buflen;
}
@@ -415,7 +415,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
/* set up the task structure */
notes[2].name = CORE_STR;
notes[2].type = NT_TASKSTRUCT;
- notes[2].datasz = sizeof(struct task_struct);
+ notes[2].datasz = arch_task_struct_size;
notes[2].data = current;
nhdr->p_filesz += notesize(&notes[2]);
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 7e412ad74836..270221fcef42 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
* Other callers might not initialize the si_lsb field,
* so check explicitly for the right codes here.
*/
- if (kinfo->si_code == BUS_MCEERR_AR ||
- kinfo->si_code == BUS_MCEERR_AO)
+ if (kinfo->si_signo == SIGBUS &&
+ (kinfo->si_code == BUS_MCEERR_AR ||
+ kinfo->si_code == BUS_MCEERR_AO))
err |= __put_user((short) kinfo->si_addr_lsb,
&uinfo->ssi_addr_lsb);
#endif
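The extra si_signo test matters because si_code values are only meaningful
relative to the signal number: the numeric values of BUS_MCEERR_AR/AO collide
with other signals' codes, and only SIGBUS carries a valid si_addr_lsb. A
userspace restatement of the same check, assuming glibc's siginfo definitions
(the helper name is invented):

#define _GNU_SOURCE
#include <signal.h>
#include <stdbool.h>

static bool siginfo_has_addr_lsb(const siginfo_t *si)
{
        return si->si_signo == SIGBUS &&
               (si->si_code == BUS_MCEERR_AR || si->si_code == BUS_MCEERR_AO);
}

int main(void)
{
        siginfo_t si = { 0 };

        si.si_signo = SIGBUS;
        si.si_code = BUS_MCEERR_AO;
        return siginfo_has_addr_lsb(&si) ? 0 : 1;
}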
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 6afac3d561ac..8d0b3ade0ff0 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1652,17 +1652,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
sizeof(struct unallocSpaceEntry));
use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
- use->descTag.tagLocation =
- cpu_to_le32(iinfo->i_location.logicalBlockNum);
- crclen = sizeof(struct unallocSpaceEntry) +
- iinfo->i_lenAlloc - sizeof(struct tag);
- use->descTag.descCRCLength = cpu_to_le16(crclen);
- use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use +
- sizeof(struct tag),
- crclen));
- use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);
+ crclen = sizeof(struct unallocSpaceEntry);
- goto out;
+ goto finish;
}
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
@@ -1782,6 +1774,8 @@ static int udf_update_inode(struct inode *inode, int do_sync)
efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
crclen = sizeof(struct extendedFileEntry);
}
+
+finish:
if (iinfo->i_strat4096) {
fe->icbTag.strategyType = cpu_to_le16(4096);
fe->icbTag.strategyParameter = cpu_to_le16(1);
@@ -1791,7 +1785,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
fe->icbTag.numEntries = cpu_to_le16(1);
}
- if (S_ISDIR(inode->i_mode))
+ if (iinfo->i_use)
+ fe->icbTag.fileType = ICBTAG_FILE_TYPE_USE;
+ else if (S_ISDIR(inode->i_mode))
fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
else if (S_ISREG(inode->i_mode))
fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
@@ -1828,7 +1824,6 @@ static int udf_update_inode(struct inode *inode, int do_sync)
crclen));
fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
-out:
set_buffer_uptodate(bh);
unlock_buffer(bh);
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index 20de88d1bf86..dd714037c322 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -159,11 +159,10 @@ xfs_attr3_rmt_write_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ int blksize = mp->m_attr_geo->blksize;
char *ptr;
int len;
xfs_daddr_t bno;
- int blksize = mp->m_attr_geo->blksize;
/* no verification of non-crc buffers */
if (!xfs_sb_version_hascrc(&mp->m_sb))
@@ -175,16 +174,22 @@ xfs_attr3_rmt_write_verify(
ASSERT(len >= blksize);
while (len > 0) {
+ struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr;
+
if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
xfs_buf_ioerror(bp, -EFSCORRUPTED);
xfs_verifier_error(bp);
return;
}
- if (bip) {
- struct xfs_attr3_rmt_hdr *rmt;
- rmt = (struct xfs_attr3_rmt_hdr *)ptr;
- rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+ /*
+ * Ensure we aren't writing bogus LSNs to disk. See
+ * xfs_attr3_rmt_hdr_set() for the explanation.
+ */
+ if (rmt->rm_lsn != cpu_to_be64(NULLCOMMITLSN)) {
+ xfs_buf_ioerror(bp, -EFSCORRUPTED);
+ xfs_verifier_error(bp);
+ return;
}
xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);
@@ -221,6 +226,18 @@ xfs_attr3_rmt_hdr_set(
rmt->rm_owner = cpu_to_be64(ino);
rmt->rm_blkno = cpu_to_be64(bno);
+ /*
+ * Remote attribute blocks are written synchronously, so we don't
+ * have an LSN that we can stamp in them that makes any sense to log
+ * recovery. To ensure that log recovery handles overwrites of these
+ * blocks sanely (i.e. once they've been freed and reallocated as some
+ * other type of metadata) we need to ensure that the LSN has a value
+ * that tells log recovery to ignore the LSN and overwrite the buffer
+ * with whatever is in it's log. To do this, we use the magic
+ * with whatever is in its log. To do this, we use the magic
+ */
+ rmt->rm_lsn = cpu_to_be64(NULLCOMMITLSN);
+
return sizeof(struct xfs_attr3_rmt_hdr);
}
@@ -434,14 +451,21 @@ xfs_attr_rmtval_set(
/*
* Allocate a single extent, up to the size of the value.
+ *
+ * Note that we have to consider this a data allocation as we
+ * write the remote attribute without logging the contents.
+ * Hence we must ensure that we aren't using blocks that are on
+ * the busy list so that we don't overwrite blocks which have
+ * recently been freed but their transactions are not yet
+ * committed to disk. If we overwrite the contents of a busy
+ * extent and then crash then the block may not contain the
+ * correct metadata after log recovery occurs.
*/
xfs_bmap_init(args->flist, args->firstblock);
nmap = 1;
error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno,
- blkcnt,
- XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
- args->firstblock, args->total, &map, &nmap,
- args->flist);
+ blkcnt, XFS_BMAPI_ATTRFORK, args->firstblock,
+ args->total, &map, &nmap, args->flist);
if (!error) {
error = xfs_bmap_finish(&args->trans, args->flist,
&committed);
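The xfs_attr_remote.c hunks revolve around one idea: blocks that are written
synchronously, outside the log, cannot carry a meaningful LSN, so they are
stamped with NULLCOMMITLSN and recovery treats that sentinel as "always
replay". A generic standalone sketch of the sentinel scheme (NULL_LSN and the
struct are invented stand-ins, not the XFS on-disk format):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NULL_LSN UINT64_MAX             /* stand-in for NULLCOMMITLSN */

struct blk_hdr {
        uint64_t lsn;
};

/* Blocks written outside the log get the sentinel instead of a real LSN. */
static void hdr_stamp_unlogged(struct blk_hdr *h)
{
        h->lsn = NULL_LSN;
}

/* Recovery: a sentinel means "no ordering information, always replay";
 * otherwise replay only log entries newer than what is already on disk. */
static bool should_replay(const struct blk_hdr *h, uint64_t log_lsn)
{
        if (h->lsn == NULL_LSN)
                return true;
        return log_lsn > h->lsn;
}

int main(void)
{
        struct blk_hdr h;

        hdr_stamp_unlogged(&h);
        printf("replay lsn 100: %d\n", should_replay(&h, 100));
        h.lsn = 200;
        printf("replay lsn 100: %d\n", should_replay(&h, 100));
        return 0;
}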
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index f0e8249722d4..db4acc1c3e73 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1514,18 +1514,27 @@ xfs_filemap_fault(
struct vm_area_struct *vma,
struct vm_fault *vmf)
{
- struct xfs_inode *ip = XFS_I(file_inode(vma->vm_file));
+ struct inode *inode = file_inode(vma->vm_file);
int ret;
- trace_xfs_filemap_fault(ip);
+ trace_xfs_filemap_fault(XFS_I(inode));
/* DAX can shortcut the normal fault path on write faults! */
- if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(VFS_I(ip)))
+ if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
return xfs_filemap_page_mkwrite(vma, vmf);
- xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
- ret = filemap_fault(vma, vmf);
- xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
+ xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+ if (IS_DAX(inode)) {
+ /*
+ * we do not want to trigger unwritten extent conversion on read
+ * faults - that is unnecessary overhead and would also require
+ * changes to xfs_get_blocks_direct() to map unwritten extent
+ * ioend for conversion on read-only mappings.
+ */
+ ret = __dax_fault(vma, vmf, xfs_get_blocks_direct, NULL);
+ } else
+ ret = filemap_fault(vma, vmf);
+ xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
return ret;
}
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 01dd228ca05e..480ebba8464f 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1886,9 +1886,14 @@ xlog_recover_get_buf_lsn(
uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
break;
case XFS_ATTR3_RMT_MAGIC:
- lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
- uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
- break;
+ /*
+ * Remote attr blocks are written synchronously, rather than
+ * being logged. That means they do not contain a valid LSN
+ * (i.e. transactionally ordered) in them, and hence any time we
+ * see a buffer to replay over the top of a remote attribute
+ * block we should simply do so.
+ */
+ goto recover_immediately;
case XFS_SB_MAGIC:
lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
uuid = &((struct xfs_dsb *)blk)->sb_uuid;
diff --git a/include/asm-generic/mm-arch-hooks.h b/include/asm-generic/mm-arch-hooks.h
new file mode 100644
index 000000000000..5ff0e5193f85
--- /dev/null
+++ b/include/asm-generic/mm-arch-hooks.h
@@ -0,0 +1,16 @@
+/*
+ * Architecture specific mm hooks
+ */
+
+#ifndef _ASM_GENERIC_MM_ARCH_HOOKS_H
+#define _ASM_GENERIC_MM_ARCH_HOOKS_H
+
+/*
+ * This file should be included through arch/../include/asm/Kbuild for
+ * the architecture which doesn't need specific mm hooks.
+ *
+ * In that case, the generic hooks defined in include/linux/mm-arch-hooks.h
+ * are used.
+ */
+
+#endif /* _ASM_GENERIC_MM_ARCH_HOOKS_H */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 48db6a56975f..5aa519711e0b 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -691,7 +691,7 @@ struct drm_vblank_crtc {
struct timer_list disable_timer; /* delayed disable timer */
/* vblank counter, protected by dev->vblank_time_lock for writes */
- unsigned long count;
+ u32 count;
/* vblank timestamps, protected by dev->vblank_time_lock for writes */
struct timeval time[DRM_VBLANKTIME_RBSIZE];
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 57ca8cc383a6..3b4d8a4a23fb 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -743,8 +743,6 @@ struct drm_connector {
uint8_t num_h_tile, num_v_tile;
uint8_t tile_h_loc, tile_v_loc;
uint16_t tile_h_size, tile_v_size;
-
- struct list_head destroy_list;
};
/**
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index c8fc187061de..918aa68b5199 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -168,6 +168,7 @@ struct drm_encoder_helper_funcs {
* @get_modes: get mode list for this connector
* @mode_valid: is this mode valid on the given connector? (optional)
* @best_encoder: return the preferred encoder for this connector
+ * @atomic_best_encoder: atomic version of @best_encoder
*
* The helper operations are called by the mid-layer CRTC helper.
*/
@@ -176,6 +177,8 @@ struct drm_connector_helper_funcs {
enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
struct drm_display_mode *mode);
struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
+ struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
+ struct drm_connector_state *connector_state);
};
extern void drm_helper_disable_unused_functions(struct drm_device *dev);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 799050198323..53c53c459b15 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -348,6 +348,25 @@ static inline int drm_eld_mnl(const uint8_t *eld)
}
/**
+ * drm_eld_sad - Get ELD SAD structures.
+ * @eld: pointer to an eld memory structure with sad_count set
+ */
+static inline const uint8_t *drm_eld_sad(const uint8_t *eld)
+{
+ unsigned int ver, mnl;
+
+ ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
+ if (ver != 2 && ver != 31)
+ return NULL;
+
+ mnl = drm_eld_mnl(eld);
+ if (mnl > 16)
+ return NULL;
+
+ return eld + DRM_ELD_CEA_SAD(mnl, 0);
+}
+
+/**
* drm_eld_sad_count - Get ELD SAD count.
* @eld: pointer to an eld memory structure with sad_count set
*/
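A hypothetical caller of the new helper would pair it with the existing
drm_eld_sad_count() declared just below and step through the 3-byte CEA Short
Audio Descriptors (sketch only, kernel context; dump_eld_sads is an invented
name and not part of this patch):

static void dump_eld_sads(const uint8_t *eld)
{
        const uint8_t *sad = drm_eld_sad(eld);
        int i, n = drm_eld_sad_count(eld);

        if (!sad)
                return;         /* unknown ELD version or oversized name length */

        for (i = 0; i < n; i++, sad += 3)
                pr_debug("SAD %d: %02x %02x %02x\n", i, sad[0], sad[1], sad[2]);
}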
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 45c39a37f924..8bc073d297db 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -172,6 +172,7 @@
{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/linux/amba/sp810.h b/include/linux/amba/sp810.h
index c7df89f99115..58fe9e8b6fd7 100644
--- a/include/linux/amba/sp810.h
+++ b/include/linux/amba/sp810.h
@@ -2,7 +2,7 @@
* ARM PrimeXsys System Controller SP810 header file
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/include/linux/ata.h b/include/linux/ata.h
index fed36418dd1c..d2992bfa1706 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -45,6 +45,7 @@ enum {
ATA_SECT_SIZE = 512,
ATA_MAX_SECTORS_128 = 128,
ATA_MAX_SECTORS = 256,
+ ATA_MAX_SECTORS_1024 = 1024,
ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */
ATA_MAX_SECTORS_TAPE = 65535,
@@ -384,8 +385,6 @@ enum {
SATA_SSP = 0x06, /* Software Settings Preservation */
SATA_DEVSLP = 0x09, /* Device Sleep */
- SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
-
/* feature values for SET_MAX */
ATA_SET_MAX_ADDR = 0x00,
ATA_SET_MAX_PASSWD = 0x01,
@@ -529,8 +528,6 @@ struct ata_bmdma_prd {
#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
-#define ata_id_has_ncq_autosense(id) \
- ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
static inline bool ata_id_has_hipm(const u16 *id)
{
@@ -719,20 +716,6 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
return false;
}
-static inline bool ata_id_has_sense_reporting(const u16 *id)
-{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
- return false;
- return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
-}
-
-static inline bool ata_id_sense_reporting_enabled(const u16 *id)
-{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
- return false;
- return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
-}
-
/**
* ata_id_major_version - get ATA level of drive
* @id: Identify data
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 58cfab80dd70..1b62d768c7df 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -47,6 +47,7 @@ struct blkcg {
struct blkcg_policy_data *pd[BLKCG_MAX_POLS];
+ struct list_head all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head cgwb_list;
#endif
@@ -88,18 +89,12 @@ struct blkg_policy_data {
* Policies that need to keep per-blkcg data which is independent
* from any request_queue associated to it must specify its size
* with the cpd_size field of the blkcg_policy structure and
- * embed a blkcg_policy_data in it. blkcg core allocates
- * policy-specific per-blkcg structures lazily the first time
- * they are actually needed, so it handles them together with
- * blkgs. cpd_init() is invoked to let each policy handle
- * per-blkcg data.
+ * embed a blkcg_policy_data in it. cpd_init() is invoked to let
+ * each policy handle per-blkcg data.
*/
struct blkcg_policy_data {
/* the policy id this per-policy data belongs to */
int plid;
-
- /* used during policy activation */
- struct list_head alloc_node;
};
/* association between a blk cgroup and a request queue */
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index b6a52a4b457a..51bb6532785c 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -27,10 +27,12 @@
/**
* struct can_skb_priv - private additional data inside CAN sk_buffs
* @ifindex: ifindex of the first interface the CAN frame appeared on
+ * @skbcnt: atomic counter providing a unique id together with the skb pointer
* @cf: align to the following CAN frame at skb->data
*/
struct can_skb_priv {
int ifindex;
+ int skbcnt;
struct can_frame cf[0];
};
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index a240b18e86fa..08bffcc466de 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -33,18 +33,19 @@ struct clk_lookup {
}
struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
- const char *dev_fmt, ...);
+ const char *dev_fmt, ...) __printf(3, 4);
void clkdev_add(struct clk_lookup *cl);
void clkdev_drop(struct clk_lookup *cl);
struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id,
- const char *dev_fmt, ...);
+ const char *dev_fmt, ...) __printf(3, 4);
void clkdev_add_table(struct clk_lookup *, size_t);
int clk_add_alias(const char *, const char *, const char *, struct device *);
-int clk_register_clkdev(struct clk *, const char *, const char *, ...);
+int clk_register_clkdev(struct clk *, const char *, const char *, ...)
+ __printf(3, 4);
int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t);
#ifdef CONFIG_COMMON_CLK
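This and several of the following header changes add __printf() annotations so
the compiler can type-check format strings against their arguments; the second
index is 0 for the va_list variants, meaning there are no variadic arguments to
check. A standalone illustration of what the attribute buys (my_printf and
log_msg are invented names; the kernel's __printf() expands to the same
attribute):

#include <stdarg.h>
#include <stdio.h>

#define my_printf(f, a) __attribute__((format(printf, f, a)))

static my_printf(2, 3) void log_msg(int level, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        printf("<%d> ", level);
        vprintf(fmt, ap);
        va_end(ap);
}

int main(void)
{
        log_msg(1, "value=%d\n", 42);
        /* log_msg(1, "value=%d\n", "oops");   <- now caught by -Wformat */
        return 0;
}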
diff --git a/include/linux/compat.h b/include/linux/compat.h
index ab25814690bc..a76c9172b2eb 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -424,7 +424,7 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
-extern int compat_printk(const char *fmt, ...);
+extern __printf(1, 2) int compat_printk(const char *fmt, ...);
extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat);
extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set);
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index c9e5c57e4edf..63a36e89d0eb 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -64,7 +64,8 @@ struct config_item {
struct dentry *ci_dentry;
};
-extern int config_item_set_name(struct config_item *, const char *, ...);
+extern __printf(2, 3)
+int config_item_set_name(struct config_item *, const char *, ...);
static inline char *config_item_name(struct config_item * item)
{
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 76abba4b238e..dcacb1a72e26 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -340,7 +340,27 @@ struct cper_ia_proc_ctx {
__u64 mm_reg_addr;
};
-/* Memory Error Section */
+/* Old Memory Error Section UEFI 2.1, 2.2 */
+struct cper_sec_mem_err_old {
+ __u64 validation_bits;
+ __u64 error_status;
+ __u64 physical_addr;
+ __u64 physical_addr_mask;
+ __u16 node;
+ __u16 card;
+ __u16 module;
+ __u16 bank;
+ __u16 device;
+ __u16 row;
+ __u16 column;
+ __u16 bit_pos;
+ __u64 requestor_id;
+ __u64 responder_id;
+ __u64 target_id;
+ __u8 error_type;
+};
+
+/* Memory Error Section UEFI >= 2.3 */
struct cper_sec_mem_err {
__u64 validation_bits;
__u64 error_status;
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index c0fb6b1b4712..23c30bdcca86 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -40,9 +40,10 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
-extern struct device *cpu_device_create(struct device *parent, void *drvdata,
- const struct attribute_group **groups,
- const char *fmt, ...);
+extern __printf(4, 5)
+struct device *cpu_device_create(struct device *parent, void *drvdata,
+ const struct attribute_group **groups,
+ const char *fmt, ...);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 29ad97c34fd5..bde1e567b3a9 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -62,6 +62,7 @@ struct cpufreq_policy {
/* CPUs sharing clock, require sw coordination */
cpumask_var_t cpus; /* Online CPUs only */
cpumask_var_t related_cpus; /* Online + Offline CPUs */
+ cpumask_var_t real_cpus; /* Related and present */
unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
should set cpufreq */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index d2d50249b7b2..d67ae119cf4e 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -327,7 +327,8 @@ static inline unsigned d_count(const struct dentry *dentry)
/*
* helper function for dentry_operations.d_dname() members
*/
-extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+extern __printf(4, 5)
+char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
extern char *simple_dname(struct dentry *, char *, int);
extern char *__d_path(const struct path *, const struct path *, char *, int);
diff --git a/include/linux/device.h b/include/linux/device.h
index 5a31bf3a4024..a2b4ea70a946 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -637,8 +637,9 @@ extern int devres_release_group(struct device *dev, void *id);
/* managed devm_k.alloc/kfree for device drivers */
extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
-extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
- va_list ap);
+extern __printf(3, 0)
+char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
+ va_list ap);
extern __printf(3, 4)
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
@@ -1011,12 +1012,10 @@ extern int __must_check device_reprobe(struct device *dev);
/*
* Easy functions for dynamically creating devices on the fly
*/
-extern struct device *device_create_vargs(struct class *cls,
- struct device *parent,
- dev_t devt,
- void *drvdata,
- const char *fmt,
- va_list vargs);
+extern __printf(5, 0)
+struct device *device_create_vargs(struct class *cls, struct device *parent,
+ dev_t devt, void *drvdata,
+ const char *fmt, va_list vargs);
extern __printf(5, 6)
struct device *device_create(struct class *cls, struct device *parent,
dev_t devt, void *drvdata,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a0653e560c26..84b783f277f7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -55,7 +55,8 @@ struct vm_fault;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
-extern void __init files_init(unsigned long);
+extern void __init files_init(void);
+extern void __init files_maxfiles_init(void);
extern struct files_stat_struct files_stat;
extern unsigned long get_max_files(void);
@@ -1046,12 +1047,12 @@ extern void locks_remove_file(struct file *);
extern void locks_release_private(struct file_lock *);
extern void posix_test_lock(struct file *, struct file_lock *);
extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
-extern int posix_lock_file_wait(struct file *, struct file_lock *);
+extern int posix_lock_inode_wait(struct inode *, struct file_lock *);
extern int posix_unblock_lock(struct file_lock *);
extern int vfs_test_lock(struct file *, struct file_lock *);
extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
-extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
+extern int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl);
extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
extern void lease_get_mtime(struct inode *, struct timespec *time);
extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
@@ -1137,7 +1138,8 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
return -ENOLCK;
}
-static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+static inline int posix_lock_inode_wait(struct inode *inode,
+ struct file_lock *fl)
{
return -ENOLCK;
}
@@ -1163,8 +1165,8 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
return 0;
}
-static inline int flock_lock_file_wait(struct file *filp,
- struct file_lock *request)
+static inline int flock_lock_inode_wait(struct inode *inode,
+ struct file_lock *request)
{
return -ENOLCK;
}
@@ -1202,6 +1204,20 @@ static inline void show_fd_locks(struct seq_file *f,
struct file *filp, struct files_struct *files) {}
#endif /* !CONFIG_FILE_LOCKING */
+static inline struct inode *file_inode(const struct file *f)
+{
+ return f->f_inode;
+}
+
+static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+ return posix_lock_inode_wait(file_inode(filp), fl);
+}
+
+static inline int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+ return flock_lock_inode_wait(file_inode(filp), fl);
+}
struct fasync_struct {
spinlock_t fa_lock;
@@ -2011,11 +2027,6 @@ extern void ihold(struct inode * inode);
extern void iput(struct inode *);
extern int generic_update_time(struct inode *, struct timespec *, int);
-static inline struct inode *file_inode(const struct file *f)
-{
- return f->f_inode;
-}
-
/* /sys/fs */
extern struct kobject *fs_kobj;
@@ -2235,7 +2246,7 @@ extern int ioctl_preallocate(struct file *filp, void __user *argp);
/* fs/dcache.c */
extern void __init vfs_caches_init_early(void);
-extern void __init vfs_caches_init(unsigned long);
+extern void __init vfs_caches_init(void);
extern struct kmem_cache *names_cachep;
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 1da602982cf9..6cd8c0ee4b6f 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -116,6 +116,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* SAVE_REGS. If another ops with this flag set is already registered
* for any of the functions that this ops will be registered for, then
* this ops will fail to register or set_filter_ip.
+ * PID - Is affected by set_ftrace_pid (allows filtering on those pids)
*/
enum {
FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -132,6 +133,7 @@ enum {
FTRACE_OPS_FL_MODIFYING = 1 << 11,
FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
FTRACE_OPS_FL_IPMODIFY = 1 << 13,
+ FTRACE_OPS_FL_PID = 1 << 14,
};
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -159,6 +161,7 @@ struct ftrace_ops {
struct ftrace_ops *next;
unsigned long flags;
void *private;
+ ftrace_func_t saved_func;
int __percpu *disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
int nr_trampolines;
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index cc7ec129b329..c8393cd4d44f 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -45,7 +45,7 @@ struct seq_file;
* @base: identifies the first GPIO number handled by this chip;
* or, if negative during registration, requests dynamic ID allocation.
* DEPRECATION: providing anything non-negative and nailing the base
- * base offset of GPIO chips is deprecated. Please pass -1 as base to
+ * offset of GPIO chips is deprecated. Please pass -1 as base to
* let gpiolib select the chip base in all possible cases. We want to
* get rid of the static GPIO number space in the long run.
* @ngpio: the number of GPIOs handled by this controller; the last GPIO
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 0042bf330b99..c02b5ce6c5cd 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -230,6 +230,7 @@ struct hid_sensor_common {
struct platform_device *pdev;
unsigned usage_id;
atomic_t data_ready;
+ atomic_t user_requested_state;
struct iio_trigger *trigger;
struct hid_sensor_hub_attribute_info poll;
struct hid_sensor_hub_attribute_info report_state;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 205026175c42..d891f949466a 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -460,15 +460,14 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
return &mm->page_table_lock;
}
-static inline bool hugepages_supported(void)
-{
- /*
- * Some platform decide whether they support huge pages at boot
- * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
- * there is no such support
- */
- return HPAGE_SHIFT != 0;
-}
+#ifndef hugepages_supported
+/*
+ * Some platform decide whether they support huge pages at boot
+ * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
+ * when there is no such support
+ */
+#define hugepages_supported() (HPAGE_SHIFT != 0)
+#endif
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
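The change turns a fixed inline into an overridable default: an architecture
can provide its own hugepages_supported() before this point and the generic
definition steps aside. A tiny standalone illustration of the #ifndef-override
pattern (HPAGE_SHIFT here is a made-up stand-in value):

#include <stdio.h>

/* An arch header would simply do: #define hugepages_supported() 1 */
#ifndef hugepages_supported
#define HPAGE_SHIFT 21
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

int main(void)
{
        printf("huge pages supported: %s\n",
               hugepages_supported() ? "yes" : "no");
        return 0;
}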
diff --git a/include/linux/init.h b/include/linux/init.h
index 7c68c36d3fd8..b449f378f995 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -282,68 +282,8 @@ void __init parse_early_param(void);
void __init parse_early_options(char *cmdline);
#endif /* __ASSEMBLY__ */
-/**
- * module_init() - driver initialization entry point
- * @x: function to be run at kernel boot time or module insertion
- *
- * module_init() will either be called during do_initcalls() (if
- * builtin) or at module insertion time (if a module). There can only
- * be one per module.
- */
-#define module_init(x) __initcall(x);
-
-/**
- * module_exit() - driver exit entry point
- * @x: function to be run when driver is removed
- *
- * module_exit() will wrap the driver clean-up code
- * with cleanup_module() when used with rmmod when
- * the driver is a module. If the driver is statically
- * compiled into the kernel, module_exit() has no effect.
- * There can only be one per module.
- */
-#define module_exit(x) __exitcall(x);
-
#else /* MODULE */
-/*
- * In most cases loadable modules do not need custom
- * initcall levels. There are still some valid cases where
- * a driver may be needed early if built in, and does not
- * matter when built as a loadable module. Like bus
- * snooping debug drivers.
- */
-#define early_initcall(fn) module_init(fn)
-#define core_initcall(fn) module_init(fn)
-#define core_initcall_sync(fn) module_init(fn)
-#define postcore_initcall(fn) module_init(fn)
-#define postcore_initcall_sync(fn) module_init(fn)
-#define arch_initcall(fn) module_init(fn)
-#define subsys_initcall(fn) module_init(fn)
-#define subsys_initcall_sync(fn) module_init(fn)
-#define fs_initcall(fn) module_init(fn)
-#define fs_initcall_sync(fn) module_init(fn)
-#define rootfs_initcall(fn) module_init(fn)
-#define device_initcall(fn) module_init(fn)
-#define device_initcall_sync(fn) module_init(fn)
-#define late_initcall(fn) module_init(fn)
-#define late_initcall_sync(fn) module_init(fn)
-
-#define console_initcall(fn) module_init(fn)
-#define security_initcall(fn) module_init(fn)
-
-/* Each module must use one module_init(). */
-#define module_init(initfn) \
- static inline initcall_t __inittest(void) \
- { return initfn; } \
- int init_module(void) __attribute__((alias(#initfn)));
-
-/* This is only required if you want to be unloadable. */
-#define module_exit(exitfn) \
- static inline exitcall_t __exittest(void) \
- { return exitfn; } \
- void cleanup_module(void) __attribute__((alias(#exitfn)));
-
#define __setup_param(str, unique_id, fn) /* nothing */
#define __setup(str, func) /* nothing */
#endif
@@ -351,24 +291,6 @@ void __init parse_early_options(char *cmdline);
/* Data marked not to be saved by software suspend */
#define __nosavedata __section(.data..nosave)
-/* This means "can be init if no module support, otherwise module load
- may call it." */
-#ifdef CONFIG_MODULES
-#define __init_or_module
-#define __initdata_or_module
-#define __initconst_or_module
-#define __INIT_OR_MODULE .text
-#define __INITDATA_OR_MODULE .data
-#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
-#else
-#define __init_or_module __init
-#define __initdata_or_module __initdata
-#define __initconst_or_module __initconst
-#define __INIT_OR_MODULE __INIT
-#define __INITDATA_OR_MODULE __INITDATA
-#define __INITRODATA_OR_MODULE __INITRODATA
-#endif /*CONFIG_MODULES*/
-
#ifdef MODULE
#define __exit_p(x) x
#else
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index dc767f7c3704..f9c1b6d0f2e4 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -258,7 +258,7 @@ extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
void *data);
struct device *iommu_device_create(struct device *parent, void *drvdata,
const struct attribute_group **groups,
- const char *fmt, ...);
+ const char *fmt, ...) __printf(4, 5);
void iommu_device_destroy(struct device *dev);
int iommu_device_link(struct device *dev, struct device *link);
void iommu_device_unlink(struct device *dev, struct device *link);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 92188b0225bb..51744bcf74ee 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -484,6 +484,7 @@ extern int irq_chip_set_affinity_parent(struct irq_data *data,
extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
void *vcpu_info);
+extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
#endif
/* Handling of unhandled and spurious interrupts: */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5f0be58640ea..5582410727cb 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -411,7 +411,8 @@ extern __printf(3, 0)
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
extern __printf(2, 3)
char *kasprintf(gfp_t gfp, const char *fmt, ...);
-extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
+extern __printf(2, 0)
+char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
extern __scanf(2, 3)
int sscanf(const char *, const char *, ...);
@@ -679,10 +680,10 @@ do { \
__ftrace_vprintk(_THIS_IP_, fmt, vargs); \
} while (0)
-extern int
+extern __printf(2, 0) int
__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
-extern int
+extern __printf(2, 0) int
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
@@ -702,7 +703,7 @@ int trace_printk(const char *fmt, ...)
{
return 0;
}
-static inline int
+static __printf(1, 0) inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
return 0;
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 2d61b909f414..637f67002c5a 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -80,8 +80,9 @@ struct kobject {
extern __printf(2, 3)
int kobject_set_name(struct kobject *kobj, const char *name, ...);
-extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
- va_list vargs);
+extern __printf(2, 0)
+int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
+ va_list vargs);
static inline const char *kobject_name(const struct kobject *kobj)
{
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9564fd78c547..05e99b8ef465 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -734,6 +734,24 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
return false;
}
#endif
+#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
+void kvm_arch_start_assignment(struct kvm *kvm);
+void kvm_arch_end_assignment(struct kvm *kvm);
+bool kvm_arch_has_assigned_device(struct kvm *kvm);
+#else
+static inline void kvm_arch_start_assignment(struct kvm *kvm)
+{
+}
+
+static inline void kvm_arch_end_assignment(struct kvm *kvm)
+{
+}
+
+static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
+{
+ return false;
+}
+#endif
static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 36ce37bcc963..c9cfbcdb8d14 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -431,6 +431,8 @@ enum {
ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */
+ ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
+ ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2e872f92dbac..bf6f117fcf4d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1003,6 +1003,34 @@ static inline int page_mapped(struct page *page)
}
/*
+ * Return true only if the page has been allocated with
+ * ALLOC_NO_WATERMARKS and the low watermark was not
+ * met implying that the system is under some pressure.
+ */
+static inline bool page_is_pfmemalloc(struct page *page)
+{
+ /*
+ * Page index cannot be this large so this must be
+ * a pfmemalloc page.
+ */
+ return page->index == -1UL;
+}
+
+/*
+ * Only to be called by the page allocator on a freshly allocated
+ * page.
+ */
+static inline void set_page_pfmemalloc(struct page *page)
+{
+ page->index = -1UL;
+}
+
+static inline void clear_page_pfmemalloc(struct page *page)
+{
+ page->index = 0;
+}
+
+/*
* Different kinds of faults, as returned by handle_mm_fault().
* Used to decide whether a process gets delivered SIGBUS or
* just gets major/minor fault counters bumped up.
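The new helpers work because page->index shares a union with the removed
pfmemalloc flag and can never legitimately be (unsigned long)-1, so the
sentinel value doubles as the flag without spending a page flag bit. A generic
standalone sketch of the trick (page_like is an illustrative stand-in, not
struct page):

#include <stdbool.h>
#include <stdio.h>

struct page_like {
        union {
                unsigned long index;    /* normal meaning */
                void *freelist;
        };
};

static void set_flag(struct page_like *p)        { p->index = -1UL; }
static void clear_flag(struct page_like *p)      { p->index = 0; }
static bool test_flag(const struct page_like *p) { return p->index == -1UL; }

int main(void)
{
        struct page_like p = { .index = 0 };

        set_flag(&p);
        printf("flag=%d\n", test_flag(&p));
        clear_flag(&p);
        printf("flag=%d\n", test_flag(&p));
        return 0;
}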
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0038ac7466fd..15549578d559 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -63,15 +63,6 @@ struct page {
union {
pgoff_t index; /* Our offset within mapping. */
void *freelist; /* sl[aou]b first free object */
- bool pfmemalloc; /* If set by the page allocator,
- * ALLOC_NO_WATERMARKS was set
- * and the low watermark was not
- * met implying that the system
- * is under some pressure. The
- * caller should try ensure
- * this page is only used to
- * free other pages.
- */
};
union {
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
index c5d52780d6a0..3ba327af055c 100644
--- a/include/linux/mmiotrace.h
+++ b/include/linux/mmiotrace.h
@@ -106,6 +106,6 @@ extern void enable_mmiotrace(void);
extern void disable_mmiotrace(void);
extern void mmio_trace_rw(struct mmiotrace_rw *rw);
extern void mmio_trace_mapping(struct mmiotrace_map *map);
-extern int mmio_trace_printk(const char *fmt, va_list args);
+extern __printf(1, 0) int mmio_trace_printk(const char *fmt, va_list args);
#endif /* _LINUX_MMIOTRACE_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index d67b1932cc59..3a19c79918e0 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -11,6 +11,7 @@
#include <linux/compiler.h>
#include <linux/cache.h>
#include <linux/kmod.h>
+#include <linux/init.h>
#include <linux/elf.h>
#include <linux/stringify.h>
#include <linux/kobject.h>
@@ -71,6 +72,89 @@ extern struct module_attribute module_uevent;
extern int init_module(void);
extern void cleanup_module(void);
+#ifndef MODULE
+/**
+ * module_init() - driver initialization entry point
+ * @x: function to be run at kernel boot time or module insertion
+ *
+ * module_init() will either be called during do_initcalls() (if
+ * builtin) or at module insertion time (if a module). There can only
+ * be one per module.
+ */
+#define module_init(x) __initcall(x);
+
+/**
+ * module_exit() - driver exit entry point
+ * @x: function to be run when driver is removed
+ *
+ * module_exit() will wrap the driver clean-up code
+ * with cleanup_module() when used with rmmod when
+ * the driver is a module. If the driver is statically
+ * compiled into the kernel, module_exit() has no effect.
+ * There can only be one per module.
+ */
+#define module_exit(x) __exitcall(x);
+
+#else /* MODULE */
+
+/*
+ * In most cases loadable modules do not need custom
+ * initcall levels. There are still some valid cases where
+ * a driver may be needed early if built in, and does not
+ * matter when built as a loadable module. Like bus
+ * snooping debug drivers.
+ */
+#define early_initcall(fn) module_init(fn)
+#define core_initcall(fn) module_init(fn)
+#define core_initcall_sync(fn) module_init(fn)
+#define postcore_initcall(fn) module_init(fn)
+#define postcore_initcall_sync(fn) module_init(fn)
+#define arch_initcall(fn) module_init(fn)
+#define subsys_initcall(fn) module_init(fn)
+#define subsys_initcall_sync(fn) module_init(fn)
+#define fs_initcall(fn) module_init(fn)
+#define fs_initcall_sync(fn) module_init(fn)
+#define rootfs_initcall(fn) module_init(fn)
+#define device_initcall(fn) module_init(fn)
+#define device_initcall_sync(fn) module_init(fn)
+#define late_initcall(fn) module_init(fn)
+#define late_initcall_sync(fn) module_init(fn)
+
+#define console_initcall(fn) module_init(fn)
+#define security_initcall(fn) module_init(fn)
+
+/* Each module must use one module_init(). */
+#define module_init(initfn) \
+ static inline initcall_t __inittest(void) \
+ { return initfn; } \
+ int init_module(void) __attribute__((alias(#initfn)));
+
+/* This is only required if you want to be unloadable. */
+#define module_exit(exitfn) \
+ static inline exitcall_t __exittest(void) \
+ { return exitfn; } \
+ void cleanup_module(void) __attribute__((alias(#exitfn)));
+
+#endif
+
+/* This means "can be init if no module support, otherwise module load
+ may call it." */
+#ifdef CONFIG_MODULES
+#define __init_or_module
+#define __initdata_or_module
+#define __initconst_or_module
+#define __INIT_OR_MODULE .text
+#define __INITDATA_OR_MODULE .data
+#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
+#else
+#define __init_or_module __init
+#define __initdata_or_module __initdata
+#define __initconst_or_module __initconst
+#define __INIT_OR_MODULE __INIT
+#define __INITDATA_OR_MODULE __INITDATA
+#define __INITRODATA_OR_MODULE __INITRODATA
+#endif /*CONFIG_MODULES*/
+
/* Archs provide a method of finding the correct exception table. */
struct exception_table_entry;
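For reference, the pattern documented by the kernel-doc moved into this header
is the usual two-entry-point module skeleton (builds only against a kernel
tree; shown for shape, it is not part of this patch):

#include <linux/init.h>
#include <linux/module.h>

static int __init hello_init(void)
{
        pr_info("hello: loaded\n");
        return 0;
}

static void __exit hello_exit(void)
{
        pr_info("hello: unloaded\n");
}

module_init(hello_init);
module_exit(hello_exit);
MODULE_LICENSE("GPL");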
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index f25e2bdd188c..272f42952f34 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -178,17 +178,17 @@ typedef enum {
/* Chip may not exist, so silence any errors in scan */
#define NAND_SCAN_SILENT_NODEV 0x00040000
/*
- * This option could be defined by controller drivers to protect against
- * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
- */
-#define NAND_USE_BOUNCE_BUFFER 0x00080000
-/*
* Autodetect nand buswidth with readid/onfi.
* This suppose the driver will configure the hardware in 8 bits mode
* when calling nand_scan_ident, and update its configuration
* before calling nand_scan_tail.
*/
#define NAND_BUSWIDTH_AUTO 0x00080000
+/*
+ * This option could be defined by controller drivers to protect against
+ * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
+ */
+#define NAND_USE_BOUNCE_BUFFER 0x00100000
/* Options set by nand scan */
/* Nand scan has allocated controller struct */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index f91b5ade30c9..874b77228fb9 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -292,9 +292,12 @@ static inline void nfs_mark_for_revalidate(struct inode *inode)
struct nfs_inode *nfsi = NFS_I(inode);
spin_lock(&inode->i_lock);
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR |
+ NFS_INO_REVAL_PAGECACHE |
+ NFS_INO_INVALID_ACCESS |
+ NFS_INO_INVALID_ACL;
if (S_ISDIR(inode->i_mode))
- nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
+ nfsi->cache_validity |= NFS_INO_INVALID_DATA;
spin_unlock(&inode->i_lock);
}
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index a2ea1491d3df..20bc8e51b161 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -220,7 +220,7 @@ struct nfs_server {
#define NFS_CAP_SYMLINKS (1U << 2)
#define NFS_CAP_ACLS (1U << 3)
#define NFS_CAP_ATOMIC_OPEN (1U << 4)
-#define NFS_CAP_CHANGE_ATTR (1U << 5)
+/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */
#define NFS_CAP_FILEID (1U << 6)
#define NFS_CAP_MODE (1U << 7)
#define NFS_CAP_NLINK (1U << 8)
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 4c508549833a..cc7dd687a89d 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -59,7 +59,7 @@ void of_dma_configure(struct device *dev, struct device_node *np);
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
- struct device_driver *drv)
+ const struct device_driver *drv)
{
return 0;
}
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f34e040b34e9..41c93844fb1d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -631,15 +631,19 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
1 << PG_private | 1 << PG_private_2 | \
1 << PG_writeback | 1 << PG_reserved | \
1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
- 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \
+ 1 << PG_unevictable | __PG_MLOCKED | \
__PG_COMPOUND_LOCK)
/*
* Flags checked when a page is prepped for return by the page allocator.
- * Pages being prepped should not have any flags set. It they are set,
+ * Pages being prepped should not have these flags set. If they are set,
* there has been a kernel bug or struct page corruption.
+ *
+ * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
+ * alloc-free cycle to prevent from reusing the page.
*/
-#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1)
+#define PAGE_FLAGS_CHECK_AT_PREP \
+ (((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
#define PAGE_FLAGS_PRIVATE \
(1 << PG_private | 1 << PG_private_2)
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index b48c3471c254..cacaabea8a09 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -8,6 +8,7 @@ extern struct page_ext_operations page_owner_ops;
extern void __reset_page_owner(struct page *page, unsigned int order);
extern void __set_page_owner(struct page *page,
unsigned int order, gfp_t gfp_mask);
+extern gfp_t __get_page_owner_gfp(struct page *page);
static inline void reset_page_owner(struct page *page, unsigned int order)
{
@@ -25,6 +26,14 @@ static inline void set_page_owner(struct page *page,
__set_page_owner(page, order, gfp_mask);
}
+
+static inline gfp_t get_page_owner_gfp(struct page *page)
+{
+ if (likely(!page_owner_inited))
+ return 0;
+
+ return __get_page_owner_gfp(page);
+}
#else
static inline void reset_page_owner(struct page *page, unsigned int order)
{
@@ -33,6 +42,10 @@ static inline void set_page_owner(struct page *page,
unsigned int order, gfp_t gfp_mask)
{
}
+static inline gfp_t get_page_owner_gfp(struct page *page)
+{
+ return 0;
+}
#endif /* CONFIG_PAGE_OWNER */
#endif /* __LINUX_PAGE_OWNER_H */
diff --git a/include/linux/pata_arasan_cf_data.h b/include/linux/pata_arasan_cf_data.h
index 3cc21c9cc1e8..9fade5dd2e86 100644
--- a/include/linux/pata_arasan_cf_data.h
+++ b/include/linux/pata_arasan_cf_data.h
@@ -4,7 +4,7 @@
* Arasan Compact Flash host controller platform data header file
*
* Copyright (C) 2011 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h
index 044a124bfbbc..21b15f6fee25 100644
--- a/include/linux/platform_data/macb.h
+++ b/include/linux/platform_data/macb.h
@@ -8,11 +8,19 @@
#ifndef __MACB_PDATA_H__
#define __MACB_PDATA_H__
+/**
+ * struct macb_platform_data - platform data for MACB Ethernet
+ * @phy_mask: phy mask passed when registering the MDIO bus
+ * within the driver
+ * @phy_irq_pin: PHY IRQ
+ * @is_rmii: using RMII interface?
+ * @rev_eth_addr: reverse Ethernet address byte order
+ */
struct macb_platform_data {
u32 phy_mask;
- int phy_irq_pin; /* PHY IRQ */
- u8 is_rmii; /* using RMII interface? */
- u8 rev_eth_addr; /* reverse Ethernet address byte order */
+ int phy_irq_pin;
+ u8 is_rmii;
+ u8 rev_eth_addr;
};
#endif /* __MACB_PDATA_H__ */
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
index 75f70f6ac137..e1571efa3f2b 100644
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -43,7 +43,6 @@ struct esdhc_platform_data {
enum wp_types wp_type;
enum cd_types cd_type;
int max_bus_width;
- unsigned int f_max;
bool support_vsel;
unsigned int delay_line;
};
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 58b1fec40d37..a6298b27ac99 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -122,7 +122,7 @@ static inline __printf(1, 2) __cold
void early_printk(const char *s, ...) { }
#endif
-typedef int(*printk_func_t)(const char *fmt, va_list args);
+typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args);
#ifdef CONFIG_PRINTK
asmlinkage __printf(5, 0)
@@ -166,7 +166,7 @@ char *log_buf_addr_get(void);
u32 log_buf_len_get(void);
void log_buf_kexec_setup(void);
void __init setup_log_buf(int early);
-void dump_stack_set_arch_desc(const char *fmt, ...);
+__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
void dump_stack_print_info(const char *log_lvl);
void show_regs_print_info(const char *log_lvl);
#else
@@ -217,7 +217,7 @@ static inline void setup_log_buf(int early)
{
}
-static inline void dump_stack_set_arch_desc(const char *fmt, ...)
+static inline __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...)
{
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ae21f1591615..04b5ada460b4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1522,8 +1522,6 @@ struct task_struct {
/* hung task detection */
unsigned long last_switch_count;
#endif
-/* CPU-specific state of this task */
- struct thread_struct thread;
/* filesystem information */
struct fs_struct *fs;
/* open file information */
@@ -1778,8 +1776,22 @@ struct task_struct {
unsigned long task_state_change;
#endif
int pagefault_disabled;
+/* CPU-specific state of this task */
+ struct thread_struct thread;
+/*
+ * WARNING: on x86, 'thread_struct' contains a variable-sized
+ * structure. It *MUST* be at the end of 'task_struct'.
+ *
+ * Do not put anything below here!
+ */
};
+#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
+extern int arch_task_struct_size __read_mostly;
+#else
+# define arch_task_struct_size (sizeof(struct task_struct))
+#endif
+
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
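Moving thread_struct to the end, together with arch_task_struct_size, lets an
architecture append a variable amount of per-task state and size the
allocation at runtime. A userspace analogue of the constraint, using a C
flexible array member (all names invented):

#include <stdlib.h>

struct task_like {
        int pid;
        /* ... fixed fields ... */
        unsigned char cpu_state[];      /* must be the final member */
};

static struct task_like *alloc_task(size_t cpu_state_bytes)
{
        /* cf. allocating arch_task_struct_size instead of sizeof(task_struct) */
        return calloc(1, sizeof(struct task_like) + cpu_state_bytes);
}

int main(void)
{
        struct task_like *t = alloc_task(512);

        free(t);
        return 0;
}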
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d6cdd6e87d53..9b88536487e6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1602,20 +1602,16 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
/*
- * Propagate page->pfmemalloc to the skb if we can. The problem is
- * that not all callers have unique ownership of the page. If
- * pfmemalloc is set, we check the mapping as a mapping implies
- * page->index is set (index and pfmemalloc share space).
- * If it's a valid mapping, we cannot use page->pfmemalloc but we
- * do not lose pfmemalloc information as the pages would not be
- * allocated using __GFP_MEMALLOC.
+ * Propagate page pfmemalloc to the skb if we can. The problem is
+ * that not all callers have unique ownership of the page but rely
+ * on page_is_pfmemalloc doing the right thing(tm).
*/
frag->page.p = page;
frag->page_offset = off;
skb_frag_size_set(frag, size);
page = compound_head(page);
- if (page->pfmemalloc && !page->mapping)
+ if (page_is_pfmemalloc(page))
skb->pfmemalloc = true;
}
@@ -2263,7 +2259,7 @@ static inline struct page *dev_alloc_page(void)
static inline void skb_propagate_pfmemalloc(struct page *page,
struct sk_buff *skb)
{
- if (page && page->pfmemalloc)
+ if (page_is_pfmemalloc(page))
skb->pfmemalloc = true;
}
@@ -2884,11 +2880,11 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
*
* PHY drivers may accept clones of transmitted packets for
* timestamping via their phy_driver.txtstamp method. These drivers
- * must call this function to return the skb back to the stack, with
- * or without a timestamp.
+ * must call this function to return the skb back to the stack with a
+ * timestamp.
*
* @skb: clone of the original outgoing packet
- * @hwtstamps: hardware time stamps, may be NULL if not available

+ * @hwtstamps: hardware time stamps
*
*/
void skb_complete_tx_timestamp(struct sk_buff *skb,
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 7c9b484735c5..1f6526c76ee8 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -80,6 +80,9 @@
#define CDC_NCM_TIMER_INTERVAL_MIN 5UL
#define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC)
+/* Driver flags */
+#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
+
#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
(x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
#define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB)
@@ -103,9 +106,11 @@ struct cdc_ncm_ctx {
spinlock_t mtx;
atomic_t stop;
+ int drvflags;
u32 timer_interval;
u32 max_ndp_size;
+ struct usb_cdc_ncm_ndp16 *delayed_ndp16;
u32 tx_timer_pending;
u32 tx_curr_frame_num;
@@ -133,7 +138,7 @@ struct cdc_ncm_ctx {
};
u8 cdc_ncm_select_altsetting(struct usb_interface *intf);
-int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
+int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags);
void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 45534da57759..644bdc61c387 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -74,8 +74,6 @@ enum rc_filter_type {
* @input_dev: the input child device used to communicate events to userspace
* @driver_type: specifies if protocol decoding is done in hardware or software
* @idle: used to keep track of RX state
- * @encode_wakeup: wakeup filtering uses IR encode API, therefore the allowed
- * wakeup protocols is the set of all raw encoders
* @allowed_protocols: bitmask with the supported RC_BIT_* protocols
* @enabled_protocols: bitmask with the enabled RC_BIT_* protocols
* @allowed_wakeup_protocols: bitmask with the supported RC_BIT_* wakeup protocols
@@ -136,7 +134,6 @@ struct rc_dev {
struct input_dev *input_dev;
enum rc_driver_type driver_type;
bool idle;
- bool encode_wakeup;
u64 allowed_protocols;
u64 enabled_protocols;
u64 allowed_wakeup_protocols;
@@ -246,7 +243,6 @@ static inline void init_ir_raw_event(struct ir_raw_event *ev)
#define US_TO_NS(usec) ((usec) * 1000)
#define MS_TO_US(msec) ((msec) * 1000)
#define MS_TO_NS(msec) ((msec) * 1000 * 1000)
-#define NS_TO_US(nsec) DIV_ROUND_UP(nsec, 1000L)
void ir_raw_event_handle(struct rc_dev *dev);
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev);
@@ -254,9 +250,6 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type);
int ir_raw_event_store_with_filter(struct rc_dev *dev,
struct ir_raw_event *ev);
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle);
-int ir_raw_encode_scancode(u64 protocols,
- const struct rc_scancode_filter *scancode,
- struct ir_raw_event *events, unsigned int max);
static inline void ir_raw_event_reset(struct rc_dev *dev)
{
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 22a44c2f5963..c192e1b46cdc 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -139,6 +139,7 @@ enum vb2_io_modes {
* @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf
* @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver
* @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver
+ * @VB2_BUF_STATE_REQUEUEING: re-queue a buffer to the driver
* @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used
* in a hardware operation
* @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but
@@ -152,6 +153,7 @@ enum vb2_buffer_state {
VB2_BUF_STATE_PREPARING,
VB2_BUF_STATE_PREPARED,
VB2_BUF_STATE_QUEUED,
+ VB2_BUF_STATE_REQUEUEING,
VB2_BUF_STATE_ACTIVE,
VB2_BUF_STATE_DONE,
VB2_BUF_STATE_ERROR,
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 3ee4c92afd1b..931738bc5bba 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -99,7 +99,6 @@ struct tc_action_ops {
int tcf_hash_search(struct tc_action *a, u32 index);
void tcf_hash_destroy(struct tc_action *a);
-int tcf_hash_release(struct tc_action *a, int bind);
u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
int tcf_hash_check(u32 index, struct tc_action *a, int bind);
int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
@@ -107,6 +106,13 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
void tcf_hash_insert(struct tc_action *a);
+int __tcf_hash_release(struct tc_action *a, bool bind, bool strict);
+
+static inline int tcf_hash_release(struct tc_action *a, bool bind)
+{
+ return __tcf_hash_release(a, bind, false);
+}
+
int tcf_register_action(struct tc_action_ops *a, unsigned int mask);
int tcf_unregister_action(struct tc_action_ops *a);
int tcf_action_destroy(struct list_head *actions, int bind);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index a741678f24a2..883fe1e7c5a1 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4868,6 +4868,23 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef,
enum nl80211_iftype iftype);
+/**
+ * cfg80211_reg_can_beacon_relax - check if beaconing is allowed with relaxation
+ * @wiphy: the wiphy
+ * @chandef: the channel definition
+ * @iftype: interface type
+ *
+ * Return: %true if there is no secondary channel or the secondary channel(s)
+ * can be used for beaconing (i.e. is not a radar channel etc.). This version
+ * also checks if IR-relaxation conditions apply, to allow beaconing under
+ * more permissive conditions.
+ *
+ * Requires the RTNL to be held.
+ */
+bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ enum nl80211_iftype iftype);
+
/*
* cfg80211_ch_switch_notify - update wdev channel and notify userspace
* @dev: the device which switched channels
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index e1300b3dd597..53eead2da743 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -21,13 +21,11 @@ struct netns_frags {
* @INET_FRAG_FIRST_IN: first fragment has arrived
* @INET_FRAG_LAST_IN: final fragment has arrived
* @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
- * @INET_FRAG_EVICTED: frag queue is being evicted
*/
enum {
INET_FRAG_FIRST_IN = BIT(0),
INET_FRAG_LAST_IN = BIT(1),
INET_FRAG_COMPLETE = BIT(2),
- INET_FRAG_EVICTED = BIT(3)
};
/**
@@ -45,6 +43,7 @@ enum {
* @flags: fragment queue flags
* @max_size: maximum received fragment size
* @net: namespace that this frag belongs to
+ * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
*/
struct inet_frag_queue {
spinlock_t lock;
@@ -59,6 +58,7 @@ struct inet_frag_queue {
__u8 flags;
u16 max_size;
struct netns_frags *net;
+ struct hlist_node list_evictor;
};
#define INETFRAGS_HASHSZ 1024
@@ -125,6 +125,11 @@ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f
inet_frag_destroy(q, f);
}
+static inline bool inet_frag_evicting(struct inet_frag_queue *q)
+{
+ return !hlist_unhashed(&q->list_evictor);
+}
+
/* Memory Tracking Functions. */
/* The default percpu_counter batch size is not big enough to scale to
@@ -139,14 +144,14 @@ static inline int frag_mem_limit(struct netns_frags *nf)
return percpu_counter_read(&nf->mem);
}
-static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
+static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
{
- __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
+ __percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
}
-static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
+static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
{
- __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
+ __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
}
static inline void init_frag_mem_limit(struct netns_frags *nf)
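For readers unfamiliar with __percpu_counter_add(): the batch argument bounds how far a per-CPU delta may drift before it is folded into the shared counter, trading read accuracy for less contention on the shared cacheline, which is why the comment above notes the default batch does not scale here. A rough single-threaded sketch of the batching idea (hypothetical names, not the kernel implementation):

#include <stdio.h>

#define FRAG_BATCH 64   /* assumed batch size, analogous to frag_percpu_counter_batch */

struct batched_counter {
        long global;    /* shared total, expensive to touch from many CPUs */
        long local;     /* per-CPU/per-thread delta, cheap to touch */
};

static void counter_add(struct batched_counter *c, long amount)
{
        c->local += amount;
        /* Only fold into the shared counter once the local delta is large
         * enough; fast reads of c->global may lag by up to FRAG_BATCH. */
        if (c->local >= FRAG_BATCH || c->local <= -FRAG_BATCH) {
                c->global += c->local;
                c->local = 0;
        }
}

static long counter_read_fast(const struct batched_counter *c)
{
        return c->global;               /* fast, possibly stale */
}

int main(void)
{
        struct batched_counter mem = { 0, 0 };

        for (int i = 0; i < 100; i++)
                counter_add(&mem, 1);
        printf("fast read: %ld (exact: %ld)\n",
               counter_read_fast(&mem), mem.global + mem.local);
        return 0;
}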
diff --git a/include/net/ip.h b/include/net/ip.h
index 0750a186ea63..d5fe9f2ab699 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -161,6 +161,7 @@ static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
}
/* datagram.c */
+int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
void ip4_datagram_release_cb(struct sock *sk);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 49c142bdf01e..5fa643b4e891 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -183,7 +183,6 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
struct fib_table {
struct hlist_node tb_hlist;
u32 tb_id;
- int tb_default;
int tb_num_default;
struct rcu_head rcu;
unsigned long *tb_data;
@@ -290,7 +289,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb);
int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
u8 tos, int oif, struct net_device *dev,
struct in_device *idev, u32 *itag);
-void fib_select_default(struct fib_result *res);
+void fib_select_default(const struct flowi4 *flp, struct fib_result *res);
#ifdef CONFIG_IP_ROUTE_CLASSID
static inline int fib_num_tclassid_users(struct net *net)
{
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 095433b8a8b0..37cd3911d5c5 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -291,7 +291,7 @@ extern unsigned int nf_conntrack_max;
extern unsigned int nf_conntrack_hash_rnd;
void init_nf_conntrack_hash_rnd(void);
-void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl);
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags);
#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 29d6a94db54d..723b61c82b3f 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -68,7 +68,6 @@ struct ct_pcpu {
spinlock_t lock;
struct hlist_nulls_head unconfirmed;
struct hlist_nulls_head dying;
- struct hlist_nulls_head tmpl;
};
struct netns_ct {
diff --git a/include/net/sock.h b/include/net/sock.h
index 05a8c1aea251..f21f0708ec59 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -902,7 +902,7 @@ void sk_stream_kill_queues(struct sock *sk);
void sk_set_memalloc(struct sock *sk);
void sk_clear_memalloc(struct sock *sk);
-int sk_wait_data(struct sock *sk, long *timeo);
+int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
struct request_sock_ops;
struct timewait_sock_ops;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 986fddb08579..b0f898e3b2e7 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1745,6 +1745,7 @@ struct ib_device {
char node_desc[64];
__be64 node_guid;
u32 local_dma_lkey;
+ u16 is_switch:1;
u8 node_type;
u8 phys_port_cnt;
@@ -1824,6 +1825,20 @@ enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
u8 port_num);
/**
+ * rdma_cap_ib_switch - Check if the device is IB switch
+ * @device: Device to check
+ *
+ * The device driver is responsible for setting the is_switch bit
+ * in the ib_device structure at init time.
+ *
+ * Return: true if the device is an IB switch.
+ */
+static inline bool rdma_cap_ib_switch(const struct ib_device *device)
+{
+ return device->is_switch;
+}
+
+/**
* rdma_start_port - Return the first valid port number for the device
* specified
*
@@ -1833,7 +1848,7 @@ enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
*/
static inline u8 rdma_start_port(const struct ib_device *device)
{
- return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
+ return rdma_cap_ib_switch(device) ? 0 : 1;
}
/**
@@ -1846,8 +1861,7 @@ static inline u8 rdma_start_port(const struct ib_device *device)
*/
static inline u8 rdma_end_port(const struct ib_device *device)
{
- return (device->node_type == RDMA_NODE_IB_SWITCH) ?
- 0 : device->phys_port_cnt;
+ return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
}
static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
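The two helpers above are typically used to bound a port loop: with is_switch set, the device exposes only the switch management port 0, otherwise ports run 1..phys_port_cnt. A standalone mock of the pattern (userspace, hypothetical struct; the real ib_device is far larger):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ib_device_mock {
        uint16_t is_switch:1;
        uint8_t phys_port_cnt;
};

static bool cap_ib_switch(const struct ib_device_mock *dev)
{
        return dev->is_switch;
}

static uint8_t start_port(const struct ib_device_mock *dev)
{
        return cap_ib_switch(dev) ? 0 : 1;  /* switches use port 0 only */
}

static uint8_t end_port(const struct ib_device_mock *dev)
{
        return cap_ib_switch(dev) ? 0 : dev->phys_port_cnt;
}

int main(void)
{
        struct ib_device_mock hca = { .is_switch = 0, .phys_port_cnt = 2 };
        struct ib_device_mock sw  = { .is_switch = 1, .phys_port_cnt = 36 };

        for (uint8_t p = start_port(&hca); p <= end_port(&hca); p++)
                printf("HCA: init port %u\n", p);
        for (uint8_t p = start_port(&sw); p <= end_port(&sw); p++)
                printf("switch: init port %u\n", p);
        return 0;
}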
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 4942710ef720..8d1d7fa67ec4 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -28,7 +28,6 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
u64 * info_out);
extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
-extern void scsi_set_sense_information(u8 *buf, u64 info);
extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
diff --git a/include/scsi/scsi_transport_srp.h b/include/scsi/scsi_transport_srp.h
index cdb05dd1d440..d40d3ef25707 100644
--- a/include/scsi/scsi_transport_srp.h
+++ b/include/scsi/scsi_transport_srp.h
@@ -119,6 +119,7 @@ extern struct srp_rport *srp_rport_add(struct Scsi_Host *,
extern void srp_rport_del(struct srp_rport *);
extern int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo,
int dev_loss_tmo);
+int srp_parse_tmo(int *tmo, const char *buf);
extern int srp_reconnect_rport(struct srp_rport *rport);
extern void srp_start_tl_fail_timers(struct srp_rport *rport);
extern void srp_remove_host(struct Scsi_Host *);
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index 865a141b118b..427bc41df3ae 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -141,6 +141,8 @@ struct snd_soc_tplg_ops {
int io_ops_count;
};
+#ifdef CONFIG_SND_SOC_TOPOLOGY
+
/* gets a pointer to data from the firmware block header */
static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr)
{
@@ -165,4 +167,14 @@ int snd_soc_tplg_widget_bind_event(struct snd_soc_dapm_widget *w,
const struct snd_soc_tplg_widget_events *events, int num_events,
u16 event_type);
+#else
+
+static inline int snd_soc_tplg_component_remove(struct snd_soc_component *comp,
+ u32 index)
+{
+ return 0;
+}
+
+#endif
+
#endif
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 34117b8b72e4..0aedbb2c10e0 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -595,6 +595,7 @@ struct iscsi_conn {
int bitmap_id;
int rx_thread_active;
struct task_struct *rx_thread;
+ struct completion rx_login_comp;
int tx_thread_active;
struct task_struct *tx_thread;
/* list_head for session connection list */
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index b6fce900a833..fbdd11851725 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -32,7 +32,7 @@
#ifndef __AMDGPU_DRM_H__
#define __AMDGPU_DRM_H__
-#include <drm/drm.h>
+#include "drm.h"
#define DRM_AMDGPU_GEM_CREATE 0x00
#define DRM_AMDGPU_GEM_MMAP 0x01
@@ -614,6 +614,8 @@ struct drm_amdgpu_info_device {
uint32_t vram_type;
/** video memory bit width*/
uint32_t vram_bit_width;
+ /* vce harvesting instance */
+ uint32_t vce_harvest_config;
};
struct drm_amdgpu_info_hw_ip {
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 6e1a2ed116cb..db809b722985 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1070,6 +1070,14 @@ struct drm_i915_reg_read {
__u64 offset;
__u64 val; /* Return value */
};
+/* Known registers:
+ *
+ * Render engine timestamp - 0x2358 + 64bit - gen7+
+ * - Note this register returns an invalid value if read with the default
+ * single-instruction 8-byte read; to work around that, use
+ * offset (0x2538 | 1) instead.
+ *
+ */
struct drm_i915_reset_stats {
__u32 ctx_id;
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 1ef76661e1a1..01aa2a8e3f8d 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -33,7 +33,7 @@
#ifndef __RADEON_DRM_H__
#define __RADEON_DRM_H__
-#include <drm/drm.h>
+#include "drm.h"
/* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (radeon_sarea.h)
diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h
index 669a1f0b1d97..23cbd34e4ac7 100644
--- a/include/uapi/linux/netconf.h
+++ b/include/uapi/linux/netconf.h
@@ -15,6 +15,7 @@ enum {
NETCONFA_RP_FILTER,
NETCONFA_MC_FORWARDING,
NETCONFA_PROXY_NEIGH,
+ NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
__NETCONFA_MAX
};
#define NETCONFA_MAX (__NETCONFA_MAX - 1)
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index efe3443572ba..413417f3707b 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -319,6 +319,7 @@
#define PCI_MSIX_PBA 8 /* Pending Bit Array offset */
#define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */
#define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */
+#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */
#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
/* MSI-X Table entry format */
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index 7bbee79ca293..ec32293a00db 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -34,6 +34,7 @@
/* The feature bitmap for virtio net */
#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
+#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2 /* Dynamic offload configuration. */
#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
@@ -226,4 +227,19 @@ struct virtio_net_ctrl_mq {
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
+/*
+ * Control network offloads
+ *
+ * Reconfigures the network offloads that Guest can handle.
+ *
+ * Available with the VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit.
+ *
+ * Command data format matches the feature bit mask exactly.
+ *
+ * See VIRTIO_NET_F_GUEST_* for the list of offloads
+ * that can be enabled/disabled.
+ */
+#define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5
+#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0
+
#endif /* _LINUX_VIRTIO_NET_H */
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index 75301468359f..90007a1abcab 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -157,6 +157,12 @@ struct virtio_pci_common_cfg {
__le32 queue_used_hi; /* read-write */
};
+/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
+struct virtio_pci_cfg_cap {
+ struct virtio_pci_cap cap;
+ __u8 pci_cfg_data[4]; /* Data for BAR access. */
+};
+
/* Macro versions of offsets for the Old Timers! */
#define VIRTIO_PCI_CAP_VNDR 0
#define VIRTIO_PCI_CAP_NEXT 1
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 915980ac68df..c07295969b7e 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -31,6 +31,9 @@
* SUCH DAMAGE.
*
* Copyright Rusty Russell IBM Corporation 2007. */
+#ifndef __KERNEL__
+#include <stdint.h>
+#endif
#include <linux/types.h>
#include <linux/virtio_types.h>
@@ -143,7 +146,7 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p,
vr->num = num;
vr->desc = p;
vr->avail = p + num*sizeof(struct vring_desc);
- vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__virtio16)
+ vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16)
+ align-1) & ~(align - 1));
}
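The vring_init() change above swaps unsigned long for uintptr_t so the pointer round-up stays correct when this uapi header is compiled outside the kernel, e.g. in userspace tools on ABIs where unsigned long is narrower than a pointer. The round-up idiom itself, in isolation:

#include <stdint.h>
#include <stdio.h>

/* Round an address up to the next 'align' boundary (align must be a power
 * of two): the same (addr + align - 1) & ~(align - 1) arithmetic used by
 * vring_init(), done on uintptr_t so it is pointer-sized on every ABI. */
static void *align_up(void *p, uintptr_t align)
{
        uintptr_t addr = (uintptr_t)p;

        return (void *)((addr + align - 1) & ~(align - 1));
}

int main(void)
{
        char buf[256];
        void *used = align_up(buf + 13, 64);

        printf("buf+13=%p aligned to 64 -> %p\n", (void *)(buf + 13), used);
        return 0;
}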
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index 12215205ab8d..247c50bd60f0 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -18,6 +18,12 @@
#include <linux/types.h>
#include <sound/asound.h>
+#ifndef __KERNEL__
+#error This API is an early revision and not enabled in the current
+#error kernel release, it will be enabled in a future kernel version
+#error with incompatible changes to what is here.
+#endif
+
/*
* Maximum number of channels topology kcontrol can represent.
*/
@@ -77,7 +83,7 @@
#define SND_SOC_TPLG_NUM_TEXTS 16
/* ABI version */
-#define SND_SOC_TPLG_ABI_VERSION 0x2
+#define SND_SOC_TPLG_ABI_VERSION 0x3
/* Max size of TLV data */
#define SND_SOC_TPLG_TLV_SIZE 32
@@ -97,7 +103,8 @@
#define SND_SOC_TPLG_TYPE_PCM 7
#define SND_SOC_TPLG_TYPE_MANIFEST 8
#define SND_SOC_TPLG_TYPE_CODEC_LINK 9
-#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_CODEC_LINK
+#define SND_SOC_TPLG_TYPE_PDATA 10
+#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_PDATA
/* vendor block IDs - please add new vendor types to end */
#define SND_SOC_TPLG_TYPE_VENDOR_FW 1000
@@ -110,7 +117,7 @@
/*
* Block Header.
- * This header preceeds all object and object arrays below.
+ * This header precedes all object and object arrays below.
*/
struct snd_soc_tplg_hdr {
__le32 magic; /* magic number */
@@ -137,11 +144,19 @@ struct snd_soc_tplg_private {
/*
* Kcontrol TLV data.
*/
+struct snd_soc_tplg_tlv_dbscale {
+ __le32 min;
+ __le32 step;
+ __le32 mute;
+} __attribute__((packed));
+
struct snd_soc_tplg_ctl_tlv {
- __le32 size; /* in bytes aligned to 4 */
- __le32 numid; /* control element numeric identification */
- __le32 count; /* number of elem in data array */
- __le32 data[SND_SOC_TPLG_TLV_SIZE];
+ __le32 size; /* in bytes of this structure */
+ __le32 type; /* SNDRV_CTL_TLVT_*, type of TLV */
+ union {
+ __le32 data[SND_SOC_TPLG_TLV_SIZE];
+ struct snd_soc_tplg_tlv_dbscale scale;
+ };
} __attribute__((packed));
/*
@@ -155,9 +170,11 @@ struct snd_soc_tplg_channel {
} __attribute__((packed));
/*
- * Kcontrol Operations IDs
+ * Generic Operations IDs, for binding Kcontrol or Bytes ext ops
+ * Kcontrol ops need get/put/info.
+ * Bytes ext ops need get/put.
*/
-struct snd_soc_tplg_kcontrol_ops_id {
+struct snd_soc_tplg_io_ops {
__le32 get;
__le32 put;
__le32 info;
@@ -171,8 +188,8 @@ struct snd_soc_tplg_ctl_hdr {
__le32 type;
char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
__le32 access;
- struct snd_soc_tplg_kcontrol_ops_id ops;
- __le32 tlv_size; /* non zero means control has TLV data */
+ struct snd_soc_tplg_io_ops ops;
+ struct snd_soc_tplg_ctl_tlv tlv;
} __attribute__((packed));
/*
@@ -222,7 +239,7 @@ struct snd_soc_tplg_stream_config {
/*
* Manifest. List totals for each payload type. Not used in parsing, but will
* be passed to the component driver before any other objects in order for any
- * global componnent resource allocations.
+ * global component resource allocations.
*
* File block representation for manifest :-
* +-----------------------------------+----+
@@ -238,6 +255,7 @@ struct snd_soc_tplg_manifest {
__le32 graph_elems; /* number of graph elements */
__le32 dai_elems; /* number of DAI elements */
__le32 dai_link_elems; /* number of DAI link elements */
+ struct snd_soc_tplg_private priv;
} __attribute__((packed));
/*
@@ -259,7 +277,6 @@ struct snd_soc_tplg_mixer_control {
__le32 invert;
__le32 num_channels;
struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN];
- struct snd_soc_tplg_ctl_tlv tlv;
struct snd_soc_tplg_private priv;
} __attribute__((packed));
@@ -303,6 +320,7 @@ struct snd_soc_tplg_bytes_control {
__le32 mask;
__le32 base;
__le32 num_regs;
+ struct snd_soc_tplg_io_ops ext_ops;
struct snd_soc_tplg_private priv;
} __attribute__((packed));
@@ -347,6 +365,7 @@ struct snd_soc_tplg_dapm_widget {
__le32 reg; /* negative reg = no direct dapm */
__le32 shift; /* bits to shift */
__le32 mask; /* non-shifted mask */
+ __le32 subseq; /* sort within widget type */
__u32 invert; /* invert the power bit */
__u32 ignore_suspend; /* kept enabled over suspend */
__u16 event_flags;
diff --git a/init/main.c b/init/main.c
index c5d5626289ce..56506553d4d8 100644
--- a/init/main.c
+++ b/init/main.c
@@ -656,7 +656,7 @@ asmlinkage __visible void __init start_kernel(void)
key_init();
security_init();
dbg_late_init();
- vfs_caches_init(totalram_pages);
+ vfs_caches_init();
signals_init();
/* rootfs populating might need page-writeback */
page_writeback_init();
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index a24ba9fe5bb8..161a1807e6ef 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -142,7 +142,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
if (!leaf)
return -ENOMEM;
INIT_LIST_HEAD(&leaf->msg_list);
- info->qsize += sizeof(*leaf);
}
leaf->priority = msg->m_type;
rb_link_node(&leaf->rb_node, parent, p);
@@ -187,7 +186,6 @@ try_again:
"lazy leaf delete!\n");
rb_erase(&leaf->rb_node, &info->msg_tree);
if (info->node_cache) {
- info->qsize -= sizeof(*leaf);
kfree(leaf);
} else {
info->node_cache = leaf;
@@ -200,7 +198,6 @@ try_again:
if (list_empty(&leaf->msg_list)) {
rb_erase(&leaf->rb_node, &info->msg_tree);
if (info->node_cache) {
- info->qsize -= sizeof(*leaf);
kfree(leaf);
} else {
info->node_cache = leaf;
@@ -1034,7 +1031,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
/* Save our speculative allocation into the cache */
INIT_LIST_HEAD(&new_leaf->msg_list);
info->node_cache = new_leaf;
- info->qsize += sizeof(*new_leaf);
new_leaf = NULL;
} else {
kfree(new_leaf);
@@ -1142,7 +1138,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
/* Save our speculative allocation into the cache */
INIT_LIST_HEAD(&new_leaf->msg_list);
info->node_cache = new_leaf;
- info->qsize += sizeof(*new_leaf);
} else {
kfree(new_leaf);
}
diff --git a/ipc/sem.c b/ipc/sem.c
index bc3d530cb23e..b471e5a3863d 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head)
}
/*
+ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
+ * are only control barriers.
+ * The code must pair with spin_unlock(&sem->lock) or
+ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
+ *
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+ */
+#define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb()
+
+/*
* Wait until all currently ongoing simple ops have completed.
* Caller must own sem_perm.lock.
* New simple ops cannot start, because simple ops first check
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
sem = sma->sem_base + i;
spin_unlock_wait(&sem->lock);
}
+ ipc_smp_acquire__after_spin_is_unlocked();
}
/*
@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
/* Then check that the global lock is free */
if (!spin_is_locked(&sma->sem_perm.lock)) {
/*
- * The ipc object lock check must be visible on all
- * cores before rechecking the complex count. Otherwise
- * we can race with another thread that does:
+ * We need a memory barrier with acquire semantics,
+ * otherwise we can race with another thread that does:
* complex_count++;
* spin_unlock(sem_perm.lock);
*/
- smp_rmb();
+ ipc_smp_acquire__after_spin_is_unlocked();
/*
* Now repeat the test of complex_count:
@@ -2074,17 +2084,28 @@ void exit_sem(struct task_struct *tsk)
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
struct sem_undo, list_proc);
- if (&un->list_proc == &ulp->list_proc)
- semid = -1;
- else
- semid = un->semid;
+ if (&un->list_proc == &ulp->list_proc) {
+ /*
+ * We must wait for freeary() before freeing this ulp,
+ * in case we raced with the last sem_undo. There is a small
+ * window where we could exit while freeary() has not yet
+ * finished unlocking the sem_undo_list.
+ */
+ spin_unlock_wait(&ulp->lock);
+ rcu_read_unlock();
+ break;
+ }
+ spin_lock(&ulp->lock);
+ semid = un->semid;
+ spin_unlock(&ulp->lock);
+ /* exit_sem raced with IPC_RMID, nothing to do */
if (semid == -1) {
rcu_read_unlock();
- break;
+ continue;
}
- sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
/* exit_sem raced with IPC_RMID, nothing to do */
if (IS_ERR(sma)) {
rcu_read_unlock();
@@ -2112,9 +2133,11 @@ void exit_sem(struct task_struct *tsk)
ipc_assert_locked_object(&sma->sem_perm);
list_del(&un->list_id);
- spin_lock(&ulp->lock);
+ /* we are the last process using this ulp, acquiring ulp->lock
+ * isn't required. Besides that, we are also protected against
+ * IPC_RMID as we hold sma->sem_perm lock now
+ */
list_del_rcu(&un->list_proc);
- spin_unlock(&ulp->lock);
/* perform adjustments registered in un */
for (i = 0; i < sma->sem_nsems; i++) {
diff --git a/ipc/shm.c b/ipc/shm.c
index 06e5cf2fe019..4aef24d91b63 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -545,7 +545,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
if ((shmflg & SHM_NORESERVE) &&
sysctl_overcommit_memory != OVERCOMMIT_NEVER)
acctflag = VM_NORESERVE;
- file = shmem_file_setup(name, size, acctflag);
+ file = shmem_kernel_file_setup(name, size, acctflag);
}
error = PTR_ERR(file);
if (IS_ERR(file))
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 6a374544d495..5644ec5582b9 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -527,18 +527,9 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)
goto out_notify;
}
- /*
- * Some architectures have to walk the irq descriptors to
- * setup the vector space for the cpu which comes online.
- * Prevent irq alloc/free across the bringup.
- */
- irq_lock_sparse();
-
/* Arch-specific enabling code. */
ret = __cpu_up(cpu, idle);
- irq_unlock_sparse();
-
if (ret != 0)
goto out_notify;
BUG_ON(!cpu_online(cpu));
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index ee14e3a35a29..f0acff0f66c9 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1223,7 +1223,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
spin_unlock_irq(&callback_lock);
/* use trialcs->mems_allowed as a temp variable */
- update_nodemasks_hier(cs, &cs->mems_allowed);
+ update_nodemasks_hier(cs, &trialcs->mems_allowed);
done:
return retval;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d3dae3419b99..e6feb5114134 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1868,8 +1868,6 @@ event_sched_in(struct perf_event *event,
perf_pmu_disable(event->pmu);
- event->tstamp_running += tstamp - event->tstamp_stopped;
-
perf_set_shadow_time(event, ctx, tstamp);
perf_log_itrace_start(event);
@@ -1881,6 +1879,8 @@ event_sched_in(struct perf_event *event,
goto out;
}
+ event->tstamp_running += tstamp - event->tstamp_stopped;
+
if (!is_software_event(event))
cpuctx->active_oncpu++;
if (!ctx->nr_active++)
@@ -3958,28 +3958,21 @@ static void perf_event_for_each(struct perf_event *event,
perf_event_for_each_child(sibling, func);
}
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
- struct perf_event_context *ctx = event->ctx;
- int ret = 0, active;
+struct period_event {
+ struct perf_event *event;
u64 value;
+};
- if (!is_sampling_event(event))
- return -EINVAL;
-
- if (copy_from_user(&value, arg, sizeof(value)))
- return -EFAULT;
-
- if (!value)
- return -EINVAL;
+static int __perf_event_period(void *info)
+{
+ struct period_event *pe = info;
+ struct perf_event *event = pe->event;
+ struct perf_event_context *ctx = event->ctx;
+ u64 value = pe->value;
+ bool active;
- raw_spin_lock_irq(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
if (event->attr.freq) {
- if (value > sysctl_perf_event_sample_rate) {
- ret = -EINVAL;
- goto unlock;
- }
-
event->attr.sample_freq = value;
} else {
event->attr.sample_period = value;
@@ -3998,11 +3991,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
event->pmu->start(event, PERF_EF_RELOAD);
perf_pmu_enable(ctx->pmu);
}
+ raw_spin_unlock(&ctx->lock);
-unlock:
+ return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+ struct period_event pe = { .event = event, };
+ struct perf_event_context *ctx = event->ctx;
+ struct task_struct *task;
+ u64 value;
+
+ if (!is_sampling_event(event))
+ return -EINVAL;
+
+ if (copy_from_user(&value, arg, sizeof(value)))
+ return -EFAULT;
+
+ if (!value)
+ return -EINVAL;
+
+ if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+ return -EINVAL;
+
+ task = ctx->task;
+ pe.value = value;
+
+ if (!task) {
+ cpu_function_call(event->cpu, __perf_event_period, &pe);
+ return 0;
+ }
+
+retry:
+ if (!task_function_call(task, __perf_event_period, &pe))
+ return 0;
+
+ raw_spin_lock_irq(&ctx->lock);
+ if (ctx->is_active) {
+ raw_spin_unlock_irq(&ctx->lock);
+ task = ctx->task;
+ goto retry;
+ }
+
+ __perf_event_period(&pe);
raw_spin_unlock_irq(&ctx->lock);
- return ret;
+ return 0;
}
static const struct file_operations perf_fops;
@@ -4740,12 +4775,20 @@ static const struct file_operations perf_fops = {
* to user-space before waking everybody up.
*/
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+ /* only the parent has fasync state */
+ if (event->parent)
+ event = event->parent;
+ return &event->fasync;
+}
+
void perf_event_wakeup(struct perf_event *event)
{
ring_buffer_wakeup(event);
if (event->pending_kill) {
- kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+ kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
event->pending_kill = 0;
}
}
@@ -6124,7 +6167,7 @@ static int __perf_event_overflow(struct perf_event *event,
else
perf_event_output(event, data, regs);
- if (event->fasync && event->pending_kill) {
+ if (*perf_event_fasync(event) && event->pending_kill) {
event->pending_wakeup = 1;
irq_work_queue(&event->pending);
}
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index b2be01b1aa9d..c8aa3f75bc4d 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -559,11 +559,13 @@ static void __rb_free_aux(struct ring_buffer *rb)
rb->aux_priv = NULL;
}
- for (pg = 0; pg < rb->aux_nr_pages; pg++)
- rb_free_aux_page(rb, pg);
+ if (rb->aux_nr_pages) {
+ for (pg = 0; pg < rb->aux_nr_pages; pg++)
+ rb_free_aux_page(rb, pg);
- kfree(rb->aux_pages);
- rb->aux_nr_pages = 0;
+ kfree(rb->aux_pages);
+ rb->aux_nr_pages = 0;
+ }
}
void rb_free_aux(struct ring_buffer *rb)
diff --git a/kernel/fork.c b/kernel/fork.c
index 1bfefc6f96a4..dbd9b8d7b7cc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -287,6 +287,11 @@ static void set_max_threads(unsigned int max_threads_suggested)
max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}
+#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
+/* Initialized by the architecture: */
+int arch_task_struct_size __read_mostly;
+#endif
+
void __init fork_init(void)
{
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
@@ -295,7 +300,7 @@ void __init fork_init(void)
#endif
/* create a slab on which task_structs can be allocated */
task_struct_cachep =
- kmem_cache_create("task_struct", sizeof(struct task_struct),
+ kmem_cache_create("task_struct", arch_task_struct_size,
ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
#endif
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 27f4332c7f84..ae216824e8ca 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -985,6 +985,23 @@ int irq_chip_set_affinity_parent(struct irq_data *data,
}
/**
+ * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
+ * @data: Pointer to interrupt specific data
+ * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
+ *
+ * Conditional, as the underlying parent chip might not implement it.
+ */
+int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
+{
+ data = data->parent_data;
+
+ if (data->chip->irq_set_type)
+ return data->chip->irq_set_type(data, type);
+
+ return -ENOSYS;
+}
+
+/**
* irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
* @data: Pointer to interrupt specific data
*
@@ -997,7 +1014,7 @@ int irq_chip_retrigger_hierarchy(struct irq_data *data)
if (data->chip && data->chip->irq_retrigger)
return data->chip->irq_retrigger(data);
- return -ENOSYS;
+ return 0;
}
/**
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 9065107f083e..7a5237a1bce5 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -75,13 +75,21 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
!desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
#ifdef CONFIG_HARDIRQS_SW_RESEND
/*
- * If the interrupt has a parent irq and runs
- * in the thread context of the parent irq,
- * retrigger the parent.
+ * If the interrupt is running in the thread
+ * context of the parent irq we need to be
+ * careful, because we cannot trigger it
+ * directly.
*/
- if (desc->parent_irq &&
- irq_settings_is_nested_thread(desc))
+ if (irq_settings_is_nested_thread(desc)) {
+ /*
+ * If the parent_irq is valid, we
+ * retrigger the parent, otherwise we
+ * do nothing.
+ */
+ if (!desc->parent_irq)
+ return;
irq = desc->parent_irq;
+ }
/* Set it pending and activate the softirq: */
set_bit(irq, irqs_resend);
tasklet_schedule(&resend_tasklet);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 10e489c448fe..fdea0bee7b5a 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -97,6 +97,7 @@ bool kthread_should_park(void)
{
return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
+EXPORT_SYMBOL_GPL(kthread_should_park);
/**
* kthread_freezable_should_stop - should this freezable kthread return now?
@@ -171,6 +172,7 @@ void kthread_parkme(void)
{
__kthread_parkme(to_kthread(current));
}
+EXPORT_SYMBOL_GPL(kthread_parkme);
static int kthread(void *_create)
{
@@ -411,6 +413,7 @@ void kthread_unpark(struct task_struct *k)
if (kthread)
__kthread_unpark(k, kthread);
}
+EXPORT_SYMBOL_GPL(kthread_unpark);
/**
* kthread_park - park a thread created by kthread_create().
@@ -441,6 +444,7 @@ int kthread_park(struct task_struct *k)
}
return ret;
}
+EXPORT_SYMBOL_GPL(kthread_park);
/**
* kthread_stop - stop a thread created by kthread_create().
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 04ab18151cc8..df19ae4debd0 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -4,6 +4,7 @@
#include <linux/hash.h>
#include <linux/bootmem.h>
+#include <linux/debug_locks.h>
/*
* Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
@@ -286,15 +287,23 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
{
struct __qspinlock *l = (void *)lock;
struct pv_node *node;
+ u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
/*
* We must not unlock if SLOW, because in that case we must first
* unhash. Otherwise it would be possible to have multiple @lock
* entries, which would be BAD.
*/
- if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL))
+ if (likely(lockval == _Q_LOCKED_VAL))
return;
+ if (unlikely(lockval != _Q_SLOW_VAL)) {
+ if (debug_locks_silent)
+ return;
+ WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
+ return;
+ }
+
/*
* Since the above failed to release, this must be the SLOW path.
* Therefore start by looking up the blocked node and unhashing it.
diff --git a/kernel/module.c b/kernel/module.c
index 4d2b82e610e2..b86b7bf1be38 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -602,13 +602,16 @@ const struct kernel_symbol *find_symbol(const char *name,
}
EXPORT_SYMBOL_GPL(find_symbol);
-/* Search for module by name: must hold module_mutex. */
+/*
+ * Search for module by name: must hold module_mutex (or preempt disabled
+ * for read-only access).
+ */
static struct module *find_module_all(const char *name, size_t len,
bool even_unformed)
{
struct module *mod;
- module_assert_mutex();
+ module_assert_mutex_or_preempt();
list_for_each_entry(mod, &modules, list) {
if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
@@ -621,6 +624,7 @@ static struct module *find_module_all(const char *name, size_t len,
struct module *find_module(const char *name)
{
+ module_assert_mutex();
return find_module_all(name, strlen(name), false);
}
EXPORT_SYMBOL_GPL(find_module);
diff --git a/kernel/resource.c b/kernel/resource.c
index 90552aab5f2d..fed052a1bc9f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -504,13 +504,13 @@ int region_is_ram(resource_size_t start, unsigned long size)
{
struct resource *p;
resource_size_t end = start + size - 1;
- int flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
const char *name = "System RAM";
int ret = -1;
read_lock(&resource_lock);
for (p = iomem_resource.child; p ; p = p->sibling) {
- if (end < p->start)
+ if (p->end < start)
continue;
if (p->start <= start && end <= p->end) {
@@ -521,7 +521,7 @@ int region_is_ram(resource_size_t start, unsigned long size)
ret = 1;
break;
}
- if (p->end < start)
+ if (end < p->start)
break; /* not found */
}
read_unlock(&resource_lock);
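The region_is_ram() fix above swaps the two comparisons: while walking the resource list in ascending order, entries that end before the queried range starts must be skipped, and the walk can only stop once an entry starts after the queried range ends. A self-contained sketch of that scan over a sorted list of ranges (illustrative only, simplified return convention):

#include <stdio.h>

struct range { unsigned long start, end; };     /* inclusive, sorted by start */

/* Return 1 if [start, end] lies entirely inside one range, 0 if it overlaps
 * nothing, -1 if it partially overlaps (the ambiguous case). */
static int range_contains(const struct range *tbl, int n,
                          unsigned long start, unsigned long end)
{
        for (int i = 0; i < n; i++) {
                if (tbl[i].end < start)
                        continue;               /* entirely before us: keep going */
                if (tbl[i].start <= start && end <= tbl[i].end)
                        return 1;               /* fully contained */
                if (end < tbl[i].start)
                        break;                  /* entirely after us: sorted, so stop */
                return -1;                      /* partial overlap */
        }
        return 0;
}

int main(void)
{
        struct range ram[] = { { 0x1000, 0x1fff }, { 0x4000, 0x7fff } };

        printf("%d %d %d\n",
               range_contains(ram, 2, 0x4100, 0x4fff),  /* 1 */
               range_contains(ram, 2, 0x2000, 0x2fff),  /* 0 */
               range_contains(ram, 2, 0x1f00, 0x20ff)); /* -1 */
        return 0;
}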
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 65c8f3ebdc3c..d113c3ba8bc4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3683,7 +3683,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
cfs_rq->throttled = 1;
cfs_rq->throttled_clock = rq_clock(rq);
raw_spin_lock(&cfs_b->lock);
- empty = list_empty(&cfs_rq->throttled_list);
+ empty = list_empty(&cfs_b->throttled_cfs_rq);
/*
* Add to the _head_ of the list, so that an already-started
diff --git a/kernel/signal.c b/kernel/signal.c
index 836df8dac6cc..0f6bbbe77b46 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2748,12 +2748,15 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
* Other callers might not initialize the si_lsb field,
* so check explicitly for the right codes here.
*/
- if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+ if (from->si_signo == SIGBUS &&
+ (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
#ifdef SEGV_BNDERR
- err |= __put_user(from->si_lower, &to->si_lower);
- err |= __put_user(from->si_upper, &to->si_upper);
+ if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
+ err |= __put_user(from->si_lower, &to->si_lower);
+ err |= __put_user(from->si_upper, &to->si_upper);
+ }
#endif
break;
case __SI_CHLD:
@@ -3017,7 +3020,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
int, sig,
struct compat_siginfo __user *, uinfo)
{
- siginfo_t info;
+ siginfo_t info = {};
int ret = copy_siginfo_from_user32(&info, uinfo);
if (unlikely(ret))
return ret;
@@ -3061,7 +3064,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
int, sig,
struct compat_siginfo __user *, uinfo)
{
- siginfo_t info;
+ siginfo_t info = {};
if (copy_siginfo_from_user32(&info, uinfo))
return -EFAULT;
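The two `siginfo_t info = {};` changes above make every byte of the structure well defined before it is handed on; copy_siginfo_from_user32() only fills the fields relevant to the particular signal, so an uninitialized stack copy could otherwise carry stale data into the untouched members. The general pattern, as a plain C sketch with hypothetical names:

#include <stdio.h>

struct fake_siginfo {
        int signo;
        int code;
        long fields[4];         /* union-like area; only some members get filled */
};

/* Pretend decoder: fills only the fields this particular signal uses. */
static void decode_partial(struct fake_siginfo *info, int signo)
{
        info->signo = signo;
        info->code  = 1;
        info->fields[0] = 0xdead;       /* fields[1..3] intentionally untouched */
}

int main(void)
{
        /* Zero-init so the untouched members read back as zero instead of
         * whatever happened to be on the stack (the patch uses the GNU
         * `= {}` spelling for the same effect). */
        struct fake_siginfo info = {0};

        decode_partial(&info, 7);
        printf("%d %d %ld %ld\n", info.signo, info.code,
               info.fields[0], info.fields[3]);
        return 0;
}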
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 52b9e199b5ac..f6aae7977824 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -839,7 +839,6 @@ out:
raw_spin_unlock(&tick_broadcast_lock);
return ret;
}
-EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);
/*
* Reset the one shot broadcast for a cpu
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 55e13efff1ab..f8bf47571dda 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -363,6 +363,7 @@ int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
return __tick_broadcast_oneshot_control(state);
}
+EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);
#ifdef CONFIG_HOTPLUG_CPU
/*
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 5e097fa9faf7..84190f02b521 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -807,8 +807,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
spin_unlock(&base->lock);
base = new_base;
spin_lock(&base->lock);
- timer->flags &= ~TIMER_BASEMASK;
- timer->flags |= base->cpu;
+ WRITE_ONCE(timer->flags,
+ (timer->flags & ~TIMER_BASEMASK) | base->cpu);
}
}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 02bece4a99ea..eb11011b5292 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -98,6 +98,13 @@ struct ftrace_pid {
struct pid *pid;
};
+static bool ftrace_pids_enabled(void)
+{
+ return !list_empty(&ftrace_pids);
+}
+
+static void ftrace_update_trampoline(struct ftrace_ops *ops);
+
/*
* ftrace_disabled is set when an anomaly is discovered.
* ftrace_disabled is much stronger than ftrace_enabled.
@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock);
static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;
@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
if (!test_tsk_trace_trace(current))
return;
- ftrace_pid_function(ip, parent_ip, op, regs);
-}
-
-static void set_ftrace_pid_function(ftrace_func_t func)
-{
- /* do not set ftrace_pid_function to itself! */
- if (func != ftrace_pid_func)
- ftrace_pid_function = func;
+ op->saved_func(ip, parent_ip, op, regs);
}
/**
@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
void clear_ftrace_function(void)
{
ftrace_trace_function = ftrace_stub;
- ftrace_pid_function = ftrace_stub;
}
static void control_ops_disable_all(struct ftrace_ops *ops)
@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
} else
add_ftrace_ops(&ftrace_ops_list, ops);
+ /* Always save the function, and reset at unregistering */
+ ops->saved_func = ops->func;
+
+ if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
+ ops->func = ftrace_pid_func;
+
ftrace_update_trampoline(ops);
if (ftrace_enabled)
@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
if (ftrace_enabled)
update_ftrace_function();
+ ops->func = ops->saved_func;
+
return 0;
}
static void ftrace_update_pid_func(void)
{
+ bool enabled = ftrace_pids_enabled();
+ struct ftrace_ops *op;
+
/* Only do something if we are tracing something */
if (ftrace_trace_function == ftrace_stub)
return;
+ do_for_each_ftrace_op(op, ftrace_ops_list) {
+ if (op->flags & FTRACE_OPS_FL_PID) {
+ op->func = enabled ? ftrace_pid_func :
+ op->saved_func;
+ ftrace_update_trampoline(op);
+ }
+ } while_for_each_ftrace_op(op);
+
update_ftrace_function();
}
@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = {
.local_hash.filter_hash = EMPTY_HASH,
INIT_OPS_HASH(global_ops)
.flags = FTRACE_OPS_FL_RECURSION_SAFE |
- FTRACE_OPS_FL_INITIALIZED,
+ FTRACE_OPS_FL_INITIALIZED |
+ FTRACE_OPS_FL_PID,
};
/*
@@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
static struct ftrace_ops global_ops = {
.func = ftrace_stub,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE |
+ FTRACE_OPS_FL_INITIALIZED |
+ FTRACE_OPS_FL_PID,
};
static int __init ftrace_nodyn_init(void)
@@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
if (WARN_ON(tr->ops->func != ftrace_stub))
printk("ftrace ops had %pS for function\n",
tr->ops->func);
- /* Only the top level instance does pid tracing */
- if (!list_empty(&ftrace_pids)) {
- set_ftrace_pid_function(func);
- func = ftrace_pid_func;
- }
}
tr->ops->func = func;
tr->ops->private = tr;
@@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
{
mutex_lock(&ftrace_lock);
- if (list_empty(&ftrace_pids) && (!*pos))
+ if (!ftrace_pids_enabled() && (!*pos))
return (void *) 1;
return seq_list_start(&ftrace_pids, *pos);
@@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = {
.func = ftrace_stub,
.flags = FTRACE_OPS_FL_RECURSION_SAFE |
FTRACE_OPS_FL_INITIALIZED |
+ FTRACE_OPS_FL_PID |
FTRACE_OPS_FL_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
.trampoline = FTRACE_GRAPH_TRAMP_ADDR,
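The ftrace changes above keep the originally registered handler in ops->saved_func and swap ops->func between it and the pid-filtering wrapper as pid tracing is switched on and off. Stripped of the kernel machinery, the flip looks roughly like this (hypothetical names, illustrative only):

#include <stdbool.h>
#include <stdio.h>

struct tracer_ops;
typedef void (*trace_fn)(struct tracer_ops *ops, const char *what);

struct tracer_ops {
        trace_fn func;          /* what callers invoke */
        trace_fn saved_func;    /* the handler registered originally */
};

static void real_handler(struct tracer_ops *ops, const char *what)
{
        (void)ops;
        printf("trace: %s\n", what);
}

/* Filtering wrapper: checks a condition, then forwards to the saved handler. */
static bool pid_filter_pass = true;

static void pid_filter_handler(struct tracer_ops *ops, const char *what)
{
        if (!pid_filter_pass)
                return;
        ops->saved_func(ops, what);
}

static void set_pid_filtering(struct tracer_ops *ops, bool enabled)
{
        ops->func = enabled ? pid_filter_handler : ops->saved_func;
}

int main(void)
{
        struct tracer_ops ops = { .func = real_handler, .saved_func = real_handler };

        ops.func(&ops, "direct");
        set_pid_filtering(&ops, true);
        ops.func(&ops, "filtered");
        set_pid_filtering(&ops, false);
        ops.func(&ops, "direct again");
        return 0;
}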
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f060716b02ae..74bde81601a9 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -444,6 +444,7 @@ enum {
TRACE_CONTROL_BIT,
+ TRACE_BRANCH_BIT,
/*
* Abuse of the trace_recursion.
* As we need a way to maintain state if we are tracing the function
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index a87b43f49eb4..e2e12ad3186f 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -36,9 +36,12 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
struct trace_branch *entry;
struct ring_buffer *buffer;
unsigned long flags;
- int cpu, pc;
+ int pc;
const char *p;
+ if (current->trace_recursion & TRACE_BRANCH_BIT)
+ return;
+
/*
* I would love to save just the ftrace_likely_data pointer, but
* this code can also be used by modules. Ugly things can happen
@@ -49,10 +52,10 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
if (unlikely(!tr))
return;
- local_irq_save(flags);
- cpu = raw_smp_processor_id();
- data = per_cpu_ptr(tr->trace_buffer.data, cpu);
- if (atomic_inc_return(&data->disabled) != 1)
+ raw_local_irq_save(flags);
+ current->trace_recursion |= TRACE_BRANCH_BIT;
+ data = this_cpu_ptr(tr->trace_buffer.data);
+ if (atomic_read(&data->disabled))
goto out;
pc = preempt_count();
@@ -81,8 +84,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
__buffer_unlock_commit(buffer, event);
out:
- atomic_dec(&data->disabled);
- local_irq_restore(flags);
+ current->trace_recursion &= ~TRACE_BRANCH_BIT;
+ raw_local_irq_restore(flags);
}
static inline
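The trace_branch change replaces the per-CPU disable counter with a per-task recursion bit: before recording a branch event, the probe checks whether this task is already inside it, presumably because the recording path can itself hit profiled likely()/unlikely() branches and re-enter the probe. A thread-local sketch of the same guard (illustrative, not the kernel's trace_recursion mechanism):

#include <stdio.h>

#define BRANCH_BIT (1u << 0)

static _Thread_local unsigned int trace_recursion;

static void record_branch_event(const char *site);

static void probe_branch(const char *site)
{
        if (trace_recursion & BRANCH_BIT)
                return;                 /* already inside the probe: bail out */

        trace_recursion |= BRANCH_BIT;
        record_branch_event(site);      /* may itself hit instrumented branches */
        trace_recursion &= ~BRANCH_BIT;
}

static void record_branch_event(const char *site)
{
        /* Pretend this path is itself instrumented and re-enters the probe;
         * the bit above turns the re-entry into a no-op. */
        probe_branch("inside-recording");
        printf("branch event at %s\n", site);
}

int main(void)
{
        probe_branch("main");
        return 0;
}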
diff --git a/lib/decompress.c b/lib/decompress.c
index 528ff932d8e4..62696dff5730 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -59,8 +59,11 @@ decompress_fn __init decompress_method(const unsigned char *inbuf, long len,
{
const struct compress_format *cf;
- if (len < 2)
+ if (len < 2) {
+ if (name)
+ *name = NULL;
return NULL; /* Need at least this much... */
+ }
pr_debug("Compressed data magic: %#.2x %#.2x\n", inbuf[0], inbuf[1]);
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index ae4b65e17e64..dace71fe41f7 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -574,6 +574,9 @@ void debug_dma_assert_idle(struct page *page)
unsigned long flags;
phys_addr_t cln;
+ if (dma_debug_disabled())
+ return;
+
if (!page)
return;
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 7ea09699855d..8d74c20d8595 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -11,6 +11,7 @@
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/export.h>
+#include <asm/unaligned.h>
const char hex_asc[] = "0123456789abcdef";
EXPORT_SYMBOL(hex_asc);
@@ -139,7 +140,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
for (j = 0; j < ngroups; j++) {
ret = snprintf(linebuf + lx, linebuflen - lx,
"%s%16.16llx", j ? " " : "",
- (unsigned long long)*(ptr8 + j));
+ get_unaligned(ptr8 + j));
if (ret >= linebuflen - lx)
goto overflow1;
lx += ret;
@@ -150,7 +151,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
for (j = 0; j < ngroups; j++) {
ret = snprintf(linebuf + lx, linebuflen - lx,
"%s%8.8x", j ? " " : "",
- *(ptr4 + j));
+ get_unaligned(ptr4 + j));
if (ret >= linebuflen - lx)
goto overflow1;
lx += ret;
@@ -161,7 +162,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
for (j = 0; j < ngroups; j++) {
ret = snprintf(linebuf + lx, linebuflen - lx,
"%s%4.4x", j ? " " : "",
- *(ptr2 + j));
+ get_unaligned(ptr2 + j));
if (ret >= linebuflen - lx)
goto overflow1;
lx += ret;
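hex_dump_to_buffer() receives an arbitrary void * from its callers, so the 2/4/8-byte group reads above may land on unaligned addresses; get_unaligned() performs the access in a way that is safe on strict-alignment architectures. In portable C the same effect is usually spelled with memcpy, which compilers fold into a single load where that is legal:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read a 32-bit value from a possibly unaligned address.  memcpy keeps the
 * access legal as far as the language is concerned; on x86 it compiles to a
 * single mov, on strict-alignment CPUs to whatever access is actually safe. */
static uint32_t get_unaligned_u32(const void *p)
{
        uint32_t v;

        memcpy(&v, p, sizeof(v));
        return v;
}

int main(void)
{
        unsigned char buf[8] = { 0, 0x44, 0x33, 0x22, 0x11, 0, 0, 0 };

        /* buf + 1 is almost certainly not 4-byte aligned. */
        printf("0x%08x\n", get_unaligned_u32(buf + 1));   /* 0x11223344 on LE */
        return 0;
}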
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
index df30632f0bef..ff19f66d3f7f 100644
--- a/lib/iommu-common.c
+++ b/lib/iommu-common.c
@@ -119,7 +119,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
unsigned long align_mask = 0;
if (align_order > 0)
- align_mask = 0xffffffffffffffffl >> (64 - align_order);
+ align_mask = ~0ul >> (BITS_PER_LONG - align_order);
/* Sanity check */
if (unlikely(npages == 0)) {
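The one-line fix above matters on 32-bit: with a 32-bit unsigned long, shifting by (64 - align_order) is a shift by 32 or more, which is undefined, and the 64-bit constant is truncated anyway. Writing the mask in terms of the type's own width avoids both problems. A small sketch of the portable form, using fixed-width types so the 32-bit case can be shown on any host:

#include <stdint.h>
#include <stdio.h>

/* Low-order mask of 'order' bits for a 32-bit word, written the portable
 * way: shift the all-ones value of the same type right by (width - order).
 * The broken form, (uint32_t)0xffffffffffffffffULL >> (64 - order), both
 * truncates the constant and shifts by 32 or more bits, which is undefined. */
static uint32_t low_mask32(unsigned int order)
{
        if (order == 0)
                return 0;
        return ~(uint32_t)0 >> (32 - order);
}

static uint64_t low_mask64(unsigned int order)
{
        if (order == 0)
                return 0;
        return ~(uint64_t)0 >> (64 - order);
}

int main(void)
{
        printf("order 12: 0x%08x / 0x%016llx\n",
               low_mask32(12), (unsigned long long)low_mask64(12));
        printf("order 32: 0x%08x\n", low_mask32(32));   /* full 32-bit mask */
        return 0;
}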
diff --git a/lib/kobject.c b/lib/kobject.c
index 2e3bd01964a9..3e3a5c3cb330 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -337,8 +337,9 @@ error:
}
EXPORT_SYMBOL(kobject_init);
-static int kobject_add_varg(struct kobject *kobj, struct kobject *parent,
- const char *fmt, va_list vargs)
+static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
+ struct kobject *parent,
+ const char *fmt, va_list vargs)
{
int retval;
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index a60a6d335a91..cc0c69710dcf 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -610,6 +610,8 @@ next:
iter->skip = 0;
}
+ iter->p = NULL;
+
/* Ensure we see any new tables. */
smp_rmb();
@@ -620,8 +622,6 @@ next:
return ERR_PTR(-EAGAIN);
}
- iter->p = NULL;
-
return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
diff --git a/mm/cma.h b/mm/cma.h
index 1132d733556d..17c75a4246c8 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -16,7 +16,7 @@ struct cma {
extern struct cma cma_areas[MAX_CMA_AREAS];
extern unsigned cma_area_count;
-static unsigned long cma_bitmap_maxno(struct cma *cma)
+static inline unsigned long cma_bitmap_maxno(struct cma *cma)
{
return cma->count >> cma->order_per_bit;
}
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index 7621ee34daa0..f8e4b60db167 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -39,7 +39,7 @@ static int cma_used_get(void *data, u64 *val)
mutex_lock(&cma->lock);
/* pages counter is smaller than sizeof(int) */
- used = bitmap_weight(cma->bitmap, (int)cma->count);
+ used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
mutex_unlock(&cma->lock);
*val = (u64)used << cma->order_per_bit;
@@ -52,13 +52,14 @@ static int cma_maxchunk_get(void *data, u64 *val)
struct cma *cma = data;
unsigned long maxchunk = 0;
unsigned long start, end = 0;
+ unsigned long bitmap_maxno = cma_bitmap_maxno(cma);
mutex_lock(&cma->lock);
for (;;) {
- start = find_next_zero_bit(cma->bitmap, cma->count, end);
+ start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
if (start >= cma->count)
break;
- end = find_next_bit(cma->bitmap, cma->count, start);
+ end = find_next_bit(cma->bitmap, bitmap_maxno, start);
maxchunk = max(end - start, maxchunk);
}
mutex_unlock(&cma->lock);
@@ -170,10 +171,10 @@ static void cma_debugfs_add_one(struct cma *cma, int idx)
tmp = debugfs_create_dir(name, cma_debugfs_root);
- debugfs_create_file("alloc", S_IWUSR, cma_debugfs_root, cma,
+ debugfs_create_file("alloc", S_IWUSR, tmp, cma,
&cma_alloc_fops);
- debugfs_create_file("free", S_IWUSR, cma_debugfs_root, cma,
+ debugfs_create_file("free", S_IWUSR, tmp, cma,
&cma_free_fops);
debugfs_create_file("base_pfn", S_IRUGO, tmp,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c107094f79ba..097c7a4bfbd9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1676,12 +1676,7 @@ static void __split_huge_page_refcount(struct page *page,
/* after clearing PageTail the gup refcount can be released */
smp_mb__after_atomic();
- /*
- * retain hwpoison flag of the poisoned tail page:
- * fix for the unsuitable process killed on Guest Machine(KVM)
- * by the memory-failure.
- */
- page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
+ page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
page_tail->flags |= (page->flags &
((1L << PG_referenced) |
(1L << PG_swapbacked) |
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 6c513a63ea84..7b28e9cdf1c7 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -2,7 +2,7 @@
* This file contains shadow memory manipulation code.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
*
* Some of code borrowed from https://github.com/xairy/linux by
* Andrey Konovalov <adech.fo@gmail.com>
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 680ceedf810a..e07c94fbd0ac 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -2,7 +2,7 @@
* This file contains error reporting code.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
*
* Some of code borrowed from https://github.com/xairy/linux by
* Andrey Konovalov <adech.fo@gmail.com>
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c53543d89282..1f4446a90cef 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -909,6 +909,18 @@ int get_hwpoison_page(struct page *page)
* directly for tail pages.
*/
if (PageTransHuge(head)) {
+ /*
+ * Non anonymous thp exists only in allocation/free time. We
+ * can't handle such a case correctly, so let's give it up.
+ * This should be better than triggering BUG_ON when kernel
+ * tries to touch the "partially handled" page.
+ */
+ if (!PageAnon(head)) {
+ pr_err("MCE: %#lx: non anonymous thp\n",
+ page_to_pfn(page));
+ return 0;
+ }
+
if (get_page_unless_zero(head)) {
if (PageTail(page))
get_page(page);
@@ -1134,17 +1146,11 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
}
if (!PageHuge(p) && PageTransHuge(hpage)) {
- if (!PageAnon(hpage)) {
- pr_err("MCE: %#lx: non anonymous thp\n", pfn);
- if (TestClearPageHWPoison(p))
- atomic_long_sub(nr_pages, &num_poisoned_pages);
- put_page(p);
- if (p != hpage)
- put_page(hpage);
- return -EBUSY;
- }
- if (unlikely(split_huge_page(hpage))) {
- pr_err("MCE: %#lx: thp split failed\n", pfn);
+ if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
+ if (!PageAnon(hpage))
+ pr_err("MCE: %#lx: non anonymous thp\n", pfn);
+ else
+ pr_err("MCE: %#lx: thp split failed\n", pfn);
if (TestClearPageHWPoison(p))
atomic_long_sub(nr_pages, &num_poisoned_pages);
put_page(p);
@@ -1209,9 +1215,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
if (!PageHWPoison(p)) {
printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
atomic_long_sub(nr_pages, &num_poisoned_pages);
+ unlock_page(hpage);
put_page(hpage);
- res = 0;
- goto out;
+ return 0;
}
if (hwpoison_filter(p)) {
if (TestClearPageHWPoison(p))
@@ -1535,6 +1541,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
*/
ret = __get_any_page(page, pfn, 0);
if (!PageLRU(page)) {
+ /* Drop page reference which is from __get_any_page() */
+ put_page(page);
pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
pfn, page->flags);
return -EIO;
@@ -1564,13 +1572,12 @@ static int soft_offline_huge_page(struct page *page, int flags)
unlock_page(hpage);
ret = isolate_huge_page(hpage, &pagelist);
- if (ret) {
- /*
- * get_any_page() and isolate_huge_page() takes a refcount each,
- * so need to drop one here.
- */
- put_page(hpage);
- } else {
+ /*
+ * get_any_page() and isolate_huge_page() takes a refcount each,
+ * so need to drop one here.
+ */
+ put_page(hpage);
+ if (!ret) {
pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
return -EBUSY;
}
@@ -1656,6 +1663,8 @@ static int __soft_offline_page(struct page *page, int flags)
inc_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
list_add(&page->lru, &pagelist);
+ if (!TestSetPageHWPoison(page))
+ atomic_long_inc(&num_poisoned_pages);
ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
MIGRATE_SYNC, MR_MEMORY_FAILURE);
if (ret) {
@@ -1670,9 +1679,8 @@ static int __soft_offline_page(struct page *page, int flags)
pfn, ret, page->flags);
if (ret > 0)
ret = -EIO;
- } else {
- SetPageHWPoison(page);
- atomic_long_inc(&num_poisoned_pages);
+ if (TestClearPageHWPoison(page))
+ atomic_long_dec(&num_poisoned_pages);
}
} else {
pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 26fbba7d888f..6da82bcb0a8b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -446,7 +446,7 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
int nr_pages = PAGES_PER_SECTION;
int nid = pgdat->node_id;
int zone_type;
- unsigned long flags;
+ unsigned long flags, pfn;
int ret;
zone_type = zone - pgdat->node_zones;
@@ -461,6 +461,14 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
pgdat_resize_unlock(zone->zone_pgdat, &flags);
memmap_init_zone(nr_pages, nid, zone_type,
phys_start_pfn, MEMMAP_HOTPLUG);
+
+ /* online_page_range is called later and expects pages reserved */
+ for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) {
+ if (!pfn_valid(pfn))
+ continue;
+
+ SetPageReserved(pfn_to_page(pfn));
+ }
return 0;
}
@@ -1269,6 +1277,7 @@ int __ref add_memory(int nid, u64 start, u64 size)
/* create new memmap entry */
firmware_map_add_hotplug(start, start + size, "System RAM");
+ memblock_add_node(start, size, nid);
goto out;
@@ -2005,6 +2014,8 @@ void __ref remove_memory(int nid, u64 start, u64 size)
/* remove memmap entry */
firmware_map_remove(start, start + size, "System RAM");
+ memblock_free(start, size);
+ memblock_remove(start, size);
arch_remove_memory(start, size);
diff --git a/mm/migrate.c b/mm/migrate.c
index ee401e4e5ef1..eb4267107d1f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -880,7 +880,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
/* Establish migration ptes or remove ptes */
if (page_mapped(page)) {
try_to_unmap(page,
- TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+ TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
+ TTU_IGNORE_HWPOISON);
page_was_mapped = 1;
}
@@ -950,7 +951,10 @@ out:
list_del(&page->lru);
dec_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
- if (reason != MR_MEMORY_FAILURE)
+ /* Soft-offlined page shouldn't go through lru cache list */
+ if (reason == MR_MEMORY_FAILURE)
+ put_page(page);
+ else
putback_lru_page(page);
}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 22cddd3e5de8..5cccc127ef81 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2063,10 +2063,10 @@ static struct notifier_block ratelimit_nb = {
*/
void __init page_writeback_init(void)
{
+ BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
+
writeback_set_ratelimit();
register_cpu_notifier(&ratelimit_nb);
-
- BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
}
/**
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 506eac8b38af..5b5240b7f642 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -18,7 +18,6 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
-#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
@@ -246,9 +245,7 @@ static inline void reset_deferred_meminit(pg_data_t *pgdat)
/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
- int nid = early_pfn_to_nid(pfn);
-
- if (pfn >= NODE_DATA(nid)->first_deferred_pfn)
+ if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
return true;
return false;
@@ -983,21 +980,21 @@ static void __init __free_pages_boot_core(struct page *page,
#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
-/* Only safe to use early in boot when initialisation is single-threaded */
+
static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
int __meminit early_pfn_to_nid(unsigned long pfn)
{
+ static DEFINE_SPINLOCK(early_pfn_lock);
int nid;
- /* The system will behave unpredictably otherwise */
- BUG_ON(system_state != SYSTEM_BOOTING);
-
+ spin_lock(&early_pfn_lock);
nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
- if (nid >= 0)
- return nid;
- /* just returns 0 */
- return 0;
+ if (nid < 0)
+ nid = 0;
+ spin_unlock(&early_pfn_lock);
+
+ return nid;
}
#endif
@@ -1062,7 +1059,15 @@ static void __init deferred_free_range(struct page *page,
__free_pages_boot_core(page, pfn, 0);
}
-static __initdata DECLARE_RWSEM(pgdat_init_rwsem);
+/* Completion tracking for deferred_init_memmap() threads */
+static atomic_t pgdat_init_n_undone __initdata;
+static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
+
+static inline void __init pgdat_init_report_one_done(void)
+{
+ if (atomic_dec_and_test(&pgdat_init_n_undone))
+ complete(&pgdat_init_all_done_comp);
+}
/* Initialise remaining memory on a node */
static int __init deferred_init_memmap(void *data)
@@ -1079,7 +1084,7 @@ static int __init deferred_init_memmap(void *data)
const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
if (first_init_pfn == ULONG_MAX) {
- up_read(&pgdat_init_rwsem);
+ pgdat_init_report_one_done();
return 0;
}
@@ -1179,7 +1184,8 @@ free_range:
pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
jiffies_to_msecs(jiffies - start));
- up_read(&pgdat_init_rwsem);
+
+ pgdat_init_report_one_done();
return 0;
}
@@ -1187,14 +1193,17 @@ void __init page_alloc_init_late(void)
{
int nid;
+ /* There will be num_node_state(N_MEMORY) threads */
+ atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
for_each_node_state(nid, N_MEMORY) {
- down_read(&pgdat_init_rwsem);
kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
}
/* Block until all are initialised */
- down_write(&pgdat_init_rwsem);
- up_write(&pgdat_init_rwsem);
+ wait_for_completion(&pgdat_init_all_done_comp);
+
+ /* Reinit limits that are based on free pages after the kernel is up */
+ files_maxfiles_init();
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
@@ -1287,6 +1296,10 @@ static inline int check_new_page(struct page *page)
bad_reason = "non-NULL mapping";
if (unlikely(atomic_read(&page->_count) != 0))
bad_reason = "nonzero _count";
+ if (unlikely(page->flags & __PG_HWPOISON)) {
+ bad_reason = "HWPoisoned (hardware-corrupted)";
+ bad_flags = __PG_HWPOISON;
+ }
if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
@@ -1330,12 +1343,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
set_page_owner(page, order, gfp_flags);
/*
- * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
+ * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
* allocate the page. The expectation is that the caller is taking
* steps that will free more memory. The caller should avoid the page
* being used for !PFMEMALLOC purposes.
*/
- page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
+ if (alloc_flags & ALLOC_NO_WATERMARKS)
+ set_page_pfmemalloc(page);
+ else
+ clear_page_pfmemalloc(page);
return 0;
}
@@ -1950,6 +1966,7 @@ void free_hot_cold_page_list(struct list_head *list, bool cold)
void split_page(struct page *page, unsigned int order)
{
int i;
+ gfp_t gfp_mask;
VM_BUG_ON_PAGE(PageCompound(page), page);
VM_BUG_ON_PAGE(!page_count(page), page);
@@ -1963,10 +1980,11 @@ void split_page(struct page *page, unsigned int order)
split_page(virt_to_page(page[0].shadow), order);
#endif
- set_page_owner(page, 0, 0);
+ gfp_mask = get_page_owner_gfp(page);
+ set_page_owner(page, 0, gfp_mask);
for (i = 1; i < (1 << order); i++) {
set_page_refcounted(page + i);
- set_page_owner(page + i, 0, 0);
+ set_page_owner(page + i, 0, gfp_mask);
}
}
EXPORT_SYMBOL_GPL(split_page);
@@ -1996,6 +2014,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
zone->free_area[order].nr_free--;
rmv_page_order(page);
+ set_page_owner(page, order, __GFP_MOVABLE);
+
/* Set the pageblock if the isolated page is at least a pageblock */
if (order >= pageblock_order - 1) {
struct page *endpage = page + (1 << order) - 1;
@@ -2007,7 +2027,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
}
}
- set_page_owner(page, order, 0);
+
return 1UL << order;
}
@@ -3328,7 +3348,7 @@ refill:
atomic_add(size - 1, &page->_count);
/* reset page count bias and offset to start of new frag */
- nc->pfmemalloc = page->pfmemalloc;
+ nc->pfmemalloc = page_is_pfmemalloc(page);
nc->pagecnt_bias = size;
nc->offset = size;
}
@@ -5043,6 +5063,10 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
{
unsigned long zone_start_pfn, zone_end_pfn;
+ /* When hotadd a new node, the node should be empty */
+ if (!node_start_pfn && !node_end_pfn)
+ return 0;
+
/* Get the start and end of the zone */
zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
@@ -5106,6 +5130,10 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
unsigned long zone_start_pfn, zone_end_pfn;
+ /* When hotadd a new node, the node should be empty */
+ if (!node_start_pfn && !node_end_pfn)
+ return 0;
+
zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
diff --git a/mm/page_owner.c b/mm/page_owner.c
index bd5f842b56d2..983c3a10fa07 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -76,6 +76,13 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}
+gfp_t __get_page_owner_gfp(struct page *page)
+{
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ return page_ext->gfp_mask;
+}
+
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
struct page *page, struct page_ext *page_ext)
diff --git a/mm/shmem.c b/mm/shmem.c
index 4caf8ed24d65..dbe0c1e8349c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3363,8 +3363,8 @@ put_path:
* shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
* kernel internal. There will be NO LSM permission checks against the
* underlying inode. So users of this interface must do LSM checks at a
- * higher layer. The one user is the big_key implementation. LSM checks
- * are provided at the key level rather than the inode level.
+ * higher layer. The users are the big_key and shm implementations. LSM
+ * checks are provided at the key or shm level rather than the inode.
* @name: name for dentry (to be seen in /proc/<pid>/maps
* @size: size to be set for the file
* @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
diff --git a/mm/slab.c b/mm/slab.c
index 200e22412a16..bbd0b47dc6a9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1603,7 +1603,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
}
/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
- if (unlikely(page->pfmemalloc))
+ if (page_is_pfmemalloc(page))
pfmemalloc_active = true;
nr_pages = (1 << cachep->gfporder);
@@ -1614,7 +1614,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
add_zone_page_state(page_zone(page),
NR_SLAB_UNRECLAIMABLE, nr_pages);
__SetPageSlab(page);
- if (page->pfmemalloc)
+ if (page_is_pfmemalloc(page))
SetPageSlabPfmemalloc(page);
if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 3e5f8f29c286..86831105a09f 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -37,8 +37,7 @@ struct kmem_cache *kmem_cache;
SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
SLAB_FAILSLAB)
-#define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
- SLAB_CACHE_DMA | SLAB_NOTRACK)
+#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | SLAB_NOTRACK)
/*
* Merge control. If this is set then no merging of slab caches will occur.
diff --git a/mm/slub.c b/mm/slub.c
index 816df0016555..f68c0e50f3c0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1427,7 +1427,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
inc_slabs_node(s, page_to_nid(page), page->objects);
page->slab_cache = s;
__SetPageSlab(page);
- if (page->pfmemalloc)
+ if (page_is_pfmemalloc(page))
SetPageSlabPfmemalloc(page);
start = page_address(page);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e61445dce04e..8286938c70de 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -973,22 +973,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* caller can stall after page list has been processed.
*
* 2) Global or new memcg reclaim encounters a page that is
- * not marked for immediate reclaim or the caller does not
- * have __GFP_IO. In this case mark the page for immediate
+ * not marked for immediate reclaim, or the caller does not
+ * have __GFP_FS (or __GFP_IO if it's simply going to swap,
+ * not to fs). In this case mark the page for immediate
* reclaim and continue scanning.
*
- * __GFP_IO is checked because a loop driver thread might
+ * Require may_enter_fs because we would wait on fs, which
+ * may not have submitted IO yet. And the loop driver might
* enter reclaim, and deadlock if it waits on a page for
* which it is needed to do the write (loop masks off
* __GFP_IO|__GFP_FS for this reason); but more thought
* would probably show more reasons.
*
- * Don't require __GFP_FS, since we're not going into the
- * FS, just waiting on its writeback completion. Worryingly,
- * ext4 gfs2 and xfs allocate pages with
- * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
- * may_enter_fs here is liable to OOM on them.
- *
* 3) Legacy memcg encounters a page that is not already marked
* PageReclaim. memcg does not have any dirty pages
* throttling so we could easily OOM just because too many
@@ -1005,7 +1001,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
/* Case 2 above */
} else if (sane_reclaim(sc) ||
- !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
+ !PageReclaim(page) || !may_enter_fs) {
/*
* This is slightly racy - end_page_writeback()
* might have just cleared PageReclaim, then
diff --git a/net/9p/client.c b/net/9p/client.c
index 498454b3c06c..ea79ee9a7348 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1541,6 +1541,7 @@ p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
struct p9_client *clnt = fid->clnt;
struct p9_req_t *req;
int total = 0;
+ *err = 0;
p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
fid->fid, (unsigned long long) offset, (int)iov_iter_count(to));
@@ -1620,6 +1621,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
struct p9_client *clnt = fid->clnt;
struct p9_req_t *req;
int total = 0;
+ *err = 0;
p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
fid->fid, (unsigned long long) offset,
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 9dd49ca67dbc..6e70ddb158b4 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -704,6 +704,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
mutex_unlock(&virtio_9p_lock);
+ vdev->config->reset(vdev);
vdev->config->del_vqs(vdev);
sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 1997538a5d23..3b78e8473a01 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -264,6 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
{
ax25_clear_queues(ax25);
+ ax25_stop_heartbeat(ax25);
ax25_stop_t1timer(ax25);
ax25_stop_t2timer(ax25);
ax25_stop_t3timer(ax25);
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index fb54e6aed096..6d0b471eede8 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1138,6 +1138,9 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
* @bat_priv: the bat priv with all the soft interface information
* @skb: packet to check
* @hdr_size: size of the encapsulation header
+ *
+ * Returns true if the packet was snooped and consumed by DAT. False if the
+ * packet has to be delivered to the interface
*/
bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
struct sk_buff *skb, int hdr_size)
@@ -1145,7 +1148,7 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
uint16_t type;
__be32 ip_src, ip_dst;
uint8_t *hw_src, *hw_dst;
- bool ret = false;
+ bool dropped = false;
unsigned short vid;
if (!atomic_read(&bat_priv->distributed_arp_table))
@@ -1174,12 +1177,17 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
/* if this REPLY is directed to a client of mine, let's deliver the
* packet to the interface
*/
- ret = !batadv_is_my_client(bat_priv, hw_dst, vid);
+ dropped = !batadv_is_my_client(bat_priv, hw_dst, vid);
+
+ /* if this REPLY is sent on behalf of a client of mine, let's drop the
+ * packet because the client will reply by itself
+ */
+ dropped |= batadv_is_my_client(bat_priv, hw_src, vid);
out:
- if (ret)
+ if (dropped)
kfree_skb(skb);
- /* if ret == false -> packet has to be delivered to the interface */
- return ret;
+ /* if dropped == false -> deliver to the interface */
+ return dropped;
}
/**
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index bb0158620628..cffa92dd9877 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -439,6 +439,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
INIT_HLIST_NODE(&gw_node->list);
gw_node->orig_node = orig_node;
+ gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
+ gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
atomic_set(&gw_node->refcount, 1);
spin_lock_bh(&bat_priv->gw.list_lock);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index c002961da75d..a2fc843c2243 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -479,6 +479,9 @@ out:
*/
void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
{
+ if (!vlan)
+ return;
+
if (atomic_dec_and_test(&vlan->refcount)) {
spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
hlist_del_rcu(&vlan->list);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index b4824951010b..5809b39c1922 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -594,6 +594,12 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
/* increase the refcounter of the related vlan */
vlan = batadv_softif_vlan_get(bat_priv, vid);
+ if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
+ addr, BATADV_PRINT_VID(vid))) {
+ kfree(tt_local);
+ tt_local = NULL;
+ goto out;
+ }
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
@@ -1034,6 +1040,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
struct batadv_tt_local_entry *tt_local_entry;
uint16_t flags, curr_flags = BATADV_NO_FLAGS;
struct batadv_softif_vlan *vlan;
+ void *tt_entry_exists;
tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
if (!tt_local_entry)
@@ -1061,11 +1068,22 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
* immediately purge it
*/
batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
- hlist_del_rcu(&tt_local_entry->common.hash_entry);
+
+ tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
+ batadv_compare_tt,
+ batadv_choose_tt,
+ &tt_local_entry->common);
+ if (!tt_entry_exists)
+ goto out;
+
+ /* extra call to free the local tt entry */
batadv_tt_local_entry_free_ref(tt_local_entry);
/* decrease the reference held for this vlan */
vlan = batadv_softif_vlan_get(bat_priv, vid);
+ if (!vlan)
+ goto out;
+
batadv_softif_vlan_free_ref(vlan);
batadv_softif_vlan_free_ref(vlan);
@@ -1166,8 +1184,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
/* decrease the reference held for this vlan */
vlan = batadv_softif_vlan_get(bat_priv,
tt_common_entry->vid);
- batadv_softif_vlan_free_ref(vlan);
- batadv_softif_vlan_free_ref(vlan);
+ if (vlan) {
+ batadv_softif_vlan_free_ref(vlan);
+ batadv_softif_vlan_free_ref(vlan);
+ }
batadv_tt_local_entry_free_ref(tt_local);
}
@@ -3207,8 +3227,10 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
/* decrease the reference held for this vlan */
vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
- batadv_softif_vlan_free_ref(vlan);
- batadv_softif_vlan_free_ref(vlan);
+ if (vlan) {
+ batadv_softif_vlan_free_ref(vlan);
+ batadv_softif_vlan_free_ref(vlan);
+ }
batadv_tt_local_entry_free_ref(tt_local);
}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 7998fb279165..92720f3fe573 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -7820,7 +7820,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
/* Make sure we copy only the significant bytes based on the
* encryption key size, and set the rest of the value to zeroes.
*/
- memcpy(ev.key.val, key->val, sizeof(key->enc_size));
+ memcpy(ev.key.val, key->val, key->enc_size);
memset(ev.key.val + key->enc_size, 0,
sizeof(ev.key.val) - key->enc_size);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 3d0f7d2a0616..ad82324f710f 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2312,6 +2312,10 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
return 1;
chan = conn->smp;
+ if (!chan) {
+ BT_ERR("SMP security requested but not available");
+ return 1;
+ }
if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
return 1;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index e97572b5d2cc..fa7bfced888e 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -37,14 +37,30 @@ static inline int should_deliver(const struct net_bridge_port *p,
int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb)
{
- if (!is_skb_forwardable(skb->dev, skb)) {
- kfree_skb(skb);
- } else {
- skb_push(skb, ETH_HLEN);
- br_drop_fake_rtable(skb);
- dev_queue_xmit(skb);
+ if (!is_skb_forwardable(skb->dev, skb))
+ goto drop;
+
+ skb_push(skb, ETH_HLEN);
+ br_drop_fake_rtable(skb);
+ skb_sender_cpu_clear(skb);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ (skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD))) {
+ int depth;
+
+ if (!__vlan_get_protocol(skb, skb->protocol, &depth))
+ goto drop;
+
+ skb_set_network_header(skb, depth);
}
+ dev_queue_xmit(skb);
+
+ return 0;
+
+drop:
+ kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index e29ad70b3000..c94321955db7 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -323,6 +323,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
struct net_bridge_mdb_htable *mdb;
+ unsigned long now = jiffies;
int err;
mdb = mlock_dereference(br->mdb, br);
@@ -347,8 +348,9 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
if (unlikely(!p))
return -ENOMEM;
rcu_assign_pointer(*pp, p);
+ if (state == MDB_TEMPORARY)
+ mod_timer(&p->timer, now + br->multicast_membership_interval);
- br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
return 0;
}
@@ -371,6 +373,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
if (!p || p->br != br || p->state == BR_STATE_DISABLED)
return -EINVAL;
+ memset(&ip, 0, sizeof(ip));
ip.proto = entry->addr.proto;
if (ip.proto == htons(ETH_P_IP))
ip.u.ip4 = entry->addr.u.ip4;
@@ -417,20 +420,14 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
if (!netif_running(br->dev) || br->multicast_disabled)
return -EINVAL;
+ memset(&ip, 0, sizeof(ip));
ip.proto = entry->addr.proto;
- if (ip.proto == htons(ETH_P_IP)) {
- if (timer_pending(&br->ip4_other_query.timer))
- return -EBUSY;
-
+ if (ip.proto == htons(ETH_P_IP))
ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
- } else {
- if (timer_pending(&br->ip6_other_query.timer))
- return -EBUSY;
-
+ else
ip.u.ip6 = entry->addr.u.ip6;
#endif
- }
spin_lock_bh(&br->multicast_lock);
mdb = mlock_dereference(br->mdb, br);
@@ -448,6 +445,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
if (p->port->state == BR_STATE_DISABLED)
goto unlock;
+ entry->state = p->state;
rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 742a6c27d7a2..1285eaf5dc22 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -39,6 +39,16 @@ static void br_multicast_start_querier(struct net_bridge *br,
struct bridge_mcast_own_query *query);
static void br_multicast_add_router(struct net_bridge *br,
struct net_bridge_port *port);
+static void br_ip4_multicast_leave_group(struct net_bridge *br,
+ struct net_bridge_port *port,
+ __be32 group,
+ __u16 vid);
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_leave_group(struct net_bridge *br,
+ struct net_bridge_port *port,
+ const struct in6_addr *group,
+ __u16 vid);
+#endif
unsigned int br_mdb_rehash_seq;
static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@@ -1010,9 +1020,15 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
continue;
}
- err = br_ip4_multicast_add_group(br, port, group, vid);
- if (err)
- break;
+ if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
+ type == IGMPV3_MODE_IS_INCLUDE) &&
+ ntohs(grec->grec_nsrcs) == 0) {
+ br_ip4_multicast_leave_group(br, port, group, vid);
+ } else {
+ err = br_ip4_multicast_add_group(br, port, group, vid);
+ if (err)
+ break;
+ }
}
return err;
@@ -1071,10 +1087,17 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
continue;
}
- err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
- vid);
- if (err)
- break;
+ if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
+ grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
+ ntohs(*nsrcs) == 0) {
+ br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
+ vid);
+ } else {
+ err = br_ip6_multicast_add_group(br, port,
+ &grec->grec_mca, vid);
+ if (err)
+ break;
+ }
}
return err;
@@ -1393,8 +1416,7 @@ br_multicast_leave_group(struct net_bridge *br,
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
- (port && port->state == BR_STATE_DISABLED) ||
- timer_pending(&other_query->timer))
+ (port && port->state == BR_STATE_DISABLED))
goto out;
mdb = mlock_dereference(br->mdb, br);
@@ -1402,6 +1424,31 @@ br_multicast_leave_group(struct net_bridge *br,
if (!mp)
goto out;
+ if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
+ struct net_bridge_port_group __rcu **pp;
+
+ for (pp = &mp->ports;
+ (p = mlock_dereference(*pp, br)) != NULL;
+ pp = &p->next) {
+ if (p->port != port)
+ continue;
+
+ rcu_assign_pointer(*pp, p->next);
+ hlist_del_init(&p->mglist);
+ del_timer(&p->timer);
+ call_rcu_bh(&p->rcu, br_multicast_free_pg);
+ br_mdb_notify(br->dev, port, group, RTM_DELMDB);
+
+ if (!mp->ports && !mp->mglist &&
+ netif_running(br->dev))
+ mod_timer(&mp->timer, jiffies);
+ }
+ goto out;
+ }
+
+ if (timer_pending(&other_query->timer))
+ goto out;
+
if (br->multicast_querier) {
__br_multicast_send_query(br, port, &mp->addr);
@@ -1427,28 +1474,6 @@ br_multicast_leave_group(struct net_bridge *br,
}
}
- if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
- struct net_bridge_port_group __rcu **pp;
-
- for (pp = &mp->ports;
- (p = mlock_dereference(*pp, br)) != NULL;
- pp = &p->next) {
- if (p->port != port)
- continue;
-
- rcu_assign_pointer(*pp, p->next);
- hlist_del_init(&p->mglist);
- del_timer(&p->timer);
- call_rcu_bh(&p->rcu, br_multicast_free_pg);
- br_mdb_notify(br->dev, port, group, RTM_DELMDB);
-
- if (!mp->ports && !mp->mglist &&
- netif_running(br->dev))
- mod_timer(&mp->timer, jiffies);
- }
- goto out;
- }
-
now = jiffies;
time = now + br->multicast_last_member_count *
br->multicast_last_member_interval;
@@ -1566,7 +1591,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
break;
}
- if (skb_trimmed)
+ if (skb_trimmed && skb_trimmed != skb)
kfree_skb(skb_trimmed);
return err;
@@ -1611,7 +1636,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
break;
}
- if (skb_trimmed)
+ if (skb_trimmed && skb_trimmed != skb)
kfree_skb(skb_trimmed);
return err;
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index d89f4fac0bc5..c8b9bcfe997e 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -111,7 +111,7 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb)
/* largest possible L2 header, see br_nf_dev_queue_xmit() */
#define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)
-#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) || IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
struct brnf_frag_data {
char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
u8 encap_size;
@@ -694,6 +694,7 @@ static int br_nf_push_frag_xmit(struct sock *sk, struct sk_buff *skb)
}
#endif
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb,
int (*output)(struct sock *, struct sk_buff *))
{
@@ -712,6 +713,7 @@ static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb,
return ip_do_fragment(sk, skb, output);
}
+#endif
static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
{
@@ -742,7 +744,7 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
struct brnf_frag_data *data;
if (br_validate_ipv4(skb))
- return NF_DROP;
+ goto drop;
IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
@@ -767,7 +769,7 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
struct brnf_frag_data *data;
if (br_validate_ipv6(skb))
- return NF_DROP;
+ goto drop;
IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
@@ -782,12 +784,16 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
if (v6ops)
return v6ops->fragment(sk, skb, br_nf_push_frag_xmit);
- else
- return -EMSGSIZE;
+
+ kfree_skb(skb);
+ return -EMSGSIZE;
}
#endif
nf_bridge_info_free(skb);
return br_dev_queue_push_xmit(sk, skb);
+ drop:
+ kfree_skb(skb);
+ return 0;
}
/* PF_BRIDGE/POST_ROUTING ********************************************/
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 6d12d2675c80..13b7d1e3d185 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -104,7 +104,7 @@ int br_validate_ipv6(struct sk_buff *skb)
{
const struct ipv6hdr *hdr;
struct net_device *dev = skb->dev;
- struct inet6_dev *idev = in6_dev_get(skb->dev);
+ struct inet6_dev *idev = __in6_dev_get(skb->dev);
u32 pkt_len;
u8 ip6h_len = sizeof(struct ipv6hdr);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 6b67ed3831de..4d74a0639c4c 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -112,6 +112,8 @@ static inline size_t br_port_info_size(void)
+ nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */
+ nla_total_size(1) /* IFLA_BRPORT_LEARNING */
+ nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */
+ + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */
+ + nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */
+ 0;
}
@@ -457,6 +459,8 @@ static int br_afspec(struct net_bridge *br,
if (nla_len(attr) != sizeof(struct bridge_vlan_info))
return -EINVAL;
vinfo = nla_data(attr);
+ if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
+ return -EINVAL;
if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
if (vinfo_start)
return -EINVAL;
@@ -504,6 +508,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
[IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
[IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
+ [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 },
+ [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
};
/* Change the state of the port and notify spanning tree */
@@ -691,9 +697,17 @@ static int br_port_slave_changelink(struct net_device *brdev,
struct nlattr *tb[],
struct nlattr *data[])
{
+ struct net_bridge *br = netdev_priv(brdev);
+ int ret;
+
if (!data)
return 0;
- return br_setport(br_port_get_rtnl(dev), data);
+
+ spin_lock_bh(&br->lock);
+ ret = br_setport(br_port_get_rtnl(dev), data);
+ spin_unlock_bh(&br->lock);
+
+ return ret;
}
static int br_port_fill_slave_info(struct sk_buff *skb,
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index b4b6dab9c285..ed74ffaa851f 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -209,8 +209,9 @@ void br_transmit_config(struct net_bridge_port *p)
br_send_config_bpdu(p, &bpdu);
p->topology_change_ack = 0;
p->config_pending = 0;
- mod_timer(&p->hold_timer,
- round_jiffies(jiffies + BR_HOLD_TIME));
+ if (p->br->stp_enabled == BR_KERNEL_STP)
+ mod_timer(&p->hold_timer,
+ round_jiffies(jiffies + BR_HOLD_TIME));
}
}
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index a2730e7196cd..4ca449a16132 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -48,7 +48,8 @@ void br_stp_enable_bridge(struct net_bridge *br)
struct net_bridge_port *p;
spin_lock_bh(&br->lock);
- mod_timer(&br->hello_timer, jiffies + br->hello_time);
+ if (br->stp_enabled == BR_KERNEL_STP)
+ mod_timer(&br->hello_timer, jiffies + br->hello_time);
mod_timer(&br->gc_timer, jiffies + HZ/10);
br_config_bpdu_generation(br);
@@ -127,6 +128,7 @@ static void br_stp_start(struct net_bridge *br)
int r;
char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
char *envp[] = { NULL };
+ struct net_bridge_port *p;
r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
@@ -140,6 +142,10 @@ static void br_stp_start(struct net_bridge *br)
if (r == 0) {
br->stp_enabled = BR_USER_STP;
br_debug(br, "userspace STP started\n");
+ /* Stop hello and hold timers */
+ del_timer(&br->hello_timer);
+ list_for_each_entry(p, &br->port_list, list)
+ del_timer(&p->hold_timer);
} else {
br->stp_enabled = BR_KERNEL_STP;
br_debug(br, "using kernel STP\n");
@@ -156,12 +162,17 @@ static void br_stp_stop(struct net_bridge *br)
int r;
char *argv[] = { BR_STP_PROG, br->dev->name, "stop", NULL };
char *envp[] = { NULL };
+ struct net_bridge_port *p;
if (br->stp_enabled == BR_USER_STP) {
r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
br_info(br, "userspace STP stopped, return code %d\n", r);
/* To start timers on any ports left in blocking */
+ mod_timer(&br->hello_timer, jiffies + br->hello_time);
+ list_for_each_entry(p, &br->port_list, list)
+ mod_timer(&p->hold_timer,
+ round_jiffies(jiffies + BR_HOLD_TIME));
spin_lock_bh(&br->lock);
br_port_state_selection(br);
spin_unlock_bh(&br->lock);
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 7caf7fae2d5b..5f0f5af0ec35 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -40,7 +40,9 @@ static void br_hello_timer_expired(unsigned long arg)
if (br->dev->flags & IFF_UP) {
br_config_bpdu_generation(br);
- mod_timer(&br->hello_timer, round_jiffies(jiffies + br->hello_time));
+ if (br->stp_enabled != BR_USER_STP)
+ mod_timer(&br->hello_timer,
+ round_jiffies(jiffies + br->hello_time));
}
spin_unlock(&br->lock);
}
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 3cc71b9f5517..cc858919108e 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -121,12 +121,13 @@ static void caif_flow_ctrl(struct sock *sk, int mode)
* Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
* not dropped, but CAIF is sending flow off instead.
*/
-static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int err;
unsigned long flags;
struct sk_buff_head *list = &sk->sk_receive_queue;
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+ bool queued = false;
if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
(unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
@@ -139,7 +140,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
err = sk_filter(sk, skb);
if (err)
- return err;
+ goto out;
+
if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
set_rx_flow_off(cf_sk);
net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
@@ -147,21 +149,16 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
skb->dev = NULL;
skb_set_owner_r(skb, sk);
- /* Cache the SKB length before we tack it onto the receive
- * queue. Once it is added it no longer belongs to us and
- * may be freed by other threads of control pulling packets
- * from the queue.
- */
spin_lock_irqsave(&list->lock, flags);
- if (!sock_flag(sk, SOCK_DEAD))
+ queued = !sock_flag(sk, SOCK_DEAD);
+ if (queued)
__skb_queue_tail(list, skb);
spin_unlock_irqrestore(&list->lock, flags);
-
- if (!sock_flag(sk, SOCK_DEAD))
+out:
+ if (queued)
sk->sk_data_ready(sk);
else
kfree_skb(skb);
- return 0;
}
/* Packet Receive Callback function called from CAIF Stack */
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 7933e62a7318..166d436196c1 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -89,6 +89,8 @@ struct timer_list can_stattimer; /* timer for statistics update */
struct s_stats can_stats; /* packet statistics */
struct s_pstats can_pstats; /* receive list statistics */
+static atomic_t skbcounter = ATOMIC_INIT(0);
+
/*
* af_can socket functions
*/
@@ -310,12 +312,8 @@ int can_send(struct sk_buff *skb, int loop)
return err;
}
- if (newskb) {
- if (!(newskb->tstamp.tv64))
- __net_timestamp(newskb);
-
+ if (newskb)
netif_rx_ni(newskb);
- }
/* update statistics */
can_stats.tx_frames++;
@@ -683,6 +681,10 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
can_stats.rx_frames++;
can_stats.rx_frames_delta++;
+ /* create non-zero unique skb identifier together with *skb */
+ while (!(can_skb_prv(skb)->skbcnt))
+ can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);
+
rcu_read_lock();
/* deliver the packet to sockets listening on all devices */
diff --git a/net/can/bcm.c b/net/can/bcm.c
index b523453585be..a1ba6875c2a2 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -261,6 +261,7 @@ static void bcm_can_tx(struct bcm_op *op)
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);
@@ -1217,6 +1218,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
}
can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
skb->dev = dev;
can_skb_set_owner(skb, sk);
err = can_send(skb, 1); /* send with loopback */
diff --git a/net/can/raw.c b/net/can/raw.c
index 31b9748cbb4e..2e67b1423cd3 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -75,7 +75,7 @@ MODULE_ALIAS("can-proto-1");
*/
struct uniqframe {
- ktime_t tstamp;
+ int skbcnt;
const struct sk_buff *skb;
unsigned int join_rx_count;
};
@@ -133,7 +133,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
/* eliminate multiple filter matches for the same skb */
if (this_cpu_ptr(ro->uniq)->skb == oskb &&
- ktime_equal(this_cpu_ptr(ro->uniq)->tstamp, oskb->tstamp)) {
+ this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
if (ro->join_filters) {
this_cpu_inc(ro->uniq->join_rx_count);
/* drop frame until all enabled filters matched */
@@ -144,7 +144,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
}
} else {
this_cpu_ptr(ro->uniq)->skb = oskb;
- this_cpu_ptr(ro->uniq)->tstamp = oskb->tstamp;
+ this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
this_cpu_ptr(ro->uniq)->join_rx_count = 1;
/* drop first frame to check all enabled filters? */
if (ro->join_filters && ro->count > 1)
@@ -749,6 +749,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
err = memcpy_from_msg(skb_put(skb, size), msg, size);
if (err < 0)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b80fb91bb3f7..617088aee21d 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -131,6 +131,35 @@ out_noerr:
goto out;
}
+static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+
+ if (skb->peeked)
+ return skb;
+
+ /* We have to unshare an skb before modifying it. */
+ if (!skb_shared(skb))
+ goto done;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return ERR_PTR(-ENOMEM);
+
+ skb->prev->next = nskb;
+ skb->next->prev = nskb;
+ nskb->prev = skb->prev;
+ nskb->next = skb->next;
+
+ consume_skb(skb);
+ skb = nskb;
+
+done:
+ skb->peeked = 1;
+
+ return skb;
+}
+
/**
* __skb_recv_datagram - Receive a datagram skbuff
* @sk: socket
@@ -165,7 +194,9 @@ out_noerr:
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
int *peeked, int *off, int *err)
{
+ struct sk_buff_head *queue = &sk->sk_receive_queue;
struct sk_buff *skb, *last;
+ unsigned long cpu_flags;
long timeo;
/*
* Caller is allowed not to check sk->sk_err before skb_recv_datagram()
@@ -184,8 +215,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
* Look at current nfs client by the way...
* However, this function was correct in any case. 8)
*/
- unsigned long cpu_flags;
- struct sk_buff_head *queue = &sk->sk_receive_queue;
int _off = *off;
last = (struct sk_buff *)queue;
@@ -199,7 +228,12 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
_off -= skb->len;
continue;
}
- skb->peeked = 1;
+
+ skb = skb_set_peeked(skb);
+ error = PTR_ERR(skb);
+ if (IS_ERR(skb))
+ goto unlock_err;
+
atomic_inc(&skb->users);
} else
__skb_unlink(skb, queue);
@@ -223,6 +257,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
return NULL;
+unlock_err:
+ spin_unlock_irqrestore(&queue->lock, cpu_flags);
no_packet:
*err = error;
return NULL;
@@ -622,7 +658,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
!skb->csum_complete_sw)
netdev_rx_csum_fault(skb->dev);
}
- skb->csum_valid = !sum;
+ if (!skb_shared(skb))
+ skb->csum_valid = !sum;
return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);
@@ -642,11 +679,13 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb)
netdev_rx_csum_fault(skb->dev);
}
- /* Save full packet checksum */
- skb->csum = csum;
- skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum_complete_sw = 1;
- skb->csum_valid = !sum;
+ if (!skb_shared(skb)) {
+ /* Save full packet checksum */
+ skb->csum = csum;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum_complete_sw = 1;
+ skb->csum_valid = !sum;
+ }
return sum;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 6778a9999d52..a8e4dd430285 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -677,10 +677,6 @@ int dev_get_iflink(const struct net_device *dev)
if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
return dev->netdev_ops->ndo_get_iflink(dev);
- /* If dev->rtnl_link_ops is set, it's a virtual interface. */
- if (dev->rtnl_link_ops)
- return 0;
-
return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);
@@ -3452,6 +3448,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
local_irq_save(flags);
rps_lock(sd);
+ if (!netif_running(skb->dev))
+ goto drop;
qlen = skb_queue_len(&sd->input_pkt_queue);
if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
if (qlen) {
@@ -3473,6 +3471,7 @@ enqueue:
goto enqueue;
}
+drop:
sd->dropped++;
rps_unlock(sd);
@@ -3775,8 +3774,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
pt_prev = NULL;
- rcu_read_lock();
-
another_round:
skb->skb_iif = skb->dev->ifindex;
@@ -3786,7 +3783,7 @@ another_round:
skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
skb = skb_vlan_untag(skb);
if (unlikely(!skb))
- goto unlock;
+ goto out;
}
#ifdef CONFIG_NET_CLS_ACT
@@ -3816,10 +3813,10 @@ skip_taps:
if (static_key_false(&ingress_needed)) {
skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
if (!skb)
- goto unlock;
+ goto out;
if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
- goto unlock;
+ goto out;
}
#endif
#ifdef CONFIG_NET_CLS_ACT
@@ -3837,7 +3834,7 @@ ncls:
if (vlan_do_receive(&skb))
goto another_round;
else if (unlikely(!skb))
- goto unlock;
+ goto out;
}
rx_handler = rcu_dereference(skb->dev->rx_handler);
@@ -3849,7 +3846,7 @@ ncls:
switch (rx_handler(&skb)) {
case RX_HANDLER_CONSUMED:
ret = NET_RX_SUCCESS;
- goto unlock;
+ goto out;
case RX_HANDLER_ANOTHER:
goto another_round;
case RX_HANDLER_EXACT:
@@ -3903,8 +3900,7 @@ drop:
ret = NET_RX_DROP;
}
-unlock:
- rcu_read_unlock();
+out:
return ret;
}
@@ -3935,29 +3931,30 @@ static int __netif_receive_skb(struct sk_buff *skb)
static int netif_receive_skb_internal(struct sk_buff *skb)
{
+ int ret;
+
net_timestamp_check(netdev_tstamp_prequeue, skb);
if (skb_defer_rx_timestamp(skb))
return NET_RX_SUCCESS;
+ rcu_read_lock();
+
#ifdef CONFIG_RPS
if (static_key_false(&rps_needed)) {
struct rps_dev_flow voidflow, *rflow = &voidflow;
- int cpu, ret;
-
- rcu_read_lock();
-
- cpu = get_rps_cpu(skb->dev, skb, &rflow);
+ int cpu = get_rps_cpu(skb->dev, skb, &rflow);
if (cpu >= 0) {
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
return ret;
}
- rcu_read_unlock();
}
#endif
- return __netif_receive_skb(skb);
+ ret = __netif_receive_skb(skb);
+ rcu_read_unlock();
+ return ret;
}
/**
@@ -4502,8 +4499,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
struct sk_buff *skb;
while ((skb = __skb_dequeue(&sd->process_queue))) {
+ rcu_read_lock();
local_irq_enable();
__netif_receive_skb(skb);
+ rcu_read_unlock();
local_irq_disable();
input_queue_head_incr(sd);
if (++work >= quota) {
@@ -6139,6 +6138,7 @@ static void rollback_registered_many(struct list_head *head)
unlist_netdevice(dev);
dev->reg_state = NETREG_UNREGISTERING;
+ on_each_cpu(flush_backlog, dev, 1);
}
synchronize_net();
@@ -6409,7 +6409,8 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
struct netdev_queue *tx;
size_t sz = count * sizeof(*tx);
- BUG_ON(count < 1 || count > 0xffff);
+ if (count < 1 || count > 0xffff)
+ return -EINVAL;
tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
if (!tx) {
@@ -6773,8 +6774,6 @@ void netdev_run_todo(void)
dev->reg_state = NETREG_UNREGISTERED;
- on_each_cpu(flush_backlog, dev, 1);
-
netdev_wait_allrefs(dev);
/* paranoia */
diff --git a/net/core/dst.c b/net/core/dst.c
index e956ce6d1378..002144bea935 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -284,7 +284,9 @@ void dst_release(struct dst_entry *dst)
int newrefcnt;
newrefcnt = atomic_dec_return(&dst->__refcnt);
- WARN_ON(newrefcnt < 0);
+ if (unlikely(newrefcnt < 0))
+ net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
+ __func__, dst, newrefcnt);
if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
call_rcu(&dst->rcu_head, dst_destroy_rcu);
}
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9dfb88a933e7..92d886f4adcb 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -66,7 +66,7 @@
NOTES.
- * avbps is scaled by 2^5, avpps is scaled by 2^10.
+ * avbps and avpps are scaled by 2^5.
* both values are reported as 32 bit unsigned values. bps can
overflow for fast links : max speed being 34360Mbit/sec
* Minimal interval is HZ/4=250msec (it is the greatest common divisor
@@ -85,10 +85,10 @@ struct gen_estimator
struct gnet_stats_rate_est64 *rate_est;
spinlock_t *stats_lock;
int ewma_log;
+ u32 last_packets;
+ unsigned long avpps;
u64 last_bytes;
u64 avbps;
- u32 last_packets;
- u32 avpps;
struct rcu_head e_rcu;
struct rb_node node;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
@@ -118,8 +118,8 @@ static void est_timer(unsigned long arg)
rcu_read_lock();
list_for_each_entry_rcu(e, &elist[idx].list, list) {
struct gnet_stats_basic_packed b = {0};
+ unsigned long rate;
u64 brate;
- u32 rate;
spin_lock(e->stats_lock);
read_lock(&est_lock);
@@ -133,10 +133,11 @@ static void est_timer(unsigned long arg)
e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
e->rate_est->bps = (e->avbps+0xF)>>5;
- rate = (b.packets - e->last_packets)<<(12 - idx);
+ rate = b.packets - e->last_packets;
+ rate <<= (7 - idx);
e->last_packets = b.packets;
e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
- e->rate_est->pps = (e->avpps+0x1FF)>>10;
+ e->rate_est->pps = (e->avpps + 0xF) >> 5;
skip:
read_unlock(&est_lock);
spin_unlock(e->stats_lock);
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 1f2a126f4ffa..6441f47b1a8f 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -23,7 +23,8 @@ static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state
struct cgroup_cls_state *task_cls_state(struct task_struct *p)
{
- return css_cls_state(task_css(p, net_cls_cgrp_id));
+ return css_cls_state(task_css_check(p, net_cls_cgrp_id,
+ rcu_read_lock_bh_held()));
}
EXPORT_SYMBOL_GPL(task_cls_state);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 05badbb58865..1cbd209192ea 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3514,8 +3514,6 @@ static int pktgen_thread_worker(void *arg)
set_freezable();
- __set_current_state(TASK_RUNNING);
-
while (!kthread_should_stop()) {
pkt_dev = next_to_run(t);
@@ -3560,7 +3558,6 @@ static int pktgen_thread_worker(void *arg)
try_to_freeze();
}
- set_current_state(TASK_INTERRUPTIBLE);
pr_debug("%s stopping all device\n", t->tsk->comm);
pktgen_stop(t);
@@ -3571,13 +3568,6 @@ static int pktgen_thread_worker(void *arg)
pr_debug("%s removing thread\n", t->tsk->comm);
pktgen_rem_thread(t);
- /* Wait for kthread_stop */
- while (!kthread_should_stop()) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- }
- __set_current_state(TASK_RUNNING);
-
return 0;
}
@@ -3769,6 +3759,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
}
t->net = pn;
+ get_task_struct(p);
wake_up_process(p);
wait_for_completion(&t->start_done);
@@ -3891,6 +3882,7 @@ static void __net_exit pg_net_exit(struct net *net)
t = list_entry(q, struct pktgen_thread, th_list);
list_del(&t->th_list);
kthread_stop(t->tsk);
+ put_task_struct(t->tsk);
kfree(t);
}
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 87b22c0bc08c..b42f0e26f89e 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -103,10 +103,16 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
spin_lock_bh(&queue->syn_wait_lock);
while ((req = lopt->syn_table[i]) != NULL) {
lopt->syn_table[i] = req->dl_next;
+ /* Because of following del_timer_sync(),
+ * we must release the spinlock here
+ * or risk a dead lock.
+ */
+ spin_unlock_bh(&queue->syn_wait_lock);
atomic_inc(&lopt->qlen_dec);
- if (del_timer(&req->rsk_timer))
+ if (del_timer_sync(&req->rsk_timer))
reqsk_put(req);
reqsk_put(req);
+ spin_lock_bh(&queue->syn_wait_lock);
}
spin_unlock_bh(&queue->syn_wait_lock);
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 01ced4a889e0..dc004b1e1f85 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1328,10 +1328,6 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
[IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
};
-static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
- [IFLA_VF_INFO] = { .type = NLA_NESTED },
-};
-
static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
[IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
[IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
@@ -1488,96 +1484,98 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
return 0;
}
-static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
+static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
{
- int rem, err = -EINVAL;
- struct nlattr *vf;
const struct net_device_ops *ops = dev->netdev_ops;
+ int err = -EINVAL;
- nla_for_each_nested(vf, attr, rem) {
- switch (nla_type(vf)) {
- case IFLA_VF_MAC: {
- struct ifla_vf_mac *ivm;
- ivm = nla_data(vf);
- err = -EOPNOTSUPP;
- if (ops->ndo_set_vf_mac)
- err = ops->ndo_set_vf_mac(dev, ivm->vf,
- ivm->mac);
- break;
- }
- case IFLA_VF_VLAN: {
- struct ifla_vf_vlan *ivv;
- ivv = nla_data(vf);
- err = -EOPNOTSUPP;
- if (ops->ndo_set_vf_vlan)
- err = ops->ndo_set_vf_vlan(dev, ivv->vf,
- ivv->vlan,
- ivv->qos);
- break;
- }
- case IFLA_VF_TX_RATE: {
- struct ifla_vf_tx_rate *ivt;
- struct ifla_vf_info ivf;
- ivt = nla_data(vf);
- err = -EOPNOTSUPP;
- if (ops->ndo_get_vf_config)
- err = ops->ndo_get_vf_config(dev, ivt->vf,
- &ivf);
- if (err)
- break;
- err = -EOPNOTSUPP;
- if (ops->ndo_set_vf_rate)
- err = ops->ndo_set_vf_rate(dev, ivt->vf,
- ivf.min_tx_rate,
- ivt->rate);
- break;
- }
- case IFLA_VF_RATE: {
- struct ifla_vf_rate *ivt;
- ivt = nla_data(vf);
- err = -EOPNOTSUPP;
- if (ops->ndo_set_vf_rate)
- err = ops->ndo_set_vf_rate(dev, ivt->vf,
- ivt->min_tx_rate,
- ivt->max_tx_rate);
- break;
- }
- case IFLA_VF_SPOOFCHK: {
- struct ifla_vf_spoofchk *ivs;
- ivs = nla_data(vf);
- err = -EOPNOTSUPP;
- if (ops->ndo_set_vf_spoofchk)
- err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
- ivs->setting);
- break;
- }
- case IFLA_VF_LINK_STATE: {
- struct ifla_vf_link_state *ivl;
- ivl = nla_data(vf);
- err = -EOPNOTSUPP;
- if (ops->ndo_set_vf_link_state)
- err = ops->ndo_set_vf_link_state(dev, ivl->vf,
- ivl->link_state);
- break;
- }
- case IFLA_VF_RSS_QUERY_EN: {
- struct ifla_vf_rss_query_en *ivrssq_en;
+ if (tb[IFLA_VF_MAC]) {
+ struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
- ivrssq_en = nla_data(vf);
- err = -EOPNOTSUPP;
- if (ops->ndo_set_vf_rss_query_en)
- err = ops->ndo_set_vf_rss_query_en(dev,
- ivrssq_en->vf,
- ivrssq_en->setting);
- break;
- }
- default:
- err = -EINVAL;
- break;
- }
- if (err)
- break;
+ err = -EOPNOTSUPP;
+ if (ops->ndo_set_vf_mac)
+ err = ops->ndo_set_vf_mac(dev, ivm->vf,
+ ivm->mac);
+ if (err < 0)
+ return err;
+ }
+
+ if (tb[IFLA_VF_VLAN]) {
+ struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
+
+ err = -EOPNOTSUPP;
+ if (ops->ndo_set_vf_vlan)
+ err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
+ ivv->qos);
+ if (err < 0)
+ return err;
+ }
+
+ if (tb[IFLA_VF_TX_RATE]) {
+ struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
+ struct ifla_vf_info ivf;
+
+ err = -EOPNOTSUPP;
+ if (ops->ndo_get_vf_config)
+ err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
+ if (err < 0)
+ return err;
+
+ err = -EOPNOTSUPP;
+ if (ops->ndo_set_vf_rate)
+ err = ops->ndo_set_vf_rate(dev, ivt->vf,
+ ivf.min_tx_rate,
+ ivt->rate);
+ if (err < 0)
+ return err;
+ }
+
+ if (tb[IFLA_VF_RATE]) {
+ struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
+
+ err = -EOPNOTSUPP;
+ if (ops->ndo_set_vf_rate)
+ err = ops->ndo_set_vf_rate(dev, ivt->vf,
+ ivt->min_tx_rate,
+ ivt->max_tx_rate);
+ if (err < 0)
+ return err;
+ }
+
+ if (tb[IFLA_VF_SPOOFCHK]) {
+ struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
+
+ err = -EOPNOTSUPP;
+ if (ops->ndo_set_vf_spoofchk)
+ err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
+ ivs->setting);
+ if (err < 0)
+ return err;
}
+
+ if (tb[IFLA_VF_LINK_STATE]) {
+ struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
+
+ err = -EOPNOTSUPP;
+ if (ops->ndo_set_vf_link_state)
+ err = ops->ndo_set_vf_link_state(dev, ivl->vf,
+ ivl->link_state);
+ if (err < 0)
+ return err;
+ }
+
+ if (tb[IFLA_VF_RSS_QUERY_EN]) {
+ struct ifla_vf_rss_query_en *ivrssq_en;
+
+ err = -EOPNOTSUPP;
+ ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
+ if (ops->ndo_set_vf_rss_query_en)
+ err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
+ ivrssq_en->setting);
+ if (err < 0)
+ return err;
+ }
+
return err;
}
@@ -1773,14 +1771,21 @@ static int do_setlink(const struct sk_buff *skb,
}
if (tb[IFLA_VFINFO_LIST]) {
+ struct nlattr *vfinfo[IFLA_VF_MAX + 1];
struct nlattr *attr;
int rem;
+
nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
- if (nla_type(attr) != IFLA_VF_INFO) {
+ if (nla_type(attr) != IFLA_VF_INFO ||
+ nla_len(attr) < NLA_HDRLEN) {
err = -EINVAL;
goto errout;
}
- err = do_setvfinfo(dev, attr);
+ err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
+ ifla_vf_policy);
+ if (err < 0)
+ goto errout;
+ err = do_setvfinfo(dev, vfinfo);
if (err < 0)
goto errout;
status |= DO_SETLINK_NOTIFY;
@@ -1799,10 +1804,13 @@ static int do_setlink(const struct sk_buff *skb,
goto errout;
nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
- if (nla_type(attr) != IFLA_VF_PORT)
- continue;
- err = nla_parse_nested(port, IFLA_PORT_MAX,
- attr, ifla_port_policy);
+ if (nla_type(attr) != IFLA_VF_PORT ||
+ nla_len(attr) < NLA_HDRLEN) {
+ err = -EINVAL;
+ goto errout;
+ }
+ err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
+ ifla_port_policy);
if (err < 0)
goto errout;
if (!port[IFLA_PORT_VF]) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b6a19ca0f99e..7b84330e5d30 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -340,7 +340,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
if (skb && frag_size) {
skb->head_frag = 1;
- if (virt_to_head_page(data)->pfmemalloc)
+ if (page_is_pfmemalloc(virt_to_head_page(data)))
skb->pfmemalloc = 1;
}
return skb;
@@ -4022,8 +4022,8 @@ EXPORT_SYMBOL(skb_checksum_setup);
* Otherwise returns the provided skb. Returns NULL in error cases
* (e.g. transport_len exceeds skb length or out-of-memory).
*
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
*/
static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
unsigned int transport_len)
@@ -4032,16 +4032,12 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
unsigned int len = skb_transport_offset(skb) + transport_len;
int ret;
- if (skb->len < len) {
- kfree_skb(skb);
+ if (skb->len < len)
return NULL;
- } else if (skb->len == len) {
+ else if (skb->len == len)
return skb;
- }
skb_chk = skb_clone(skb, GFP_ATOMIC);
- kfree_skb(skb);
-
if (!skb_chk)
return NULL;
@@ -4066,8 +4062,8 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
* If the skb has data beyond the given transport length, then a
* trimmed & cloned skb is checked and returned.
*
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
*/
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
unsigned int transport_len,
@@ -4079,23 +4075,26 @@ struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
skb_chk = skb_checksum_maybe_trim(skb, transport_len);
if (!skb_chk)
- return NULL;
+ goto err;
- if (!pskb_may_pull(skb_chk, offset)) {
- kfree_skb(skb_chk);
- return NULL;
- }
+ if (!pskb_may_pull(skb_chk, offset))
+ goto err;
__skb_pull(skb_chk, offset);
ret = skb_chkf(skb_chk);
__skb_push(skb_chk, offset);
- if (ret) {
- kfree_skb(skb_chk);
- return NULL;
- }
+ if (ret)
+ goto err;
return skb_chk;
+
+err:
+ if (skb_chk && skb_chk != skb)
+ kfree_skb(skb_chk);
+
+ return NULL;
+
}
EXPORT_SYMBOL(skb_checksum_trimmed);
diff --git a/net/core/sock.c b/net/core/sock.c
index 08f16db46070..193901d09757 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1497,7 +1497,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
sock_copy(newsk, sk);
/* SANITY */
- get_net(sock_net(newsk));
+ if (likely(newsk->sk_net_refcnt))
+ get_net(sock_net(newsk));
sk_node_init(&newsk->sk_node);
sock_lock_init(newsk);
bh_lock_sock(newsk);
@@ -1967,20 +1968,21 @@ static void __release_sock(struct sock *sk)
* sk_wait_data - wait for data to arrive at sk_receive_queue
* @sk: sock to wait on
* @timeo: for how long
+ * @skb: last skb seen on sk_receive_queue
*
* Now socket state including sk->sk_err is changed only under lock,
* hence we may omit checks after joining wait queue.
* We check receive queue before schedule() only as optimization;
* it is very likely that release_sock() added new data.
*/
-int sk_wait_data(struct sock *sk, long *timeo)
+int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
{
int rc;
DEFINE_WAIT(wait);
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
- rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
+ rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
finish_wait(sk_sleep(sk), &wait);
return rc;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 52a94016526d..b5cf13a28009 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -886,7 +886,7 @@ verify_sock_status:
break;
}
- sk_wait_data(sk, &timeo);
+ sk_wait_data(sk, &timeo, NULL);
continue;
found_ok_skb:
if (len > skb->len)
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 392e29a0227d..b445d492c115 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -630,7 +630,7 @@ static int dsa_of_probe(struct device *dev)
continue;
cd->sw_addr = be32_to_cpup(sw_addr);
- if (cd->sw_addr > PHY_MAX_ADDR)
+ if (cd->sw_addr >= PHY_MAX_ADDR)
continue;
if (!of_property_read_u32(child, "eeprom-length", &eeprom_len))
@@ -642,6 +642,8 @@ static int dsa_of_probe(struct device *dev)
continue;
port_index = be32_to_cpup(port_reg);
+ if (port_index >= DSA_MAX_PORTS)
+ break;
port_name = of_get_property(port, "label", NULL);
if (!port_name)
@@ -666,8 +668,6 @@ static int dsa_of_probe(struct device *dev)
goto out_free_chip;
}
- if (port_index == DSA_MAX_PORTS)
- break;
}
}
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 0917123790ea..35c47ddd04f0 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -756,7 +756,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
return -ENODEV;
/* Use already configured phy mode */
- p->phy_interface = p->phy->interface;
+ if (p->phy_interface == PHY_INTERFACE_MODE_NA)
+ p->phy_interface = p->phy->interface;
phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
p->phy_interface);
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index f46e4d1306f2..214d44aef35b 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -207,7 +207,7 @@ found:
} else {
fq->q.meat += skb->len;
}
- add_frag_mem_limit(&fq->q, skb->truesize);
+ add_frag_mem_limit(fq->q.net, skb->truesize);
if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
fq->q.meat == fq->q.len) {
@@ -287,7 +287,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
clone->data_len = clone->len;
head->data_len -= clone->len;
head->len -= clone->len;
- add_frag_mem_limit(&fq->q, clone->truesize);
+ add_frag_mem_limit(fq->q.net, clone->truesize);
}
WARN_ON(head == NULL);
@@ -310,7 +310,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
}
fp = next;
}
- sub_frag_mem_limit(&fq->q, sum_truesize);
+ sub_frag_mem_limit(fq->q.net, sum_truesize);
head->next = NULL;
head->dev = dev;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 933a92820d26..6c8b1fbafce8 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1017,14 +1017,16 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
neigh = neigh_lookup(&arp_tbl, &ip, dev);
if (neigh) {
- read_lock_bh(&neigh->lock);
- memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
- r->arp_flags = arp_state_to_flags(neigh);
- read_unlock_bh(&neigh->lock);
- r->arp_ha.sa_family = dev->type;
- strlcpy(r->arp_dev, dev->name, sizeof(r->arp_dev));
+ if (!(neigh->nud_state & NUD_NOARP)) {
+ read_lock_bh(&neigh->lock);
+ memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
+ r->arp_flags = arp_state_to_flags(neigh);
+ read_unlock_bh(&neigh->lock);
+ r->arp_ha.sa_family = dev->type;
+ strlcpy(r->arp_dev, dev->name, sizeof(r->arp_dev));
+ err = 0;
+ }
neigh_release(neigh);
- err = 0;
}
return err;
}
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 90c0e8386116..574fad9cca05 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -20,7 +20,7 @@
#include <net/route.h>
#include <net/tcp_states.h>
-int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
@@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk_dst_reset(sk);
- lock_sock(sk);
-
oif = sk->sk_bound_dev_if;
saddr = inet->inet_saddr;
if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
@@ -82,9 +80,19 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk_dst_set(sk, &rt->dst);
err = 0;
out:
- release_sock(sk);
return err;
}
+EXPORT_SYMBOL(__ip4_datagram_connect);
+
+int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+ int res;
+
+ lock_sock(sk);
+ res = __ip4_datagram_connect(sk, uaddr, addr_len);
+ release_sock(sk);
+ return res;
+}
EXPORT_SYMBOL(ip4_datagram_connect);
/* Because UDP xmit path can manipulate sk_dst_cache without holding
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 7498716e8f54..2d9cb1748f81 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -882,7 +882,6 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
queue_delayed_work(system_power_efficient_wq,
&check_lifetime_work, 0);
rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
- blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
}
return 0;
}
@@ -1740,6 +1739,8 @@ static int inet_netconf_msgsize_devconf(int type)
size += nla_total_size(4);
if (type == -1 || type == NETCONFA_PROXY_NEIGH)
size += nla_total_size(4);
+ if (type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
+ size += nla_total_size(4);
return size;
}
@@ -1780,6 +1781,10 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
goto nla_put_failure;
+ if ((type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
+ nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+ IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
+ goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
@@ -1819,6 +1824,7 @@ static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
[NETCONFA_FORWARDING] = { .len = sizeof(int) },
[NETCONFA_RP_FILTER] = { .len = sizeof(int) },
[NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
+ [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
};
static int inet_netconf_get_devconf(struct sk_buff *in_skb,
@@ -2048,6 +2054,12 @@ static int devinet_conf_proc(struct ctl_table *ctl, int write,
inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
ifindex, cnf);
}
+ if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
+ new_value != old_value) {
+ ifindex = devinet_conf_ifindex(net, cnf);
+ inet_netconf_notify_devconf(net, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+ ifindex, cnf);
+ }
}
return ret;
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index c6211ed60b03..9c02920725db 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -13,6 +13,7 @@ struct fib_alias {
u8 fa_state;
u8 fa_slen;
u32 tb_id;
+ s16 fa_default;
struct rcu_head rcu;
};
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index c7358ea4ae93..3a06586b170c 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1202,23 +1202,40 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event)
}
/* Must be invoked inside of an RCU protected region. */
-void fib_select_default(struct fib_result *res)
+void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
{
struct fib_info *fi = NULL, *last_resort = NULL;
struct hlist_head *fa_head = res->fa_head;
struct fib_table *tb = res->table;
+ u8 slen = 32 - res->prefixlen;
int order = -1, last_idx = -1;
- struct fib_alias *fa;
+ struct fib_alias *fa, *fa1 = NULL;
+ u32 last_prio = res->fi->fib_priority;
+ u8 last_tos = 0;
hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
struct fib_info *next_fi = fa->fa_info;
+ if (fa->fa_slen != slen)
+ continue;
+ if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
+ continue;
+ if (fa->tb_id != tb->tb_id)
+ continue;
+ if (next_fi->fib_priority > last_prio &&
+ fa->fa_tos == last_tos) {
+ if (last_tos)
+ continue;
+ break;
+ }
+ if (next_fi->fib_flags & RTNH_F_DEAD)
+ continue;
+ last_tos = fa->fa_tos;
+ last_prio = next_fi->fib_priority;
+
if (next_fi->fib_scope != res->scope ||
fa->fa_type != RTN_UNICAST)
continue;
-
- if (next_fi->fib_priority > res->fi->fib_priority)
- break;
if (!next_fi->fib_nh[0].nh_gw ||
next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
continue;
@@ -1228,10 +1245,11 @@ void fib_select_default(struct fib_result *res)
if (!fi) {
if (next_fi != res->fi)
break;
+ fa1 = fa;
} else if (!fib_detect_death(fi, order, &last_resort,
- &last_idx, tb->tb_default)) {
+ &last_idx, fa1->fa_default)) {
fib_result_assign(res, fi);
- tb->tb_default = order;
+ fa1->fa_default = order;
goto out;
}
fi = next_fi;
@@ -1239,20 +1257,21 @@ void fib_select_default(struct fib_result *res)
}
if (order <= 0 || !fi) {
- tb->tb_default = -1;
+ if (fa1)
+ fa1->fa_default = -1;
goto out;
}
if (!fib_detect_death(fi, order, &last_resort, &last_idx,
- tb->tb_default)) {
+ fa1->fa_default)) {
fib_result_assign(res, fi);
- tb->tb_default = order;
+ fa1->fa_default = order;
goto out;
}
if (last_idx >= 0)
fib_result_assign(res, last_resort);
- tb->tb_default = last_idx;
+ fa1->fa_default = last_idx;
out:
return;
}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 15d32612e3c6..b0c6258ffb79 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1171,6 +1171,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
new_fa->fa_state = state & ~FA_S_ACCESSED;
new_fa->fa_slen = fa->fa_slen;
new_fa->tb_id = tb->tb_id;
+ new_fa->fa_default = -1;
err = switchdev_fib_ipv4_add(key, plen, fi,
new_fa->fa_tos,
@@ -1222,6 +1223,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
new_fa->fa_state = 0;
new_fa->fa_slen = slen;
new_fa->tb_id = tb->tb_id;
+ new_fa->fa_default = -1;
/* (Optionally) offload fib entry to switch hardware. */
err = switchdev_fib_ipv4_add(key, plen, fi, tos, cfg->fc_type,
@@ -1791,8 +1793,6 @@ void fib_table_flush_external(struct fib_table *tb)
if (hlist_empty(&n->leaf)) {
put_child_root(pn, n->key, NULL);
node_free(n);
- } else {
- leaf_pull_suffix(pn, n);
}
}
}
@@ -1862,8 +1862,6 @@ int fib_table_flush(struct fib_table *tb)
if (hlist_empty(&n->leaf)) {
put_child_root(pn, n->key, NULL);
node_free(n);
- } else {
- leaf_pull_suffix(pn, n);
}
}
@@ -1990,7 +1988,6 @@ struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
return NULL;
tb->tb_id = id;
- tb->tb_default = -1;
tb->tb_num_default = 0;
tb->tb_data = (alias ? alias->__data : tb->__data);
@@ -2468,7 +2465,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
key = l->key + 1;
iter->pos++;
- if (pos-- <= 0)
+ if (--pos <= 0)
break;
l = NULL;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 651cdf648ec4..9fdfd9deac11 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1435,33 +1435,35 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
struct sk_buff *skb_chk;
unsigned int transport_len;
unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
- int ret;
+ int ret = -EINVAL;
transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
- skb_get(skb);
skb_chk = skb_checksum_trimmed(skb, transport_len,
ip_mc_validate_checksum);
if (!skb_chk)
- return -EINVAL;
+ goto err;
- if (!pskb_may_pull(skb_chk, len)) {
- kfree_skb(skb_chk);
- return -EINVAL;
- }
+ if (!pskb_may_pull(skb_chk, len))
+ goto err;
ret = ip_mc_check_igmp_msg(skb_chk);
- if (ret) {
- kfree_skb(skb_chk);
- return ret;
- }
+ if (ret)
+ goto err;
if (skb_trimmed)
*skb_trimmed = skb_chk;
- else
+ /* free now unneeded clone */
+ else if (skb_chk != skb)
kfree_skb(skb_chk);
- return 0;
+ ret = 0;
+
+err:
+ if (ret && skb_chk && skb_chk != skb)
+ kfree_skb(skb_chk);
+
+ return ret;
}
/**
@@ -1470,7 +1472,7 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
* @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional)
*
* Checks whether an IPv4 packet is a valid IGMP packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
*
* -EINVAL: A broken packet was detected, i.e. it violates some internet
* standard
@@ -1485,7 +1487,8 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
* to leave the original skb and its full frame unchanged (which might be
* desirable for layer 2 frame jugglers).
*
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * Caller needs to set the skb network header and free any returned skb if it
+ * differs from the provided skb.
*/
int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
{
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 60021d0d9326..134957159c27 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -593,7 +593,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
}
spin_unlock(&queue->syn_wait_lock);
- if (del_timer(&req->rsk_timer))
+ if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
reqsk_put(req);
return found;
}
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 9bc26677058e..c3b1f3a0f4cf 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -152,8 +152,8 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
inet6_sk(sk)->tclass) < 0)
goto errout;
- if (ipv6_only_sock(sk) &&
- nla_put_u8(skb, INET_DIAG_SKV6ONLY, 1))
+ if (((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
+ nla_put_u8(skb, INET_DIAG_SKV6ONLY, ipv6_only_sock(sk)))
goto errout;
}
#endif
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 5e346a082e5f..d0a7c0319e3d 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -131,34 +131,22 @@ inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
unsigned int evicted = 0;
HLIST_HEAD(expired);
-evict_again:
spin_lock(&hb->chain_lock);
hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
if (!inet_fragq_should_evict(fq))
continue;
- if (!del_timer(&fq->timer)) {
- /* q expiring right now thus increment its refcount so
- * it won't be freed under us and wait until the timer
- * has finished executing then destroy it
- */
- atomic_inc(&fq->refcnt);
- spin_unlock(&hb->chain_lock);
- del_timer_sync(&fq->timer);
- inet_frag_put(fq, f);
- goto evict_again;
- }
+ if (!del_timer(&fq->timer))
+ continue;
- fq->flags |= INET_FRAG_EVICTED;
- hlist_del(&fq->list);
- hlist_add_head(&fq->list, &expired);
+ hlist_add_head(&fq->list_evictor, &expired);
++evicted;
}
spin_unlock(&hb->chain_lock);
- hlist_for_each_entry_safe(fq, n, &expired, list)
+ hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
f->frag_expire((unsigned long) fq);
return evicted;
@@ -240,18 +228,20 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
int i;
nf->low_thresh = 0;
- local_bh_disable();
evict_again:
+ local_bh_disable();
seq = read_seqbegin(&f->rnd_seqlock);
for (i = 0; i < INETFRAGS_HASHSZ ; i++)
inet_evict_bucket(f, &f->hash[i]);
- if (read_seqretry(&f->rnd_seqlock, seq))
- goto evict_again;
-
local_bh_enable();
+ cond_resched();
+
+ if (read_seqretry(&f->rnd_seqlock, seq) ||
+ percpu_counter_sum(&nf->mem))
+ goto evict_again;
percpu_counter_destroy(&nf->mem);
}
@@ -284,8 +274,8 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
struct inet_frag_bucket *hb;
hb = get_frag_bucket_locked(fq, f);
- if (!(fq->flags & INET_FRAG_EVICTED))
- hlist_del(&fq->list);
+ hlist_del(&fq->list);
+ fq->flags |= INET_FRAG_COMPLETE;
spin_unlock(&hb->chain_lock);
}
@@ -297,7 +287,6 @@ void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
if (!(fq->flags & INET_FRAG_COMPLETE)) {
fq_unlink(fq, f);
atomic_dec(&fq->refcnt);
- fq->flags |= INET_FRAG_COMPLETE;
}
}
EXPORT_SYMBOL(inet_frag_kill);
@@ -330,11 +319,12 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
fp = xp;
}
sum = sum_truesize + f->qsize;
- sub_frag_mem_limit(q, sum);
if (f->destructor)
f->destructor(q);
kmem_cache_free(f->frags_cachep, q);
+
+ sub_frag_mem_limit(nf, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);
@@ -390,7 +380,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
q->net = nf;
f->constructor(q, arg);
- add_frag_mem_limit(q, f->qsize);
+ add_frag_mem_limit(nf, f->qsize);
setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
spin_lock_init(&q->lock);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 5f9b063bbe8a..0cb9165421d4 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -624,22 +624,21 @@ EXPORT_SYMBOL_GPL(inet_hashinfo_init);
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
+ unsigned int locksz = sizeof(spinlock_t);
unsigned int i, nblocks = 1;
- if (sizeof(spinlock_t) != 0) {
+ if (locksz != 0) {
/* allocate 2 cache lines or at least one spinlock per cpu */
- nblocks = max_t(unsigned int,
- 2 * L1_CACHE_BYTES / sizeof(spinlock_t),
- 1);
+ nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
/* no more locks than number of hash buckets */
nblocks = min(nblocks, hashinfo->ehash_mask + 1);
- hashinfo->ehash_locks = kmalloc_array(nblocks, sizeof(spinlock_t),
+ hashinfo->ehash_locks = kmalloc_array(nblocks, locksz,
GFP_KERNEL | __GFP_NOWARN);
if (!hashinfo->ehash_locks)
- hashinfo->ehash_locks = vmalloc(nblocks * sizeof(spinlock_t));
+ hashinfo->ehash_locks = vmalloc(nblocks * locksz);
if (!hashinfo->ehash_locks)
return -ENOMEM;
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index a50dc6d408d1..921138f6c97c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -202,7 +202,7 @@ static void ip_expire(unsigned long arg)
ipq_kill(qp);
IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
- if (!(qp->q.flags & INET_FRAG_EVICTED)) {
+ if (!inet_frag_evicting(&qp->q)) {
struct sk_buff *head = qp->q.fragments;
const struct iphdr *iph;
int err;
@@ -309,7 +309,7 @@ static int ip_frag_reinit(struct ipq *qp)
kfree_skb(fp);
fp = xp;
} while (fp);
- sub_frag_mem_limit(&qp->q, sum_truesize);
+ sub_frag_mem_limit(qp->q.net, sum_truesize);
qp->q.flags = 0;
qp->q.len = 0;
@@ -351,7 +351,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
ihl = ip_hdrlen(skb);
/* Determine the position of this fragment. */
- end = offset + skb->len - ihl;
+ end = offset + skb->len - skb_network_offset(skb) - ihl;
err = -EINVAL;
/* Is this the final fragment? */
@@ -381,7 +381,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
goto err;
err = -ENOMEM;
- if (!pskb_pull(skb, ihl))
+ if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
goto err;
err = pskb_trim_rcsum(skb, end - offset);
@@ -455,7 +455,7 @@ found:
qp->q.fragments = next;
qp->q.meat -= free_it->len;
- sub_frag_mem_limit(&qp->q, free_it->truesize);
+ sub_frag_mem_limit(qp->q.net, free_it->truesize);
kfree_skb(free_it);
}
}
@@ -479,7 +479,7 @@ found:
qp->q.stamp = skb->tstamp;
qp->q.meat += skb->len;
qp->ecn |= ecn;
- add_frag_mem_limit(&qp->q, skb->truesize);
+ add_frag_mem_limit(qp->q.net, skb->truesize);
if (offset == 0)
qp->q.flags |= INET_FRAG_FIRST_IN;
@@ -587,7 +587,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
head->len -= clone->len;
clone->csum = 0;
clone->ip_summed = head->ip_summed;
- add_frag_mem_limit(&qp->q, clone->truesize);
+ add_frag_mem_limit(qp->q.net, clone->truesize);
}
skb_push(head, head->data - skb_network_header(head));
@@ -615,7 +615,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
}
fp = next;
}
- sub_frag_mem_limit(&qp->q, sum_truesize);
+ sub_frag_mem_limit(qp->q.net, sum_truesize);
head->next = NULL;
head->dev = dev;
@@ -641,6 +641,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
iph->frag_off = 0;
}
+ ip_send_check(iph);
+
IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
qp->q.fragments = NULL;
qp->q.fragments_tail = NULL;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 4c2c3ba4ba65..626d9e56a6bd 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -586,7 +586,8 @@ int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
EXPORT_SYMBOL(ip_tunnel_encap);
static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
- struct rtable *rt, __be16 df)
+ struct rtable *rt, __be16 df,
+ const struct iphdr *inner_iph)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
@@ -603,7 +604,8 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
if (skb->protocol == htons(ETH_P_IP)) {
if (!skb_is_gso(skb) &&
- (df & htons(IP_DF)) && mtu < pkt_size) {
+ (inner_iph->frag_off & htons(IP_DF)) &&
+ mtu < pkt_size) {
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
return -E2BIG;
@@ -737,7 +739,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
- if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
+ if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
ip_rt_put(rt);
goto tx_error;
}
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 95c9b6eece25..92305a1a021a 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -254,9 +254,10 @@ unsigned int arpt_do_table(struct sk_buff *skb,
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
unsigned int verdict = NF_DROP;
const struct arphdr *arp;
- struct arpt_entry *e, *back;
+ struct arpt_entry *e, **jumpstack;
const char *indev, *outdev;
const void *table_base;
+ unsigned int cpu, stackidx = 0;
const struct xt_table_info *private;
struct xt_action_param acpar;
unsigned int addend;
@@ -270,15 +271,16 @@ unsigned int arpt_do_table(struct sk_buff *skb,
local_bh_disable();
addend = xt_write_recseq_begin();
private = table->private;
+ cpu = smp_processor_id();
/*
* Ensure we load private-> members after we've fetched the base
* pointer.
*/
smp_read_barrier_depends();
table_base = private->entries;
+ jumpstack = (struct arpt_entry **)private->jumpstack[cpu];
e = get_entry(table_base, private->hook_entry[hook]);
- back = get_entry(table_base, private->underflow[hook]);
acpar.in = state->in;
acpar.out = state->out;
@@ -312,18 +314,23 @@ unsigned int arpt_do_table(struct sk_buff *skb,
verdict = (unsigned int)(-v) - 1;
break;
}
- e = back;
- back = get_entry(table_base, back->comefrom);
+ if (stackidx == 0) {
+ e = get_entry(table_base,
+ private->underflow[hook]);
+ } else {
+ e = jumpstack[--stackidx];
+ e = arpt_next_entry(e);
+ }
continue;
}
if (table_base + v
!= arpt_next_entry(e)) {
- /* Save old back ptr in next entry */
- struct arpt_entry *next = arpt_next_entry(e);
- next->comefrom = (void *)back - table_base;
- /* set back pointer to next entry */
- back = next;
+ if (stackidx >= private->stacksize) {
+ verdict = NF_DROP;
+ break;
+ }
+ jumpstack[stackidx++] = e;
}
e = get_entry(table_base, v);
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index fe8cc183411e..95ea633e8356 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -226,7 +226,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
synproxy_build_options(nth, opts);
- synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+ synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+ niph, nth, tcp_hdr_size);
}
static bool
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d0362a2de3d3..e681b852ced1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2176,7 +2176,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
if (!res.prefixlen &&
res.table->tb_num_default > 1 &&
res.type == RTN_UNICAST && !fl4->flowi4_oif)
- fib_select_default(&res);
+ fib_select_default(fl4, &res);
if (!fl4->saddr)
fl4->saddr = FIB_RES_PREFSRC(net, res);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 433231ccfb17..0330ab2e2b63 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -41,8 +41,6 @@ static int tcp_syn_retries_min = 1;
static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
-static int min_sndbuf = SOCK_MIN_SNDBUF;
-static int min_rcvbuf = SOCK_MIN_RCVBUF;
/* Update system visible IP port range */
static void set_local_port_range(struct net *net, int range[2])
@@ -530,7 +528,7 @@ static struct ctl_table ipv4_table[] = {
.maxlen = sizeof(sysctl_tcp_wmem),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &min_sndbuf,
+ .extra1 = &one,
},
{
.procname = "tcp_notsent_lowat",
@@ -545,7 +543,7 @@ static struct ctl_table ipv4_table[] = {
.maxlen = sizeof(sysctl_tcp_rmem),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &min_rcvbuf,
+ .extra1 = &one,
},
{
.procname = "tcp_app_win",
@@ -758,7 +756,7 @@ static struct ctl_table ipv4_table[] = {
.maxlen = sizeof(sysctl_udp_rmem_min),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &min_rcvbuf,
+ .extra1 = &one
},
{
.procname = "udp_wmem_min",
@@ -766,7 +764,7 @@ static struct ctl_table ipv4_table[] = {
.maxlen = sizeof(sysctl_udp_wmem_min),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &min_sndbuf,
+ .extra1 = &one
},
{ }
};
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 7f4056785acc..45534a5ab430 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -780,7 +780,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
ret = -EAGAIN;
break;
}
- sk_wait_data(sk, &timeo);
+ sk_wait_data(sk, &timeo, NULL);
if (signal_pending(current)) {
ret = sock_intr_errno(timeo);
break;
@@ -1575,7 +1575,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
int target; /* Read at least this many bytes */
long timeo;
struct task_struct *user_recv = NULL;
- struct sk_buff *skb;
+ struct sk_buff *skb, *last;
u32 urg_hole = 0;
if (unlikely(flags & MSG_ERRQUEUE))
@@ -1635,7 +1635,9 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
/* Next get a buffer. */
+ last = skb_peek_tail(&sk->sk_receive_queue);
skb_queue_walk(&sk->sk_receive_queue, skb) {
+ last = skb;
/* Now that we have two receive queues this
* shouldn't happen.
*/
@@ -1754,8 +1756,9 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
/* Do not sleep, just process backlog. */
release_sock(sk);
lock_sock(sk);
- } else
- sk_wait_data(sk, &timeo);
+ } else {
+ sk_wait_data(sk, &timeo, last);
+ }
if (user_recv) {
int chunk;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 684f095d196e..728f5b3d3c64 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1917,14 +1917,13 @@ void tcp_enter_loss(struct sock *sk)
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
- bool new_recovery = false;
+ bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
bool is_reneg; /* is receiver reneging on SACKs? */
/* Reduce ssthresh if it has not yet been made inside this window. */
if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
!after(tp->high_seq, tp->snd_una) ||
(icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
- new_recovery = true;
tp->prior_ssthresh = tcp_current_ssthresh(sk);
tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
tcp_ca_event(sk, CA_EVENT_LOSS);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index d7d4c2b79cf2..0ea2e1c5d395 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1348,7 +1348,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
if (req) {
nsk = tcp_check_req(sk, skb, req, false);
- if (!nsk)
+ if (!nsk || nsk == sk)
reqsk_put(req);
return nsk;
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 83aa604f9273..1b8c5ba7d5f7 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1995,12 +1995,19 @@ void udp_v4_early_demux(struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_efree;
- dst = sk->sk_rx_dst;
+ dst = READ_ONCE(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, 0);
- if (dst)
- skb_dst_set_noref(skb, dst);
+ if (dst) {
+ /* DST_NOCACHE can not be used without taking a reference */
+ if (dst->flags & DST_NOCACHE) {
+ if (likely(atomic_inc_not_zero(&dst->__refcnt)))
+ skb_dst_set(skb, dst);
+ } else {
+ skb_dst_set_noref(skb, dst);
+ }
+ }
}
int udp_rcv(struct sk_buff *skb)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 62d908e64eeb..b10a88986a98 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -40,7 +40,7 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
}
-int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
struct inet_sock *inet = inet_sk(sk);
@@ -56,7 +56,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (usin->sin6_family == AF_INET) {
if (__ipv6_only_sock(sk))
return -EAFNOSUPPORT;
- err = ip4_datagram_connect(sk, uaddr, addr_len);
+ err = __ip4_datagram_connect(sk, uaddr, addr_len);
goto ipv4_connected;
}
@@ -98,9 +98,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sin.sin_addr.s_addr = daddr->s6_addr32[3];
sin.sin_port = usin->sin6_port;
- err = ip4_datagram_connect(sk,
- (struct sockaddr *) &sin,
- sizeof(sin));
+ err = __ip4_datagram_connect(sk,
+ (struct sockaddr *) &sin,
+ sizeof(sin));
ipv4_connected:
if (err)
@@ -204,6 +204,16 @@ out:
fl6_sock_release(flowlabel);
return err;
}
+
+int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+ int res;
+
+ lock_sock(sk);
+ res = __ip6_datagram_connect(sk, uaddr, addr_len);
+ release_sock(sk);
+ return res;
+}
EXPORT_SYMBOL_GPL(ip6_datagram_connect);
int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 55d19861ab20..548c6237b1e7 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -172,6 +172,8 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
*ppcpu_rt = NULL;
}
}
+
+ non_pcpu_rt->rt6i_pcpu = NULL;
}
static void rt6_release(struct rt6_info *rt)
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index f2e464eba5ef..57990c929cd8 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -331,10 +331,10 @@ int ip6_mc_input(struct sk_buff *skb)
if (offset < 0)
goto out;
- if (!ipv6_is_mld(skb, nexthdr, offset))
- goto out;
+ if (ipv6_is_mld(skb, nexthdr, offset))
+ deliver = true;
- deliver = true;
+ goto out;
}
/* unknown RA - process it normally */
}
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index e893cd18612f..08b62047c67f 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -292,8 +292,6 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
static const struct net_offload sit_offload = {
.callbacks = {
.gso_segment = ipv6_gso_segment,
- .gro_receive = ipv6_gro_receive,
- .gro_complete = ipv6_gro_complete,
},
};
diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c
index df8afe5ab31e..9405b04eecc6 100644
--- a/net/ipv6/mcast_snoop.c
+++ b/net/ipv6/mcast_snoop.c
@@ -143,34 +143,36 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
struct sk_buff *skb_chk = NULL;
unsigned int transport_len;
unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
- int ret;
+ int ret = -EINVAL;
transport_len = ntohs(ipv6_hdr(skb)->payload_len);
transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr);
- skb_get(skb);
skb_chk = skb_checksum_trimmed(skb, transport_len,
ipv6_mc_validate_checksum);
if (!skb_chk)
- return -EINVAL;
+ goto err;
- if (!pskb_may_pull(skb_chk, len)) {
- kfree_skb(skb_chk);
- return -EINVAL;
- }
+ if (!pskb_may_pull(skb_chk, len))
+ goto err;
ret = ipv6_mc_check_mld_msg(skb_chk);
- if (ret) {
- kfree_skb(skb_chk);
- return ret;
- }
+ if (ret)
+ goto err;
if (skb_trimmed)
*skb_trimmed = skb_chk;
- else
+ /* free now unneeded clone */
+ else if (skb_chk != skb)
kfree_skb(skb_chk);
- return 0;
+ ret = 0;
+
+err:
+ if (ret && skb_chk && skb_chk != skb)
+ kfree_skb(skb_chk);
+
+ return ret;
}
/**
@@ -179,7 +181,7 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
* @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional)
*
* Checks whether an IPv6 packet is a valid MLD packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
*
* -EINVAL: A broken packet was detected, i.e. it violates some internet
* standard
@@ -194,7 +196,8 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
* to leave the original skb and its full frame unchanged (which might be
* desirable for layer 2 frame jugglers).
*
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * Caller needs to set the skb network header and free any returned skb if it
+ * differs from the provided skb.
*/
int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed)
{
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0a05b35a90fc..c53331cfed95 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1650,6 +1650,7 @@ int ndisc_rcv(struct sk_buff *skb)
static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_change_info *change_info;
struct net *net = dev_net(dev);
struct inet6_dev *idev;
@@ -1664,6 +1665,11 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
ndisc_send_unsol_na(dev);
in6_dev_put(idev);
break;
+ case NETDEV_CHANGE:
+ change_info = ptr;
+ if (change_info->flags_changed & IFF_NOARP)
+ neigh_changeaddr(&nd_tbl, dev);
+ break;
case NETDEV_DOWN:
neigh_ifdown(&nd_tbl, dev);
fib6_run_gc(0, net, false);
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index 6edb7b106de7..ebbb754c2111 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -37,12 +37,13 @@ synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr,
}
static void
-synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb,
+synproxy_send_tcp(const struct synproxy_net *snet,
+ const struct sk_buff *skb, struct sk_buff *nskb,
struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
struct ipv6hdr *niph, struct tcphdr *nth,
unsigned int tcp_hdr_size)
{
- struct net *net = nf_ct_net((struct nf_conn *)nfct);
+ struct net *net = nf_ct_net(snet->tmpl);
struct dst_entry *dst;
struct flowi6 fl6;
@@ -83,7 +84,8 @@ free_nskb:
}
static void
-synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
+synproxy_send_client_synack(const struct synproxy_net *snet,
+ const struct sk_buff *skb, const struct tcphdr *th,
const struct synproxy_options *opts)
{
struct sk_buff *nskb;
@@ -119,7 +121,7 @@ synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
synproxy_build_options(nth, opts);
- synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+ synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
niph, nth, tcp_hdr_size);
}
@@ -163,7 +165,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
synproxy_build_options(nth, opts);
- synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+ synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
niph, nth, tcp_hdr_size);
}
@@ -203,7 +205,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
synproxy_build_options(nth, opts);
- synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+ synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
}
static void
@@ -241,7 +243,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
synproxy_build_options(nth, opts);
- synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+ synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+ niph, nth, tcp_hdr_size);
}
static bool
@@ -301,7 +304,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
XT_SYNPROXY_OPT_SACK_PERM |
XT_SYNPROXY_OPT_ECN);
- synproxy_send_client_synack(skb, th, &opts);
+ synproxy_send_client_synack(snet, skb, th, &opts);
return NF_DROP;
} else if (th->ack && !(th->fin || th->rst || th->syn)) {
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 6f187c8d8a1b..6d02498172c1 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -348,7 +348,7 @@ found:
fq->ecn |= ecn;
if (payload_len > fq->q.max_size)
fq->q.max_size = payload_len;
- add_frag_mem_limit(&fq->q, skb->truesize);
+ add_frag_mem_limit(fq->q.net, skb->truesize);
/* The first fragment.
* nhoffset is obtained from the first fragment, of course.
@@ -430,7 +430,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
clone->ip_summed = head->ip_summed;
NFCT_FRAG6_CB(clone)->orig = NULL;
- add_frag_mem_limit(&fq->q, clone->truesize);
+ add_frag_mem_limit(fq->q.net, clone->truesize);
}
/* We have to remove fragment header from datagram and to relocate
@@ -454,7 +454,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
head->csum = csum_add(head->csum, fp->csum);
head->truesize += fp->truesize;
}
- sub_frag_mem_limit(&fq->q, head->truesize);
+ sub_frag_mem_limit(fq->q.net, head->truesize);
head->ignore_df = 1;
head->next = NULL;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 8ffa2c8cce77..f1159bb76e0a 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -144,7 +144,7 @@ void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
- if (fq->q.flags & INET_FRAG_EVICTED)
+ if (inet_frag_evicting(&fq->q))
goto out_rcu_unlock;
IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
@@ -330,7 +330,7 @@ found:
fq->q.stamp = skb->tstamp;
fq->q.meat += skb->len;
fq->ecn |= ecn;
- add_frag_mem_limit(&fq->q, skb->truesize);
+ add_frag_mem_limit(fq->q.net, skb->truesize);
/* The first fragment.
* nhoffset is obtained from the first fragment, of course.
@@ -443,7 +443,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
head->len -= clone->len;
clone->csum = 0;
clone->ip_summed = head->ip_summed;
- add_frag_mem_limit(&fq->q, clone->truesize);
+ add_frag_mem_limit(fq->q.net, clone->truesize);
}
/* We have to remove fragment header from datagram and to relocate
@@ -481,7 +481,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
}
fp = next;
}
- sub_frag_mem_limit(&fq->q, sum_truesize);
+ sub_frag_mem_limit(fq->q.net, sum_truesize);
head->next = NULL;
head->dev = dev;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 1a1122a6bbf5..d15586490cec 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -318,8 +318,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = {
/* allocate dst with ip6_dst_ops */
static struct rt6_info *__ip6_dst_alloc(struct net *net,
struct net_device *dev,
- int flags,
- struct fib6_table *table)
+ int flags)
{
struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
0, DST_OBSOLETE_FORCE_CHK, flags);
@@ -336,10 +335,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
static struct rt6_info *ip6_dst_alloc(struct net *net,
struct net_device *dev,
- int flags,
- struct fib6_table *table)
+ int flags)
{
- struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags, table);
+ struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
if (rt) {
rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
@@ -369,10 +367,7 @@ static void ip6_dst_destroy(struct dst_entry *dst)
struct inet6_dev *idev;
dst_destroy_metrics_generic(dst);
-
- if (rt->rt6i_pcpu)
- free_percpu(rt->rt6i_pcpu);
-
+ free_percpu(rt->rt6i_pcpu);
rt6_uncached_list_del(rt);
idev = rt->rt6i_idev;
@@ -953,8 +948,7 @@ static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
ort = (struct rt6_info *)ort->dst.from;
- rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev,
- 0, ort->rt6i_table);
+ rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);
if (!rt)
return NULL;
@@ -986,8 +980,7 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
struct rt6_info *pcpu_rt;
pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
- rt->dst.dev, rt->dst.flags,
- rt->rt6i_table);
+ rt->dst.dev, rt->dst.flags);
if (!pcpu_rt)
return NULL;
@@ -1000,32 +993,53 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
/* It should be called with read_lock_bh(&tb6_lock) acquired */
static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
{
- struct rt6_info *pcpu_rt, *prev, **p;
+ struct rt6_info *pcpu_rt, **p;
p = this_cpu_ptr(rt->rt6i_pcpu);
pcpu_rt = *p;
- if (pcpu_rt)
- goto done;
+ if (pcpu_rt) {
+ dst_hold(&pcpu_rt->dst);
+ rt6_dst_from_metrics_check(pcpu_rt);
+ }
+ return pcpu_rt;
+}
+
+static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
+{
+ struct fib6_table *table = rt->rt6i_table;
+ struct rt6_info *pcpu_rt, *prev, **p;
pcpu_rt = ip6_rt_pcpu_alloc(rt);
if (!pcpu_rt) {
struct net *net = dev_net(rt->dst.dev);
- pcpu_rt = net->ipv6.ip6_null_entry;
- goto done;
+ dst_hold(&net->ipv6.ip6_null_entry->dst);
+ return net->ipv6.ip6_null_entry;
}
- prev = cmpxchg(p, NULL, pcpu_rt);
- if (prev) {
- /* If someone did it before us, return prev instead */
+ read_lock_bh(&table->tb6_lock);
+ if (rt->rt6i_pcpu) {
+ p = this_cpu_ptr(rt->rt6i_pcpu);
+ prev = cmpxchg(p, NULL, pcpu_rt);
+ if (prev) {
+ /* If someone did it before us, return prev instead */
+ dst_destroy(&pcpu_rt->dst);
+ pcpu_rt = prev;
+ }
+ } else {
+ /* rt has been removed from the fib6 tree
+ * before we have a chance to acquire the read_lock.
+ * In this case, don't bother to create a pcpu rt
+ * since rt is going away anyway. The next
+ * dst_check() will trigger a re-lookup.
+ */
dst_destroy(&pcpu_rt->dst);
- pcpu_rt = prev;
+ pcpu_rt = rt;
}
-
-done:
dst_hold(&pcpu_rt->dst);
rt6_dst_from_metrics_check(pcpu_rt);
+ read_unlock_bh(&table->tb6_lock);
return pcpu_rt;
}
@@ -1100,9 +1114,22 @@ redo_rt6_select:
rt->dst.lastuse = jiffies;
rt->dst.__use++;
pcpu_rt = rt6_get_pcpu_route(rt);
- read_unlock_bh(&table->tb6_lock);
+
+ if (pcpu_rt) {
+ read_unlock_bh(&table->tb6_lock);
+ } else {
+ /* We have to do the read_unlock first
+ * because rt6_make_pcpu_route() may trigger
+ * ip6_dst_gc() which will take the write_lock.
+ */
+ dst_hold(&rt->dst);
+ read_unlock_bh(&table->tb6_lock);
+ pcpu_rt = rt6_make_pcpu_route(rt);
+ dst_release(&rt->dst);
+ }
return pcpu_rt;
+
}
}
@@ -1558,7 +1585,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
if (unlikely(!idev))
return ERR_PTR(-ENODEV);
- rt = ip6_dst_alloc(net, dev, 0, NULL);
+ rt = ip6_dst_alloc(net, dev, 0);
if (unlikely(!rt)) {
in6_dev_put(idev);
dst = ERR_PTR(-ENOMEM);
@@ -1745,7 +1772,8 @@ int ip6_route_add(struct fib6_config *cfg)
if (!table)
goto out;
- rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
+ rt = ip6_dst_alloc(net, NULL,
+ (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
if (!rt) {
err = -ENOMEM;
@@ -1834,6 +1862,7 @@ int ip6_route_add(struct fib6_config *cfg)
int gwa_type;
gw_addr = &cfg->fc_gateway;
+ gwa_type = ipv6_addr_type(gw_addr);
/* if gw_addr is local we will fail to detect this in case
* address is still TENTATIVE (DAD in progress). rt6_lookup()
@@ -1841,11 +1870,12 @@ int ip6_route_add(struct fib6_config *cfg)
* prefix route was assigned to, which might be non-loopback.
*/
err = -EINVAL;
- if (ipv6_chk_addr_and_flags(net, gw_addr, NULL, 0, 0))
+ if (ipv6_chk_addr_and_flags(net, gw_addr,
+ gwa_type & IPV6_ADDR_LINKLOCAL ?
+ dev : NULL, 0, 0))
goto out;
rt->rt6i_gateway = *gw_addr;
- gwa_type = ipv6_addr_type(gw_addr);
if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
struct rt6_info *grt;
@@ -2400,7 +2430,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
{
struct net *net = dev_net(idev->dev);
struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
- DST_NOCOUNT, NULL);
+ DST_NOCOUNT);
if (!rt)
return ERR_PTR(-ENOMEM);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6748c4277aff..7a6cea5e4274 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -943,7 +943,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
&ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
if (req) {
nsk = tcp_check_req(sk, skb, req, false);
- if (!nsk)
+ if (!nsk || nsk == sk)
reqsk_put(req);
return nsk;
}
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 8fd9febaa5ba..8dab4e569571 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -613,7 +613,7 @@ static int llc_wait_data(struct sock *sk, long timeo)
if (signal_pending(current))
break;
rc = 0;
- if (sk_wait_data(sk, &timeo))
+ if (sk_wait_data(sk, &timeo, NULL))
break;
}
return rc;
@@ -802,7 +802,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
release_sock(sk);
lock_sock(sk);
} else
- sk_wait_data(sk, &timeo);
+ sk_wait_data(sk, &timeo, NULL);
if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) {
net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n",
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 29236e832e44..c09c0131bfa2 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -723,6 +723,7 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
debugfs_remove_recursive(sdata->vif.debugfs_dir);
sdata->vif.debugfs_dir = NULL;
+ sdata->debugfs.subdir_stations = NULL;
}
void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index ed1edac14372..553ac6dd4867 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1863,10 +1863,6 @@ void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata)
ieee80211_teardown_sdata(sdata);
}
-/*
- * Remove all interfaces, may only be called at hardware unregistration
- * time because it doesn't do RCU-safe list removals.
- */
void ieee80211_remove_interfaces(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata, *tmp;
@@ -1875,14 +1871,21 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
ASSERT_RTNL();
- /*
- * Close all AP_VLAN interfaces first, as otherwise they
- * might be closed while the AP interface they belong to
- * is closed, causing unregister_netdevice_many() to crash.
+ /* Before destroying the interfaces, make sure they're all stopped so
+ * that the hardware is stopped. Otherwise, the driver might still be
+ * iterating the interfaces during the shutdown, e.g. from a worker
+ * or from RX processing or similar, and if it does so (using atomic
+ * iteration) while we're manipulating the list, the iteration will
+ * crash.
+ *
+ * After this, the hardware should be stopped and the driver should
+ * have stopped all of its activities, so that we can do RCU-unaware
+ * manipulations of the interface list below.
*/
- list_for_each_entry(sdata, &local->interfaces, list)
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- dev_close(sdata->dev);
+ cfg80211_shutdown_all_interfaces(local->hw.wiphy);
+
+ WARN(local->open_count, "%s: open count remains %d\n",
+ wiphy_name(local->hw.wiphy), local->open_count);
mutex_lock(&local->iflist_mtx);
list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 5438d13e2f00..3b59099413fb 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -306,7 +306,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
if (action == WLAN_SP_MESH_PEERING_CONFIRM) {
/* AID */
pos = skb_put(skb, 2);
- put_unaligned_le16(plid, pos + 2);
+ put_unaligned_le16(plid, pos);
}
if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
@@ -1122,6 +1122,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
WLAN_SP_MESH_PEERING_CONFIRM) {
baseaddr += 4;
baselen += 4;
+
+ if (baselen > len)
+ return;
}
ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems);
mesh_process_plink_frame(sdata, mgmt, &elems);
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 06b60980c62c..b676b9fa707b 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -76,6 +76,22 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
if (sdata->vif.type != NL80211_IFTYPE_STATION)
continue;
ieee80211_mgd_quiesce(sdata);
+ /* If suspended during TX in progress, and wowlan
+ * is enabled (connection will be active) there
+ * can be a race where the driver is put out
+ * of power-save due to TX and during suspend
+ * dynamic_ps_timer is cancelled and TX packet
+ * is flushed, leaving the driver in ACTIVE even
+ * after resuming until dynamic_ps_timer puts
+ * driver back in DOZE.
+ */
+ if (sdata->u.mgd.associated &&
+ sdata->u.mgd.powersave &&
+ !(local->hw.conf.flags & IEEE80211_CONF_PS)) {
+ local->hw.conf.flags |= IEEE80211_CONF_PS;
+ ieee80211_hw_config(local,
+ IEEE80211_CONF_CHANGE_PS);
+ }
}
err = drv_suspend(local, wowlan);
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 247552a7f6c2..3ece7d1034c8 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -92,14 +92,15 @@ int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma)
static inline void
minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
{
- int j = MAX_THR_RATES;
- struct minstrel_rate_stats *tmp_mrs = &mi->r[j - 1].stats;
+ int j;
+ struct minstrel_rate_stats *tmp_mrs;
struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats;
- while (j > 0 && (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) >
- minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))) {
- j--;
+ for (j = MAX_THR_RATES; j > 0; --j) {
tmp_mrs = &mi->r[tp_list[j - 1]].stats;
+ if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <=
+ minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))
+ break;
}
if (j < MAX_THR_RATES - 1)
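/* Editorial sketch (not part of the patch above): the rewritten minstrel loop
 * is a plain "find the insertion point from the tail" scan that re-reads the
 * slot under the cursor on every pass, instead of comparing against a value
 * captured once before the loop.  Self-contained toy version with integers
 * standing in for the per-rate throughput lookups.
 */
#include <stdio.h>

#define NSLOTS 4

/* Return the slot the new value should occupy in a descending-sorted array. */
static int find_slot(const int *sorted, int new_val)
{
	int j;

	for (j = NSLOTS; j > 0; --j) {
		if (new_val <= sorted[j - 1])	/* re-read the current slot each pass */
			break;
	}
	return j;	/* j == 0 means the new value beats every existing slot */
}

int main(void)
{
	int tp[NSLOTS] = { 90, 70, 50, 30 };

	printf("%d\n", find_slot(tp, 60));	/* 2: belongs between 70 and 50 */
	printf("%d\n", find_slot(tp, 95));	/* 0: new best rate */
	return 0;
}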
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index ad31b2dab4f5..8db6e2994bbc 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -60,6 +60,7 @@ ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel *ch;
struct cfg80211_chan_def chandef;
int i, subband_start;
+ struct wiphy *wiphy = sdata->local->hw.wiphy;
for (i = start; i <= end; i += spacing) {
if (!ch_cnt)
@@ -70,9 +71,8 @@ ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata,
/* we will be active on the channel */
cfg80211_chandef_create(&chandef, ch,
NL80211_CHAN_NO_HT);
- if (cfg80211_reg_can_beacon(sdata->local->hw.wiphy,
- &chandef,
- sdata->wdev.iftype)) {
+ if (cfg80211_reg_can_beacon_relax(wiphy, &chandef,
+ sdata->wdev.iftype)) {
ch_cnt++;
/*
* check if the next channel is also part of
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 8410bb3bf5e8..b8233505bf9f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1117,7 +1117,9 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
queued = true;
info->control.vif = &tx->sdata->vif;
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
- info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
+ info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS |
+ IEEE80211_TX_CTL_NO_PS_BUFFER |
+ IEEE80211_TX_STATUS_EOSP;
__skb_queue_tail(&tid_tx->pending, skb);
if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
purge_skb = __skb_dequeue(&tid_tx->pending);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 5d2b806a862e..38fbc194b9cb 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -319,7 +319,13 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
* return *ignored=0 i.e. ICMP and NF_DROP
*/
sched = rcu_dereference(svc->scheduler);
- dest = sched->schedule(svc, skb, iph);
+ if (sched) {
+ /* read svc->sched_data after svc->scheduler */
+ smp_rmb();
+ dest = sched->schedule(svc, skb, iph);
+ } else {
+ dest = NULL;
+ }
if (!dest) {
IP_VS_DBG(1, "p-schedule: no dest found.\n");
kfree(param.pe_data);
@@ -467,7 +473,13 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
}
sched = rcu_dereference(svc->scheduler);
- dest = sched->schedule(svc, skb, iph);
+ if (sched) {
+ /* read svc->sched_data after svc->scheduler */
+ smp_rmb();
+ dest = sched->schedule(svc, skb, iph);
+ } else {
+ dest = NULL;
+ }
if (dest == NULL) {
IP_VS_DBG(1, "Schedule: no dest found.\n");
return NULL;
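/* Editorial sketch (not part of the patch above): the smp_rmb() inserted after
 * reading svc->scheduler pairs with the write-side barrier issued after the
 * scheduler's private data is initialized and before the scheduler pointer is
 * published.  Rough userspace analogue using C11 fences; "cfg" and "ready" are
 * illustrative names, and release/acquire fences stand in for smp_wmb()/smp_rmb().
 */
#include <stdatomic.h>
#include <stdio.h>

static int cfg;			/* plays the role of svc->sched_data */
static _Atomic int ready;	/* plays the role of svc->scheduler  */

static void publisher(void)
{
	cfg = 42;					/* initialize private data first */
	atomic_thread_fence(memory_order_release);	/* like smp_wmb()                */
	atomic_store_explicit(&ready, 1, memory_order_relaxed);
}

static void consumer(void)
{
	if (atomic_load_explicit(&ready, memory_order_relaxed)) {
		atomic_thread_fence(memory_order_acquire);	/* like smp_rmb() */
		printf("cfg=%d\n", cfg);	/* guaranteed to observe 42 */
	}
}

int main(void)
{
	publisher();
	consumer();
	return 0;
}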
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 285eae3a1454..24c554201a76 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -842,15 +842,16 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
__ip_vs_dst_cache_reset(dest);
spin_unlock_bh(&dest->dst_lock);
- sched = rcu_dereference_protected(svc->scheduler, 1);
if (add) {
ip_vs_start_estimator(svc->net, &dest->stats);
list_add_rcu(&dest->n_list, &svc->destinations);
svc->num_dests++;
- if (sched->add_dest)
+ sched = rcu_dereference_protected(svc->scheduler, 1);
+ if (sched && sched->add_dest)
sched->add_dest(svc, dest);
} else {
- if (sched->upd_dest)
+ sched = rcu_dereference_protected(svc->scheduler, 1);
+ if (sched && sched->upd_dest)
sched->upd_dest(svc, dest);
}
}
@@ -1084,7 +1085,7 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
struct ip_vs_scheduler *sched;
sched = rcu_dereference_protected(svc->scheduler, 1);
- if (sched->del_dest)
+ if (sched && sched->del_dest)
sched->del_dest(svc, dest);
}
}
@@ -1175,11 +1176,14 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
ip_vs_use_count_inc();
/* Lookup the scheduler by 'u->sched_name' */
- sched = ip_vs_scheduler_get(u->sched_name);
- if (sched == NULL) {
- pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name);
- ret = -ENOENT;
- goto out_err;
+ if (strcmp(u->sched_name, "none")) {
+ sched = ip_vs_scheduler_get(u->sched_name);
+ if (!sched) {
+ pr_info("Scheduler module ip_vs_%s not found\n",
+ u->sched_name);
+ ret = -ENOENT;
+ goto out_err;
+ }
}
if (u->pe_name && *u->pe_name) {
@@ -1240,10 +1244,12 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
spin_lock_init(&svc->stats.lock);
/* Bind the scheduler */
- ret = ip_vs_bind_scheduler(svc, sched);
- if (ret)
- goto out_err;
- sched = NULL;
+ if (sched) {
+ ret = ip_vs_bind_scheduler(svc, sched);
+ if (ret)
+ goto out_err;
+ sched = NULL;
+ }
/* Bind the ct retriever */
RCU_INIT_POINTER(svc->pe, pe);
@@ -1291,17 +1297,20 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
static int
ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
{
- struct ip_vs_scheduler *sched, *old_sched;
+ struct ip_vs_scheduler *sched = NULL, *old_sched;
struct ip_vs_pe *pe = NULL, *old_pe = NULL;
int ret = 0;
/*
* Lookup the scheduler, by 'u->sched_name'
*/
- sched = ip_vs_scheduler_get(u->sched_name);
- if (sched == NULL) {
- pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name);
- return -ENOENT;
+ if (strcmp(u->sched_name, "none")) {
+ sched = ip_vs_scheduler_get(u->sched_name);
+ if (!sched) {
+ pr_info("Scheduler module ip_vs_%s not found\n",
+ u->sched_name);
+ return -ENOENT;
+ }
}
old_sched = sched;
@@ -1329,14 +1338,20 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
old_sched = rcu_dereference_protected(svc->scheduler, 1);
if (sched != old_sched) {
+ if (old_sched) {
+ ip_vs_unbind_scheduler(svc, old_sched);
+ RCU_INIT_POINTER(svc->scheduler, NULL);
+ /* Wait all svc->sched_data users */
+ synchronize_rcu();
+ }
/* Bind the new scheduler */
- ret = ip_vs_bind_scheduler(svc, sched);
- if (ret) {
- old_sched = sched;
- goto out;
+ if (sched) {
+ ret = ip_vs_bind_scheduler(svc, sched);
+ if (ret) {
+ ip_vs_scheduler_put(sched);
+ goto out;
+ }
}
- /* Unbind the old scheduler on success */
- ip_vs_unbind_scheduler(svc, old_sched);
}
/*
@@ -1982,6 +1997,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
const struct ip_vs_iter *iter = seq->private;
const struct ip_vs_dest *dest;
struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
+ char *sched_name = sched ? sched->name : "none";
if (iter->table == ip_vs_svc_table) {
#ifdef CONFIG_IP_VS_IPV6
@@ -1990,18 +2006,18 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
ip_vs_proto_name(svc->protocol),
&svc->addr.in6,
ntohs(svc->port),
- sched->name);
+ sched_name);
else
#endif
seq_printf(seq, "%s %08X:%04X %s %s ",
ip_vs_proto_name(svc->protocol),
ntohl(svc->addr.ip),
ntohs(svc->port),
- sched->name,
+ sched_name,
(svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
} else {
seq_printf(seq, "FWM %08X %s %s",
- svc->fwmark, sched->name,
+ svc->fwmark, sched_name,
(svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
}
@@ -2427,13 +2443,15 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
{
struct ip_vs_scheduler *sched;
struct ip_vs_kstats kstats;
+ char *sched_name;
sched = rcu_dereference_protected(src->scheduler, 1);
+ sched_name = sched ? sched->name : "none";
dst->protocol = src->protocol;
dst->addr = src->addr.ip;
dst->port = src->port;
dst->fwmark = src->fwmark;
- strlcpy(dst->sched_name, sched->name, sizeof(dst->sched_name));
+ strlcpy(dst->sched_name, sched_name, sizeof(dst->sched_name));
dst->flags = src->flags;
dst->timeout = src->timeout / HZ;
dst->netmask = src->netmask;
@@ -2892,6 +2910,7 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
struct ip_vs_flags flags = { .flags = svc->flags,
.mask = ~0 };
struct ip_vs_kstats kstats;
+ char *sched_name;
nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);
if (!nl_service)
@@ -2910,8 +2929,9 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
}
sched = rcu_dereference_protected(svc->scheduler, 1);
+ sched_name = sched ? sched->name : "none";
pe = rcu_dereference_protected(svc->pe, 1);
- if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched->name) ||
+ if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched_name) ||
(pe && nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, pe->name)) ||
nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index 199760c71f39..7e8141647943 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -74,7 +74,7 @@ void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
if (sched->done_service)
sched->done_service(svc);
- /* svc->scheduler can not be set to NULL */
+ /* svc->scheduler can be set to NULL only by caller */
}
@@ -147,21 +147,21 @@ void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
{
- struct ip_vs_scheduler *sched;
+ struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
+ char *sched_name = sched ? sched->name : "none";
- sched = rcu_dereference(svc->scheduler);
if (svc->fwmark) {
IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
- sched->name, svc->fwmark, svc->fwmark, msg);
+ sched_name, svc->fwmark, svc->fwmark, msg);
#ifdef CONFIG_IP_VS_IPV6
} else if (svc->af == AF_INET6) {
IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n",
- sched->name, ip_vs_proto_name(svc->protocol),
+ sched_name, ip_vs_proto_name(svc->protocol),
&svc->addr.in6, ntohs(svc->port), msg);
#endif
} else {
IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
- sched->name, ip_vs_proto_name(svc->protocol),
+ sched_name, ip_vs_proto_name(svc->protocol),
&svc->addr.ip, ntohs(svc->port), msg);
}
}
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index b08ba9538d12..d99ad93eb855 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -612,7 +612,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
pkts = atomic_add_return(1, &cp->in_pkts);
else
pkts = sysctl_sync_threshold(ipvs);
- ip_vs_sync_conn(net, cp->control, pkts);
+ ip_vs_sync_conn(net, cp, pkts);
}
}
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index bf66a8657a5f..258a0b0e82a2 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -130,7 +130,6 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
memset(&fl4, 0, sizeof(fl4));
fl4.daddr = daddr;
- fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
FLOWI_FLAG_KNOWN_NH : 0;
@@ -505,6 +504,13 @@ err_put:
return -1;
err_unreach:
+ /* The ip6_link_failure function requires the dev field to be set
+ * in order to get the net (further for the sake of fwmark
+ * reflection).
+ */
+ if (!skb->dev)
+ skb->dev = skb_dst(skb)->dev;
+
dst_link_failure(skb);
return -1;
}
@@ -523,10 +529,27 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
if (ret == NF_ACCEPT) {
nf_reset(skb);
skb_forward_csum(skb);
+ if (!skb->sk)
+ skb_sender_cpu_clear(skb);
}
return ret;
}
+/* In the event of a remote destination, it's possible that we would have
+ * matches against an old socket (particularly a TIME-WAIT socket). This
+ * causes havoc down the line (ip_local_out et al. expect regular sockets

+ * and invalid memory accesses will happen) so simply drop the association
+ * in this case.
+*/
+static inline void ip_vs_drop_early_demux_sk(struct sk_buff *skb)
+{
+ /* If dev is set, the packet came from the LOCAL_IN callback and
+ * not from a local TCP socket.
+ */
+ if (skb->dev)
+ skb_orphan(skb);
+}
+
/* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
struct ip_vs_conn *cp, int local)
@@ -538,12 +561,23 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
ip_vs_notrack(skb);
else
ip_vs_update_conntrack(skb, cp, 1);
+
+ /* Remove the early_demux association unless it's bound for the
+ * exact same port and address on this host after translation.
+ */
+ if (!local || cp->vport != cp->dport ||
+ !ip_vs_addr_equal(cp->af, &cp->vaddr, &cp->daddr))
+ ip_vs_drop_early_demux_sk(skb);
+
if (!local) {
skb_forward_csum(skb);
+ if (!skb->sk)
+ skb_sender_cpu_clear(skb);
NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
NULL, skb_dst(skb)->dev, dst_output_sk);
} else
ret = NF_ACCEPT;
+
return ret;
}
@@ -557,7 +591,10 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
ip_vs_notrack(skb);
if (!local) {
+ ip_vs_drop_early_demux_sk(skb);
skb_forward_csum(skb);
+ if (!skb->sk)
+ skb_sender_cpu_clear(skb);
NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
NULL, skb_dst(skb)->dev, dst_output_sk);
} else
@@ -845,6 +882,8 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
struct ipv6hdr *old_ipv6h = NULL;
#endif
+ ip_vs_drop_early_demux_sk(skb);
+
if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
new_skb = skb_realloc_headroom(skb, max_headroom);
if (!new_skb)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 13fad8668f83..3c20d02aee73 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -287,6 +287,46 @@ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
spin_unlock(&pcpu->lock);
}
+/* Released via destroy_conntrack() */
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
+{
+ struct nf_conn *tmpl;
+
+ tmpl = kzalloc(sizeof(*tmpl), flags);
+ if (tmpl == NULL)
+ return NULL;
+
+ tmpl->status = IPS_TEMPLATE;
+ write_pnet(&tmpl->ct_net, net);
+
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ if (zone) {
+ struct nf_conntrack_zone *nf_ct_zone;
+
+ nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, flags);
+ if (!nf_ct_zone)
+ goto out_free;
+ nf_ct_zone->id = zone;
+ }
+#endif
+ atomic_set(&tmpl->ct_general.use, 0);
+
+ return tmpl;
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+out_free:
+ kfree(tmpl);
+ return NULL;
+#endif
+}
+EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
+
+static void nf_ct_tmpl_free(struct nf_conn *tmpl)
+{
+ nf_ct_ext_destroy(tmpl);
+ nf_ct_ext_free(tmpl);
+ kfree(tmpl);
+}
+
static void
destroy_conntrack(struct nf_conntrack *nfct)
{
@@ -298,6 +338,10 @@ destroy_conntrack(struct nf_conntrack *nfct)
NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
NF_CT_ASSERT(!timer_pending(&ct->timeout));
+ if (unlikely(nf_ct_is_template(ct))) {
+ nf_ct_tmpl_free(ct);
+ return;
+ }
rcu_read_lock();
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
if (l4proto && l4proto->destroy)
@@ -540,28 +584,6 @@ out:
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
-/* deletion from this larval template list happens via nf_ct_put() */
-void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl)
-{
- struct ct_pcpu *pcpu;
-
- __set_bit(IPS_TEMPLATE_BIT, &tmpl->status);
- __set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
- nf_conntrack_get(&tmpl->ct_general);
-
- /* add this conntrack to the (per cpu) tmpl list */
- local_bh_disable();
- tmpl->cpu = smp_processor_id();
- pcpu = per_cpu_ptr(nf_ct_net(tmpl)->ct.pcpu_lists, tmpl->cpu);
-
- spin_lock(&pcpu->lock);
- /* Overload tuple linked list to put us in template list. */
- hlist_nulls_add_head_rcu(&tmpl->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
- &pcpu->tmpl);
- spin_unlock_bh(&pcpu->lock);
-}
-EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert);
-
/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
@@ -1522,10 +1544,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
sz = nr_slots * sizeof(struct hlist_nulls_head);
hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
get_order(sz));
- if (!hash) {
- printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
+ if (!hash)
hash = vzalloc(sz);
- }
if (hash && nulls)
for (i = 0; i < nr_slots; i++)
@@ -1751,7 +1771,6 @@ int nf_conntrack_init_net(struct net *net)
spin_lock_init(&pcpu->lock);
INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
- INIT_HLIST_NULLS_HEAD(&pcpu->tmpl, TEMPLATE_NULLS_VAL);
}
net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 7a17070c5dab..b45a4223cb05 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -219,7 +219,8 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
}
- return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
+ return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
+ nf_ct_zone(a->master) == nf_ct_zone(b->master);
}
static inline int expect_matches(const struct nf_conntrack_expect *a,
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index d1c23940a86a..6b8b0abbfab4 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2995,11 +2995,6 @@ ctnetlink_create_expect(struct net *net, u16 zone,
}
err = nf_ct_expect_related_report(exp, portid, report);
- if (err < 0)
- goto err_exp;
-
- return 0;
-err_exp:
nf_ct_expect_put(exp);
err_ct:
nf_ct_put(ct);
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index cd60d397fe05..8a8b2abc35ff 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -213,7 +213,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
if (verdict == NF_ACCEPT) {
next_hook:
- verdict = nf_iterate(&nf_hooks[entry->state.pf][entry->state.hook],
+ verdict = nf_iterate(entry->state.hook_list,
skb, &entry->state, &elem);
}
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 789feeae6c44..d7f168527903 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -349,23 +349,20 @@ static void __net_exit synproxy_proc_exit(struct net *net)
static int __net_init synproxy_net_init(struct net *net)
{
struct synproxy_net *snet = synproxy_pernet(net);
- struct nf_conntrack_tuple t;
struct nf_conn *ct;
int err = -ENOMEM;
- memset(&t, 0, sizeof(t));
- ct = nf_conntrack_alloc(net, 0, &t, &t, GFP_KERNEL);
- if (IS_ERR(ct)) {
- err = PTR_ERR(ct);
+ ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
+ if (!ct)
goto err1;
- }
if (!nfct_seqadj_ext_add(ct))
goto err2;
if (!nfct_synproxy_ext_add(ct))
goto err2;
- nf_conntrack_tmpl_insert(net, ct);
+ __set_bit(IPS_CONFIRMED_BIT, &ct->status);
+ nf_conntrack_get(&ct->ct_general);
snet->tmpl = ct;
snet->stats = alloc_percpu(struct synproxy_stats);
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 8b117c90ecd7..0c0e8ecf02ab 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -269,6 +269,12 @@ static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
}
}
+enum {
+ NFNL_BATCH_FAILURE = (1 << 0),
+ NFNL_BATCH_DONE = (1 << 1),
+ NFNL_BATCH_REPLAY = (1 << 2),
+};
+
static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
u_int16_t subsys_id)
{
@@ -276,13 +282,15 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net *net = sock_net(skb->sk);
const struct nfnetlink_subsystem *ss;
const struct nfnl_callback *nc;
- bool success = true, done = false;
static LIST_HEAD(err_list);
+ u32 status;
int err;
if (subsys_id >= NFNL_SUBSYS_COUNT)
return netlink_ack(skb, nlh, -EINVAL);
replay:
+ status = 0;
+
skb = netlink_skb_clone(oskb, GFP_KERNEL);
if (!skb)
return netlink_ack(oskb, nlh, -ENOMEM);
@@ -336,10 +344,10 @@ replay:
if (type == NFNL_MSG_BATCH_BEGIN) {
/* Malformed: Batch begin twice */
nfnl_err_reset(&err_list);
- success = false;
+ status |= NFNL_BATCH_FAILURE;
goto done;
} else if (type == NFNL_MSG_BATCH_END) {
- done = true;
+ status |= NFNL_BATCH_DONE;
goto done;
} else if (type < NLMSG_MIN_TYPE) {
err = -EINVAL;
@@ -382,11 +390,8 @@ replay:
* original skb.
*/
if (err == -EAGAIN) {
- nfnl_err_reset(&err_list);
- ss->abort(oskb);
- nfnl_unlock(subsys_id);
- kfree_skb(skb);
- goto replay;
+ status |= NFNL_BATCH_REPLAY;
+ goto next;
}
}
ack:
@@ -402,7 +407,7 @@ ack:
*/
nfnl_err_reset(&err_list);
netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM);
- success = false;
+ status |= NFNL_BATCH_FAILURE;
goto done;
}
/* We don't stop processing the batch on errors, thus,
@@ -410,19 +415,26 @@ ack:
* triggers.
*/
if (err)
- success = false;
+ status |= NFNL_BATCH_FAILURE;
}
-
+next:
msglen = NLMSG_ALIGN(nlh->nlmsg_len);
if (msglen > skb->len)
msglen = skb->len;
skb_pull(skb, msglen);
}
done:
- if (success && done)
+ if (status & NFNL_BATCH_REPLAY) {
+ ss->abort(oskb);
+ nfnl_err_reset(&err_list);
+ nfnl_unlock(subsys_id);
+ kfree_skb(skb);
+ goto replay;
+ } else if (status == NFNL_BATCH_DONE) {
ss->commit(oskb);
- else
+ } else {
ss->abort(oskb);
+ }
nfnl_err_deliver(&err_list, oskb);
nfnl_unlock(subsys_id);
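/* Editorial sketch (not part of the patch above): folding several boolean
 * outcomes into one bit-flag status word lets a single check after the parse
 * loop decide between replay, commit and abort, exactly one of which may win.
 * Self-contained toy version of the NFNL_BATCH_* pattern introduced above.
 */
#include <stdio.h>

enum {
	BATCH_FAILURE = (1 << 0),
	BATCH_DONE    = (1 << 1),
	BATCH_REPLAY  = (1 << 2),
};

static const char *decide(unsigned int status)
{
	if (status & BATCH_REPLAY)
		return "replay";	/* retry the whole batch */
	if (status == BATCH_DONE)
		return "commit";	/* batch ended and nothing failed */
	return "abort";			/* a failure occurred, or no end marker seen */
}

int main(void)
{
	printf("%s\n", decide(BATCH_DONE));			/* commit */
	printf("%s\n", decide(BATCH_DONE | BATCH_FAILURE));	/* abort  */
	printf("%s\n", decide(BATCH_REPLAY | BATCH_FAILURE));	/* replay */
	return 0;
}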
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 75747aecdebe..43ddeee404e9 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -184,7 +184,6 @@ out:
static int xt_ct_tg_check(const struct xt_tgchk_param *par,
struct xt_ct_target_info_v1 *info)
{
- struct nf_conntrack_tuple t;
struct nf_conn *ct;
int ret = -EOPNOTSUPP;
@@ -202,11 +201,11 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
if (ret < 0)
goto err1;
- memset(&t, 0, sizeof(t));
- ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL);
- ret = PTR_ERR(ct);
- if (IS_ERR(ct))
+ ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
+ if (!ct) {
+ ret = -ENOMEM;
goto err2;
+ }
ret = 0;
if ((info->ct_events || info->exp_events) &&
@@ -227,8 +226,8 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
if (ret < 0)
goto err3;
}
-
- nf_conntrack_tmpl_insert(par->net, ct);
+ __set_bit(IPS_CONFIRMED_BIT, &ct->status);
+ nf_conntrack_get(&ct->ct_general);
out:
info->ct = ct;
return 0;
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index f407ebc13481..29d2c31f406c 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -126,6 +126,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
goto out;
}
+ sysfs_attr_init(&info->timer->attr.attr);
info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
if (!info->timer->attr.attr.name) {
ret = -ENOMEM;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index dea925388a5b..67d210477863 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -158,7 +158,7 @@ static int __netlink_remove_tap(struct netlink_tap *nt)
out:
spin_unlock(&netlink_tap_lock);
- if (found && nt->module)
+ if (found)
module_put(nt->module);
return found ? 0 : -ENODEV;
@@ -357,25 +357,52 @@ err1:
return NULL;
}
+
+static void
+__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
+ unsigned int order)
+{
+ struct netlink_sock *nlk = nlk_sk(sk);
+ struct sk_buff_head *queue;
+ struct netlink_ring *ring;
+
+ queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
+ ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
+
+ spin_lock_bh(&queue->lock);
+
+ ring->frame_max = req->nm_frame_nr - 1;
+ ring->head = 0;
+ ring->frame_size = req->nm_frame_size;
+ ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
+
+ swap(ring->pg_vec_len, req->nm_block_nr);
+ swap(ring->pg_vec_order, order);
+ swap(ring->pg_vec, pg_vec);
+
+ __skb_queue_purge(queue);
+ spin_unlock_bh(&queue->lock);
+
+ WARN_ON(atomic_read(&nlk->mapped));
+
+ if (pg_vec)
+ free_pg_vec(pg_vec, order, req->nm_block_nr);
+}
+
static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
- bool closing, bool tx_ring)
+ bool tx_ring)
{
struct netlink_sock *nlk = nlk_sk(sk);
struct netlink_ring *ring;
- struct sk_buff_head *queue;
void **pg_vec = NULL;
unsigned int order = 0;
- int err;
ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
- queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
- if (!closing) {
- if (atomic_read(&nlk->mapped))
- return -EBUSY;
- if (atomic_read(&ring->pending))
- return -EBUSY;
- }
+ if (atomic_read(&nlk->mapped))
+ return -EBUSY;
+ if (atomic_read(&ring->pending))
+ return -EBUSY;
if (req->nm_block_nr) {
if (ring->pg_vec != NULL)
@@ -407,31 +434,19 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
return -EINVAL;
}
- err = -EBUSY;
mutex_lock(&nlk->pg_vec_lock);
- if (closing || atomic_read(&nlk->mapped) == 0) {
- err = 0;
- spin_lock_bh(&queue->lock);
-
- ring->frame_max = req->nm_frame_nr - 1;
- ring->head = 0;
- ring->frame_size = req->nm_frame_size;
- ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
-
- swap(ring->pg_vec_len, req->nm_block_nr);
- swap(ring->pg_vec_order, order);
- swap(ring->pg_vec, pg_vec);
-
- __skb_queue_purge(queue);
- spin_unlock_bh(&queue->lock);
-
- WARN_ON(atomic_read(&nlk->mapped));
+ if (atomic_read(&nlk->mapped) == 0) {
+ __netlink_set_ring(sk, req, tx_ring, pg_vec, order);
+ mutex_unlock(&nlk->pg_vec_lock);
+ return 0;
}
+
mutex_unlock(&nlk->pg_vec_lock);
if (pg_vec)
free_pg_vec(pg_vec, order, req->nm_block_nr);
- return err;
+
+ return -EBUSY;
}
static void netlink_mm_open(struct vm_area_struct *vma)
@@ -900,10 +915,10 @@ static void netlink_sock_destruct(struct sock *sk)
memset(&req, 0, sizeof(req));
if (nlk->rx_ring.pg_vec)
- netlink_set_ring(sk, &req, true, false);
+ __netlink_set_ring(sk, &req, false, NULL, 0);
memset(&req, 0, sizeof(req));
if (nlk->tx_ring.pg_vec)
- netlink_set_ring(sk, &req, true, true);
+ __netlink_set_ring(sk, &req, true, NULL, 0);
}
#endif /* CONFIG_NETLINK_MMAP */
@@ -1081,6 +1096,11 @@ static int netlink_insert(struct sock *sk, u32 portid)
err = __netlink_insert(table, sk);
if (err) {
+ /* In case the hashtable backend returns with -EBUSY
+ * from here, it must not escape to the caller.
+ */
+ if (unlikely(err == -EBUSY))
+ err = -EOVERFLOW;
if (err == -EEXIST)
err = -EADDRINUSE;
nlk_sk(sk)->portid = 0;
@@ -2223,7 +2243,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
return -EINVAL;
if (copy_from_user(&req, optval, sizeof(req)))
return -EFAULT;
- err = netlink_set_ring(sk, &req, false,
+ err = netlink_set_ring(sk, &req,
optname == NETLINK_TX_RING);
break;
}
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 8a8c0b8b4f63..ee34f474ad14 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -273,28 +273,36 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
return 0;
}
-static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
- __be32 *addr, __be32 new_addr)
+static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
+ __be32 addr, __be32 new_addr)
{
int transport_len = skb->len - skb_transport_offset(skb);
+ if (nh->frag_off & htons(IP_OFFSET))
+ return;
+
if (nh->protocol == IPPROTO_TCP) {
if (likely(transport_len >= sizeof(struct tcphdr)))
inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
- *addr, new_addr, 1);
+ addr, new_addr, 1);
} else if (nh->protocol == IPPROTO_UDP) {
if (likely(transport_len >= sizeof(struct udphdr))) {
struct udphdr *uh = udp_hdr(skb);
if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
inet_proto_csum_replace4(&uh->check, skb,
- *addr, new_addr, 1);
+ addr, new_addr, 1);
if (!uh->check)
uh->check = CSUM_MANGLED_0;
}
}
}
+}
+static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
+ __be32 *addr, __be32 new_addr)
+{
+ update_ip_l4_checksum(skb, nh, *addr, new_addr);
csum_replace4(&nh->check, *addr, new_addr);
skb_clear_hash(skb);
*addr = new_addr;
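/* Editorial sketch (not part of the patch above): only the first fragment of
 * an IP datagram carries the TCP/UDP header, so the L4 checksum fix-up must
 * be skipped whenever the fragment-offset bits are non-zero.  Plain C check
 * over a host-order fragment field; IP_OFFSET_MASK mirrors the kernel's
 * IP_OFFSET (0x1fff) and is a local illustrative define.
 */
#include <stdbool.h>
#include <stdio.h>

#define IP_OFFSET_MASK 0x1fff	/* low 13 bits = fragment offset in 8-byte units */

static bool has_l4_header(unsigned int frag_off)
{
	return (frag_off & IP_OFFSET_MASK) == 0;	/* first fragment or unfragmented */
}

int main(void)
{
	printf("%d\n", has_l4_header(0x0000));	/* 1: update the L4 checksum */
	printf("%d\n", has_l4_header(0x2000));	/* 1: MF set, offset 0 -> first fragment */
	printf("%d\n", has_l4_header(0x00b9));	/* 0: later fragment, leave checksum alone */
	return 0;
}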
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 4613df8c8290..65523948fb95 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -752,7 +752,7 @@ int ovs_flow_init(void)
BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
- + (num_possible_nodes()
+ + (nr_node_ids
* sizeof(struct flow_stats *)),
0, 0, NULL);
if (flow_cache == NULL)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c9e8741226c6..ed458b315ef4 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2403,7 +2403,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
}
tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
addr, hlen);
- if (tp_len > dev->mtu + dev->hard_header_len) {
+ if (likely(tp_len >= 0) &&
+ tp_len > dev->mtu + dev->hard_header_len) {
struct ethhdr *ehdr;
/* Earlier code assumed this would be a VLAN pkt,
* double-check this now that we have the actual
@@ -2784,7 +2785,7 @@ static int packet_release(struct socket *sock)
static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
{
struct packet_sock *po = pkt_sk(sk);
- const struct net_device *dev_curr;
+ struct net_device *dev_curr;
__be16 proto_curr;
bool need_rehook;
@@ -2808,15 +2809,13 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
po->num = proto;
po->prot_hook.type = proto;
-
- if (po->prot_hook.dev)
- dev_put(po->prot_hook.dev);
-
po->prot_hook.dev = dev;
po->ifindex = dev ? dev->ifindex : 0;
packet_cached_dev_assign(po, dev);
}
+ if (dev_curr)
+ dev_put(dev_curr);
if (proto == 0 || !need_rehook)
goto out_unlock;
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 273b8bff6ba4..657ba9f5d308 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -759,8 +759,10 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
}
ibmr = rds_ib_alloc_fmr(rds_ibdev);
- if (IS_ERR(ibmr))
+ if (IS_ERR(ibmr)) {
+ rds_ib_dev_put(rds_ibdev);
return ibmr;
+ }
ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
if (ret == 0)
diff --git a/net/rds/info.c b/net/rds/info.c
index 9a6b4f66187c..140a44a5f7b7 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
/* check for all kinds of wrapping and the like */
start = (unsigned long)optval;
- if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) {
+ if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
ret = -EINVAL;
goto out;
}
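/* Editorial sketch (not part of the patch above): the old check
 * "len + PAGE_SIZE - 1 < len" only detects wrap-around by actually overflowing
 * a signed int, which is undefined behaviour; rewriting it as
 * "len > INT_MAX - PAGE_SIZE + 1" rejects the same lengths without ever
 * overflowing.  Self-contained demo, with a 4 KiB page size assumed purely for
 * illustration.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096L

/* true if rounding len up to a page boundary would exceed INT_MAX */
static bool round_up_would_overflow(int len)
{
	return len > INT_MAX - PAGE_SIZE + 1;
}

int main(void)
{
	printf("%d\n", round_up_would_overflow(8192));		/* 0: safe to round up */
	printf("%d\n", round_up_would_overflow(INT_MAX - 100));	/* 1: would wrap       */
	return 0;
}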
diff --git a/net/rds/transport.c b/net/rds/transport.c
index 8b4a6cd2c3a7..83498e1c75b8 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -73,7 +73,7 @@ EXPORT_SYMBOL_GPL(rds_trans_unregister);
void rds_trans_put(struct rds_transport *trans)
{
- if (trans && trans->t_owner)
+ if (trans)
module_put(trans->t_owner);
}
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index af427a3dbcba..43ec92680ae8 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -45,7 +45,7 @@ void tcf_hash_destroy(struct tc_action *a)
}
EXPORT_SYMBOL(tcf_hash_destroy);
-int tcf_hash_release(struct tc_action *a, int bind)
+int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
{
struct tcf_common *p = a->priv;
int ret = 0;
@@ -53,7 +53,7 @@ int tcf_hash_release(struct tc_action *a, int bind)
if (p) {
if (bind)
p->tcfc_bindcnt--;
- else if (p->tcfc_bindcnt > 0)
+ else if (strict && p->tcfc_bindcnt > 0)
return -EPERM;
p->tcfc_refcnt--;
@@ -64,9 +64,10 @@ int tcf_hash_release(struct tc_action *a, int bind)
ret = 1;
}
}
+
return ret;
}
-EXPORT_SYMBOL(tcf_hash_release);
+EXPORT_SYMBOL(__tcf_hash_release);
static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
struct tc_action *a)
@@ -136,7 +137,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
hlist_for_each_entry_safe(p, n, head, tcfc_head) {
a->priv = p;
- ret = tcf_hash_release(a, 0);
+ ret = __tcf_hash_release(a, false, true);
if (ret == ACT_P_DELETED) {
module_put(a->ops->owner);
n_i++;
@@ -408,7 +409,7 @@ int tcf_action_destroy(struct list_head *actions, int bind)
int ret = 0;
list_for_each_entry_safe(a, tmp, actions, list) {
- ret = tcf_hash_release(a, bind);
+ ret = __tcf_hash_release(a, bind, true);
if (ret == ACT_P_DELETED)
module_put(a->ops->owner);
else if (ret < 0)
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 1d56903fd4c7..d0edeb7a1950 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -27,9 +27,10 @@
struct tcf_bpf_cfg {
struct bpf_prog *filter;
struct sock_filter *bpf_ops;
- char *bpf_name;
+ const char *bpf_name;
u32 bpf_fd;
u16 bpf_num_ops;
+ bool is_ebpf;
};
static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
@@ -207,6 +208,7 @@ static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
cfg->bpf_ops = bpf_ops;
cfg->bpf_num_ops = bpf_num_ops;
cfg->filter = fp;
+ cfg->is_ebpf = false;
return 0;
}
@@ -241,18 +243,40 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
cfg->bpf_fd = bpf_fd;
cfg->bpf_name = name;
cfg->filter = fp;
+ cfg->is_ebpf = true;
return 0;
}
+static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
+{
+ if (cfg->is_ebpf)
+ bpf_prog_put(cfg->filter);
+ else
+ bpf_prog_destroy(cfg->filter);
+
+ kfree(cfg->bpf_ops);
+ kfree(cfg->bpf_name);
+}
+
+static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
+ struct tcf_bpf_cfg *cfg)
+{
+ cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
+ cfg->filter = prog->filter;
+
+ cfg->bpf_ops = prog->bpf_ops;
+ cfg->bpf_name = prog->bpf_name;
+}
+
static int tcf_bpf_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action *act,
int replace, int bind)
{
struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
+ struct tcf_bpf_cfg cfg, old;
struct tc_act_bpf *parm;
struct tcf_bpf *prog;
- struct tcf_bpf_cfg cfg;
bool is_bpf, is_ebpf;
int ret;
@@ -301,6 +325,9 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
prog = to_bpf(act);
spin_lock_bh(&prog->tcf_lock);
+ if (ret != ACT_P_CREATED)
+ tcf_bpf_prog_fill_cfg(prog, &old);
+
prog->bpf_ops = cfg.bpf_ops;
prog->bpf_name = cfg.bpf_name;
@@ -316,29 +343,22 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
if (ret == ACT_P_CREATED)
tcf_hash_insert(act);
+ else
+ tcf_bpf_cfg_cleanup(&old);
return ret;
destroy_fp:
- if (is_ebpf)
- bpf_prog_put(cfg.filter);
- else
- bpf_prog_destroy(cfg.filter);
-
- kfree(cfg.bpf_ops);
- kfree(cfg.bpf_name);
-
+ tcf_bpf_cfg_cleanup(&cfg);
return ret;
}
static void tcf_bpf_cleanup(struct tc_action *act, int bind)
{
- const struct tcf_bpf *prog = act->priv;
+ struct tcf_bpf_cfg tmp;
- if (tcf_bpf_is_ebpf(prog))
- bpf_prog_put(prog->filter);
- else
- bpf_prog_destroy(prog->filter);
+ tcf_bpf_prog_fill_cfg(act->priv, &tmp);
+ tcf_bpf_cfg_cleanup(&tmp);
}
static struct tc_action_ops act_bpf_ops __read_mostly = {
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index a42a3b257226..268545050ddb 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -98,6 +98,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
return ret;
ret = ACT_P_CREATED;
} else {
+ if (bind)
+ return 0;
if (!ovr) {
tcf_hash_release(a, bind);
return -EEXIST;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 17e6d6669c7f..ff8b466a73f6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -68,13 +68,12 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
}
ret = ACT_P_CREATED;
} else {
- p = to_pedit(a);
- tcf_hash_release(a, bind);
if (bind)
return 0;
+ tcf_hash_release(a, bind);
if (!ovr)
return -EEXIST;
-
+ p = to_pedit(a);
if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
keys = kmalloc(ksize, GFP_KERNEL);
if (keys == NULL)
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index c79ecfd36e0f..e5168f8b9640 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -378,7 +378,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
goto errout;
if (oldprog) {
- list_replace_rcu(&prog->link, &oldprog->link);
+ list_replace_rcu(&oldprog->link, &prog->link);
tcf_unbind_filter(tp, &oldprog->res);
call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
} else {
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 76bc3a20ffdb..bb2a0f529c1f 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -425,6 +425,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
if (!fnew)
goto err2;
+ tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
+
fold = (struct flow_filter *)*arg;
if (fold) {
err = -EINVAL;
@@ -486,7 +488,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
fnew->mask = ~0U;
fnew->tp = tp;
get_random_bytes(&fnew->hashrnd, 4);
- tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
}
fnew->perturb_timer.function = flow_perturbation;
@@ -526,7 +527,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
if (*arg == 0)
list_add_tail_rcu(&fnew->list, &head->filters);
else
- list_replace_rcu(&fnew->list, &fold->list);
+ list_replace_rcu(&fold->list, &fnew->list);
*arg = (unsigned long)fnew;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 9d37ccd95062..2f3d03f99487 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -499,7 +499,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
*arg = (unsigned long) fnew;
if (fold) {
- list_replace_rcu(&fnew->list, &fold->list);
+ list_replace_rcu(&fold->list, &fnew->list);
tcf_unbind_filter(tp, &fold->res);
call_rcu(&fold->rcu, fl_destroy_filter);
} else {
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 93d5742dc7e0..6a783afe4960 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -385,6 +385,19 @@ static void choke_reset(struct Qdisc *sch)
{
struct choke_sched_data *q = qdisc_priv(sch);
+ while (q->head != q->tail) {
+ struct sk_buff *skb = q->tab[q->head];
+
+ q->head = (q->head + 1) & q->tab_mask;
+ if (!skb)
+ continue;
+ qdisc_qstats_backlog_dec(sch, skb);
+ --sch->q.qlen;
+ qdisc_drop(skb, sch);
+ }
+
+ memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
+ q->head = q->tail = 0;
red_restart(&q->vars);
}
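/* Editorial sketch (not part of the patch above): choke's queue is a
 * power-of-two ring indexed with "& tab_mask", so draining it on reset is a
 * walk from head to tail that skips holes left by earlier random drops.  Toy
 * version with integers instead of skbs; tab/head/tail/qlen mirror the fields
 * used in the hunk above.
 */
#include <stdio.h>

#define TAB_SIZE 8			/* must be a power of two */
#define TAB_MASK (TAB_SIZE - 1)

static int tab[TAB_SIZE];
static unsigned int head, tail, qlen;

static void ring_drain(void)
{
	while (head != tail) {
		int v = tab[head];

		tab[head] = 0;
		head = (head + 1) & TAB_MASK;
		if (!v)
			continue;	/* hole: entry was already dropped */
		qlen--;
		printf("dropped %d\n", v);
	}
	head = tail = 0;
}

int main(void)
{
	tab[0] = 10; tab[1] = 0; tab[2] = 30;	/* slot 1 is a hole */
	head = 0; tail = 3; qlen = 2;
	ring_drain();
	printf("qlen=%u\n", qlen);		/* 0 */
	return 0;
}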
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index d75993f89fac..a9ba030435a2 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -155,14 +155,23 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
skb = dequeue_head(flow);
len = qdisc_pkt_len(skb);
q->backlogs[idx] -= len;
- kfree_skb(skb);
sch->q.qlen--;
qdisc_qstats_drop(sch);
qdisc_qstats_backlog_dec(sch, skb);
+ kfree_skb(skb);
flow->dropped++;
return idx;
}
+static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
+{
+ unsigned int prev_backlog;
+
+ prev_backlog = sch->qstats.backlog;
+ fq_codel_drop(sch);
+ return prev_backlog - sch->qstats.backlog;
+}
+
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
@@ -279,10 +288,26 @@ begin:
static void fq_codel_reset(struct Qdisc *sch)
{
- struct sk_buff *skb;
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ int i;
- while ((skb = fq_codel_dequeue(sch)) != NULL)
- kfree_skb(skb);
+ INIT_LIST_HEAD(&q->new_flows);
+ INIT_LIST_HEAD(&q->old_flows);
+ for (i = 0; i < q->flows_cnt; i++) {
+ struct fq_codel_flow *flow = q->flows + i;
+
+ while (flow->head) {
+ struct sk_buff *skb = dequeue_head(flow);
+
+ qdisc_qstats_backlog_dec(sch, skb);
+ kfree_skb(skb);
+ }
+
+ INIT_LIST_HEAD(&flow->flowchain);
+ codel_vars_init(&flow->cvars);
+ }
+ memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
+ sch->q.qlen = 0;
}
static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
@@ -604,7 +629,7 @@ static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
.enqueue = fq_codel_enqueue,
.dequeue = fq_codel_dequeue,
.peek = qdisc_peek_dequeued,
- .drop = fq_codel_drop,
+ .drop = fq_codel_qdisc_drop,
.init = fq_codel_init,
.reset = fq_codel_reset,
.destroy = fq_codel_destroy,
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index 89f8fcf73f18..ade9445a55ab 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -216,6 +216,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
.peek = qdisc_peek_head,
.init = plug_init,
.change = plug_change,
+ .reset = qdisc_reset_queue,
.owner = THIS_MODULE,
};
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 7d1492663360..52f75a5473e1 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -306,10 +306,10 @@ drop:
len = qdisc_pkt_len(skb);
slot->backlog -= len;
sfq_dec(q, x);
- kfree_skb(skb);
sch->q.qlen--;
qdisc_qstats_drop(sch);
qdisc_qstats_backlog_dec(sch, skb);
+ kfree_skb(skb);
return len;
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 1425ec2bbd5a..17bef01b9aa3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2200,12 +2200,6 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
return -EFAULT;
- if (sctp_sk(sk)->subscribe.sctp_data_io_event)
- pr_warn_ratelimited(DEPRECATED "%s (pid %d) "
- "Requested SCTP_SNDRCVINFO event.\n"
- "Use SCTP_RCVINFO through SCTP_RECVRCVINFO option instead.\n",
- current->comm, task_pid_nr(current));
-
/* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
* if there is no data to be sent or retransmit, the stack will
* immediately send up this notification.
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 9825ff0f91d6..6255d141133b 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -240,8 +240,8 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
if (!req)
goto not_found;
- /* Note: this 'free' request adds it to xprt->bc_pa_list */
- xprt_free_bc_request(req);
+ list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
+ xprt->bc_alloc_count++;
}
req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
rq_bc_pa_list);
@@ -336,7 +336,7 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
spin_lock(&xprt->bc_pa_lock);
list_del(&req->rq_bc_pa_list);
- xprt->bc_alloc_count--;
+ xprt_dec_alloc_count(xprt, 1);
spin_unlock(&xprt->bc_pa_lock);
req->rq_private_buf.len = copied;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index cbc6af923dd1..23608eb0ded2 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1902,6 +1902,7 @@ call_transmit_status(struct rpc_task *task)
switch (task->tk_status) {
case -EAGAIN:
+ case -ENOBUFS:
break;
default:
dprint_status(task);
@@ -1928,7 +1929,6 @@ call_transmit_status(struct rpc_task *task)
case -ECONNABORTED:
case -EADDRINUSE:
case -ENOTCONN:
- case -ENOBUFS:
case -EPIPE:
rpc_task_force_reencode(task);
}
@@ -2057,12 +2057,13 @@ call_status(struct rpc_task *task)
case -ECONNABORTED:
rpc_force_rebind(clnt);
case -EADDRINUSE:
- case -ENOBUFS:
rpc_delay(task, 3*HZ);
case -EPIPE:
case -ENOTCONN:
task->tk_action = call_bind;
break;
+ case -ENOBUFS:
+ rpc_delay(task, HZ>>2);
case -EAGAIN:
task->tk_action = call_transmit;
break;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index e193c2b5476b..0030376327b7 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -527,6 +527,10 @@ static int xs_local_send_request(struct rpc_task *task)
true, &sent);
dprintk("RPC: %s(%u) = %d\n",
__func__, xdr->len - req->rq_bytes_sent, status);
+
+ if (status == -EAGAIN && sock_writeable(transport->inet))
+ status = -ENOBUFS;
+
if (likely(sent > 0) || status == 0) {
req->rq_bytes_sent += sent;
req->rq_xmit_bytes_sent += sent;
@@ -539,6 +543,7 @@ static int xs_local_send_request(struct rpc_task *task)
switch (status) {
case -ENOBUFS:
+ break;
case -EAGAIN:
status = xs_nospace(task);
break;
@@ -589,6 +594,9 @@ static int xs_udp_send_request(struct rpc_task *task)
if (status == -EPERM)
goto process_status;
+ if (status == -EAGAIN && sock_writeable(transport->inet))
+ status = -ENOBUFS;
+
if (sent > 0 || status == 0) {
req->rq_xmit_bytes_sent += sent;
if (sent >= req->rq_slen)
@@ -669,9 +677,6 @@ static int xs_tcp_send_request(struct rpc_task *task)
dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
xdr->len - req->rq_bytes_sent, status);
- if (unlikely(sent == 0 && status < 0))
- break;
-
/* If we've sent the entire packet, immediately
* reset the count of bytes sent. */
req->rq_bytes_sent += sent;
@@ -681,18 +686,21 @@ static int xs_tcp_send_request(struct rpc_task *task)
return 0;
}
- if (sent != 0)
- continue;
- status = -EAGAIN;
- break;
+ if (status < 0)
+ break;
+ if (sent == 0) {
+ status = -EAGAIN;
+ break;
+ }
}
+ if (status == -EAGAIN && sk_stream_is_writeable(transport->inet))
+ status = -ENOBUFS;
switch (status) {
case -ENOTSOCK:
status = -ENOTCONN;
/* Should we call xs_close() here? */
break;
- case -ENOBUFS:
case -EAGAIN:
status = xs_nospace(task);
break;
@@ -703,6 +711,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
case -ECONNREFUSED:
case -ENOTCONN:
case -EADDRINUSE:
+ case -ENOBUFS:
case -EPIPE:
clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
}
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 84f77a054025..9f2add3cba26 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -171,8 +171,10 @@ int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
* released.
*/
- attr->trans = SWITCHDEV_TRANS_ABORT;
- __switchdev_port_attr_set(dev, attr);
+ if (err != -EOPNOTSUPP) {
+ attr->trans = SWITCHDEV_TRANS_ABORT;
+ __switchdev_port_attr_set(dev, attr);
+ }
return err;
}
@@ -249,8 +251,10 @@ int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
* released.
*/
- obj->trans = SWITCHDEV_TRANS_ABORT;
- __switchdev_port_obj_add(dev, obj);
+ if (err != -EOPNOTSUPP) {
+ obj->trans = SWITCHDEV_TRANS_ABORT;
+ __switchdev_port_obj_add(dev, obj);
+ }
return err;
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 46b6ed534ef2..3a7567f690f3 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2007,6 +2007,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
if (res)
goto exit;
+ security_sk_clone(sock->sk, new_sock->sk);
new_sk = new_sock->sk;
new_tsock = tipc_sk(new_sk);
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 915b328b9ac5..59cabc9bce69 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -797,23 +797,18 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy,
return false;
}
-bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
- struct cfg80211_chan_def *chandef,
- enum nl80211_iftype iftype)
+static bool _cfg80211_reg_can_beacon(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ enum nl80211_iftype iftype,
+ bool check_no_ir)
{
bool res;
u32 prohibited_flags = IEEE80211_CHAN_DISABLED |
IEEE80211_CHAN_RADAR;
- trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype);
+ trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir);
- /*
- * Under certain conditions suggested by some regulatory bodies a
- * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag
- * only if such relaxations are not enabled and the conditions are not
- * met.
- */
- if (!cfg80211_ir_permissive_chan(wiphy, iftype, chandef->chan))
+ if (check_no_ir)
prohibited_flags |= IEEE80211_CHAN_NO_IR;
if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 &&
@@ -827,8 +822,36 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
trace_cfg80211_return_bool(res);
return res;
}
+
+bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ enum nl80211_iftype iftype)
+{
+ return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, true);
+}
EXPORT_SYMBOL(cfg80211_reg_can_beacon);
+bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ enum nl80211_iftype iftype)
+{
+ bool check_no_ir;
+
+ ASSERT_RTNL();
+
+ /*
+ * Under certain conditions suggested by some regulatory bodies a
+ * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag
+ * only if such relaxations are not enabled and the conditions are not
+ * met.
+ */
+ check_no_ir = !cfg80211_ir_permissive_chan(wiphy, iftype,
+ chandef->chan);
+
+ return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir);
+}
+EXPORT_SYMBOL(cfg80211_reg_can_beacon_relax);
+
int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
struct cfg80211_chan_def *chandef)
{
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c264effd00a6..76b41578a838 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2003,7 +2003,8 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
switch (iftype) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
- if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, iftype)) {
+ if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef,
+ iftype)) {
result = -EINVAL;
break;
}
@@ -3403,8 +3404,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
} else if (!nl80211_get_ap_channel(rdev, &params))
return -EINVAL;
- if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef,
- wdev->iftype))
+ if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef,
+ wdev->iftype))
return -EINVAL;
if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
@@ -6492,8 +6493,8 @@ skip_beacons:
if (err)
return err;
- if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef,
- wdev->iftype))
+ if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef,
+ wdev->iftype))
return -EINVAL;
err = cfg80211_chandef_dfs_required(wdev->wiphy,
@@ -10170,7 +10171,8 @@ static int nl80211_tdls_channel_switch(struct sk_buff *skb,
return -EINVAL;
/* we will be active on the TDLS link */
- if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, wdev->iftype))
+ if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef,
+ wdev->iftype))
return -EINVAL;
/* don't allow switching to DFS channels */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index d359e0610198..aa2d75482017 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -544,15 +544,15 @@ static int call_crda(const char *alpha2)
reg_regdb_query(alpha2);
if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) {
- pr_info("Exceeded CRDA call max attempts. Not calling CRDA\n");
+ pr_debug("Exceeded CRDA call max attempts. Not calling CRDA\n");
return -EINVAL;
}
if (!is_world_regdom((char *) alpha2))
- pr_info("Calling CRDA for country: %c%c\n",
+ pr_debug("Calling CRDA for country: %c%c\n",
alpha2[0], alpha2[1]);
else
- pr_info("Calling CRDA to update world regulatory domain\n");
+ pr_debug("Calling CRDA to update world regulatory domain\n");
return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env);
}
@@ -1589,7 +1589,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_ADHOC:
- return cfg80211_reg_can_beacon(wiphy, &chandef, iftype);
+ return cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype);
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
return cfg80211_chandef_usable(wiphy, &chandef,
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index af3617c9879e..a808279a432a 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2358,20 +2358,23 @@ TRACE_EVENT(cfg80211_cqm_rssi_notify,
TRACE_EVENT(cfg80211_reg_can_beacon,
TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef,
- enum nl80211_iftype iftype),
- TP_ARGS(wiphy, chandef, iftype),
+ enum nl80211_iftype iftype, bool check_no_ir),
+ TP_ARGS(wiphy, chandef, iftype, check_no_ir),
TP_STRUCT__entry(
WIPHY_ENTRY
CHAN_DEF_ENTRY
__field(enum nl80211_iftype, iftype)
+ __field(bool, check_no_ir)
),
TP_fast_assign(
WIPHY_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
__entry->iftype = iftype;
+ __entry->check_no_ir = check_no_ir;
),
- TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d",
- WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype)
+ TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d check_no_ir=%s",
+ WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype,
+ BOOL_TO_STR(__entry->check_no_ir))
);
TRACE_EVENT(cfg80211_chandef_dfs_required,
diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h
index 8965d1bb8811..125d6402f64f 100644
--- a/samples/trace_events/trace-events-sample.h
+++ b/samples/trace_events/trace-events-sample.h
@@ -168,7 +168,10 @@
*
* For __dynamic_array(int, foo, bar) use __get_dynamic_array(foo)
* Use __get_dynamic_array_len(foo) to get the length of the array
- * saved.
+ * saved. Note, __get_dynamic_array_len() returns the total allocated
+ * length of the dynamic array; __print_array() expects the second
+ * parameter to be the number of elements. To get that, the array length
+ * needs to be divided by the element size.
*
* For __string(foo, bar) use __get_str(foo)
*
@@ -288,7 +291,7 @@ TRACE_EVENT(foo_bar,
* This prints out the array that is defined by __array in a nice format.
*/
__print_array(__get_dynamic_array(list),
- __get_dynamic_array_len(list),
+ __get_dynamic_array_len(list) / sizeof(int),
sizeof(int)),
__get_str(str), __get_bitmask(cpus))
);
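/* Editorial sketch (not part of the patch above): the dynamic-array length
 * macro reports a byte count, while the array printer wants an element count,
 * hence the division by the element size in the hunk above.  Plain C
 * illustration of the same arithmetic.
 */
#include <stdio.h>

int main(void)
{
	int list[] = { 1, 2, 3, 4, 5 };
	size_t total_len = sizeof(list);		/* what __get_dynamic_array_len() reports */
	size_t count = total_len / sizeof(int);		/* what __print_array() expects           */

	printf("%zu bytes, %zu elements\n", total_len, count);	/* 20 bytes, 5 elements */
	return 0;
}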
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 90e1edc8dd42..d5c8e9a3a73c 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2599,7 +2599,7 @@ sub process {
# if LONG_LINE is ignored, the other 2 types are also ignored
#
- if ($length > $max_line_length) {
+ if ($line =~ /^\+/ && $length > $max_line_length) {
my $msg_type = "LONG_LINE";
# Check the allowed long line types first
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 9cb8522d8d22..f3d3fb42b873 100755
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.');
my $kconfig = $ARGV[1];
my $lsmod_file = $ENV{'LSMOD'};
-my @makefiles = `find $ksource -name Makefile 2>/dev/null`;
+my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`;
chomp @makefiles;
my %depends;
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index e72548b5897e..d33437007ad2 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1181,9 +1181,11 @@ void __key_link_end(struct key *keyring,
if (index_key->type == &key_type_keyring)
up_write(&keyring_serialise_link_sem);
- if (edit && !edit->dead_leaf) {
- key_payload_reserve(keyring,
- keyring->datalen - KEYQUOTA_LINK_BYTES);
+ if (edit) {
+ if (!edit->dead_leaf) {
+ key_payload_reserve(keyring,
+ keyring->datalen - KEYQUOTA_LINK_BYTES);
+ }
assoc_array_cancel_edit(edit);
}
up_write(&keyring->sem);
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 9ed32502470e..5ebb89687936 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -406,6 +406,7 @@ static __init int yama_init(void)
*/
if (!security_module_enable("yama"))
return 0;
+ yama_add_hooks();
#endif
pr_info("Yama: becoming mindful.\n");
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index d126c03361ae..75888dd38a7f 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -85,7 +85,7 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem);
void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
{
if (substream->pcm->nonatomic) {
- down_read(&snd_pcm_link_rwsem);
+ down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING);
mutex_lock(&substream->self_group.mutex);
} else {
read_lock(&snd_pcm_link_rwlock);
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
index 7bb988fa6b6d..2a153d260836 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -740,8 +740,9 @@ static int handle_in_packet(struct amdtp_stream *s,
s->data_block_counter != UINT_MAX)
data_block_counter = s->data_block_counter;
- if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) && data_block_counter == 0) ||
- (s->data_block_counter == UINT_MAX)) {
+ if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
+ data_block_counter == s->tx_first_dbc) ||
+ s->data_block_counter == UINT_MAX) {
lost = false;
} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
lost = data_block_counter != s->data_block_counter;
diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
index 26b909329e54..b2cf9e75693b 100644
--- a/sound/firewire/amdtp.h
+++ b/sound/firewire/amdtp.h
@@ -157,6 +157,8 @@ struct amdtp_stream {
/* quirk: fixed interval of dbc between previos/current packets. */
unsigned int tx_dbc_interval;
+ /* quirk: indicate the value of the dbc field in the first packet. */
+ unsigned int tx_first_dbc;
bool callbacked;
wait_queue_head_t callback_wait;
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index 2682e7e3e5c9..c94a432f7cc6 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -248,8 +248,16 @@ efw_probe(struct fw_unit *unit,
err = get_hardware_info(efw);
if (err < 0)
goto error;
+ /* AudioFire8 (since 2009) and AudioFirePre8 */
if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9)
efw->is_af9 = true;
+ /* These models use the same firmware. */
+ if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2 ||
+ entry->model_id == MODEL_ECHO_AUDIOFIRE_4 ||
+ entry->model_id == MODEL_ECHO_AUDIOFIRE_9 ||
+ entry->model_id == MODEL_GIBSON_RIP ||
+ entry->model_id == MODEL_GIBSON_GOLDTOP)
+ efw->is_fireworks3 = true;
snd_efw_proc_init(efw);
diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
index 4f0201a95222..084d414b228c 100644
--- a/sound/firewire/fireworks/fireworks.h
+++ b/sound/firewire/fireworks/fireworks.h
@@ -71,6 +71,7 @@ struct snd_efw {
/* for quirks */
bool is_af9;
+ bool is_fireworks3;
u32 firmware_version;
unsigned int midi_in_ports;
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index c55db1bddc80..7e353f1f7bff 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -172,6 +172,15 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw)
efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT;
/* Fireworks reset dbc at bus reset. */
efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK;
+ /*
+ * But recent firmware starts packets with a non-zero dbc.
+ * Driver version 5.7.6 installs firmware version 5.7.3.
+ */
+ if (efw->is_fireworks3 &&
+ (efw->firmware_version == 0x5070000 ||
+ efw->firmware_version == 0x5070300 ||
+ efw->firmware_version == 0x5080000))
+ efw->tx_stream.tx_first_dbc = 0x02;
/* AudioFire9 always reports wrong dbs. */
if (efw->is_af9)
efw->tx_stream.flags |= CIP_WRONG_DBS;
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index b2da19b60f4e..358f16195483 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -44,16 +44,10 @@ int snd_hdac_ext_bus_parse_capabilities(struct hdac_ext_bus *ebus)
offset = snd_hdac_chip_readl(bus, LLCH);
- if (offset < 0)
- return -EIO;
-
/* Lets walk the linked capabilities list */
do {
cur_cap = _snd_hdac_chip_read(l, bus, offset);
- if (cur_cap < 0)
- return -EIO;
-
dev_dbg(bus->dev, "Capability version: 0x%x\n",
((cur_cap & AZX_CAP_HDR_VER_MASK) >> AZX_CAP_HDR_VER_OFF));
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index f8ffbdbb450d..3de47dd1a76d 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -299,7 +299,7 @@ hdac_ext_host_stream_assign(struct hdac_ext_bus *ebus,
if (stream->direction != substream->stream)
continue;
- if (stream->opened) {
+ if (!stream->opened) {
if (!hstream->decoupled)
snd_hdac_ext_stream_decouple(ebus, hstream, true);
res = hstream;
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 442500e06b7c..5676b849379d 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -56,8 +56,11 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
enable ? "enable" : "disable");
if (enable) {
- if (!bus->i915_power_refcount++)
+ if (!bus->i915_power_refcount++) {
acomp->ops->get_power(acomp->dev);
+ snd_hdac_set_codec_wakeup(bus, true);
+ snd_hdac_set_codec_wakeup(bus, false);
+ }
} else {
WARN_ON(!bus->i915_power_refcount);
if (!--bus->i915_power_refcount)
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index ac0db1679f09..b077bb644434 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -5175,7 +5175,7 @@ static int alt_playback_pcm_open(struct hda_pcm_stream *hinfo,
int err = 0;
mutex_lock(&spec->pcm_mutex);
- if (!spec->indep_hp_enabled)
+ if (spec->indep_hp && !spec->indep_hp_enabled)
err = -EBUSY;
else
spec->active_streams |= 1 << STREAM_INDEP_HP;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 745535d1840a..c38c68f57938 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -867,7 +867,7 @@ static int azx_suspend(struct device *dev)
chip = card->private_data;
hda = container_of(chip, struct hda_intel, chip);
- if (chip->disabled || hda->init_failed)
+ if (chip->disabled || hda->init_failed || !chip->running)
return 0;
bus = azx_bus(chip);
@@ -902,7 +902,7 @@ static int azx_resume(struct device *dev)
chip = card->private_data;
hda = container_of(chip, struct hda_intel, chip);
- if (chip->disabled || hda->init_failed)
+ if (chip->disabled || hda->init_failed || !chip->running)
return 0;
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
@@ -979,14 +979,16 @@ static int azx_runtime_resume(struct device *dev)
if (!azx_has_pm_runtime(chip))
return 0;
- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
- && hda->need_i915_power) {
- bus = azx_bus(chip);
- snd_hdac_display_power(bus, true);
- haswell_set_bclk(hda);
- /* toggle codec wakeup bit for STATESTS read */
- snd_hdac_set_codec_wakeup(bus, true);
- snd_hdac_set_codec_wakeup(bus, false);
+ if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
+ bus = azx_bus(chip);
+ if (hda->need_i915_power) {
+ snd_hdac_display_power(bus, true);
+ haswell_set_bclk(hda);
+ } else {
+ /* toggle codec wakeup bit for STATESTS read */
+ snd_hdac_set_codec_wakeup(bus, true);
+ snd_hdac_set_codec_wakeup(bus, false);
+ }
}
/* Read STATESTS before controller reset */
@@ -1025,7 +1027,7 @@ static int azx_runtime_idle(struct device *dev)
return 0;
if (!power_save_controller || !azx_has_pm_runtime(chip) ||
- azx_bus(chip)->codec_powered)
+ azx_bus(chip)->codec_powered || !chip->running)
return -EBUSY;
return 0;
@@ -2182,6 +2184,8 @@ static const struct pci_device_id azx_ids[] = {
/* ATI HDMI */
{ PCI_DEVICE(0x1002, 0x1308),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ { PCI_DEVICE(0x1002, 0x157a),
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
{ PCI_DEVICE(0x1002, 0x793b),
.driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
{ PCI_DEVICE(0x1002, 0x7919),
@@ -2236,8 +2240,14 @@ static const struct pci_device_id azx_ids[] = {
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
{ PCI_DEVICE(0x1002, 0xaab0),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ { PCI_DEVICE(0x1002, 0xaac0),
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
{ PCI_DEVICE(0x1002, 0xaac8),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ { PCI_DEVICE(0x1002, 0xaad8),
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ { PCI_DEVICE(0x1002, 0xaae8),
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
/* VIA VT8251/VT8237A */
{ PCI_DEVICE(0x1106, 0x3288),
.driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 25ccf781fbe7..584a0343ab0c 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -999,9 +999,7 @@ static void cs4210_spdif_automute(struct hda_codec *codec,
spec->spdif_present = spdif_present;
/* SPDIF TX on/off */
- if (spdif_present)
- snd_hda_set_pin_ctl(codec, spdif_pin,
- spdif_present ? PIN_OUT : 0);
+ snd_hda_set_pin_ctl(codec, spdif_pin, spdif_present ? PIN_OUT : 0);
cs_automute(codec);
}
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 2f2433845d04..a97db5fc8a15 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3512,6 +3512,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de007d, .name = "GPU 7d HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
{ .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
@@ -3527,6 +3528,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
{ .id = 0x80862807, .name = "Haswell HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862808, .name = "Broadwell HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862809, .name = "Skylake HDMI", .patch = patch_generic_hdmi },
+{ .id = 0x8086280a, .name = "Broxton HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862883, .name = "Braswell HDMI", .patch = patch_generic_hdmi },
@@ -3575,6 +3577,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0067");
MODULE_ALIAS("snd-hda-codec-id:10de0070");
MODULE_ALIAS("snd-hda-codec-id:10de0071");
MODULE_ALIAS("snd-hda-codec-id:10de0072");
+MODULE_ALIAS("snd-hda-codec-id:10de007d");
MODULE_ALIAS("snd-hda-codec-id:10de8001");
MODULE_ALIAS("snd-hda-codec-id:11069f80");
MODULE_ALIAS("snd-hda-codec-id:11069f81");
@@ -3591,6 +3594,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862806");
MODULE_ALIAS("snd-hda-codec-id:80862807");
MODULE_ALIAS("snd-hda-codec-id:80862808");
MODULE_ALIAS("snd-hda-codec-id:80862809");
+MODULE_ALIAS("snd-hda-codec-id:8086280a");
MODULE_ALIAS("snd-hda-codec-id:80862880");
MODULE_ALIAS("snd-hda-codec-id:80862882");
MODULE_ALIAS("snd-hda-codec-id:80862883");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b3b44681d3cf..374ea53288ca 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2222,7 +2222,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
- SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF),
+ SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
@@ -4441,6 +4441,55 @@ static void alc290_fixup_mono_speakers(struct hda_codec *codec,
}
}
+/* Hook to update amp GPIO4 for automute */
+static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
+{
+ struct alc_spec *spec = codec->spec;
+
+ snd_hda_gen_hp_automute(codec, jack);
+ /* mute_led_polarity is set to 0, so we pass inverted value here */
+ alc_update_gpio_led(codec, 0x10, !spec->gen.hp_jack_present);
+}
+
+/* Manage GPIOs for HP EliteBook Folio 9480m.
+ *
+ * GPIO4 is the headphone amplifier power control
+ * GPIO3 is the audio output mute indicator LED
+ */
+
+static void alc280_fixup_hp_9480m(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ struct alc_spec *spec = codec->spec;
+ static const struct hda_verb gpio_init[] = {
+ { 0x01, AC_VERB_SET_GPIO_MASK, 0x18 },
+ { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x18 },
+ {}
+ };
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ /* Set the hooks to turn the headphone amp on/off
+ * as needed
+ */
+ spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
+ spec->gen.hp_automute_hook = alc280_hp_gpio4_automute_hook;
+
+ /* The GPIOs are currently off */
+ spec->gpio_led = 0;
+
+ /* GPIO3 is connected to the output mute LED,
+ * high is on, low is off
+ */
+ spec->mute_led_polarity = 0;
+ spec->gpio_mute_led_mask = 0x08;
+
+ /* Initialize GPIO configuration */
+ snd_hda_add_verbs(codec, gpio_init);
+ }
+}
+
/* for hda_fixup_thinkpad_acpi() */
#include "thinkpad_helper.c"
@@ -4521,6 +4570,7 @@ enum {
ALC286_FIXUP_HP_GPIO_LED,
ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
ALC280_FIXUP_HP_DOCK_PINS,
+ ALC280_FIXUP_HP_9480M,
ALC288_FIXUP_DELL_HEADSET_MODE,
ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC288_FIXUP_DELL_XPS_13_GPIO6,
@@ -5011,7 +5061,7 @@ static const struct hda_fixup alc269_fixups[] = {
{ 0x14, 0x90170110 },
{ 0x17, 0x40000008 },
{ 0x18, 0x411111f0 },
- { 0x19, 0x411111f0 },
+ { 0x19, 0x01a1913c },
{ 0x1a, 0x411111f0 },
{ 0x1b, 0x411111f0 },
{ 0x1d, 0x40f89b2d },
@@ -5043,6 +5093,10 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC280_FIXUP_HP_GPIO4
},
+ [ALC280_FIXUP_HP_9480M] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc280_fixup_hp_9480m,
+ },
[ALC288_FIXUP_DELL_HEADSET_MODE] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_headset_mode_dell_alc288,
@@ -5131,9 +5185,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
+ SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+ SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5161,6 +5218,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
/* ALC290 */
@@ -5234,6 +5292,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2211, "Thinkpad W541", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -5343,8 +5402,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{0x19, 0x411111f0}, \
{0x1a, 0x411111f0}, \
{0x1b, 0x411111f0}, \
- {0x1d, 0x40700001}, \
- {0x1e, 0x411111f0}, \
{0x21, 0x02211020}
#define ALC282_STANDARD_PINS \
@@ -5375,8 +5432,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{0x15, 0x0221401f}, \
{0x1a, 0x411111f0}, \
{0x1b, 0x411111f0}, \
- {0x1d, 0x40700001}, \
- {0x1e, 0x411111f0}
+ {0x1d, 0x40700001}
#define ALC298_STANDARD_PINS \
{0x18, 0x411111f0}, \
@@ -5408,6 +5464,39 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x1d, 0x40700001},
{0x21, 0x02211030}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x40000000},
+ {0x14, 0x90170130},
+ {0x17, 0x411111f0},
+ {0x18, 0x411111f0},
+ {0x19, 0x411111f0},
+ {0x1a, 0x411111f0},
+ {0x1b, 0x01014020},
+ {0x1d, 0x4054c029},
+ {0x1e, 0x411111f0},
+ {0x21, 0x0221103f}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x40000000},
+ {0x14, 0x90170150},
+ {0x17, 0x411111f0},
+ {0x18, 0x411111f0},
+ {0x19, 0x411111f0},
+ {0x1a, 0x411111f0},
+ {0x1b, 0x02011020},
+ {0x1d, 0x4054c029},
+ {0x1e, 0x411111f0},
+ {0x21, 0x0221105f}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x40000000},
+ {0x14, 0x90170110},
+ {0x17, 0x411111f0},
+ {0x18, 0x411111f0},
+ {0x19, 0x411111f0},
+ {0x1a, 0x411111f0},
+ {0x1b, 0x01014020},
+ {0x1d, 0x4054c029},
+ {0x1e, 0x411111f0},
+ {0x21, 0x0221101f}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x12, 0x90a60160},
{0x14, 0x90170120},
{0x17, 0x90170140},
@@ -5469,10 +5558,19 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x21, 0x02211030}),
SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC256_STANDARD_PINS,
- {0x13, 0x40000000}),
+ {0x13, 0x40000000},
+ {0x1d, 0x40700001},
+ {0x1e, 0x411111f0}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC256_STANDARD_PINS,
+ {0x13, 0x411111f0},
+ {0x1d, 0x40700001},
+ {0x1e, 0x411111f0}),
SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC256_STANDARD_PINS,
- {0x13, 0x411111f0}),
+ {0x13, 0x411111f0},
+ {0x1d, 0x4077992d},
+ {0x1e, 0x411111ff}),
SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
{0x12, 0x90a60130},
{0x13, 0x40000000},
@@ -5635,35 +5733,48 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x13, 0x411111f0},
{0x16, 0x01014020},
{0x18, 0x411111f0},
- {0x19, 0x01a19030}),
+ {0x19, 0x01a19030},
+ {0x1e, 0x411111f0}),
SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
ALC292_STANDARD_PINS,
{0x12, 0x90a60140},
{0x13, 0x411111f0},
{0x16, 0x01014020},
{0x18, 0x02a19031},
- {0x19, 0x01a1903e}),
+ {0x19, 0x01a1903e},
+ {0x1e, 0x411111f0}),
SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
ALC292_STANDARD_PINS,
{0x12, 0x90a60140},
{0x13, 0x411111f0},
{0x16, 0x411111f0},
{0x18, 0x411111f0},
- {0x19, 0x411111f0}),
+ {0x19, 0x411111f0},
+ {0x1e, 0x411111f0}),
SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC292_STANDARD_PINS,
{0x12, 0x40000000},
{0x13, 0x90a60140},
{0x16, 0x21014020},
{0x18, 0x411111f0},
- {0x19, 0x21a19030}),
+ {0x19, 0x21a19030},
+ {0x1e, 0x411111f0}),
SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC292_STANDARD_PINS,
{0x12, 0x40000000},
{0x13, 0x90a60140},
{0x16, 0x411111f0},
{0x18, 0x411111f0},
- {0x19, 0x411111f0}),
+ {0x19, 0x411111f0},
+ {0x1e, 0x411111f0}),
+ SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC292_STANDARD_PINS,
+ {0x12, 0x40000000},
+ {0x13, 0x90a60140},
+ {0x16, 0x21014020},
+ {0x18, 0x411111f0},
+ {0x19, 0x21a19030},
+ {0x1e, 0x411111ff}),
SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC298_STANDARD_PINS,
{0x12, 0x90a60130},
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index dcc7fe91244c..9d947aef2c8b 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -2920,7 +2920,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x148a,
"HP Mini", STAC_92HD83XXX_HP_LED),
SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_HP, "HP", STAC_92HD83XXX_HP),
- SND_PCI_QUIRK(PCI_VENDOR_ID_TOSHIBA, 0xfa91,
+ /* match both for 0xfa91 and 0xfa93 */
+ SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_TOSHIBA, 0xfffd, 0xfa91,
"Toshiba Satellite S50D", STAC_92HD83XXX_GPIO10_EAPD),
{} /* terminator */
};
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index 6492bca8c70f..4ca12665ff73 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -88,7 +88,7 @@ static int dac_mute_put(struct snd_kcontrol *ctl,
int changed;
mutex_lock(&chip->mutex);
- changed = !value->value.integer.value[0] != chip->dac_mute;
+ changed = (!value->value.integer.value[0]) != chip->dac_mute;
if (changed) {
chip->dac_mute = !value->value.integer.value[0];
chip->model.update_dac_mute(chip);
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 2ae9619443d1..1d651b8a8957 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -30,6 +30,9 @@ config SND_SOC_GENERIC_DMAENGINE_PCM
bool
select SND_DMAENGINE_PCM
+config SND_SOC_TOPOLOGY
+ bool
+
# All the supported SoCs
source "sound/soc/adi/Kconfig"
source "sound/soc/atmel/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index e189903fabf4..669648b41d30 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -1,6 +1,9 @@
snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o
snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o soc-ops.o
+
+ifneq ($(CONFIG_SND_SOC_TOPOLOGY),)
snd-soc-core-objs += soc-topology.o
+endif
ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),)
snd-soc-core-objs += soc-generic-dmaengine-pcm.o
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index d7ec4756e45b..8e36198474d9 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -457,14 +457,14 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
case SND_SOC_DAIFMT_RIGHT_J:
if (params_width(params) == 16) {
snd_soc_update_bits(codec, CS4265_DAC_CTL,
- CS4265_DAC_CTL_DIF, (1 << 5));
+ CS4265_DAC_CTL_DIF, (2 << 4));
snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
- CS4265_SPDIF_CTL2_DIF, (1 << 7));
+ CS4265_SPDIF_CTL2_DIF, (2 << 6));
} else {
snd_soc_update_bits(codec, CS4265_DAC_CTL,
- CS4265_DAC_CTL_DIF, (3 << 5));
+ CS4265_DAC_CTL_DIF, (3 << 4));
snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
- CS4265_SPDIF_CTL2_DIF, (1 << 7));
+ CS4265_SPDIF_CTL2_DIF, (3 << 6));
}
break;
case SND_SOC_DAIFMT_LEFT_J:
@@ -473,7 +473,7 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
snd_soc_update_bits(codec, CS4265_ADC_CTL,
CS4265_ADC_DIF, 0);
snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
- CS4265_SPDIF_CTL2_DIF, (1 << 6));
+ CS4265_SPDIF_CTL2_DIF, 0);
break;
default:
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
index 477e13d30971..e7ba557979cb 100644
--- a/sound/soc/codecs/pcm1681.c
+++ b/sound/soc/codecs/pcm1681.c
@@ -102,7 +102,7 @@ static int pcm1681_set_deemph(struct snd_soc_codec *codec)
if (val != -1) {
regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
- PCM1681_DEEMPH_RATE_MASK, val);
+ PCM1681_DEEMPH_RATE_MASK, val << 3);
enable = 1;
} else
enable = 0;
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 9ce311e088fc..961bd7e5877e 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -2943,6 +2943,9 @@ static int rt5645_irq_detection(struct rt5645_priv *rt5645)
{
int val, btn_type, gpio_state = 0, report = 0;
+ if (!rt5645->codec)
+ return -EINVAL;
+
switch (rt5645->pdata.jd_mode) {
case 0: /* Not using rt5645 JD */
if (rt5645->gpiod_hp_det) {
@@ -3338,6 +3341,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
break;
case RT5645_DMIC_DATA_GPIO5:
+ regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
+ RT5645_I2S2_DAC_PIN_MASK, RT5645_I2S2_DAC_PIN_GPIO);
regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1,
RT5645_DMIC_1_DP_MASK, RT5645_DMIC_1_DP_GPIO5);
regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index 0353a6a273ab..278bb9f464c4 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -1693,6 +1693,10 @@
#define RT5645_GP6_PIN_SFT 6
#define RT5645_GP6_PIN_GPIO6 (0x0 << 6)
#define RT5645_GP6_PIN_DMIC2_SDA (0x1 << 6)
+#define RT5645_I2S2_DAC_PIN_MASK (0x1 << 4)
+#define RT5645_I2S2_DAC_PIN_SFT 4
+#define RT5645_I2S2_DAC_PIN_I2S (0x0 << 4)
+#define RT5645_I2S2_DAC_PIN_GPIO (0x1 << 4)
#define RT5645_GP8_PIN_MASK (0x1 << 3)
#define RT5645_GP8_PIN_SFT 3
#define RT5645_GP8_PIN_GPIO8 (0x0 << 3)
diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h
index bd7a344bf8c5..1c317de26176 100644
--- a/sound/soc/codecs/sgtl5000.h
+++ b/sound/soc/codecs/sgtl5000.h
@@ -275,7 +275,7 @@
#define SGTL5000_BIAS_CTRL_MASK 0x000e
#define SGTL5000_BIAS_CTRL_SHIFT 1
#define SGTL5000_BIAS_CTRL_WIDTH 3
-#define SGTL5000_SMALL_POP 0
+#define SGTL5000_SMALL_POP 1
/*
* SGTL5000_CHIP_MIC_CTRL
diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
index 938d2cb6d78b..84a4f5ad8064 100644
--- a/sound/soc/codecs/ssm4567.c
+++ b/sound/soc/codecs/ssm4567.c
@@ -315,7 +315,13 @@ static int ssm4567_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
if (invert_fclk)
ctrl1 |= SSM4567_SAI_CTRL_1_FSYNC;
- return regmap_write(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1, ctrl1);
+ return regmap_update_bits(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1,
+ SSM4567_SAI_CTRL_1_BCLK |
+ SSM4567_SAI_CTRL_1_FSYNC |
+ SSM4567_SAI_CTRL_1_LJ |
+ SSM4567_SAI_CTRL_1_TDM |
+ SSM4567_SAI_CTRL_1_PDM,
+ ctrl1);
}
static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index c7647e066cfd..c0b940e2019f 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -633,7 +633,7 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
sub *= 100000;
do_div(sub, freq);
- if (sub < savesub) {
+ if (sub < savesub && !(i == 0 && psr == 0 && div2 == 0)) {
baudrate = tmprate;
savesub = sub;
pm = i;
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
index 3853ec2ddbc7..6de5d5cd3280 100644
--- a/sound/soc/intel/Makefile
+++ b/sound/soc/intel/Makefile
@@ -7,4 +7,4 @@ obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += baytrail/
obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/
# Machine support
-obj-$(CONFIG_SND_SOC_INTEL_SST) += boards/
+obj-$(CONFIG_SND_SOC) += boards/
diff --git a/sound/soc/intel/atom/sst/sst_drv_interface.c b/sound/soc/intel/atom/sst/sst_drv_interface.c
index 620da1d1b9e3..0e0e4d9c021f 100644
--- a/sound/soc/intel/atom/sst/sst_drv_interface.c
+++ b/sound/soc/intel/atom/sst/sst_drv_interface.c
@@ -42,6 +42,11 @@
#define MIN_FRAGMENT_SIZE (50 * 1024)
#define MAX_FRAGMENT_SIZE (1024 * 1024)
#define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz) (((pcm_wd_sz + 15) >> 4) << 1)
+#ifdef CONFIG_PM
+#define GET_USAGE_COUNT(dev) (atomic_read(&dev->power.usage_count))
+#else
+#define GET_USAGE_COUNT(dev) 1
+#endif
int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id)
{
@@ -141,15 +146,9 @@ static int sst_power_control(struct device *dev, bool state)
int ret = 0;
int usage_count = 0;
-#ifdef CONFIG_PM
- usage_count = atomic_read(&dev->power.usage_count);
-#else
- usage_count = 1;
-#endif
-
if (state == true) {
ret = pm_runtime_get_sync(dev);
-
+ usage_count = GET_USAGE_COUNT(dev);
dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count);
if (ret < 0) {
dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret);
@@ -164,6 +163,7 @@ static int sst_power_control(struct device *dev, bool state)
}
}
} else {
+ usage_count = GET_USAGE_COUNT(dev);
dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count);
return sst_pm_runtime_put(ctx);
}
diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
index 4c01bb43928d..5bbaa667bec1 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
@@ -701,6 +701,8 @@ int sst_byt_dsp_init(struct device *dev, struct sst_pdata *pdata)
if (byt == NULL)
return -ENOMEM;
+ byt->dev = dev;
+
ipc = &byt->ipc;
ipc->dev = dev;
ipc->ops.tx_msg = byt_tx_msg;
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index d604ee80eda4..70f832114a5a 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -69,12 +69,12 @@ static const struct snd_soc_dapm_route cht_audio_map[] = {
{"Headphone", NULL, "HPR"},
{"Ext Spk", NULL, "SPKL"},
{"Ext Spk", NULL, "SPKR"},
- {"AIF1 Playback", NULL, "ssp2 Tx"},
+ {"HiFi Playback", NULL, "ssp2 Tx"},
{"ssp2 Tx", NULL, "codec_out0"},
{"ssp2 Tx", NULL, "codec_out1"},
{"codec_in0", NULL, "ssp2 Rx" },
{"codec_in1", NULL, "ssp2 Rx" },
- {"ssp2 Rx", NULL, "AIF1 Capture"},
+ {"ssp2 Rx", NULL, "HiFi Capture"},
};
static const struct snd_kcontrol_new cht_mc_controls[] = {
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index f95f271aab0c..f6efa9d4acad 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -2119,6 +2119,8 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
if (hsw == NULL)
return -ENOMEM;
+ hsw->dev = dev;
+
ipc = &hsw->ipc;
ipc->dev = dev;
ipc->ops.tx_msg = hsw_tx_msg;
diff --git a/sound/soc/mediatek/mt8173-max98090.c b/sound/soc/mediatek/mt8173-max98090.c
index 4d44b5803e55..2d2536af141f 100644
--- a/sound/soc/mediatek/mt8173-max98090.c
+++ b/sound/soc/mediatek/mt8173-max98090.c
@@ -103,7 +103,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
.name = "MAX98090 Playback",
.stream_name = "MAX98090 Playback",
.cpu_dai_name = "DL1",
- .platform_name = "11220000.mt8173-afe-pcm",
.codec_name = "snd-soc-dummy",
.codec_dai_name = "snd-soc-dummy-dai",
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -114,7 +113,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
.name = "MAX98090 Capture",
.stream_name = "MAX98090 Capture",
.cpu_dai_name = "VUL",
- .platform_name = "11220000.mt8173-afe-pcm",
.codec_name = "snd-soc-dummy",
.codec_dai_name = "snd-soc-dummy-dai",
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -125,7 +123,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
{
.name = "Codec",
.cpu_dai_name = "I2S",
- .platform_name = "11220000.mt8173-afe-pcm",
.no_pcm = 1,
.codec_dai_name = "HiFi",
.init = mt8173_max98090_init,
@@ -152,9 +149,21 @@ static struct snd_soc_card mt8173_max98090_card = {
static int mt8173_max98090_dev_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &mt8173_max98090_card;
- struct device_node *codec_node;
+ struct device_node *codec_node, *platform_node;
int ret, i;
+ platform_node = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,platform", 0);
+ if (!platform_node) {
+ dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < card->num_links; i++) {
+ if (mt8173_max98090_dais[i].platform_name)
+ continue;
+ mt8173_max98090_dais[i].platform_of_node = platform_node;
+ }
+
codec_node = of_parse_phandle(pdev->dev.of_node,
"mediatek,audio-codec", 0);
if (!codec_node) {
diff --git a/sound/soc/mediatek/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
index 094055323059..6f52eca05e26 100644
--- a/sound/soc/mediatek/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
@@ -138,7 +138,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
.name = "rt5650_rt5676 Playback",
.stream_name = "rt5650_rt5676 Playback",
.cpu_dai_name = "DL1",
- .platform_name = "11220000.mt8173-afe-pcm",
.codec_name = "snd-soc-dummy",
.codec_dai_name = "snd-soc-dummy-dai",
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -149,7 +148,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
.name = "rt5650_rt5676 Capture",
.stream_name = "rt5650_rt5676 Capture",
.cpu_dai_name = "VUL",
- .platform_name = "11220000.mt8173-afe-pcm",
.codec_name = "snd-soc-dummy",
.codec_dai_name = "snd-soc-dummy-dai",
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -161,7 +159,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
{
.name = "Codec",
.cpu_dai_name = "I2S",
- .platform_name = "11220000.mt8173-afe-pcm",
.no_pcm = 1,
.codecs = mt8173_rt5650_rt5676_codecs,
.num_codecs = 2,
@@ -209,7 +206,21 @@ static struct snd_soc_card mt8173_rt5650_rt5676_card = {
static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &mt8173_rt5650_rt5676_card;
- int ret;
+ struct device_node *platform_node;
+ int i, ret;
+
+ platform_node = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,platform", 0);
+ if (!platform_node) {
+ dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < card->num_links; i++) {
+ if (mt8173_rt5650_rt5676_dais[i].platform_name)
+ continue;
+ mt8173_rt5650_rt5676_dais[i].platform_of_node = platform_node;
+ }
mt8173_rt5650_rt5676_codecs[0].of_node =
of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 0);
diff --git a/sound/soc/mediatek/mtk-afe-pcm.c b/sound/soc/mediatek/mtk-afe-pcm.c
index cc228db5fb76..9863da73dfe0 100644
--- a/sound/soc/mediatek/mtk-afe-pcm.c
+++ b/sound/soc/mediatek/mtk-afe-pcm.c
@@ -1199,6 +1199,8 @@ err_pm_disable:
static int mtk_afe_pcm_dev_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ mtk_afe_runtime_suspend(&pdev->dev);
snd_soc_unregister_component(&pdev->dev);
snd_soc_unregister_platform(&pdev->dev);
return 0;
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 3a4a5c0e3f97..0e1e69c7abd5 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1716,6 +1716,7 @@ card_probe_error:
if (card->remove)
card->remove(card);
+ snd_soc_dapm_free(&card->dapm);
soc_cleanup_card_debugfs(card);
snd_card_free(card->snd_card);
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index aa327c92480c..e0de8072c514 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -358,9 +358,10 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
data->widget =
snd_soc_dapm_new_control_unlocked(widget->dapm,
&template);
+ kfree(name);
if (!data->widget) {
ret = -ENOMEM;
- goto err_name;
+ goto err_data;
}
}
break;
@@ -389,11 +390,12 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
data->value = template.on_val;
- data->widget = snd_soc_dapm_new_control(widget->dapm,
- &template);
+ data->widget = snd_soc_dapm_new_control_unlocked(
+ widget->dapm, &template);
+ kfree(name);
if (!data->widget) {
ret = -ENOMEM;
- goto err_name;
+ goto err_data;
}
snd_soc_dapm_add_path(widget->dapm, data->widget,
@@ -408,8 +410,6 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
return 0;
-err_name:
- kfree(name);
err_data:
kfree(data);
return ret;
@@ -418,8 +418,6 @@ err_data:
static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
{
struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
- if (data->widget)
- kfree(data->widget->name);
kfree(data->wlist);
kfree(data);
}
@@ -1952,6 +1950,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
size_t count, loff_t *ppos)
{
struct snd_soc_dapm_widget *w = file->private_data;
+ struct snd_soc_card *card = w->dapm->card;
char *buf;
int in, out;
ssize_t ret;
@@ -1961,6 +1960,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
if (!buf)
return -ENOMEM;
+ mutex_lock(&card->dapm_mutex);
+
/* Supply widgets are not handled by is_connected_{input,output}_ep() */
if (w->is_supply) {
in = 0;
@@ -2007,6 +2008,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
p->sink->name);
}
+ mutex_unlock(&card->dapm_mutex);
+
ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
kfree(buf);
@@ -2281,11 +2284,15 @@ static ssize_t dapm_widget_show(struct device *dev,
struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
int i, count = 0;
+ mutex_lock(&rtd->card->dapm_mutex);
+
for (i = 0; i < rtd->num_codecs; i++) {
struct snd_soc_codec *codec = rtd->codec_dais[i]->codec;
count += dapm_widget_show_codec(codec, buf + count);
}
+ mutex_unlock(&rtd->card->dapm_mutex);
+
return count;
}
@@ -3334,16 +3341,10 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
}
prefix = soc_dapm_prefix(dapm);
- if (prefix) {
+ if (prefix)
w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
- if (widget->sname)
- w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix,
- widget->sname);
- } else {
+ else
w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
- if (widget->sname)
- w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
- }
if (w->name == NULL) {
kfree(w);
return NULL;
@@ -3792,7 +3793,7 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
break;
}
- if (!w->sname || !strstr(w->sname, dai_w->name))
+ if (!w->sname || !strstr(w->sname, dai_w->sname))
continue;
if (dai_w->id == snd_soc_dapm_dai_in) {
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index d0960683c409..31068b8f3db0 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -33,6 +33,7 @@
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/soc-topology.h>
+#include <sound/tlv.h>
/*
 * We make several passes over the data (since it won't necessarily be ordered)
@@ -144,7 +145,7 @@ static const struct snd_soc_tplg_kcontrol_ops io_ops[] = {
{SND_SOC_TPLG_CTL_STROBE, snd_soc_get_strobe,
snd_soc_put_strobe, NULL},
{SND_SOC_TPLG_DAPM_CTL_VOLSW, snd_soc_dapm_get_volsw,
- snd_soc_dapm_put_volsw, NULL},
+ snd_soc_dapm_put_volsw, snd_soc_info_volsw},
{SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE, snd_soc_dapm_get_enum_double,
snd_soc_dapm_put_enum_double, snd_soc_info_enum_double},
{SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT, snd_soc_dapm_get_enum_double,
@@ -534,7 +535,7 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
k->put = bops[i].put;
if (k->get == NULL && bops[i].id == hdr->ops.get)
k->get = bops[i].get;
- if (k->info == NULL && ops[i].id == hdr->ops.info)
+ if (k->info == NULL && bops[i].id == hdr->ops.info)
k->info = bops[i].info;
}
@@ -579,29 +580,51 @@ static int soc_tplg_init_kcontrol(struct soc_tplg *tplg,
return 0;
}
+
+static int soc_tplg_create_tlv_db_scale(struct soc_tplg *tplg,
+ struct snd_kcontrol_new *kc, struct snd_soc_tplg_tlv_dbscale *scale)
+{
+ unsigned int item_len = 2 * sizeof(unsigned int);
+ unsigned int *p;
+
+ p = kzalloc(item_len + 2 * sizeof(unsigned int), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ p[0] = SNDRV_CTL_TLVT_DB_SCALE;
+ p[1] = item_len;
+ p[2] = scale->min;
+ p[3] = (scale->step & TLV_DB_SCALE_MASK)
+ | (scale->mute ? TLV_DB_SCALE_MUTE : 0);
+
+ kc->tlv.p = (void *)p;
+ return 0;
+}
+
static int soc_tplg_create_tlv(struct soc_tplg *tplg,
- struct snd_kcontrol_new *kc, u32 tlv_size)
+ struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_hdr *tc)
{
struct snd_soc_tplg_ctl_tlv *tplg_tlv;
- struct snd_ctl_tlv *tlv;
- if (tlv_size == 0)
+ if (!(tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE))
return 0;
- tplg_tlv = (struct snd_soc_tplg_ctl_tlv *) tplg->pos;
- tplg->pos += tlv_size;
-
- tlv = kzalloc(sizeof(*tlv) + tlv_size, GFP_KERNEL);
- if (tlv == NULL)
- return -ENOMEM;
-
- dev_dbg(tplg->dev, " created TLV type %d size %d bytes\n",
- tplg_tlv->numid, tplg_tlv->size);
+ if (tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
+ kc->tlv.c = snd_soc_bytes_tlv_callback;
+ } else {
+ tplg_tlv = &tc->tlv;
+ switch (tplg_tlv->type) {
+ case SNDRV_CTL_TLVT_DB_SCALE:
+ return soc_tplg_create_tlv_db_scale(tplg, kc,
+ &tplg_tlv->scale);
- tlv->numid = tplg_tlv->numid;
- tlv->length = tplg_tlv->size;
- memcpy(tlv->tlv, tplg_tlv + 1, tplg_tlv->size);
- kc->tlv.p = (void *)tlv;
+ /* TODO: add support for other TLV types */
+ default:
+ dev_dbg(tplg->dev, "Unsupported TLV type %d\n",
+ tplg_tlv->type);
+ return -EINVAL;
+ }
+ }
return 0;
}
@@ -773,7 +796,7 @@ static int soc_tplg_dmixer_create(struct soc_tplg *tplg, unsigned int count,
}
/* create any TLV data */
- soc_tplg_create_tlv(tplg, &kc, mc->hdr.tlv_size);
+ soc_tplg_create_tlv(tplg, &kc, &mc->hdr);
/* register control here */
err = soc_tplg_add_kcontrol(tplg, &kc,
@@ -1351,6 +1374,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
template.reg = w->reg;
template.shift = w->shift;
template.mask = w->mask;
+ template.subseq = w->subseq;
template.on_val = w->invert ? 0 : 1;
template.off_val = w->invert ? 1 : 0;
template.ignore_suspend = w->ignore_suspend;
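The soc_tplg_create_tlv_db_scale() hunk above fills a four-word dB-scale TLV (type, payload length, min, step|mute) at topology-load time. For reference, statically defined controls get the same layout from the DECLARE_TLV_DB_SCALE() helper in include/sound/tlv.h; the sketch below is illustrative only, and the control name and dB values are invented.

#include <sound/tlv.h>

/* Hypothetical control: -50.00 dB minimum, 1.00 dB steps, muted at minimum. */
static const DECLARE_TLV_DB_SCALE(example_out_tlv, -5000, 100, 1);

/*
 * The macro expands to the same words the topology code builds dynamically:
 *   { SNDRV_CTL_TLVT_DB_SCALE, 2 * sizeof(unsigned int),
 *     -5000, 100 | TLV_DB_SCALE_MUTE }
 */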
diff --git a/sound/soc/zte/zx296702-i2s.c b/sound/soc/zte/zx296702-i2s.c
index 98d96e1b17e0..1930c42e1f55 100644
--- a/sound/soc/zte/zx296702-i2s.c
+++ b/sound/soc/zte/zx296702-i2s.c
@@ -393,9 +393,9 @@ static int zx_i2s_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
zx_i2s->mapbase = res->start;
zx_i2s->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (!zx_i2s->reg_base) {
+ if (IS_ERR(zx_i2s->reg_base)) {
dev_err(&pdev->dev, "ioremap failed!\n");
- return -EIO;
+ return PTR_ERR(zx_i2s->reg_base);
}
writel_relaxed(0, zx_i2s->reg_base + ZX_I2S_FIFO_CTRL);
diff --git a/sound/soc/zte/zx296702-spdif.c b/sound/soc/zte/zx296702-spdif.c
index 11a0e46a1156..26265ce4caca 100644
--- a/sound/soc/zte/zx296702-spdif.c
+++ b/sound/soc/zte/zx296702-spdif.c
@@ -322,9 +322,9 @@ static int zx_spdif_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
zx_spdif->mapbase = res->start;
zx_spdif->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (!zx_spdif->reg_base) {
+ if (IS_ERR(zx_spdif->reg_base)) {
dev_err(&pdev->dev, "ioremap failed!\n");
- return -EIO;
+ return PTR_ERR(zx_spdif->reg_base);
}
zx_spdif_dev_init(zx_spdif->reg_base);
diff --git a/sound/sparc/amd7930.c b/sound/sparc/amd7930.c
index 1b1a89e80d13..784ceb85b2d9 100644
--- a/sound/sparc/amd7930.c
+++ b/sound/sparc/amd7930.c
@@ -956,6 +956,7 @@ static int snd_amd7930_create(struct snd_card *card,
if (!amd->regs) {
snd_printk(KERN_ERR
"amd7930-%d: Unable to map chip registers.\n", dev);
+ kfree(amd);
return -EIO;
}
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 1fab9778807a..0450593980fd 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -638,7 +638,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
int err = -ENODEV;
down_read(&chip->shutdown_rwsem);
- if (chip->probing && chip->in_pm)
+ if (chip->probing || chip->in_pm)
err = 0;
else if (!chip->shutdown)
err = usb_autopm_get_interface(chip->pm_intf);
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
index 8461d6bf992f..204cc074adb9 100644
--- a/sound/usb/line6/pcm.c
+++ b/sound/usb/line6/pcm.c
@@ -186,12 +186,8 @@ static int line6_stream_start(struct snd_line6_pcm *line6pcm, int direction,
int ret = 0;
spin_lock_irqsave(&pstr->lock, flags);
- if (!test_and_set_bit(type, &pstr->running)) {
- if (pstr->active_urbs || pstr->unlink_urbs) {
- ret = -EBUSY;
- goto error;
- }
-
+ if (!test_and_set_bit(type, &pstr->running) &&
+ !(pstr->active_urbs || pstr->unlink_urbs)) {
pstr->count = 0;
/* Submit all currently available URBs */
if (direction == SNDRV_PCM_STREAM_PLAYBACK)
@@ -199,7 +195,6 @@ static int line6_stream_start(struct snd_line6_pcm *line6pcm, int direction,
else
ret = line6_submit_audio_in_all_urbs(line6pcm);
}
- error:
if (ret < 0)
clear_bit(type, &pstr->running);
spin_unlock_irqrestore(&pstr->lock, flags);
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index e5000da9e9d7..6a803eff87f7 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -341,6 +341,20 @@ static const struct usbmix_name_map scms_usb3318_map[] = {
{ 0 }
};
+/* Bose companion 5, the dB conversion factor is 16 instead of 256 */
+static struct usbmix_dB_map bose_companion5_dB = {-5006, -6};
+static struct usbmix_name_map bose_companion5_map[] = {
+ { 3, NULL, .dB = &bose_companion5_dB },
+ { 0 } /* terminator */
+};
+
+/* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */
+static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000};
+static struct usbmix_name_map dragonfly_1_2_map[] = {
+ { 7, NULL, .dB = &dragonfly_1_2_dB },
+ { 0 } /* terminator */
+};
+
/*
* Control map entries
*/
@@ -451,6 +465,16 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
.id = USB_ID(0x25c4, 0x0003),
.map = scms_usb3318_map,
},
+ {
+ /* Bose Companion 5 */
+ .id = USB_ID(0x05a7, 0x1020),
+ .map = bose_companion5_map,
+ },
+ {
+ /* Dragonfly DAC 1.2 */
+ .id = USB_ID(0x21b4, 0x0081),
+ .map = dragonfly_1_2_map,
+ },
{ 0 } /* terminator */
};
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 2f6d3e9a1bcd..e4756651a52c 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2512,6 +2512,74 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
},
+/* Steinberg devices */
+{
+ /* Steinberg MI2 */
+ USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x2040),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = & (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 3,
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
+ .data = &(const struct snd_usb_midi_endpoint_info) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
+{
+ /* Steinberg MI4 */
+ USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x4040),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = & (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 3,
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
+ .data = &(const struct snd_usb_midi_endpoint_info) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
+
/* TerraTec devices */
{
USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012),
diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile
index 8bd960658463..fe1b02c2c95b 100644
--- a/tools/lib/api/Makefile
+++ b/tools/lib/api/Makefile
@@ -36,7 +36,7 @@ $(LIBFILE): $(API_IN)
clean:
$(call QUIET_CLEAN, libapi) $(RM) $(LIBFILE); \
- find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o | xargs $(RM)
+ find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
FORCE:
diff --git a/tools/lib/hweight.c b/tools/lib/hweight.c
new file mode 100644
index 000000000000..0b859b884339
--- /dev/null
+++ b/tools/lib/hweight.c
@@ -0,0 +1,62 @@
+#include <linux/bitops.h>
+#include <asm/types.h>
+
+/**
+ * hweightN - returns the Hamming weight of an N-bit word
+ * @x: the word to weigh
+ *
+ * The Hamming Weight of a number is the total number of bits set in it.
+ */
+
+unsigned int __sw_hweight32(unsigned int w)
+{
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
+ w -= (w >> 1) & 0x55555555;
+ w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
+ w = (w + (w >> 4)) & 0x0f0f0f0f;
+ return (w * 0x01010101) >> 24;
+#else
+ unsigned int res = w - ((w >> 1) & 0x55555555);
+ res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
+ res = (res + (res >> 4)) & 0x0F0F0F0F;
+ res = res + (res >> 8);
+ return (res + (res >> 16)) & 0x000000FF;
+#endif
+}
+
+unsigned int __sw_hweight16(unsigned int w)
+{
+ unsigned int res = w - ((w >> 1) & 0x5555);
+ res = (res & 0x3333) + ((res >> 2) & 0x3333);
+ res = (res + (res >> 4)) & 0x0F0F;
+ return (res + (res >> 8)) & 0x00FF;
+}
+
+unsigned int __sw_hweight8(unsigned int w)
+{
+ unsigned int res = w - ((w >> 1) & 0x55);
+ res = (res & 0x33) + ((res >> 2) & 0x33);
+ return (res + (res >> 4)) & 0x0F;
+}
+
+unsigned long __sw_hweight64(__u64 w)
+{
+#if BITS_PER_LONG == 32
+ return __sw_hweight32((unsigned int)(w >> 32)) +
+ __sw_hweight32((unsigned int)w);
+#elif BITS_PER_LONG == 64
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
+ w -= (w >> 1) & 0x5555555555555555ul;
+ w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
+ w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
+ return (w * 0x0101010101010101ul) >> 56;
+#else
+ __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
+ res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
+ res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
+ res = res + (res >> 8);
+ res = res + (res >> 16);
+ return (res + (res >> 32)) & 0x00000000000000FFul;
+#endif
+#endif
+}
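The new tools/lib/hweight.c above carries the kernel's software population-count fallbacks so perf can build them without reaching into lib/. A minimal user-space sketch of how the 32-bit helper could be cross-checked is shown below; the test harness (the duplicated prototype, main(), and the sample values) is illustrative and not part of the patch.

#include <assert.h>
#include <stdio.h>

/* Prototype copied from the helper added above. */
unsigned int __sw_hweight32(unsigned int w);

int main(void)
{
	const unsigned int samples[] = { 0x0, 0x1, 0xff, 0xdeadbeef, 0xffffffff };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		/* The software fallback must agree with the compiler builtin. */
		assert(__sw_hweight32(samples[i]) ==
		       (unsigned int)__builtin_popcount(samples[i]));
		printf("hweight32(0x%08x) = %u\n", samples[i],
		       __sw_hweight32(samples[i]));
	}
	return 0;
}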
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 6daaff652aff..7851df1490e0 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -268,7 +268,7 @@ install: install_lib
clean:
$(call QUIET_CLEAN, libtraceevent) \
- $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d \
+ $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd \
$(RM) TRACEEVENT-CFLAGS tags TAGS
PHONY += force plugins
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 09dc0aabb515..d01a0aad5a01 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -18,6 +18,7 @@ tools/arch/x86/include/asm/atomic.h
tools/arch/x86/include/asm/rmwcc.h
tools/lib/traceevent
tools/lib/api
+tools/lib/hweight.c
tools/lib/rbtree.c
tools/lib/symbol/kallsyms.c
tools/lib/symbol/kallsyms.h
@@ -57,7 +58,6 @@ include/linux/perf_event.h
include/linux/list.h
include/linux/hash.h
include/linux/stringify.h
-lib/hweight.c
include/linux/swab.h
arch/*/include/asm/unistd*.h
arch/*/include/uapi/asm/unistd*.h
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 7a4b549214e3..bba34636b733 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -109,9 +109,22 @@ $(OUTPUT)PERF-VERSION-FILE: ../../.git/HEAD
$(Q)$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
$(Q)touch $(OUTPUT)PERF-VERSION-FILE
-CC = $(CROSS_COMPILE)gcc
-LD ?= $(CROSS_COMPILE)ld
-AR = $(CROSS_COMPILE)ar
+# Makefiles suck: This macro sets a default value of $(2) for the
+# variable named by $(1), unless the variable has been set by
+# environment or command line. This is necessary for CC and AR
+# because make sets default values, so the simpler ?= approach
+# won't work as expected.
+define allow-override
+ $(if $(or $(findstring environment,$(origin $(1))),\
+ $(findstring command line,$(origin $(1)))),,\
+ $(eval $(1) = $(2)))
+endef
+
+# Allow setting CC and AR and LD, or setting CROSS_COMPILE as a prefix.
+$(call allow-override,CC,$(CROSS_COMPILE)gcc)
+$(call allow-override,AR,$(CROSS_COMPILE)ar)
+$(call allow-override,LD,$(CROSS_COMPILE)ld)
+
PKG_CONFIG = $(CROSS_COMPILE)pkg-config
RM = rm -f
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index de165a1b9240..20b56eb987f8 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -521,6 +521,15 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
goto out_child;
}
+ /*
+ * Normally perf_session__new would do this, but it doesn't have the
+ * evlist.
+ */
+ if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
+ pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
+ rec->tool.ordered_events = false;
+ }
+
if (!rec->evlist->nr_groups)
perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
@@ -965,9 +974,11 @@ static struct record record = {
.tool = {
.sample = process_sample_event,
.fork = perf_event__process_fork,
+ .exit = perf_event__process_exit,
.comm = perf_event__process_comm,
.mmap = perf_event__process_mmap,
.mmap2 = perf_event__process_mmap2,
+ .ordered_events = true,
},
};
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 37e301a32f43..d99d850e1444 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -343,7 +343,7 @@ static int read_counter(struct perf_evsel *counter)
return 0;
}
-static void read_counters(bool close)
+static void read_counters(bool close_counters)
{
struct perf_evsel *counter;
@@ -354,7 +354,7 @@ static void read_counters(bool close)
if (process_counter(counter))
pr_warning("failed to process counter %s\n", counter->name);
- if (close) {
+ if (close_counters) {
perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
thread_map__nr(evsel_list->threads));
}
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index ecf319728f25..6135cc07213c 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -601,8 +601,8 @@ static void display_sig(int sig __maybe_unused)
static void display_setup_sig(void)
{
- signal(SIGSEGV, display_sig);
- signal(SIGFPE, display_sig);
+ signal(SIGSEGV, sighandler_dump_stack);
+ signal(SIGFPE, sighandler_dump_stack);
signal(SIGINT, display_sig);
signal(SIGQUIT, display_sig);
signal(SIGTERM, display_sig);
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 094ddaee104c..d31fac19c30b 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -638,7 +638,7 @@ ifndef DESTDIR
prefix ?= $(HOME)
endif
bindir_relative = bin
-bindir = $(prefix)/$(bindir_relative)
+bindir = $(abspath $(prefix)/$(bindir_relative))
mandir = share/man
infodir = share/info
perfexecdir = libexec/perf-core
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 7629bef2fd79..fa67613976a8 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -48,7 +48,7 @@ static struct rb_node *hists__filter_entries(struct rb_node *nd,
static bool hist_browser__has_filter(struct hist_browser *hb)
{
- return hists__has_filter(hb->hists) || hb->min_pcnt;
+ return hists__has_filter(hb->hists) || hb->min_pcnt || symbol_conf.has_filter;
}
static int hist_browser__get_folding(struct hist_browser *browser)
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 601d11440596..d2d318c59b37 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -143,6 +143,6 @@ $(OUTPUT)util/rbtree.o: ../lib/rbtree.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
-$(OUTPUT)util/hweight.o: ../../lib/hweight.c FORCE
+$(OUTPUT)util/hweight.o: ../lib/hweight.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 7e7405c9b936..83d9dd96fe08 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -53,11 +53,6 @@ int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
{
struct perf_event_mmap_page *pc = userpg;
-#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
- pr_err("Cannot use AUX area tracing mmaps\n");
- return -1;
-#endif
-
WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");
mm->userpg = userpg;
@@ -73,6 +68,11 @@ int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
return 0;
}
+#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
+ pr_err("Cannot use AUX area tracing mmaps\n");
+ return -1;
+#endif
+
pc->aux_offset = mp->offset;
pc->aux_size = mp->len;
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 7ff682770fdb..f1a4c833121e 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1387,6 +1387,24 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
event->fork.ptid);
int err = 0;
+ if (dump_trace)
+ perf_event__fprintf_task(event, stdout);
+
+ /*
+ * There may be an existing thread that is not actually the parent,
+ * either because we are processing events out of order, or because the
+ * (fork) event that would have removed the thread was lost. Assume the
+ * latter case and continue on as best we can.
+ */
+ if (parent->pid_ != (pid_t)event->fork.ppid) {
+ dump_printf("removing erroneous parent thread %d/%d\n",
+ parent->pid_, parent->tid);
+ machine__remove_thread(machine, parent);
+ thread__put(parent);
+ parent = machine__findnew_thread(machine, event->fork.ppid,
+ event->fork.ptid);
+ }
+
/* if a thread currently exists for the thread id remove it */
if (thread != NULL) {
machine__remove_thread(machine, thread);
@@ -1395,8 +1413,6 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
thread = machine__findnew_thread(machine, event->fork.pid,
event->fork.tid);
- if (dump_trace)
- perf_event__fprintf_task(event, stdout);
if (thread == NULL || parent == NULL ||
thread__fork(thread, parent, sample->time) < 0) {
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index e23ded40c79e..0766d98c5da5 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -10,7 +10,7 @@ util/ctype.c
util/evlist.c
util/evsel.c
util/cpumap.c
-../../lib/hweight.c
+../lib/hweight.c
util/thread_map.c
util/util.c
util/xyarray.c
@@ -19,5 +19,5 @@ util/rblist.c
util/stat.c
util/strlist.c
util/trace-event.c
-../../lib/rbtree.c
+../lib/rbtree.c
util/string.c
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 53e8bb7bc852..2a5d8d7698ae 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -85,7 +85,7 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
- update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
else if (perf_stat_evsel__is(counter, TRANSACTION_START))
update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
else if (perf_stat_evsel__is(counter, ELISION_START))
@@ -398,20 +398,18 @@ void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
" # %5.2f%% aborted cycles ",
100.0 * ((total2-avg) / total));
} else if (perf_stat_evsel__is(evsel, TRANSACTION_START) &&
- avg > 0 &&
runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
- if (total)
+ if (avg)
ratio = total / avg;
fprintf(out, " # %8.0f cycles / transaction ", ratio);
} else if (perf_stat_evsel__is(evsel, ELISION_START) &&
- avg > 0 &&
runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
- if (total)
+ if (avg)
ratio = total / avg;
fprintf(out, " # %8.0f cycles / elision ", ratio);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 48b588c6951a..60f11414bb5c 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1911,6 +1911,8 @@ int setup_list(struct strlist **list, const char *list_str,
pr_err("problems parsing %s list\n", list_name);
return -1;
}
+
+ symbol_conf.has_filter = true;
return 0;
}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index bef47ead1d9b..b98ce51af142 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -105,7 +105,8 @@ struct symbol_conf {
demangle_kernel,
filter_relative,
show_hist_headers,
- branch_callstack;
+ branch_callstack,
+ has_filter;
const char *vmlinux_name,
*kallsyms_name,
*source_prefix,
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 28c4b746baa1..0a9ae8014729 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -191,6 +191,12 @@ static int thread__clone_map_groups(struct thread *thread,
if (thread->pid_ == parent->pid_)
return 0;
+ if (thread->mg == parent->mg) {
+ pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
+ thread->pid_, thread->tid, parent->pid_, parent->tid);
+ return 0;
+ }
+
/* But this one is new process, copy maps. */
for (i = 0; i < MAP__NR_TYPES; ++i)
if (map_groups__clone(thread->mg, parent->mg, i) < 0)
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index da7646d767fe..292ae2c90e06 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -136,8 +136,7 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
if (grow) {
struct thread_map *tmp;
- tmp = realloc(threads, (sizeof(*threads) +
- max_threads * sizeof(pid_t)));
+ tmp = thread_map__realloc(threads, max_threads);
if (tmp == NULL)
goto out_free_namelist;
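
thread_map__realloc() itself is not part of this hunk; the call replaces an open-coded realloc() of a structure that ends in a flexible array of pids. A minimal sketch of such a helper, assuming only that layout (the real perf structure also carries further fields, e.g. a reference count), with sketch names rather than the actual perf ones:

#include <stdlib.h>
#include <sys/types.h>

/* Sketch only: a thread map as a fixed header plus a flexible array of pids. */
struct thread_map_sketch {
        int nr;
        pid_t map[];
};

static struct thread_map_sketch *
thread_map_sketch__realloc(struct thread_map_sketch *map, int nr)
{
        size_t size = sizeof(*map) + sizeof(pid_t) * nr;

        return realloc(map, size);      /* caller checks for NULL, as the hunk above does */
}

Centralizing the size computation in one helper keeps call sites such as thread_map__new_by_uid() from repeating the sizeof arithmetic and getting it subtly wrong.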
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index 4b89118f158d..44d440da15dc 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -236,18 +236,16 @@ static struct dso *__machine__findnew_compat(struct machine *machine,
const char *file_name;
struct dso *dso;
- pthread_rwlock_wrlock(&machine->dsos.lock);
dso = __dsos__find(&machine->dsos, vdso_file->dso_name, true);
if (dso)
- goto out_unlock;
+ goto out;
file_name = vdso__get_compat_file(vdso_file);
if (!file_name)
- goto out_unlock;
+ goto out;
dso = __machine__addnew_vdso(machine, vdso_file->dso_name, file_name);
-out_unlock:
- pthread_rwlock_unlock(&machine->dsos.lock);
+out:
return dso;
}
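
The lock is dropped here because the double-underscore naming in this code marks functions that expect the caller to already hold machine->dsos.lock, and POSIX rwlocks are not recursive: write-locking the same lock again from __machine__findnew_compat() would deadlock or fail with EDEADLK. A reduced sketch of that convention, with placeholder names rather than the perf API:

#include <pthread.h>

/* Sketch of the "__" locking convention: the unlocked variant assumes the
 * caller holds the lock; only the public wrapper takes it.
 */
struct dso_table_sketch {
        pthread_rwlock_t lock;
        /* ... lookup structures ... */
};

static void *__table_findnew(struct dso_table_sketch *t, const char *name)
{
        /* lock already held by the caller */
        (void)t;
        (void)name;
        return NULL;    /* placeholder for the real find-or-add logic */
}

static void *table_findnew(struct dso_table_sketch *t, const char *name)
{
        void *dso;

        pthread_rwlock_wrlock(&t->lock);
        dso = __table_findnew(t, name);
        pthread_rwlock_unlock(&t->lock);
        return dso;
}

External callers go through the locking wrapper, while code that is already inside the critical section calls the "__" variant directly, which is exactly the situation the callers of __machine__findnew_compat() are in.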
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
index 7f0c756993af..3d7dc6afc3f8 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
@@ -191,7 +191,7 @@ int main(int argc, char *argv[])
if (res > 0) {
atomic_set(&requeued, 1);
break;
- } else if (res > 0) {
+ } else if (res < 0) {
error("FUTEX_CMP_REQUEUE_PI failed\n", errno);
ret = RET_ERROR;
break;
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 620e37f741b8..1dd087da6f31 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -155,6 +155,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
list_add_tail(&kvg->node, &kv->group_list);
kvg->vfio_group = vfio_group;
+ kvm_arch_start_assignment(dev->kvm);
+
mutex_unlock(&kv->lock);
kvm_vfio_update_coherency(dev);
@@ -190,6 +192,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
break;
}
+ kvm_arch_end_assignment(dev->kvm);
+
mutex_unlock(&kv->lock);
kvm_vfio_group_put_external_user(vfio_group);
@@ -239,6 +243,7 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
kvm_vfio_group_put_external_user(kvg->vfio_group);
list_del(&kvg->node);
kfree(kvg);
+ kvm_arch_end_assignment(dev->kvm);
}
kvm_vfio_update_coherency(dev);
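
The kvm_arch_start_assignment()/kvm_arch_end_assignment() calls give the architecture a per-VM notion of "has at least one assigned (VFIO) device", balanced across group add, group delete and device destruction above. A plausible arch-side sketch, assuming the simplest implementation of a per-VM counter; the field name is illustrative and not taken from this patch:

/* Sketch of per-VM assignment tracking behind the hooks used above.
 * "assigned_device_count" is an illustrative field; the concrete arch
 * structure is not shown in this patch.
 */
#include <linux/atomic.h>
#include <linux/kvm_host.h>

void kvm_arch_start_assignment(struct kvm *kvm)
{
        atomic_inc(&kvm->arch.assigned_device_count);
}

void kvm_arch_end_assignment(struct kvm *kvm)
{
        atomic_dec(&kvm->arch.assigned_device_count);
}

bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
        return atomic_read(&kvm->arch.assigned_device_count) != 0;
}

Architectures that do not need the information can implement the hooks as no-ops; the VFIO device code above only guarantees that every start is eventually paired with an end.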