472 files changed, 5779 insertions, 9964 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb index 864637f25bee..3a4abfc44f5e 100644 --- a/Documentation/ABI/testing/sysfs-bus-usb +++ b/Documentation/ABI/testing/sysfs-bus-usb @@ -1,3 +1,23 @@ +What: /sys/bus/usb/devices/INTERFACE/authorized +Date: August 2015 +Description: + This allows authorizing (1) or deauthorizing (0) + individual interfaces instead of a whole device, + in contrast to the device authorization. + If a deauthorized interface is authorized again, + driver probing must be triggered manually + by writing INTERFACE to /sys/bus/usb/drivers_probe. + This allows avoiding side-effects with drivers + that need multiple interfaces. + A deauthorized interface cannot be probed or claimed. + +What: /sys/bus/usb/devices/usbX/interface_authorized_default +Date: August 2015 +Description: + This is used as the value that determines whether interfaces + are authorized by default. + The value can be 1 or 0. It is 1 by default. + What: /sys/bus/usb/device/.../authorized Date: July 2008 KernelVersion: 2.6.26 diff --git a/Documentation/Changes index 6d8863004858..f447f0516f07 100644 --- a/Documentation/Changes +++ b/Documentation/Changes @@ -43,7 +43,7 @@ o udev 081 # udevd --version o grub 0.93 # grub --version || grub-install --version o mcelog 0.6 # mcelog --version o iptables 1.4.2 # iptables -V -o openssl & libcrypto 1.0.1k # openssl version +o openssl & libcrypto 1.0.0 # openssl version Kernel compilation diff --git a/Documentation/device-mapper/snapshot.txt b/Documentation/device-mapper/snapshot.txt index 0d5bc46dc167..ad6949bff2e3 100644 --- a/Documentation/device-mapper/snapshot.txt +++ b/Documentation/device-mapper/snapshot.txt @@ -41,9 +41,13 @@ useless and be disabled, returning errors. So it is important to monitor the amount of free space and expand the <COW device> before it fills up. <persistent?> is P (Persistent) or N (Not persistent - will not survive -after reboot). -The difference is that for transient snapshots less metadata must be -saved on disk - they can be kept in memory by the kernel. +after reboot). O (Overflow) can be added as a persistent store option +to allow userspace to advertise its support for seeing "Overflow" in the +snapshot status. So supported store types are "P", "PO" and "N". + +The difference between persistent and transient is that with transient +snapshots less metadata must be saved on disk - they can be kept in +memory by the kernel.
* snapshot-merge <origin> <COW device> <persistent> <chunksize> diff --git a/Documentation/devicetree/bindings/input/cypress,cyapa.txt b/Documentation/devicetree/bindings/input/cypress,cyapa.txt index 635a3b036630..8d91ba9ff2fd 100644 --- a/Documentation/devicetree/bindings/input/cypress,cyapa.txt +++ b/Documentation/devicetree/bindings/input/cypress,cyapa.txt @@ -25,7 +25,7 @@ Example: /* Cypress Gen3 touchpad */ touchpad@67 { compatible = "cypress,cyapa"; - reg = <0x24>; + reg = <0x67>; interrupt-parent = <&gpio>; interrupts = <2 IRQ_TYPE_EDGE_FALLING>; /* GPIO 2 */ wakeup-source; diff --git a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt index 391717a68f3b..ec96b1f01478 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt @@ -4,8 +4,8 @@ The MISC interrupt controller is a secondary controller for lower priority interrupt. Required Properties: -- compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc" - as fallback +- compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc" or + "qca,<soctype>-cpu-intc", "qca,ar7240-misc-intc" - reg: Base address and size of the controllers memory area - interrupt-parent: phandle of the parent interrupt controller. - interrupts: Interrupt specifier for the controllers interrupt. @@ -13,6 +13,9 @@ Required Properties: - #interrupt-cells : Specifies the number of cells needed to encode interrupt source, should be 1 +Compatible fallback depends on the SoC. Use ar7100 for ar71xx and ar913x, +use ar7240 for all other SoCs. + Please refer to interrupts.txt in this directory for details of the common Interrupt Controllers bindings used by client devices. @@ -28,3 +31,16 @@ Example: interrupt-controller; #interrupt-cells = <1>; }; + +Another example: + + interrupt-controller@18060010 { + compatible = "qca,ar9331-misc-intc", qca,ar7240-misc-intc"; + reg = <0x18060010 0x4>; + + interrupt-parent = <&cpuintc>; + interrupts = <6>; + + interrupt-controller; + #interrupt-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/phy/brcm,cygnus-pcie-phy.txt b/Documentation/devicetree/bindings/phy/brcm,cygnus-pcie-phy.txt new file mode 100644 index 000000000000..761c4bc24a9b --- /dev/null +++ b/Documentation/devicetree/bindings/phy/brcm,cygnus-pcie-phy.txt @@ -0,0 +1,47 @@ +Broadcom Cygnus PCIe PHY + +Required properties: +- compatible: must be "brcm,cygnus-pcie-phy" +- reg: base address and length of the PCIe PHY block +- #address-cells: must be 1 +- #size-cells: must be 0 + +Each PCIe PHY should be represented by a child node + +Required properties For the child node: +- reg: the PHY ID +0 - PCIe RC 0 +1 - PCIe RC 1 +- #phy-cells: must be 0 + +Example: + pcie_phy: phy@0301d0a0 { + compatible = "brcm,cygnus-pcie-phy"; + reg = <0x0301d0a0 0x14>; + + pcie0_phy: phy@0 { + reg = <0>; + #phy-cells = <0>; + }; + + pcie1_phy: phy@1 { + reg = <1>; + #phy-cells = <0>; + }; + }; + + /* users of the PCIe phy */ + + pcie0: pcie@18012000 { + ... + ... + phys = <&pcie0_phy>; + phy-names = "pcie-phy"; + }; + + pcie1: pcie@18013000 { + ... + ... 
+ phys = <&pcie1_phy>; + phy-names = "pcie-phy"; + }; diff --git a/Documentation/devicetree/bindings/phy/phy-mt65xx-usb.txt b/Documentation/devicetree/bindings/phy/phy-mt65xx-usb.txt new file mode 100644 index 000000000000..00100cf3e037 --- /dev/null +++ b/Documentation/devicetree/bindings/phy/phy-mt65xx-usb.txt @@ -0,0 +1,68 @@ +mt65xx USB3.0 PHY binding +-------------------------- + +This binding describes a USB3.0 phy for mt65xx platforms of Mediatek SoCs. + +Required properties (controller (parent) node): + - compatible : should be "mediatek,mt8173-u3phy" + - reg : offset and length of register for phy, exclude port's + register. + - clocks : a list of phandle + clock-specifier pairs, one for each + entry in clock-names + - clock-names : must contain + "u3phya_ref": for reference clock of usb3.0 analog phy. + +Required nodes : a sub-node is required for each port the controller + provides. Address range information including the usual + 'reg' property is used inside these nodes to describe + the controller's topology. + +Required properties (port (child) node): +- reg : address and length of the register set for the port. +- #phy-cells : should be 1 (See second example) + cell after port phandle is phy type from: + - PHY_TYPE_USB2 + - PHY_TYPE_USB3 + +Example: + +u3phy: usb-phy@11290000 { + compatible = "mediatek,mt8173-u3phy"; + reg = <0 0x11290000 0 0x800>; + clocks = <&apmixedsys CLK_APMIXED_REF2USB_TX>; + clock-names = "u3phya_ref"; + #address-cells = <2>; + #size-cells = <2>; + ranges; + status = "okay"; + + phy_port0: port@11290800 { + reg = <0 0x11290800 0 0x800>; + #phy-cells = <1>; + status = "okay"; + }; + + phy_port1: port@11291000 { + reg = <0 0x11291000 0 0x800>; + #phy-cells = <1>; + status = "okay"; + }; +}; + +Specifying phy control of devices +--------------------------------- + +Device nodes should specify the configuration required in their "phys" +property, containing a phandle to the phy port node and a device type; +phy-names for each port are optional. + +Example: + +#include <dt-bindings/phy/phy.h> + +usb30: usb@11270000 { + ... + phys = <&phy_port0 PHY_TYPE_USB3>; + phy-names = "usb3-0"; + ... +}; diff --git a/Documentation/devicetree/bindings/phy/samsung-phy.txt b/Documentation/devicetree/bindings/phy/samsung-phy.txt index 60c6f2a633e0..0289d3b07853 100644 --- a/Documentation/devicetree/bindings/phy/samsung-phy.txt +++ b/Documentation/devicetree/bindings/phy/samsung-phy.txt @@ -44,6 +44,9 @@ Required properties: - the "ref" clock is used to get the rate of the clock provided to the PHY module +Optional properties: +- vbus-supply: power-supply phandle for vbus power source + The first phandle argument in the PHY specifier identifies the PHY, its meaning is compatible dependent. For the currently supported SoCs (Exynos 4210 and Exynos 4212) it is as follows: diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt index 8f771441be60..705075da2f10 100644 --- a/Documentation/devicetree/bindings/spi/sh-msiof.txt +++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt @@ -51,7 +51,7 @@ Optional properties, deprecated for soctype-specific bindings: - renesas,tx-fifo-size : Overrides the default tx fifo size given in words (default is 64) - renesas,rx-fifo-size : Overrides the default rx fifo size given in words - (default is 64, or 256 on R-Car Gen2) + (default is 64) Pinctrl properties might be needed, too. See Documentation/devicetree/bindings/pinctrl/renesas,*.
diff --git a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt index 64a4ca6cf96f..7d48f63db44e 100644 --- a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt +++ b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt @@ -5,6 +5,7 @@ Required properties: - "renesas,usbhs-r8a7790" - "renesas,usbhs-r8a7791" - "renesas,usbhs-r8a7794" + - "renesas,usbhs-r8a7795" - reg: Base address and length of the register for the USBHS - interrupts: Interrupt specifier for the USBHS - clocks: A list of phandle + clock specifier pairs diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt index b85d000faeb4..c51f1146f3bd 100644 --- a/Documentation/input/multi-touch-protocol.txt +++ b/Documentation/input/multi-touch-protocol.txt @@ -361,7 +361,7 @@ For win8 devices with both T and C coordinates, the position mapping is ABS_MT_POSITION_X := T_X ABS_MT_POSITION_Y := T_Y ABS_MT_TOOL_X := C_X - ABS_MT_TOOL_X := C_Y + ABS_MT_TOOL_Y := C_Y Unfortunately, there is not enough information to specify both the touching ellipse and the tool ellipse, so one has to resort to approximations. One diff --git a/Documentation/power/pci.txt b/Documentation/power/pci.txt index 62328d76b55b..b0e911e0e8f5 100644 --- a/Documentation/power/pci.txt +++ b/Documentation/power/pci.txt @@ -979,20 +979,45 @@ every time right after the runtime_resume() callback has returned (alternatively, the runtime_suspend() callback will have to check if the device should really be suspended and return -EAGAIN if that is not the case). -The runtime PM of PCI devices is disabled by default. It is also blocked by -pci_pm_init() that runs the pm_runtime_forbid() helper function. If a PCI -driver implements the runtime PM callbacks and intends to use the runtime PM -framework provided by the PM core and the PCI subsystem, it should enable this -feature by executing the pm_runtime_enable() helper function. However, the -driver should not call the pm_runtime_allow() helper function unblocking -the runtime PM of the device. Instead, it should allow user space or some -platform-specific code to do that (user space can do it via sysfs), although -once it has called pm_runtime_enable(), it must be prepared to handle the +The runtime PM of PCI devices is enabled by default by the PCI core. PCI +device drivers do not need to enable it and should not attempt to do so. +However, it is blocked by pci_pm_init() that runs the pm_runtime_forbid() +helper function. In addition to that, the runtime PM usage counter of +each PCI device is incremented by local_pci_probe() before executing the +probe callback provided by the device's driver. + +If a PCI driver implements the runtime PM callbacks and intends to use the +runtime PM framework provided by the PM core and the PCI subsystem, it needs +to decrement the device's runtime PM usage counter in its probe callback +function. If it doesn't do that, the counter will always be different from +zero for the device and it will never be runtime-suspended. The simplest +way to do that is by calling pm_runtime_put_noidle(), but if the driver +wants to schedule an autosuspend right away, for example, it may call +pm_runtime_put_autosuspend() instead for this purpose. Generally, it +just needs to call a function that decrements the devices usage counter +from its probe routine to make runtime PM work for the device. 
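As a concrete illustration of the counter handling described in this hunk, a minimal, hypothetical sketch of a PCI driver's probe and remove callbacks could look as follows; the driver name, device IDs and pcim_enable_device() setup are made up for the example, and the dev_pm_ops with the actual runtime_suspend()/runtime_resume() callbacks are assumed to exist elsewhere (the remove-side balancing is explained further below):

/*
 * Illustrative sketch only, not part of the patch above.  It shows the
 * documented pattern: local_pci_probe() has already incremented the runtime
 * PM usage counter before the probe callback runs, so the driver drops that
 * reference in probe and takes it back in remove.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pcim_enable_device(pdev);		/* hypothetical device setup */
	if (ret)
		return ret;

	/*
	 * Drop the usage counter reference taken by the PCI core so the
	 * device can be runtime-suspended once user space (or platform
	 * code) unblocks it with pm_runtime_allow() via sysfs.
	 */
	pm_runtime_put_noidle(&pdev->dev);
	return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
	/*
	 * Balance the pm_runtime_put_noidle() from probe.  The core has
	 * already resumed the device and incremented its usage counter
	 * before calling remove, so this cannot trigger a resume itself.
	 */
	pm_runtime_get_noresume(&pdev->dev);
}

static const struct pci_device_id foo_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* hypothetical IDs */
	{ }
};
MODULE_DEVICE_TABLE(pci, foo_ids);

static struct pci_driver foo_driver = {
	.name		= "foo-runtime-pm-example",
	.id_table	= foo_ids,
	.probe		= foo_probe,
	.remove		= foo_remove,
};
module_pci_driver(foo_driver);
MODULE_LICENSE("GPL");

As the text above notes, pm_runtime_put_autosuspend() could be used in probe instead of pm_runtime_put_noidle() if the driver wants to schedule an autosuspend right away.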
+ +It is important to remember that the driver's runtime_suspend() callback +may be executed right after the usage counter has been decremented, because +user space may already have caused the pm_runtime_allow() helper function, +which unblocks the runtime PM of the device, to run via sysfs, so the driver +must be prepared to cope with that. + +The driver itself should not call pm_runtime_allow(), though. Instead, it +should let user space or some platform-specific code do that (user space can +do it via sysfs as stated above), but it must be prepared to handle the runtime PM of the device correctly as soon as pm_runtime_allow() is called -(which may happen at any time). [It also is possible that user space causes -pm_runtime_allow() to be called via sysfs before the driver is loaded, so in -fact the driver has to be prepared to handle the runtime PM of the device as -soon as it calls pm_runtime_enable().] +(which may happen at any time, even before the driver is loaded). + +When the driver's remove callback runs, it has to balance the decrementation +of the device's runtime PM usage counter at probe time. For this reason, +if it has decremented the counter in its probe callback, it must run +pm_runtime_get_noresume() in its remove callback. [Since the core carries +out a runtime resume of the device and bumps up the device's usage counter +before running the driver's remove callback, the runtime PM of the device +is effectively disabled for the duration of the remove execution and all +runtime PM helper functions incrementing the device's usage counter are +then effectively equivalent to pm_runtime_get_noresume().] The runtime PM framework works by processing requests to suspend or resume devices, or to check if they are idle (in which cases it is reasonable to diff --git a/Documentation/ptp/testptp.c index 2bc8abc57fa0..6c6247aaa7b9 100644 --- a/Documentation/ptp/testptp.c +++ b/Documentation/ptp/testptp.c @@ -18,6 +18,7 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define _GNU_SOURCE +#define __SANE_USERSPACE_TYPES__ /* For PPC64, to get LL64 types */ #include <errno.h> #include <fcntl.h> #include <inttypes.h> diff --git a/Documentation/usb/authorization.txt index c069b6884c77..c7e985f05d8f 100644 --- a/Documentation/usb/authorization.txt +++ b/Documentation/usb/authorization.txt @@ -90,3 +90,34 @@ etc, but you get the idea. Anybody with access to a device gadget kit can fake descriptors and device info. Don't trust that. You are welcome. + +Interface authorization +----------------------- +There is a similar approach to allow or deny specific USB interfaces. +This allows blocking only a subset of a USB device. + +Authorize an interface: +$ echo 1 > /sys/bus/usb/devices/INTERFACE/authorized + +Deauthorize an interface: +$ echo 0 > /sys/bus/usb/devices/INTERFACE/authorized + +The default value for new interfaces +on a particular USB bus can be changed, too. + +Allow interfaces by default: +$ echo 1 > /sys/bus/usb/devices/usbX/interface_authorized_default + +Deny interfaces by default: +$ echo 0 > /sys/bus/usb/devices/usbX/interface_authorized_default + +By default the interface_authorized_default bit is 1, +so all interfaces are authorized by default.
+ +Note: +If a deauthorized interface is authorized again, driver probing must +be triggered manually by writing INTERFACE to /sys/bus/usb/drivers_probe + +For drivers that need multiple interfaces, all needed interfaces should be +authorized first. After that the drivers should be probed. +This avoids side effects. diff --git a/MAINTAINERS b/MAINTAINERS index 9f6685f6c5a9..7f142e748bea 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1297,6 +1297,13 @@ F: arch/arm/mach-mediatek/ N: mtk K: mediatek +ARM/Mediatek USB3 PHY DRIVER +M: Chunfeng Yun <chunfeng.yun@mediatek.com> +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: drivers/phy/phy-mt65xx-usb3.c + ARM/MICREL KS8695 ARCHITECTURE M: Greg Ungerer <gerg@uclinux.org> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -4003,7 +4010,7 @@ S: Maintained F: sound/usb/misc/ua101.c EXTENSIBLE FIRMWARE INTERFACE (EFI) -M: Matt Fleming <matt.fleming@intel.com> +M: Matt Fleming <matt@codeblueprint.co.uk> L: linux-efi@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git S: Maintained @@ -4018,7 +4025,7 @@ F: include/linux/efi*.h EFI VARIABLE FILESYSTEM M: Matthew Garrett <matthew.garrett@nebula.com> M: Jeremy Kerr <jk@ozlabs.org> -M: Matt Fleming <matt.fleming@intel.com> +M: Matt Fleming <matt@codeblueprint.co.uk> T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git L: linux-efi@vger.kernel.org S: Maintained @@ -5957,7 +5964,7 @@ F: virt/kvm/ KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V M: Joerg Roedel <joro@8bytes.org> L: kvm@vger.kernel.org -W: http://kvm.qumranet.com +W: http://www.linux-kvm.org/ S: Maintained F: arch/x86/include/asm/svm.h F: arch/x86/kvm/svm.c @@ -5965,7 +5972,7 @@ F: arch/x86/kvm/svm.c KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC M: Alexander Graf <agraf@suse.com> L: kvm-ppc@vger.kernel.org -W: http://kvm.qumranet.com +W: http://www.linux-kvm.org/ T: git git://github.com/agraf/linux-2.6.git S: Supported F: arch/powerpc/include/asm/kvm* @@ -9914,7 +9921,6 @@ S: Maintained F: drivers/staging/lustre STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec) -M: Julian Andres Klode <jak@jak-linux.org> M: Marc Dietrich <marvin24@gmx.de> L: ac100@lists.launchpad.net (moderated for non-subscribers) L: linux-tegra@vger.kernel.org @@ -11378,15 +11384,6 @@ W: http://oops.ghostprotocols.net:81/blog S: Maintained F: drivers/net/wireless/wl3501* -WM97XX TOUCHSCREEN DRIVERS -M: Mark Brown <broonie@kernel.org> -M: Liam Girdwood <lrg@slimlogic.co.uk> -L: linux-input@vger.kernel.org -W: https://github.com/CirrusLogic/linux-drivers/wiki -S: Supported -F: drivers/input/touchscreen/*wm97* -F: include/linux/wm97xx.h - WOLFSON MICROELECTRONICS DRIVERS L: patches@opensource.wolfsonmicro.com T: git https://github.com/CirrusLogic/linux-drivers.git diff --git a/Makefile b/Makefile @@ -1,8 +1,8 @@ VERSION = 4 PATCHLEVEL = 3 SUBLEVEL = 0 -EXTRAVERSION = -rc3 -NAME = Hurr durr I'ma sheep +EXTRAVERSION = -rc5 +NAME = Blurry Fish Butt # *DOCUMENTATION* # To see a list of typical targets execute "make help" diff --git a/arch/alpha/include/asm/word-at-a-time.h b/arch/alpha/include/asm/word-at-a-time.h index 6b340d0f1521..902e6ab00a06 100644 --- a/arch/alpha/include/asm/word-at-a-time.h +++ b/arch/alpha/include/asm/word-at-a-time.h @@ -52,4 +52,6 @@ static inline unsigned long find_zero(unsigned long bits) #endif } +#define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1) + #endif /* _ASM_WORD_AT_A_TIME_H
*/ diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index 7611b10a2d23..0b10ef2a4372 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild @@ -48,4 +48,5 @@ generic-y += types.h generic-y += ucontext.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index 233159d2eaab..bb8fa023d574 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -578,7 +578,7 @@ dtb-$(CONFIG_MACH_SUN4I) += \ sun4i-a10-hackberry.dtb \ sun4i-a10-hyundai-a7hd.dtb \ sun4i-a10-inet97fv2.dtb \ - sun4i-a10-itead-iteaduino-plus.dts \ + sun4i-a10-itead-iteaduino-plus.dtb \ sun4i-a10-jesurun-q5.dtb \ sun4i-a10-marsboard.dtb \ sun4i-a10-mini-xplus.dtb \ diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi index ca0e3c15977f..294cfe40388d 100644 --- a/arch/arm/boot/dts/exynos4412.dtsi +++ b/arch/arm/boot/dts/exynos4412.dtsi @@ -98,6 +98,7 @@ opp-hz = /bits/ 64 <800000000>; opp-microvolt = <1000000>; clock-latency-ns = <200000>; + opp-suspend; }; opp07 { opp-hz = /bits/ 64 <900000000>; diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts index 15aea760c1da..c625e71217aa 100644 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts @@ -197,6 +197,7 @@ regulator-name = "P1.8V_LDO_OUT10"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; }; ldo11_reg: LDO11 { diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi index df9aee92ecf4..1b3d6c769a3c 100644 --- a/arch/arm/boot/dts/exynos5420.dtsi +++ b/arch/arm/boot/dts/exynos5420.dtsi @@ -1117,7 +1117,7 @@ interrupt-parent = <&combiner>; interrupts = <3 0>; clock-names = "sysmmu", "master"; - clocks = <&clock CLK_SMMU_FIMD1M0>, <&clock CLK_FIMD1>; + clocks = <&clock CLK_SMMU_FIMD1M1>, <&clock CLK_FIMD1>; power-domains = <&disp_pd>; #iommu-cells = <0>; }; diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi index 79ffdfe712aa..3b43e57845ae 100644 --- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi +++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi @@ -472,7 +472,6 @@ */ pinctrl-0 = <&pwm0_out &pwm1_out &pwm2_out &pwm3_out>; pinctrl-names = "default"; - samsung,pwm-outputs = <0>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx53-qsrb.dts b/arch/arm/boot/dts/imx53-qsrb.dts index 66e47de5e826..96d7eede412e 100644 --- a/arch/arm/boot/dts/imx53-qsrb.dts +++ b/arch/arm/boot/dts/imx53-qsrb.dts @@ -36,7 +36,7 @@ pinctrl-0 = <&pinctrl_pmic>; reg = <0x08>; interrupt-parent = <&gpio5>; - interrupts = <23 0x8>; + interrupts = <23 IRQ_TYPE_LEVEL_HIGH>; regulators { sw1_reg: sw1a { regulator-name = "SW1"; diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index c3e3ca9362fb..cd170376eaca 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi @@ -15,6 +15,7 @@ #include <dt-bindings/clock/imx5-clock.h> #include <dt-bindings/gpio/gpio.h> #include <dt-bindings/input/input.h> +#include <dt-bindings/interrupt-controller/irq.h> / { aliases { diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi index 3373fd958e95..a50356243888 100644 --- a/arch/arm/boot/dts/imx6qdl-rex.dtsi +++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi @@ -35,7 +35,6 @@ compatible = "regulator-fixed"; reg = <1>; pinctrl-names = 
"default"; - pinctrl-0 = <&pinctrl_usbh1>; regulator-name = "usbh1_vbus"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; @@ -47,7 +46,6 @@ compatible = "regulator-fixed"; reg = <2>; pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_usbotg>; regulator-name = "usb_otg_vbus"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi index a0b2a79cbfbd..4624d0f2a754 100644 --- a/arch/arm/boot/dts/r8a7790.dtsi +++ b/arch/arm/boot/dts/r8a7790.dtsi @@ -1627,6 +1627,7 @@ "mix.0", "mix.1", "dvc.0", "dvc.1", "clk_a", "clk_b", "clk_c", "clk_i"; + power-domains = <&cpg_clocks>; status = "disabled"; diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi index 831525dd39a6..1666c8a6b143 100644 --- a/arch/arm/boot/dts/r8a7791.dtsi +++ b/arch/arm/boot/dts/r8a7791.dtsi @@ -1677,6 +1677,7 @@ "mix.0", "mix.1", "dvc.0", "dvc.1", "clk_a", "clk_b", "clk_c", "clk_i"; + power-domains = <&cpg_clocks>; status = "disabled"; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 2bebaa286f9a..391230c3dc93 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -107,7 +107,7 @@ 720000 1200000 528000 1100000 312000 1000000 - 144000 900000 + 144000 1000000 >; #cooling-cells = <2>; cooling-min-level = <0>; diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c index 9bdf54795f05..56978199c479 100644 --- a/arch/arm/mach-exynos/mcpm-exynos.c +++ b/arch/arm/mach-exynos/mcpm-exynos.c @@ -20,6 +20,7 @@ #include <asm/cputype.h> #include <asm/cp15.h> #include <asm/mcpm.h> +#include <asm/smp_plat.h> #include "regs-pmu.h" #include "common.h" @@ -70,7 +71,31 @@ static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster) cluster >= EXYNOS5420_NR_CLUSTERS) return -EINVAL; - exynos_cpu_power_up(cpunr); + if (!exynos_cpu_power_state(cpunr)) { + exynos_cpu_power_up(cpunr); + + /* + * This assumes the cluster number of the big cores(Cortex A15) + * is 0 and the Little cores(Cortex A7) is 1. + * When the system was booted from the Little core, + * they should be reset during power up cpu. + */ + if (cluster && + cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) { + /* + * Before we reset the Little cores, we should wait + * the SPARE2 register is set to 1 because the init + * codes of the iROM will set the register after + * initialization. 
+ */ + while (!pmu_raw_readl(S5P_PMU_SPARE2)) + udelay(10); + + pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu), + EXYNOS_SWRESET); + } + } + return 0; } diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h index b7614333d296..fba9068ed260 100644 --- a/arch/arm/mach-exynos/regs-pmu.h +++ b/arch/arm/mach-exynos/regs-pmu.h @@ -513,6 +513,12 @@ static inline unsigned int exynos_pmu_cpunr(unsigned int mpidr) #define SPREAD_ENABLE 0xF #define SPREAD_USE_STANDWFI 0xF +#define EXYNOS5420_KFC_CORE_RESET0 BIT(8) +#define EXYNOS5420_KFC_ETM_RESET0 BIT(20) + +#define EXYNOS5420_KFC_CORE_RESET(_nr) \ + ((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr)) + #define EXYNOS5420_BB_CON1 0x0784 #define EXYNOS5420_BB_SEL_EN BIT(31) #define EXYNOS5420_BB_PMOS_EN BIT(7) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index b0329be95cb1..26b066690593 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -79,7 +79,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); #define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) -#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN) +#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN) #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) #define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) @@ -496,7 +496,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | - PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK; + PTE_PROT_NONE | PTE_VALID | PTE_WRITE; /* preserve the hardware dirty information */ if (pte_hw_dirty(pte)) pte = pte_mkdirty(pte); diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index cebf78661a55..253021ef2769 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -201,7 +201,7 @@ void unregister_step_hook(struct step_hook *hook) } /* - * Call registered single step handers + * Call registered single step handlers * There is no Syndrome info to check for determining the handler. * So we call all the registered handlers, until the right handler is * found which returns zero. @@ -271,20 +271,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr, * Use reader/writer locks instead of plain spinlock. 
*/ static LIST_HEAD(break_hook); -static DEFINE_RWLOCK(break_hook_lock); +static DEFINE_SPINLOCK(break_hook_lock); void register_break_hook(struct break_hook *hook) { - write_lock(&break_hook_lock); - list_add(&hook->node, &break_hook); - write_unlock(&break_hook_lock); + spin_lock(&break_hook_lock); + list_add_rcu(&hook->node, &break_hook); + spin_unlock(&break_hook_lock); } void unregister_break_hook(struct break_hook *hook) { - write_lock(&break_hook_lock); - list_del(&hook->node); - write_unlock(&break_hook_lock); + spin_lock(&break_hook_lock); + list_del_rcu(&hook->node); + spin_unlock(&break_hook_lock); + synchronize_rcu(); } static int call_break_hook(struct pt_regs *regs, unsigned int esr) @@ -292,11 +293,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr) struct break_hook *hook; int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL; - read_lock(&break_hook_lock); - list_for_each_entry(hook, &break_hook, node) + rcu_read_lock(); + list_for_each_entry_rcu(hook, &break_hook, node) if ((esr & hook->esr_mask) == hook->esr_val) fn = hook->fn; - read_unlock(&break_hook_lock); + rcu_read_unlock(); return fn ? fn(regs, esr) : DBG_HOOK_ERROR; } diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index e8ca6eaedd02..13671a9cf016 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -258,7 +258,8 @@ static bool __init efi_virtmap_init(void) */ if (!is_normal_ram(md)) prot = __pgprot(PROT_DEVICE_nGnRE); - else if (md->type == EFI_RUNTIME_SERVICES_CODE) + else if (md->type == EFI_RUNTIME_SERVICES_CODE || + !PAGE_ALIGNED(md->phys_addr)) prot = PAGE_KERNEL_EXEC; else prot = PAGE_KERNEL; diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index 08cafc518b9a..0f03a8fe2314 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -178,6 +178,24 @@ ENTRY(ftrace_stub) ENDPROC(ftrace_stub) #ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* save return value regs*/ + .macro save_return_regs + sub sp, sp, #64 + stp x0, x1, [sp] + stp x2, x3, [sp, #16] + stp x4, x5, [sp, #32] + stp x6, x7, [sp, #48] + .endm + + /* restore return value regs*/ + .macro restore_return_regs + ldp x0, x1, [sp] + ldp x2, x3, [sp, #16] + ldp x4, x5, [sp, #32] + ldp x6, x7, [sp, #48] + add sp, sp, #64 + .endm + /* * void ftrace_graph_caller(void) * @@ -204,11 +222,11 @@ ENDPROC(ftrace_graph_caller) * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled. */ ENTRY(return_to_handler) - str x0, [sp, #-16]! 
+ save_return_regs mov x0, x29 // parent's fp bl ftrace_return_to_handler// addr = ftrace_return_to_hander(fp); mov x30, x0 // restore the original return address - ldr x0, [sp], #16 + restore_return_regs ret END(return_to_handler) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index f341866aa810..c08b9ad6f429 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -85,7 +85,7 @@ bool aarch64_insn_is_branch_imm(u32 insn) aarch64_insn_is_bcond(insn)); } -static DEFINE_SPINLOCK(patch_lock); +static DEFINE_RAW_SPINLOCK(patch_lock); static void __kprobes *patch_map(void *addr, int fixmap) { @@ -131,13 +131,13 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn) unsigned long flags = 0; int ret; - spin_lock_irqsave(&patch_lock, flags); + raw_spin_lock_irqsave(&patch_lock, flags); waddr = patch_map(addr, FIX_TEXT_POKE0); ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE); patch_unmap(FIX_TEXT_POKE0); - spin_unlock_irqrestore(&patch_lock, flags); + raw_spin_unlock_irqrestore(&patch_lock, flags); return ret; } diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 6bab21f84a9f..232247945b1c 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -364,6 +364,8 @@ static void __init relocate_initrd(void) to_free = ram_end - orig_start; size = orig_end - orig_start; + if (!size) + return; /* initrd needs to be relocated completely inside linear mapping */ new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn), diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index aba9ead1384c..9fadf6d7039b 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -287,6 +287,7 @@ retry: * starvation. */ mm_flags &= ~FAULT_FLAG_ALLOW_RETRY; + mm_flags |= FAULT_FLAG_TRIED; goto retry; } } diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild index f61f2dd67464..241b9b9729d8 100644 --- a/arch/avr32/include/asm/Kbuild +++ b/arch/avr32/include/asm/Kbuild @@ -20,4 +20,5 @@ generic-y += sections.h generic-y += topology.h generic-y += trace_clock.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild index 61cd1e786a14..91d49c0a3118 100644 --- a/arch/blackfin/include/asm/Kbuild +++ b/arch/blackfin/include/asm/Kbuild @@ -46,4 +46,5 @@ generic-y += types.h generic-y += ucontext.h generic-y += unaligned.h generic-y += user.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index f17c4dc6050c..945544ec603e 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild @@ -59,4 +59,5 @@ generic-y += types.h generic-y += ucontext.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index b7f68192d15b..1778805f6380 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild @@ -43,4 +43,5 @@ generic-y += topology.h generic-y += trace_clock.h generic-y += types.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild index 8e47b832cc76..1fa084cf1a43 100644 --- a/arch/frv/include/asm/Kbuild +++ b/arch/frv/include/asm/Kbuild @@ -7,3 +7,4 @@ generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += trace_clock.h +generic-y += word-at-a-time.h 
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild index 70e6ae1e7006..373cb23301e3 100644 --- a/arch/h8300/include/asm/Kbuild +++ b/arch/h8300/include/asm/Kbuild @@ -73,4 +73,5 @@ generic-y += uaccess.h generic-y += ucontext.h generic-y += unaligned.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index daee37bd0999..db8ddabc6bd2 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -58,4 +58,5 @@ generic-y += types.h generic-y += ucontext.h generic-y += unaligned.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild index 9de3ba12f6b9..502a91d8dbbd 100644 --- a/arch/ia64/include/asm/Kbuild +++ b/arch/ia64/include/asm/Kbuild @@ -8,3 +8,4 @@ generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += trace_clock.h generic-y += vtime.h +generic-y += word-at-a-time.h diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild index e0eb704ca1fa..fd104bd221ce 100644 --- a/arch/m32r/include/asm/Kbuild +++ b/arch/m32r/include/asm/Kbuild @@ -9,3 +9,4 @@ generic-y += module.h generic-y += preempt.h generic-y += sections.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 0b6b40d37b95..5b4ec541ba7c 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -57,7 +58,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -67,10 +67,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -179,6 +181,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -206,6 +209,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -370,6 +375,7 @@ CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -537,6 +543,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index eeb3a8991fc4..6e5198e2c124 100644 --- a/arch/m68k/configs/apollo_defconfig +++ 
b/arch/m68k/configs/apollo_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -344,6 +349,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -495,6 +501,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index 3a7006654ce9..f75600b0ca23 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -355,6 +360,7 @@ CONFIG_NE2000=y # CONFIG_NET_VENDOR_SEEQ is not set CONFIG_SMC91X=y # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # 
CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -517,6 +523,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index 0586b323a673..a42d91c389a6 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -343,6 +348,7 @@ CONFIG_BVME6000_NET=y # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index ad1dbce07aa4..77f4a11083e9 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m 
+CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -345,6 +350,7 @@ CONFIG_HPLANCE=y # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -497,6 +503,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index b44acacaecf4..5a329f77329b 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -54,7 +55,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -64,10 +64,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -176,6 +178,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -203,6 +206,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -364,6 +369,7 @@ CONFIG_MAC8390=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -519,6 +525,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 8afca3753db1..83c80d2030ec 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -64,7 +65,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -74,10 +74,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m 
CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -186,6 +188,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -213,6 +216,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -281,6 +285,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -410,6 +415,7 @@ CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_SEEQ is not set CONFIG_SMC91X=y # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m @@ -599,6 +605,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index ef00875994d9..6cb42c3bf5a2 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -52,7 +53,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -62,10 +62,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -174,6 +176,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -201,6 +204,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -266,6 +270,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -343,6 +348,7 @@ CONFIG_MVME147_NET=y # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 387c2bd90ff1..c7508c30330c 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is 
not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -343,6 +348,7 @@ CONFIG_MVME16x_NET=y # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index 35355c1bc714..64b71664a303 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -354,6 +359,7 @@ CONFIG_NE2000=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m @@ -510,6 +516,7 @@ 
CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 8442d267b877..9a4cab78a2ea 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -341,6 +346,7 @@ CONFIG_SUN3_82586=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 0e1b542e1555..1a2eaac13dbd 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -264,6 +268,7 @@ 
CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -341,6 +346,7 @@ CONFIG_SUN3LANCE=y # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/include/asm/linkage.h b/arch/m68k/include/asm/linkage.h index 5a822bb790f7..066e74f666ae 100644 --- a/arch/m68k/include/asm/linkage.h +++ b/arch/m68k/include/asm/linkage.h @@ -4,4 +4,34 @@ #define __ALIGN .align 4 #define __ALIGN_STR ".align 4" +/* + * Make sure the compiler doesn't do anything stupid with the + * arguments on the stack - they are owned by the *caller*, not + * the callee. This just fools gcc into not spilling into them, + * and keeps it from doing tailcall recursion and/or using the + * stack slots for temporaries, since they are live and "used" + * all the way to the end of the function. + */ +#define asmlinkage_protect(n, ret, args...) \ + __asmlinkage_protect##n(ret, ##args) +#define __asmlinkage_protect_n(ret, args...) \ + __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args) +#define __asmlinkage_protect0(ret) \ + __asmlinkage_protect_n(ret) +#define __asmlinkage_protect1(ret, arg1) \ + __asmlinkage_protect_n(ret, "m" (arg1)) +#define __asmlinkage_protect2(ret, arg1, arg2) \ + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2)) +#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \ + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3)) +#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \ + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ + "m" (arg4)) +#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \ + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ + "m" (arg4), "m" (arg5)) +#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \ + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ + "m" (arg4), "m" (arg5), "m" (arg6)) + #endif diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 244e0dbe45db..0793a7f17417 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -4,7 +4,7 @@ #include <uapi/asm/unistd.h> -#define NR_syscalls 356 +#define NR_syscalls 375 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index 61fb6cb9d2ae..5e6fae6c275f 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h @@ -361,5 +361,24 @@ #define __NR_memfd_create 353 #define __NR_bpf 354 #define __NR_execveat 355 +#define __NR_socket 356 +#define __NR_socketpair 357 +#define __NR_bind 358 +#define __NR_connect 359 +#define __NR_listen 360 +#define __NR_accept4 361 +#define __NR_getsockopt 362 +#define __NR_setsockopt 363 +#define __NR_getsockname 364 +#define __NR_getpeername 365 +#define __NR_sendto 366 +#define __NR_sendmsg 367 +#define __NR_recvfrom 368 +#define __NR_recvmsg 369 +#define __NR_shutdown 370 +#define __NR_recvmmsg 371 +#define __NR_sendmmsg 372 +#define __NR_userfaultfd 373 +#define __NR_membarrier 374 #endif 
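A quick aside on the asmlinkage_protect() macros added to arch/m68k/include/asm/linkage.h above: the comment explains that the stack argument slots belong to the *caller*, and the empty asm exists purely to keep GCC from spilling temporaries into those slots or turning the call into a tail call. Below is a minimal standalone sketch of that usage pattern, assuming a made-up toy_syscall(); in the kernel the macro is invoked by the SYSCALL_DEFINEn machinery, and only the two-argument variant is copied here.

#include <stdio.h>

/* Trimmed copy of the macros above, two-argument variant only. */
#define asmlinkage_protect(n, ret, args...) \
	__asmlinkage_protect##n(ret, ##args)
#define __asmlinkage_protect_n(ret, args...) \
	__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
#define __asmlinkage_protect2(ret, arg1, arg2) \
	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))

/* Hypothetical syscall-style function, not from the patch. */
long toy_syscall(long fd, long count)
{
	long ret = fd + count;		/* stand-in for the real work */

	/*
	 * Tell the compiler that the slots holding fd and count are still
	 * read here and that ret flows through an asm, so it neither reuses
	 * the argument area for temporaries nor emits a tail call.
	 */
	asmlinkage_protect(2, ret, fd, count);
	return ret;
}

int main(void)
{
	printf("%ld\n", toy_syscall(3, 4));
	return 0;
}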
/* _UAPI_ASM_M68K_UNISTD_H_ */ diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index a0ec4303f2c8..5dd0e80042f5 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S @@ -376,4 +376,22 @@ ENTRY(sys_call_table) .long sys_memfd_create .long sys_bpf .long sys_execveat /* 355 */ - + .long sys_socket + .long sys_socketpair + .long sys_bind + .long sys_connect + .long sys_listen /* 360 */ + .long sys_accept4 + .long sys_getsockopt + .long sys_setsockopt + .long sys_getsockname + .long sys_getpeername /* 365 */ + .long sys_sendto + .long sys_sendmsg + .long sys_recvfrom + .long sys_recvmsg + .long sys_shutdown /* 370 */ + .long sys_recvmmsg + .long sys_sendmmsg + .long sys_userfaultfd + .long sys_membarrier diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild index df31353fd200..29acb89daaaa 100644 --- a/arch/metag/include/asm/Kbuild +++ b/arch/metag/include/asm/Kbuild @@ -54,4 +54,5 @@ generic-y += ucontext.h generic-y += unaligned.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 2f222f355c4b..b0ae88c9fed9 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -10,3 +10,4 @@ generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += syscalls.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c index 15ecb4831e12..eeb3953ed8ac 100644 --- a/arch/mips/ath79/irq.c +++ b/arch/mips/ath79/irq.c @@ -293,8 +293,26 @@ static int __init ath79_misc_intc_of_init( return 0; } -IRQCHIP_DECLARE(ath79_misc_intc, "qca,ar7100-misc-intc", - ath79_misc_intc_of_init); + +static int __init ar7100_misc_intc_of_init( + struct device_node *node, struct device_node *parent) +{ + ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask; + return ath79_misc_intc_of_init(node, parent); +} + +IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc", + ar7100_misc_intc_of_init); + +static int __init ar7240_misc_intc_of_init( + struct device_node *node, struct device_node *parent) +{ + ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack; + return ath79_misc_intc_of_init(node, parent); +} + +IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc", + ar7240_misc_intc_of_init); static int __init ar79_cpu_intc_of_init( struct device_node *node, struct device_node *parent) diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 89a628455bc2..bd634259eab9 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -933,7 +933,7 @@ void __init plat_mem_setup(void) while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX) && (total < MAX_MEMORY)) { memory = cvmx_bootmem_phy_alloc(mem_alloc_size, - __pa_symbol(&__init_end), -1, + __pa_symbol(&_end), -1, 0x100000, CVMX_BOOTMEM_FLAG_NO_LOCKING); if (memory >= 0) { diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 40ec4ca3f946..c7fe4d01e79c 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -17,4 +17,5 @@ generic-y += segment.h generic-y += serial.h generic-y += trace_clock.h generic-y += user.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 9801ac982655..fe67f12ac239 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -20,6 +20,9 @@ 
#ifndef cpu_has_tlb #define cpu_has_tlb (cpu_data[0].options & MIPS_CPU_TLB) #endif +#ifndef cpu_has_ftlb +#define cpu_has_ftlb (cpu_data[0].options & MIPS_CPU_FTLB) +#endif #ifndef cpu_has_tlbinv #define cpu_has_tlbinv (cpu_data[0].options & MIPS_CPU_TLBINV) #endif diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index cd89e9855775..82ad15f11049 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -385,6 +385,7 @@ enum cpu_type_enum { #define MIPS_CPU_CDMM 0x4000000000ull /* CPU has Common Device Memory Map */ #define MIPS_CPU_BP_GHIST 0x8000000000ull /* R12K+ Branch Prediction Global History */ #define MIPS_CPU_SP 0x10000000000ull /* Small (1KB) page support */ +#define MIPS_CPU_FTLB 0x20000000000ull /* CPU has Fixed-page-size TLB */ /* * CPU ASE encodings diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 9e777cd42b67..d10fd80dbb7e 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -256,6 +256,7 @@ static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long si */ #define ioremap_nocache(offset, size) \ __ioremap_mode((offset), (size), _CACHE_UNCACHED) +#define ioremap_uc ioremap_nocache /* * ioremap_cachable - map bus memory into CPU space diff --git a/arch/mips/include/asm/maar.h b/arch/mips/include/asm/maar.h index b02891f9caaf..21d9607c80d7 100644 --- a/arch/mips/include/asm/maar.h +++ b/arch/mips/include/asm/maar.h @@ -66,6 +66,15 @@ static inline void write_maar_pair(unsigned idx, phys_addr_t lower, } /** + * maar_init() - initialise MAARs + * + * Performs initialisation of MAARs for the current CPU, making use of the + * platforms implementation of platform_maar_init where necessary and + * duplicating the setup it provides on secondary CPUs. + */ +extern void maar_init(void); + +/** * struct maar_config - MAAR configuration data * @lower: The lowest address that the MAAR pair will affect. Must be * aligned to a 2^16 byte boundary. diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h index d75b75e78ebb..1f1927ab4269 100644 --- a/arch/mips/include/asm/mips-cm.h +++ b/arch/mips/include/asm/mips-cm.h @@ -194,6 +194,7 @@ BUILD_CM_RW(reg3_mask, MIPS_CM_GCB_OFS + 0xc8) BUILD_CM_R_(gic_status, MIPS_CM_GCB_OFS + 0xd0) BUILD_CM_R_(cpc_status, MIPS_CM_GCB_OFS + 0xf0) BUILD_CM_RW(l2_config, MIPS_CM_GCB_OFS + 0x130) +BUILD_CM_RW(sys_config2, MIPS_CM_GCB_OFS + 0x150) /* Core Local & Core Other register accessor functions */ BUILD_CM_Cx_RW(reset_release, 0x00) @@ -316,6 +317,10 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80) #define CM_GCR_L2_CONFIG_ASSOC_SHF 0 #define CM_GCR_L2_CONFIG_ASSOC_MSK (_ULCAST_(0xff) << 0) +/* GCR_SYS_CONFIG2 register fields */ +#define CM_GCR_SYS_CONFIG2_MAXVPW_SHF 0 +#define CM_GCR_SYS_CONFIG2_MAXVPW_MSK (_ULCAST_(0xf) << 0) + /* GCR_Cx_COHERENCE register fields */ #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF 0 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK (_ULCAST_(0xff) << 0) @@ -405,4 +410,38 @@ static inline int mips_cm_revision(void) return read_gcr_rev(); } +/** + * mips_cm_max_vp_width() - return the width in bits of VP indices + * + * Return: the width, in bits, of VP indices in fields that combine core & VP + * indices. 
+ */ +static inline unsigned int mips_cm_max_vp_width(void) +{ + extern int smp_num_siblings; + + if (mips_cm_revision() >= CM_REV_CM3) + return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK; + + return smp_num_siblings; +} + +/** + * mips_cm_vp_id() - calculate the hardware VP ID for a CPU + * @cpu: the CPU whose VP ID to calculate + * + * Hardware such as the GIC uses identifiers for VPs which may not match the + * CPU numbers used by Linux. This function calculates the hardware VP + * identifier corresponding to a given CPU. + * + * Return: the VP ID for the CPU. + */ +static inline unsigned int mips_cm_vp_id(unsigned int cpu) +{ + unsigned int core = cpu_data[cpu].core; + unsigned int vp = cpu_vpe_id(&cpu_data[cpu]); + + return (core * mips_cm_max_vp_width()) + vp; +} + #endif /* __MIPS_ASM_MIPS_CM_H__ */ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index d3cd8eac81e3..c64781cf649f 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -487,6 +487,8 @@ /* Bits specific to the MIPS32/64 PRA. */ #define MIPS_CONF_MT (_ULCAST_(7) << 7) +#define MIPS_CONF_MT_TLB (_ULCAST_(1) << 7) +#define MIPS_CONF_MT_FTLB (_ULCAST_(4) << 7) #define MIPS_CONF_AR (_ULCAST_(7) << 10) #define MIPS_CONF_AT (_ULCAST_(3) << 13) #define MIPS_CONF_M (_ULCAST_(1) << 31) diff --git a/arch/mips/include/uapi/asm/swab.h b/arch/mips/include/uapi/asm/swab.h index c4ddc4f0d2dc..23cd9b118c9e 100644 --- a/arch/mips/include/uapi/asm/swab.h +++ b/arch/mips/include/uapi/asm/swab.h @@ -13,16 +13,15 @@ #define __SWAB_64_THRU_32__ -#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) || \ - defined(_MIPS_ARCH_LOONGSON3A) +#if !defined(__mips16) && \ + ((defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) || \ + defined(_MIPS_ARCH_LOONGSON3A)) -static inline __attribute__((nomips16)) __attribute_const__ - __u16 __arch_swab16(__u16 x) +static inline __attribute_const__ __u16 __arch_swab16(__u16 x) { __asm__( " .set push \n" " .set arch=mips32r2 \n" - " .set nomips16 \n" " wsbh %0, %1 \n" " .set pop \n" : "=r" (x) @@ -32,13 +31,11 @@ static inline __attribute__((nomips16)) __attribute_const__ } #define __arch_swab16 __arch_swab16 -static inline __attribute__((nomips16)) __attribute_const__ - __u32 __arch_swab32(__u32 x) +static inline __attribute_const__ __u32 __arch_swab32(__u32 x) { __asm__( " .set push \n" " .set arch=mips32r2 \n" - " .set nomips16 \n" " wsbh %0, %1 \n" " rotr %0, %0, 16 \n" " .set pop \n" @@ -54,13 +51,11 @@ static inline __attribute__((nomips16)) __attribute_const__ * 64-bit kernel on r2 CPUs. 
*/ #ifdef __mips64 -static inline __attribute__((nomips16)) __attribute_const__ - __u64 __arch_swab64(__u64 x) +static inline __attribute_const__ __u64 __arch_swab64(__u64 x) { __asm__( " .set push \n" " .set arch=mips64r2 \n" - " .set nomips16 \n" " dsbh %0, %1 \n" " dshd %0, %0 \n" " .set pop \n" @@ -71,5 +66,5 @@ static inline __attribute__((nomips16)) __attribute_const__ } #define __arch_swab64 __arch_swab64 #endif /* __mips64 */ -#endif /* MIPS R2 or newer or Loongson 3A */ +#endif /* (not __mips16) and (MIPS R2 or newer or Loongson 3A) */ #endif /* _ASM_SWAB_H */ diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index c03088f9f514..cfabadb135d9 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -377,16 +377,18 @@ #define __NR_memfd_create (__NR_Linux + 354) #define __NR_bpf (__NR_Linux + 355) #define __NR_execveat (__NR_Linux + 356) +#define __NR_userfaultfd (__NR_Linux + 357) +#define __NR_membarrier (__NR_Linux + 358) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 356 +#define __NR_Linux_syscalls 358 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 356 +#define __NR_O32_Linux_syscalls 358 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -711,16 +713,18 @@ #define __NR_memfd_create (__NR_Linux + 314) #define __NR_bpf (__NR_Linux + 315) #define __NR_execveat (__NR_Linux + 316) +#define __NR_userfaultfd (__NR_Linux + 317) +#define __NR_membarrier (__NR_Linux + 318) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 316 +#define __NR_Linux_syscalls 318 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 316 +#define __NR_64_Linux_syscalls 318 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1049,15 +1053,17 @@ #define __NR_memfd_create (__NR_Linux + 318) #define __NR_bpf (__NR_Linux + 319) #define __NR_execveat (__NR_Linux + 320) +#define __NR_userfaultfd (__NR_Linux + 321) +#define __NR_membarrier (__NR_Linux + 322) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 320 +#define __NR_Linux_syscalls 322 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 320 +#define __NR_N32_Linux_syscalls 322 #endif /* _UAPI_ASM_UNISTD_H */ diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c index 4e62bf85d0b0..459cb017306c 100644 --- a/arch/mips/jz4740/board-qi_lb60.c +++ b/arch/mips/jz4740/board-qi_lb60.c @@ -26,6 +26,7 @@ #include <linux/power/jz4740-battery.h> #include <linux/power/gpio-charger.h> +#include <asm/mach-jz4740/gpio.h> #include <asm/mach-jz4740/jz4740_fb.h> #include <asm/mach-jz4740/jz4740_mmc.h> #include <asm/mach-jz4740/jz4740_nand.h> diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c index a74e181058b0..8c6d76c9b2d6 100644 --- a/arch/mips/jz4740/gpio.c +++ b/arch/mips/jz4740/gpio.c @@ -28,6 +28,7 @@ #include <linux/seq_file.h> #include <asm/mach-jz4740/base.h> +#include <asm/mach-jz4740/gpio.h> #define JZ4740_GPIO_BASE_A (32*0) #define JZ4740_GPIO_BASE_B (32*1) diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 9f71c06aebf6..209ded16806b 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -39,6 +39,7 @@ mfc0 \dest, CP0_CONFIG, 3 andi \dest, \dest, MIPS_CONF3_MT beqz \dest, \nomt + nop .endm .section .text.cps-vec @@ -223,10 +224,9 @@ LEAF(excep_ejtag) END(excep_ejtag) 
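The swab.h hunk just above drops the explicit nomips16 attributes and instead compiles the wsbh/rotr inline assembly out entirely when building for MIPS16. When no __arch_swab32() is provided, the generic byte swap falls back to plain shifts and masks (or a compiler builtin). The standalone sketch below is not from the patch; it shows that shift/mask equivalent, which is also essentially what the open-coded little-endian sequences added to bpf_jit_asm.S later in this series compute with sll/srl/andi/or.

#include <stdio.h>

/* Portable 32-bit byte swap: what the non-wsbh fallback boils down to. */
static unsigned int my_swab32(unsigned int x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) |
	       ((x & 0xff000000u) >> 24);
}

int main(void)
{
	printf("%08x\n", my_swab32(0x12345678u));	/* prints 78563412 */
	return 0;
}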
LEAF(mips_cps_core_init) -#ifdef CONFIG_MIPS_MT +#ifdef CONFIG_MIPS_MT_SMP /* Check that the core implements the MT ASE */ has_mt t0, 3f - nop .set push .set mips64r2 @@ -310,8 +310,9 @@ LEAF(mips_cps_boot_vpes) PTR_ADDU t0, t0, t1 /* Calculate this VPEs ID. If the core doesn't support MT use 0 */ + li t9, 0 +#ifdef CONFIG_MIPS_MT_SMP has_mt ta2, 1f - li t9, 0 /* Find the number of VPEs present in the core */ mfc0 t1, CP0_MVPCONF0 @@ -330,6 +331,7 @@ LEAF(mips_cps_boot_vpes) /* Retrieve the VPE ID from EBase.CPUNum */ mfc0 t9, $15, 1 and t9, t9, t1 +#endif 1: /* Calculate a pointer to this VPEs struct vpe_boot_config */ li t1, VPEBOOTCFG_SIZE @@ -337,7 +339,7 @@ LEAF(mips_cps_boot_vpes) PTR_L ta3, COREBOOTCFG_VPECONFIG(t0) PTR_ADDU v0, v0, ta3 -#ifdef CONFIG_MIPS_MT +#ifdef CONFIG_MIPS_MT_SMP /* If the core doesn't support MT then return */ bnez ta2, 1f @@ -451,7 +453,7 @@ LEAF(mips_cps_boot_vpes) 2: .set pop -#endif /* CONFIG_MIPS_MT */ +#endif /* CONFIG_MIPS_MT_SMP */ /* Return */ jr ra diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 571a8e6ea5bd..09a51d091941 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -410,16 +410,18 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, int enable) static inline unsigned int decode_config0(struct cpuinfo_mips *c) { unsigned int config0; - int isa; + int isa, mt; config0 = read_c0_config(); /* * Look for Standard TLB or Dual VTLB and FTLB */ - if ((((config0 & MIPS_CONF_MT) >> 7) == 1) || - (((config0 & MIPS_CONF_MT) >> 7) == 4)) + mt = config0 & MIPS_CONF_MT; + if (mt == MIPS_CONF_MT_TLB) c->options |= MIPS_CPU_TLB; + else if (mt == MIPS_CONF_MT_FTLB) + c->options |= MIPS_CPU_TLB | MIPS_CPU_FTLB; isa = (config0 & MIPS_CONF_AT) >> 13; switch (isa) { @@ -559,15 +561,18 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c) if (cpu_has_tlb) { if (((config4 & MIPS_CONF4_IE) >> 29) == 2) c->options |= MIPS_CPU_TLBINV; + /* - * This is a bit ugly. R6 has dropped that field from - * config4 and the only valid configuration is VTLB+FTLB so - * set a good value for mmuextdef for that case. + * R6 has dropped the MMUExtDef field from config4. + * On R6 the fields always describe the FTLB, and only if it is + * present according to Config.MT. 
*/ - if (cpu_has_mips_r6) + if (!cpu_has_mips_r6) + mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; + else if (cpu_has_ftlb) mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT; else - mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; + mmuextdef = 0; switch (mmuextdef) { case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT: diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index 423ae83af1fb..3375745b9198 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S @@ -18,7 +18,7 @@ .set pop /* * task_struct *resume(task_struct *prev, task_struct *next, - * struct thread_info *next_ti, int usedfpu) + * struct thread_info *next_ti) */ .align 7 LEAF(resume) @@ -28,30 +28,6 @@ cpu_save_nonscratch a0 LONG_S ra, THREAD_REG31(a0) - /* - * check if we need to save FPU registers - */ - .set push - .set noreorder - beqz a3, 1f - PTR_L t3, TASK_THREAD_INFO(a0) - .set pop - - /* - * clear saved user stack CU1 bit - */ - LONG_L t0, ST_OFF(t3) - li t1, ~ST0_CU1 - and t0, t0, t1 - LONG_S t0, ST_OFF(t3) - - .set push - .set arch=mips64r2 - fpu_save_double a0 t0 t1 # c0_status passed in t0 - # clobbers t1 - .set pop -1: - #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 /* Check if we need to store CVMSEG state */ dmfc0 t0, $11,7 /* CvmMemCtl */ diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S index 5087a4b72e6b..ac27ef7d4d0e 100644 --- a/arch/mips/kernel/r2300_switch.S +++ b/arch/mips/kernel/r2300_switch.S @@ -31,18 +31,8 @@ #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) /* - * FPU context is saved iff the process has used it's FPU in the current - * time slice as indicated by TIF_USEDFPU. In any case, the CU1 bit for user - * space STATUS register should be 0, so that a process *always* starts its - * userland with FPU disabled after each context switch. - * - * FPU will be enabled as soon as the process accesses FPU again, through - * do_cpu() trap. 
- */ - -/* * task_struct *resume(task_struct *prev, task_struct *next, - * struct thread_info *next_ti, int usedfpu) + * struct thread_info *next_ti) */ LEAF(resume) mfc0 t1, CP0_STATUS @@ -50,22 +40,6 @@ LEAF(resume) cpu_save_nonscratch a0 sw ra, THREAD_REG31(a0) - beqz a3, 1f - - PTR_L t3, TASK_THREAD_INFO(a0) - - /* - * clear saved user stack CU1 bit - */ - lw t0, ST_OFF(t3) - li t1, ~ST0_CU1 - and t0, t0, t1 - sw t0, ST_OFF(t3) - - fpu_save_single a0, t0 # clobbers t0 - -1: - #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) PTR_LA t8, __stack_chk_guard LONG_L t9, TASK_STACK_CANARY(a1) diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 4cc13508d967..65a74e4f0f45 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -36,16 +36,8 @@ NESTED(handle_sys, PT_SIZE, sp) lw t1, PT_EPC(sp) # skip syscall on return subu v0, v0, __NR_O32_Linux # check syscall number - sltiu t0, v0, __NR_O32_Linux_syscalls + 1 addiu t1, 4 # skip to next instruction sw t1, PT_EPC(sp) - beqz t0, illegal_syscall - - sll t0, v0, 2 - la t1, sys_call_table - addu t1, t0 - lw t2, (t1) # syscall routine - beqz t2, illegal_syscall sw a3, PT_R26(sp) # save a3 for syscall restarting @@ -96,6 +88,16 @@ loads_done: li t1, _TIF_WORK_SYSCALL_ENTRY and t0, t1 bnez t0, syscall_trace_entry # -> yes +syscall_common: + sltiu t0, v0, __NR_O32_Linux_syscalls + 1 + beqz t0, illegal_syscall + + sll t0, v0, 2 + la t1, sys_call_table + addu t1, t0 + lw t2, (t1) # syscall routine + + beqz t2, illegal_syscall jalr t2 # Do The Real Thing (TM) @@ -116,7 +118,7 @@ o32_syscall_exit: syscall_trace_entry: SAVE_STATIC - move s0, t2 + move s0, v0 move a0, sp /* @@ -129,27 +131,18 @@ syscall_trace_entry: 1: jal syscall_trace_enter - bltz v0, 2f # seccomp failed? Skip syscall + bltz v0, 1f # seccomp failed? Skip syscall + + move v0, s0 # restore syscall - move t0, s0 RESTORE_STATIC lw a0, PT_R4(sp) # Restore argument registers lw a1, PT_R5(sp) lw a2, PT_R6(sp) lw a3, PT_R7(sp) - jalr t0 - - li t0, -EMAXERRNO - 1 # error? 
- sltu t0, t0, v0 - sw t0, PT_R7(sp) # set error flag - beqz t0, 1f - - lw t1, PT_R2(sp) # syscall number - negu v0 # error - sw t1, PT_R0(sp) # save it for syscall restarting -1: sw v0, PT_R2(sp) # result + j syscall_common -2: j syscall_exit +1: j syscall_exit /* ------------------------------------------------------------------------ */ @@ -599,3 +592,5 @@ EXPORT(sys_call_table) PTR sys_memfd_create PTR sys_bpf /* 4355 */ PTR sys_execveat + PTR sys_userfaultfd + PTR sys_membarrier diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index a6f6b762c47a..e732981cf99f 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -39,18 +39,11 @@ NESTED(handle_sys64, PT_SIZE, sp) .set at #endif - dsubu t0, v0, __NR_64_Linux # check syscall number - sltiu t0, t0, __NR_64_Linux_syscalls + 1 #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) ld t1, PT_EPC(sp) # skip syscall on return daddiu t1, 4 # skip to next instruction sd t1, PT_EPC(sp) #endif - beqz t0, illegal_syscall - - dsll t0, v0, 3 # offset into table - ld t2, (sys_call_table - (__NR_64_Linux * 8))(t0) - # syscall routine sd a3, PT_R26(sp) # save a3 for syscall restarting @@ -59,6 +52,17 @@ NESTED(handle_sys64, PT_SIZE, sp) and t0, t1, t0 bnez t0, syscall_trace_entry +syscall_common: + dsubu t2, v0, __NR_64_Linux + sltiu t0, t2, __NR_64_Linux_syscalls + 1 + beqz t0, illegal_syscall + + dsll t0, t2, 3 # offset into table + dla t2, sys_call_table + daddu t0, t2, t0 + ld t2, (t0) # syscall routine + beqz t2, illegal_syscall + jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? @@ -78,14 +82,14 @@ n64_syscall_exit: syscall_trace_entry: SAVE_STATIC - move s0, t2 + move s0, v0 move a0, sp move a1, v0 jal syscall_trace_enter - bltz v0, 2f # seccomp failed? Skip syscall + bltz v0, 1f # seccomp failed? Skip syscall - move t0, s0 + move v0, s0 RESTORE_STATIC ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) @@ -93,19 +97,9 @@ syscall_trace_entry: ld a3, PT_R7(sp) ld a4, PT_R8(sp) ld a5, PT_R9(sp) - jalr t0 - - li t0, -EMAXERRNO - 1 # error? - sltu t0, t0, v0 - sd t0, PT_R7(sp) # set error flag - beqz t0, 1f - - ld t1, PT_R2(sp) # syscall number - dnegu v0 # error - sd t1, PT_R0(sp) # save it for syscall restarting -1: sd v0, PT_R2(sp) # result + j syscall_common -2: j syscall_exit +1: j syscall_exit illegal_syscall: /* This also isn't a 64-bit syscall, throw an error. */ @@ -436,4 +430,6 @@ EXPORT(sys_call_table) PTR sys_memfd_create PTR sys_bpf /* 5315 */ PTR sys_execveat + PTR sys_userfaultfd + PTR sys_membarrier .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 4b2010654c46..c79484397584 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -52,6 +52,7 @@ NESTED(handle_sysn32, PT_SIZE, sp) and t0, t1, t0 bnez t0, n32_syscall_trace_entry +syscall_common: jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? @@ -75,9 +76,9 @@ n32_syscall_trace_entry: move a1, v0 jal syscall_trace_enter - bltz v0, 2f # seccomp failed? Skip syscall + bltz v0, 1f # seccomp failed? Skip syscall - move t0, s0 + move t2, s0 RESTORE_STATIC ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) @@ -85,19 +86,9 @@ n32_syscall_trace_entry: ld a3, PT_R7(sp) ld a4, PT_R8(sp) ld a5, PT_R9(sp) - jalr t0 + j syscall_common - li t0, -EMAXERRNO - 1 # error? 
- sltu t0, t0, v0 - sd t0, PT_R7(sp) # set error flag - beqz t0, 1f - - ld t1, PT_R2(sp) # syscall number - dnegu v0 # error - sd t1, PT_R0(sp) # save it for syscall restarting -1: sd v0, PT_R2(sp) # result - -2: j syscall_exit +1: j syscall_exit not_n32_scall: /* This is not an n32 compatibility syscall, pass it on to @@ -429,4 +420,6 @@ EXPORT(sysn32_call_table) PTR sys_memfd_create PTR sys_bpf PTR compat_sys_execveat /* 6320 */ + PTR sys_userfaultfd + PTR sys_membarrier .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index f543ff4feef9..6369cfd390c6 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -87,6 +87,7 @@ loads_done: and t0, t1, t0 bnez t0, trace_a_syscall +syscall_common: jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? @@ -130,9 +131,9 @@ trace_a_syscall: 1: jal syscall_trace_enter - bltz v0, 2f # seccomp failed? Skip syscall + bltz v0, 1f # seccomp failed? Skip syscall - move t0, s0 + move t2, s0 RESTORE_STATIC ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) @@ -142,19 +143,9 @@ trace_a_syscall: ld a5, PT_R9(sp) ld a6, PT_R10(sp) ld a7, PT_R11(sp) # For indirect syscalls - jalr t0 + j syscall_common - li t0, -EMAXERRNO - 1 # error? - sltu t0, t0, v0 - sd t0, PT_R7(sp) # set error flag - beqz t0, 1f - - ld t1, PT_R2(sp) # syscall number - dnegu v0 # error - sd t1, PT_R0(sp) # save it for syscall restarting -1: sd v0, PT_R2(sp) # result - -2: j syscall_exit +1: j syscall_exit /* ------------------------------------------------------------------------ */ @@ -584,4 +575,6 @@ EXPORT(sys32_call_table) PTR sys_memfd_create PTR sys_bpf /* 4355 */ PTR compat_sys_execveat + PTR sys_userfaultfd + PTR sys_membarrier .size sys32_call_table,.-sys32_call_table diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 35b8316002f8..479515109e5b 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -338,7 +338,7 @@ static void __init bootmem_init(void) if (end <= reserved_end) continue; #ifdef CONFIG_BLK_DEV_INITRD - /* mapstart should be after initrd_end */ + /* Skip zones before initrd and initrd itself */ if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end))) continue; #endif @@ -371,6 +371,14 @@ static void __init bootmem_init(void) max_low_pfn = PFN_DOWN(HIGHMEM_START); } +#ifdef CONFIG_BLK_DEV_INITRD + /* + * mapstart should be after initrd_end + */ + if (initrd_end) + mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end))); +#endif + /* * Initialize the boot-time allocator with low memory only. 
*/ diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index a31896c33716..bd4385a8e6e8 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -42,6 +42,7 @@ #include <asm/mmu_context.h> #include <asm/time.h> #include <asm/setup.h> +#include <asm/maar.h> cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ @@ -157,6 +158,7 @@ asmlinkage void start_secondary(void) mips_clockevent_init(); mp_ops->init_secondary(); cpu_report(); + maar_init(); /* * XXX parity protection should be folded in here when it's converted diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c index f6c44dd332e2..d6d07ad56180 100644 --- a/arch/mips/loongson64/common/env.c +++ b/arch/mips/loongson64/common/env.c @@ -64,6 +64,9 @@ void __init prom_init_env(void) } if (memsize == 0) memsize = 256; + + loongson_sysconf.nr_uarts = 1; + pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize); #else struct boot_params *boot_p; diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index a914dc1cb6d1..d8117be729a2 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -100,7 +100,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) else #endif #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32) - if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) + if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8)) dma_flag = __GFP_DMA; else #endif diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 66d0f49c5bec..8770e619185e 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -44,6 +44,7 @@ #include <asm/pgalloc.h> #include <asm/tlb.h> #include <asm/fixmap.h> +#include <asm/maar.h> /* * We have up to 8 empty zeroed pages so we can map one of the right colour @@ -252,6 +253,119 @@ void __init fixrange_init(unsigned long start, unsigned long end, #endif } +unsigned __weak platform_maar_init(unsigned num_pairs) +{ + struct maar_config cfg[BOOT_MEM_MAP_MAX]; + unsigned i, num_configured, num_cfg = 0; + phys_addr_t skip; + + for (i = 0; i < boot_mem_map.nr_map; i++) { + switch (boot_mem_map.map[i].type) { + case BOOT_MEM_RAM: + case BOOT_MEM_INIT_RAM: + break; + default: + continue; + } + + skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff); + + cfg[num_cfg].lower = boot_mem_map.map[i].addr; + cfg[num_cfg].lower += skip; + + cfg[num_cfg].upper = cfg[num_cfg].lower; + cfg[num_cfg].upper += boot_mem_map.map[i].size - 1; + cfg[num_cfg].upper -= skip; + + cfg[num_cfg].attrs = MIPS_MAAR_S; + num_cfg++; + } + + num_configured = maar_config(cfg, num_cfg, num_pairs); + if (num_configured < num_cfg) + pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n", + num_pairs, num_cfg); + + return num_configured; +} + +void maar_init(void) +{ + unsigned num_maars, used, i; + phys_addr_t lower, upper, attr; + static struct { + struct maar_config cfgs[3]; + unsigned used; + } recorded = { { { 0 } }, 0 }; + + if (!cpu_has_maar) + return; + + /* Detect the number of MAARs */ + write_c0_maari(~0); + back_to_back_c0_hazard(); + num_maars = read_c0_maari() + 1; + + /* MAARs should be in pairs */ + WARN_ON(num_maars % 2); + + /* Set MAARs using values we recorded already */ + if (recorded.used) { + used = maar_config(recorded.cfgs, recorded.used, num_maars / 2); + BUG_ON(used != recorded.used); + } else { + /* Configure the required MAARs */ + used = platform_maar_init(num_maars / 2); + } + + /* Disable any further MAARs */ + for (i = (used * 2); i < num_maars; i++) { + write_c0_maari(i); + 
back_to_back_c0_hazard(); + write_c0_maar(0); + back_to_back_c0_hazard(); + } + + if (recorded.used) + return; + + pr_info("MAAR configuration:\n"); + for (i = 0; i < num_maars; i += 2) { + write_c0_maari(i); + back_to_back_c0_hazard(); + upper = read_c0_maar(); + + write_c0_maari(i + 1); + back_to_back_c0_hazard(); + lower = read_c0_maar(); + + attr = lower & upper; + lower = (lower & MIPS_MAAR_ADDR) << 4; + upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff; + + pr_info(" [%d]: ", i / 2); + if (!(attr & MIPS_MAAR_V)) { + pr_cont("disabled\n"); + continue; + } + + pr_cont("%pa-%pa", &lower, &upper); + + if (attr & MIPS_MAAR_S) + pr_cont(" speculate"); + + pr_cont("\n"); + + /* Record the setup for use on secondary CPUs */ + if (used <= ARRAY_SIZE(recorded.cfgs)) { + recorded.cfgs[recorded.used].lower = lower; + recorded.cfgs[recorded.used].upper = upper; + recorded.cfgs[recorded.used].attrs = attr; + recorded.used++; + } + } +} + #ifndef CONFIG_NEED_MULTIPLE_NODES int page_is_ram(unsigned long pagenr) { @@ -334,69 +448,6 @@ static inline void mem_init_free_highmem(void) #endif } -unsigned __weak platform_maar_init(unsigned num_pairs) -{ - struct maar_config cfg[BOOT_MEM_MAP_MAX]; - unsigned i, num_configured, num_cfg = 0; - phys_addr_t skip; - - for (i = 0; i < boot_mem_map.nr_map; i++) { - switch (boot_mem_map.map[i].type) { - case BOOT_MEM_RAM: - case BOOT_MEM_INIT_RAM: - break; - default: - continue; - } - - skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff); - - cfg[num_cfg].lower = boot_mem_map.map[i].addr; - cfg[num_cfg].lower += skip; - - cfg[num_cfg].upper = cfg[num_cfg].lower; - cfg[num_cfg].upper += boot_mem_map.map[i].size - 1; - cfg[num_cfg].upper -= skip; - - cfg[num_cfg].attrs = MIPS_MAAR_S; - num_cfg++; - } - - num_configured = maar_config(cfg, num_cfg, num_pairs); - if (num_configured < num_cfg) - pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n", - num_pairs, num_cfg); - - return num_configured; -} - -static void maar_init(void) -{ - unsigned num_maars, used, i; - - if (!cpu_has_maar) - return; - - /* Detect the number of MAARs */ - write_c0_maari(~0); - back_to_back_c0_hazard(); - num_maars = read_c0_maari() + 1; - - /* MAARs should be in pairs */ - WARN_ON(num_maars % 2); - - /* Configure the required MAARs */ - used = platform_maar_init(num_maars / 2); - - /* Disable any further MAARs */ - for (i = (used * 2); i < num_maars; i++) { - write_c0_maari(i); - back_to_back_c0_hazard(); - write_c0_maar(0); - back_to_back_c0_hazard(); - } -} - void __init mem_init(void) { #ifdef CONFIG_HIGHMEM diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S index e92726099be0..5d2e0c8d29c0 100644 --- a/arch/mips/net/bpf_jit_asm.S +++ b/arch/mips/net/bpf_jit_asm.S @@ -57,15 +57,28 @@ LEAF(sk_load_word) is_offset_negative(word) - .globl sk_load_word_positive -sk_load_word_positive: +FEXPORT(sk_load_word_positive) is_offset_in_header(4, word) /* Offset within header boundaries */ PTR_ADDU t1, $r_skb_data, offset + .set reorder lw $r_A, 0(t1) + .set noreorder #ifdef CONFIG_CPU_LITTLE_ENDIAN +# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) wsbh t0, $r_A rotr $r_A, t0, 16 +# else + sll t0, $r_A, 24 + srl t1, $r_A, 24 + srl t2, $r_A, 8 + or t0, t0, t1 + andi t2, t2, 0xff00 + andi t1, $r_A, 0xff00 + or t0, t0, t2 + sll t1, t1, 8 + or $r_A, t0, t1 +# endif #endif jr $r_ra move $r_ret, zero @@ -73,15 +86,24 @@ sk_load_word_positive: LEAF(sk_load_half) is_offset_negative(half) - .globl sk_load_half_positive -sk_load_half_positive: 
+FEXPORT(sk_load_half_positive) is_offset_in_header(2, half) /* Offset within header boundaries */ PTR_ADDU t1, $r_skb_data, offset + .set reorder lh $r_A, 0(t1) + .set noreorder #ifdef CONFIG_CPU_LITTLE_ENDIAN +# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) wsbh t0, $r_A seh $r_A, t0 +# else + sll t0, $r_A, 24 + andi t1, $r_A, 0xff00 + sra t0, t0, 16 + srl t1, t1, 8 + or $r_A, t0, t1 +# endif #endif jr $r_ra move $r_ret, zero @@ -89,8 +111,7 @@ sk_load_half_positive: LEAF(sk_load_byte) is_offset_negative(byte) - .globl sk_load_byte_positive -sk_load_byte_positive: +FEXPORT(sk_load_byte_positive) is_offset_in_header(1, byte) /* Offset within header boundaries */ PTR_ADDU t1, $r_skb_data, offset @@ -148,23 +169,47 @@ sk_load_byte_positive: NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp) bpf_slow_path_common(4) #ifdef CONFIG_CPU_LITTLE_ENDIAN +# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) wsbh t0, $r_s0 jr $r_ra rotr $r_A, t0, 16 -#endif +# else + sll t0, $r_s0, 24 + srl t1, $r_s0, 24 + srl t2, $r_s0, 8 + or t0, t0, t1 + andi t2, t2, 0xff00 + andi t1, $r_s0, 0xff00 + or t0, t0, t2 + sll t1, t1, 8 + jr $r_ra + or $r_A, t0, t1 +# endif +#else jr $r_ra - move $r_A, $r_s0 + move $r_A, $r_s0 +#endif END(bpf_slow_path_word) NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp) bpf_slow_path_common(2) #ifdef CONFIG_CPU_LITTLE_ENDIAN +# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) jr $r_ra wsbh $r_A, $r_s0 -#endif +# else + sll t0, $r_s0, 8 + andi t1, $r_s0, 0xff00 + andi t0, t0, 0xff00 + srl t1, t1, 8 + jr $r_ra + or $r_A, t0, t1 +# endif +#else jr $r_ra move $r_A, $r_s0 +#endif END(bpf_slow_path_half) diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index 6edb9ee6128e..1c8dd0f5cd5d 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild @@ -9,3 +9,4 @@ generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += sections.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild index 914864eb5a25..d63330e88379 100644 --- a/arch/nios2/include/asm/Kbuild +++ b/arch/nios2/include/asm/Kbuild @@ -61,4 +61,5 @@ generic-y += types.h generic-y += unaligned.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h index 5b3a903adae6..e4396a7d0f7c 100644 --- a/arch/powerpc/include/asm/word-at-a-time.h +++ b/arch/powerpc/include/asm/word-at-a-time.h @@ -40,6 +40,11 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct return (val + c->high_bits) & ~rhs; } +static inline unsigned long zero_bytemask(unsigned long mask) +{ + return ~1ul << __fls(mask); +} + #else #ifdef CONFIG_64BIT diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile index d4788111c161..fac6ac9790fa 100644 --- a/arch/s390/boot/compressed/Makefile +++ b/arch/s390/boot/compressed/Makefile @@ -10,7 +10,7 @@ targets += misc.o piggy.o sizes.h head.o KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING -KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks +KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float KBUILD_CFLAGS += $(call cc-option,-mpacked-stack) KBUILD_CFLAGS += $(call cc-option,-ffreestanding) diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index 0c98f1508542..ed7da281df66 100644 --- 
a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig @@ -381,7 +381,7 @@ CONFIG_ISCSI_TCP=m CONFIG_SCSI_DEBUG=m CONFIG_ZFCP=y CONFIG_SCSI_VIRTIO=m -CONFIG_SCSI_DH=m +CONFIG_SCSI_DH=y CONFIG_SCSI_DH_RDAC=m CONFIG_SCSI_DH_HP_SW=m CONFIG_SCSI_DH_EMC=m diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index 82083e1fbdc4..9858b14cde1e 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig @@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m CONFIG_SCSI_DEBUG=m CONFIG_ZFCP=y CONFIG_SCSI_VIRTIO=m -CONFIG_SCSI_DH=m +CONFIG_SCSI_DH=y CONFIG_SCSI_DH_RDAC=m CONFIG_SCSI_DH_HP_SW=m CONFIG_SCSI_DH_EMC=m diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index c05c9e0821e3..7f14f80717d4 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig @@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m CONFIG_SCSI_DEBUG=m CONFIG_ZFCP=y CONFIG_SCSI_VIRTIO=m -CONFIG_SCSI_DH=m +CONFIG_SCSI_DH=y CONFIG_SCSI_DH_RDAC=m CONFIG_SCSI_DH_HP_SW=m CONFIG_SCSI_DH_EMC=m diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild index 5ad26dd94d77..9043d2e1e2ae 100644 --- a/arch/s390/include/asm/Kbuild +++ b/arch/s390/include/asm/Kbuild @@ -6,3 +6,4 @@ generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/s390/include/asm/numa.h b/arch/s390/include/asm/numa.h index 2a0efc63b9e5..dc19ee0c92aa 100644 --- a/arch/s390/include/asm/numa.h +++ b/arch/s390/include/asm/numa.h @@ -19,7 +19,7 @@ int numa_pfn_to_nid(unsigned long pfn); int __node_distance(int a, int b); void numa_update_cpu_topology(void); -extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +extern cpumask_t node_to_cpumask_map[MAX_NUMNODES]; extern int numa_debug_enabled; #else diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index 27ebde643933..94fc55fc72ce 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -68,7 +68,7 @@ static inline int cpu_to_node(int cpu) #define cpumask_of_node cpumask_of_node static inline const struct cpumask *cpumask_of_node(int node) { - return node_to_cpumask_map[node]; + return &node_to_cpumask_map[node]; } /* diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 48c9af7a7683..3aeeb1b562c0 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -176,6 +176,7 @@ int main(void) DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste)); DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area)); DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr)); + DEFINE(__LC_PERCPU_OFFSET, offsetof(struct _lowcore, percpu_offset)); DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb)); diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 09b039d7983d..582fe44ab07c 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -733,6 +733,14 @@ ENTRY(psw_idle) stg %r3,__SF_EMPTY(%r15) larl %r1,.Lpsw_idle_lpsw+4 stg %r1,__SF_EMPTY+8(%r15) +#ifdef CONFIG_SMP + larl %r1,smp_cpu_mtid + llgf %r1,0(%r1) + ltgr %r1,%r1 + jz .Lpsw_idle_stcctm + .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15) +.Lpsw_idle_stcctm: +#endif STCK __CLOCK_IDLE_ENTER(%r2) stpt __TIMER_IDLE_ENTER(%r2) .Lpsw_idle_lpsw: 
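The __LC_PERCPU_OFFSET constant added to asm-offsets.c above exists so that the idle-cycle cleanup code added to entry.S below can locate the per-CPU mt_cycles array from assembly (ag %r3,__LC_PERCPU_OFFSET). For readers unfamiliar with the mechanism: DEFINE() embeds the value as a marker line in the compiler's assembly output, and the build rewrites those lines into #defines in the generated asm-offsets.h. A rough standalone illustration follows; the struct and names here are invented, and the real macro lives in include/linux/kbuild.h.

#include <stddef.h>

/* Same trick as the kernel's DEFINE(): emit "->NAME value" into the .s file. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct toy_lowcore {		/* hypothetical stand-in for struct _lowcore */
	unsigned long steal_timer;
	unsigned long percpu_offset;
};

int main(void)
{
	/*
	 * Compile with "gcc -S" and the assembly will contain a line like
	 *   ->TOY_PERCPU_OFFSET $8 offsetof(struct toy_lowcore, percpu_offset)
	 * which the build system turns into
	 *   #define TOY_PERCPU_OFFSET 8
	 * for consumption by assembly sources such as entry.S.
	 */
	DEFINE(TOY_PERCPU_OFFSET, offsetof(struct toy_lowcore, percpu_offset));
	return 0;
}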
@@ -1159,7 +1167,27 @@ cleanup_critical: jhe 1f mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) -1: # account system time going idle +1: # calculate idle cycles +#ifdef CONFIG_SMP + clg %r9,BASED(.Lcleanup_idle_insn) + jl 3f + larl %r1,smp_cpu_mtid + llgf %r1,0(%r1) + ltgr %r1,%r1 + jz 3f + .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15) + larl %r3,mt_cycles + ag %r3,__LC_PERCPU_OFFSET + la %r4,__SF_EMPTY+16(%r15) +2: lg %r0,0(%r3) + slg %r0,0(%r4) + alg %r0,64(%r4) + stg %r0,0(%r3) + la %r3,8(%r3) + la %r4,8(%r4) + brct %r1,2b +#endif +3: # account system time going idle lg %r9,__LC_STEAL_TIMER alg %r9,__CLOCK_IDLE_ENTER(%r2) slg %r9,__LC_LAST_UPDATE_CLOCK diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index c8653435c70d..dafc44f519c3 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -25,7 +25,7 @@ static DEFINE_SPINLOCK(virt_timer_lock); static atomic64_t virt_timer_current; static atomic64_t virt_timer_elapsed; -static DEFINE_PER_CPU(u64, mt_cycles[32]); +DEFINE_PER_CPU(u64, mt_cycles[8]); static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 }; static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 }; static DEFINE_PER_CPU(u64, mt_scaling_jiffies); @@ -60,6 +60,34 @@ static inline int virt_timer_forward(u64 elapsed) return elapsed >= atomic64_read(&virt_timer_current); } +static void update_mt_scaling(void) +{ + u64 cycles_new[8], *cycles_old; + u64 delta, fac, mult, div; + int i; + + stcctm5(smp_cpu_mtid + 1, cycles_new); + cycles_old = this_cpu_ptr(mt_cycles); + fac = 1; + mult = div = 0; + for (i = 0; i <= smp_cpu_mtid; i++) { + delta = cycles_new[i] - cycles_old[i]; + div += delta; + mult *= i + 1; + mult += delta * fac; + fac *= i + 1; + } + div *= fac; + if (div > 0) { + /* Update scaling factor */ + __this_cpu_write(mt_scaling_mult, mult); + __this_cpu_write(mt_scaling_div, div); + memcpy(cycles_old, cycles_new, + sizeof(u64) * (smp_cpu_mtid + 1)); + } + __this_cpu_write(mt_scaling_jiffies, jiffies_64); +} + /* * Update process times based on virtual cpu times stored by entry.S * to the lowcore fields user_timer, system_timer & steal_clock. 
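The mult/div bookkeeping in update_mt_scaling() above is compact but not obvious: the running factorial keeps everything in integer arithmetic while, algebraically, the final ratio works out to (sum of delta_i/(i+1)) divided by (sum of delta_i). The standalone sketch below is not from the patch; it simply replays the same loop for a hypothetical CPU with smp_cpu_mtid == 1 (two hardware threads) and made-up cycle deltas so the arithmetic can be checked by eye.

#include <stdio.h>

int main(void)
{
	/* Invented per-thread cycle counter snapshots; deltas are 600 and 300. */
	unsigned long long cycles_old[2] = { 1000, 1000 };
	unsigned long long cycles_new[2] = { 1600, 1300 };
	unsigned long long delta, fac, mult, div;
	int i, smp_cpu_mtid = 1;

	/* Same loop as update_mt_scaling() in the hunk above. */
	fac = 1;
	mult = div = 0;
	for (i = 0; i <= smp_cpu_mtid; i++) {
		delta = cycles_new[i] - cycles_old[i];
		div += delta;
		mult *= i + 1;
		mult += delta * fac;
		fac *= i + 1;
	}
	div *= fac;

	/* mult/div reduces to (600 + 300/2) / (600 + 300) = 750/900 ~ 0.833 */
	printf("mult=%llu div=%llu scale=%.3f\n",
	       mult, div, (double)mult / (double)div);
	return 0;
}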
@@ -69,7 +97,6 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) struct thread_info *ti = task_thread_info(tsk); u64 timer, clock, user, system, steal; u64 user_scaled, system_scaled; - int i; timer = S390_lowcore.last_update_timer; clock = S390_lowcore.last_update_clock; @@ -85,34 +112,10 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock; - /* Do MT utilization calculation */ + /* Update MT utilization calculation */ if (smp_cpu_mtid && - time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) { - u64 cycles_new[32], *cycles_old; - u64 delta, fac, mult, div; - - cycles_old = this_cpu_ptr(mt_cycles); - if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) { - fac = 1; - mult = div = 0; - for (i = 0; i <= smp_cpu_mtid; i++) { - delta = cycles_new[i] - cycles_old[i]; - div += delta; - mult *= i + 1; - mult += delta * fac; - fac *= i + 1; - } - div *= fac; - if (div > 0) { - /* Update scaling factor */ - __this_cpu_write(mt_scaling_mult, mult); - __this_cpu_write(mt_scaling_div, div); - memcpy(cycles_old, cycles_new, - sizeof(u64) * (smp_cpu_mtid + 1)); - } - } - __this_cpu_write(mt_scaling_jiffies, jiffies_64); - } + time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies))) + update_mt_scaling(); user = S390_lowcore.user_timer - ti->user_timer; S390_lowcore.steal_timer -= user; @@ -181,6 +184,11 @@ void vtime_account_irq_enter(struct task_struct *tsk) S390_lowcore.last_update_timer = get_vtimer(); S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; + /* Update MT utilization calculation */ + if (smp_cpu_mtid && + time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies))) + update_mt_scaling(); + system = S390_lowcore.system_timer - ti->system_timer; S390_lowcore.steal_timer -= system; ti->system_timer = S390_lowcore.system_timer; diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c index 7de4e2f780d7..30b2698a28e2 100644 --- a/arch/s390/numa/mode_emu.c +++ b/arch/s390/numa/mode_emu.c @@ -368,7 +368,7 @@ static void topology_add_core(struct toptree *core) cpumask_copy(&top->thread_mask, &core->mask); cpumask_copy(&top->core_mask, &core_mc(core)->mask); cpumask_copy(&top->book_mask, &core_book(core)->mask); - cpumask_set_cpu(cpu, node_to_cpumask_map[core_node(core)->id]); + cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]); top->node_id = core_node(core)->id; } } @@ -383,7 +383,7 @@ static void toptree_to_topology(struct toptree *numa) /* Clear all node masks */ for (i = 0; i < MAX_NUMNODES; i++) - cpumask_clear(node_to_cpumask_map[i]); + cpumask_clear(&node_to_cpumask_map[i]); /* Rebuild all masks */ toptree_for_each(core, numa, CORE) diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c index 09b1d2355bd9..43f32ce60aa3 100644 --- a/arch/s390/numa/numa.c +++ b/arch/s390/numa/numa.c @@ -23,7 +23,7 @@ pg_data_t *node_data[MAX_NUMNODES]; EXPORT_SYMBOL(node_data); -cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +cpumask_t node_to_cpumask_map[MAX_NUMNODES]; EXPORT_SYMBOL(node_to_cpumask_map); const struct numa_mode numa_mode_plain = { @@ -144,7 +144,7 @@ void __init numa_setup(void) static int __init numa_init_early(void) { /* Attach all possible CPUs to node 0 for now. 
*/ - cpumask_copy(node_to_cpumask_map[0], cpu_possible_mask); + cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask); return 0; } early_initcall(numa_init_early); diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index 92ffe397b893..a05218ff3fe4 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild @@ -13,3 +13,4 @@ generic-y += sections.h generic-y += trace_clock.h generic-y += xor.h generic-y += serial.h +generic-y += word-at-a-time.h diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c index ee186e13dfe6..f102048d9c0e 100644 --- a/arch/tile/gxio/mpipe.c +++ b/arch/tile/gxio/mpipe.c @@ -19,6 +19,7 @@ #include <linux/errno.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/string.h> #include <gxio/iorpc_globals.h> #include <gxio/iorpc_mpipe.h> @@ -29,32 +30,6 @@ /* HACK: Avoid pointless "shadow" warnings. */ #define link link_shadow -/** - * strscpy - Copy a C-string into a sized buffer, but only if it fits - * @dest: Where to copy the string to - * @src: Where to copy the string from - * @size: size of destination buffer - * - * Use this routine to avoid copying too-long strings. - * The routine returns the total number of bytes copied - * (including the trailing NUL) or zero if the buffer wasn't - * big enough. To ensure that programmers pay attention - * to the return code, the destination has a single NUL - * written at the front (if size is non-zero) when the - * buffer is not big enough. - */ -static size_t strscpy(char *dest, const char *src, size_t size) -{ - size_t len = strnlen(src, size) + 1; - if (len > size) { - if (size) - dest[0] = '\0'; - return 0; - } - memcpy(dest, src, len); - return len; -} - int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index) { char file[32]; @@ -540,7 +515,7 @@ int gxio_mpipe_link_instance(const char *link_name) if (!context) return GXIO_ERR_NO_DEVICE; - if (strscpy(name.name, link_name, sizeof(name.name)) == 0) + if (strscpy(name.name, link_name, sizeof(name.name)) < 0) return GXIO_ERR_NO_DEVICE; return gxio_mpipe_info_instance_aux(context, name); @@ -559,7 +534,7 @@ int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac) rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac); if (rv >= 0) { - if (strscpy(link_name, name.name, sizeof(name.name)) == 0) + if (strscpy(link_name, name.name, sizeof(name.name)) < 0) return GXIO_ERR_INVAL_MEMORY_SIZE; memcpy(link_mac, mac.mac, sizeof(mac.mac)); } @@ -576,7 +551,7 @@ int gxio_mpipe_link_open(gxio_mpipe_link_t *link, _gxio_mpipe_link_name_t name; int rv; - if (strscpy(name.name, link_name, sizeof(name.name)) == 0) + if (strscpy(name.name, link_name, sizeof(name.name)) < 0) return GXIO_ERR_NO_DEVICE; rv = gxio_mpipe_link_open_aux(context, name, flags); diff --git a/arch/tile/include/asm/word-at-a-time.h b/arch/tile/include/asm/word-at-a-time.h index 9e5ce0d7b292..b66a693c2c34 100644 --- a/arch/tile/include/asm/word-at-a-time.h +++ b/arch/tile/include/asm/word-at-a-time.h @@ -6,7 +6,7 @@ struct word_at_a_time { /* unused */ }; #define WORD_AT_A_TIME_CONSTANTS {} -/* Generate 0x01 byte values for non-zero bytes using a SIMD instruction. */ +/* Generate 0x01 byte values for zero bytes using a SIMD instruction. 
*/ static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) { @@ -33,4 +33,10 @@ static inline long find_zero(unsigned long mask) #endif } +#ifdef __BIG_ENDIAN +#define zero_bytemask(mask) (~1ul << (63 - __builtin_clzl(mask))) +#else +#define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1) +#endif + #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/arch/tile/kernel/usb.c b/arch/tile/kernel/usb.c index f0da5a237e94..9f1e05e12255 100644 --- a/arch/tile/kernel/usb.c +++ b/arch/tile/kernel/usb.c @@ -22,6 +22,7 @@ #include <linux/platform_device.h> #include <linux/usb/tilegx.h> #include <linux/init.h> +#include <linux/module.h> #include <linux/types.h> static u64 ehci_dmamask = DMA_BIT_MASK(32); diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 149ec55f9c46..904f3ebf4220 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild @@ -25,4 +25,5 @@ generic-y += preempt.h generic-y += switch_to.h generic-y += topology.h generic-y += trace_clock.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index 1fc7a286dc6f..256c45b3ae34 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild @@ -62,4 +62,5 @@ generic-y += ucontext.h generic-y += unaligned.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 328c8352480c..96d058a87100 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1308,6 +1308,7 @@ config HIGHMEM config X86_PAE bool "PAE (Physical Address Extension) Support" depends on X86_32 && !HIGHMEM4G + select SWIOTLB ---help--- PAE is required for NX support, and furthermore enables larger swapspace support for non-overcommit purposes. It diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index e6cf2ad350d1..9727b3b48bd1 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -193,7 +193,7 @@ #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ #define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */ -#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ +#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ #define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */ #define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */ #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */ diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index ab5f1d447ef9..ae68be92f755 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -86,6 +86,7 @@ extern u64 asmlinkage efi_call(void *fp, ...); extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, u32 type, u64 attribute); +#ifdef CONFIG_KASAN /* * CONFIG_KASAN may redefine memset to __memset. __memset function is present * only in kernel binary. 
Since the EFI stub linked into a separate binary it @@ -95,6 +96,7 @@ extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, #undef memcpy #undef memset #undef memmove +#endif #endif /* CONFIG_X86_32 */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index b98b471a3b7e..b8c14bb7fc8f 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -141,6 +141,8 @@ #define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10) #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11) +#define MSR_PEBS_FRONTEND 0x000003f7 + #define MSR_IA32_POWER_CTL 0x000001fc #define MSR_IA32_MC0_CTL 0x00000400 diff --git a/arch/x86/include/asm/pvclock-abi.h b/arch/x86/include/asm/pvclock-abi.h index 655e07a48f6c..67f08230103a 100644 --- a/arch/x86/include/asm/pvclock-abi.h +++ b/arch/x86/include/asm/pvclock-abi.h @@ -41,6 +41,7 @@ struct pvclock_wall_clock { #define PVCLOCK_TSC_STABLE_BIT (1 << 0) #define PVCLOCK_GUEST_STOPPED (1 << 1) +/* PVCLOCK_COUNTS_FROM_ZERO broke ABI and can't be used anymore. */ #define PVCLOCK_COUNTS_FROM_ZERO (1 << 2) #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_PVCLOCK_ABI_H */ diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index 83aea8055119..4c20dd333412 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -336,10 +336,10 @@ HYPERVISOR_update_descriptor(u64 ma, u64 desc) return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); } -static inline int +static inline long HYPERVISOR_memory_op(unsigned int cmd, void *arg) { - return _hypercall2(int, memory_op, cmd, arg); + return _hypercall2(long, memory_op, cmd, arg); } static inline int diff --git a/arch/x86/include/uapi/asm/bitsperlong.h b/arch/x86/include/uapi/asm/bitsperlong.h index b0ae1c4dc791..217909b4d6f5 100644 --- a/arch/x86/include/uapi/asm/bitsperlong.h +++ b/arch/x86/include/uapi/asm/bitsperlong.h @@ -1,7 +1,7 @@ #ifndef __ASM_X86_BITSPERLONG_H #define __ASM_X86_BITSPERLONG_H -#ifdef __x86_64__ +#if defined(__x86_64__) && !defined(__ILP32__) # define __BITS_PER_LONG 64 #else # define __BITS_PER_LONG 32 diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 381c8b9b3a33..20e242ea1bc4 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -34,11 +34,10 @@ struct ms_hyperv_info ms_hyperv; EXPORT_SYMBOL_GPL(ms_hyperv); -static void (*hv_kexec_handler)(void); -static void (*hv_crash_handler)(struct pt_regs *regs); - #if IS_ENABLED(CONFIG_HYPERV) static void (*vmbus_handler)(void); +static void (*hv_kexec_handler)(void); +static void (*hv_crash_handler)(struct pt_regs *regs); void hyperv_vector_handler(struct pt_regs *regs) { @@ -96,8 +95,8 @@ void hv_remove_crash_handler(void) hv_crash_handler = NULL; } EXPORT_SYMBOL_GPL(hv_remove_crash_handler); -#endif +#ifdef CONFIG_KEXEC_CORE static void hv_machine_shutdown(void) { if (kexec_in_progress && hv_kexec_handler) @@ -111,7 +110,8 @@ static void hv_machine_crash_shutdown(struct pt_regs *regs) hv_crash_handler(regs); native_machine_crash_shutdown(regs); } - +#endif /* CONFIG_KEXEC_CORE */ +#endif /* CONFIG_HYPERV */ static uint32_t __init ms_hyperv_platform(void) { @@ -186,8 +186,10 @@ static void __init ms_hyperv_init_platform(void) no_timer_check = 1; #endif +#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE) machine_ops.shutdown = hv_machine_shutdown; machine_ops.crash_shutdown = hv_machine_crash_shutdown; +#endif mark_tsc_unstable("running on Hyper-V"); } 
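One of the quieter fixes above is the bitsperlong.h change: the x32 ABI defines __x86_64__ but uses 32-bit longs, so testing __x86_64__ alone would leave __BITS_PER_LONG at 64 for x32 userspace; the added !defined(__ILP32__) test corrects that. The small standalone check below mirrors the corrected test. Nothing in it is from the patch, and it is only meant to be built with -m64, -m32 and (where the toolchain supports it) -mx32 to compare the results.

#include <stdio.h>

/* Mirrors the corrected preprocessor test from asm/bitsperlong.h. */
#if defined(__x86_64__) && !defined(__ILP32__)
# define MY_BITS_PER_LONG 64
#else
# define MY_BITS_PER_LONG 32
#endif

int main(void)
{
	/*
	 * Under -mx32 both __x86_64__ and __ILP32__ are defined and
	 * sizeof(long) is 4, so the extra test keeps the two values in sync.
	 */
	printf("MY_BITS_PER_LONG=%d, 8*sizeof(long)=%zu\n",
	       MY_BITS_PER_LONG, 8 * sizeof(long));
	return 0;
}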
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 5edf6d868fc1..165be83a7fa4 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -47,6 +47,7 @@ enum extra_reg_type { EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ EXTRA_REG_LBR = 2, /* lbr_select */ EXTRA_REG_LDLAT = 3, /* ld_lat_threshold */ + EXTRA_REG_FE = 4, /* fe_* */ EXTRA_REG_MAX /* number of entries needed */ }; diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 3fefebfbdf4b..f63360be2238 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -205,6 +205,11 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = { INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), + /* + * Note the low 8 bits eventsel code is not a continuous field, containing + * some #GPing bits. These are masked out. + */ + INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE), EVENT_EXTRA_END }; @@ -250,7 +255,7 @@ struct event_constraint intel_bdw_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */ - INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */ + INTEL_UEVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */ EVENT_CONSTRAINT_END }; @@ -2891,6 +2896,8 @@ PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); PMU_FORMAT_ATTR(ldlat, "config1:0-15"); +PMU_FORMAT_ATTR(frontend, "config1:0-23"); + static struct attribute *intel_arch3_formats_attr[] = { &format_attr_event.attr, &format_attr_umask.attr, @@ -2907,6 +2914,11 @@ static struct attribute *intel_arch3_formats_attr[] = { NULL, }; +static struct attribute *skl_format_attr[] = { + &format_attr_frontend.attr, + NULL, +}; + static __initconst const struct x86_pmu core_pmu = { .name = "core", .handle_irq = x86_pmu_handle_irq, @@ -3516,7 +3528,8 @@ __init int intel_pmu_init(void) x86_pmu.hw_config = hsw_hw_config; x86_pmu.get_event_constraints = hsw_get_event_constraints; - x86_pmu.cpu_events = hsw_events_attrs; + x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr, + skl_format_attr); WARN_ON(!x86_pmu.format_attrs); x86_pmu.cpu_events = hsw_events_attrs; pr_cont("Skylake events, "); diff --git a/arch/x86/kernel/cpu/perf_event_msr.c b/arch/x86/kernel/cpu/perf_event_msr.c index 086b12eae794..f32ac13934f2 100644 --- a/arch/x86/kernel/cpu/perf_event_msr.c +++ b/arch/x86/kernel/cpu/perf_event_msr.c @@ -10,12 +10,12 @@ enum perf_msr_id { PERF_MSR_EVENT_MAX, }; -bool test_aperfmperf(int idx) +static bool test_aperfmperf(int idx) { return boot_cpu_has(X86_FEATURE_APERFMPERF); } -bool test_intel(int idx) +static bool test_intel(int idx) { if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || boot_cpu_data.x86 != 6) diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 3d423a101fae..608fb26c7254 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -37,7 +37,7 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c) { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 }, { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 }, - { X86_FEATURE_HWP_NOITFY, CR_EAX, 8, 0x00000006, 0 }, + { X86_FEATURE_HWP_NOTIFY, CR_EAX, 8, 0x00000006, 
0 }, { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 }, { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 }, { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 }, diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index e068d6683dba..74ca2fe7a0b3 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -185,10 +185,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs) } #ifdef CONFIG_KEXEC_FILE -static int get_nr_ram_ranges_callback(unsigned long start_pfn, - unsigned long nr_pfn, void *arg) +static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg) { - int *nr_ranges = arg; + unsigned int *nr_ranges = arg; (*nr_ranges)++; return 0; @@ -214,7 +213,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced, ced->image = image; - walk_system_ram_range(0, -1, &nr_ranges, + walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback); ced->max_nr_ranges = nr_ranges; diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 6d0e62ae8516..39e585a554b7 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -506,3 +506,58 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) return randomize_range(mm->brk, range_end, 0) ? : mm->brk; } +/* + * Called from fs/proc with a reference on @p to find the function + * which called into schedule(). This needs to be done carefully + * because the task might wake up and we might look at a stack + * changing under us. + */ +unsigned long get_wchan(struct task_struct *p) +{ + unsigned long start, bottom, top, sp, fp, ip; + int count = 0; + + if (!p || p == current || p->state == TASK_RUNNING) + return 0; + + start = (unsigned long)task_stack_page(p); + if (!start) + return 0; + + /* + * Layout of the stack page: + * + * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long) + * PADDING + * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING + * stack + * ----------- bottom = start + sizeof(thread_info) + * thread_info + * ----------- start + * + * The tasks stack pointer points at the location where the + * framepointer is stored. The data on the stack is: + * ... IP FP ... IP FP + * + * We need to read FP and IP, so we need to adjust the upper + * bound by another unsigned long. 
+ */ + top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; + top -= 2 * sizeof(unsigned long); + bottom = start + sizeof(struct thread_info); + + sp = READ_ONCE(p->thread.sp); + if (sp < bottom || sp > top) + return 0; + + fp = READ_ONCE(*(unsigned long *)sp); + do { + if (fp < bottom || fp > top) + return 0; + ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long))); + if (!in_sched_functions(ip)) + return ip; + fp = READ_ONCE(*(unsigned long *)fp); + } while (count++ < 16 && p->state != TASK_RUNNING); + return 0; +} diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index c13df2c735f8..737527b40e5b 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -324,31 +324,3 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) return prev_p; } - -#define top_esp (THREAD_SIZE - sizeof(unsigned long)) -#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long)) - -unsigned long get_wchan(struct task_struct *p) -{ - unsigned long bp, sp, ip; - unsigned long stack_page; - int count = 0; - if (!p || p == current || p->state == TASK_RUNNING) - return 0; - stack_page = (unsigned long)task_stack_page(p); - sp = p->thread.sp; - if (!stack_page || sp < stack_page || sp > top_esp+stack_page) - return 0; - /* include/asm-i386/system.h:switch_to() pushes bp last. */ - bp = *(unsigned long *) sp; - do { - if (bp < stack_page || bp > top_ebp+stack_page) - return 0; - ip = *(unsigned long *) (bp+4); - if (!in_sched_functions(ip)) - return ip; - bp = *(unsigned long *) bp; - } while (count++ < 16); - return 0; -} - diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 3c1bbcf12924..b35921a670b2 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -499,30 +499,6 @@ void set_personality_ia32(bool x32) } EXPORT_SYMBOL_GPL(set_personality_ia32); -unsigned long get_wchan(struct task_struct *p) -{ - unsigned long stack; - u64 fp, ip; - int count = 0; - - if (!p || p == current || p->state == TASK_RUNNING) - return 0; - stack = (unsigned long)task_stack_page(p); - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE) - return 0; - fp = *(u64 *)(p->thread.sp); - do { - if (fp < (unsigned long)stack || - fp >= (unsigned long)stack+THREAD_SIZE) - return 0; - ip = *(u64 *)(fp+8); - if (!in_sched_functions(ip)) - return ip; - fp = *(u64 *)fp; - } while (count++ < 16); - return 0; -} - long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) { int ret = 0; diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 94b7d15db3fc..2f9ed1ff0632 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -514,7 +514,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) struct vcpu_svm *svm = to_svm(vcpu); if (svm->vmcb->control.next_rip != 0) { - WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS)); + WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS)); svm->next_rip = svm->vmcb->control.next_rip; } @@ -866,64 +866,6 @@ static void svm_disable_lbrv(struct vcpu_svm *svm) set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0); } -#define MTRR_TYPE_UC_MINUS 7 -#define MTRR2PROTVAL_INVALID 0xff - -static u8 mtrr2protval[8]; - -static u8 fallback_mtrr_type(int mtrr) -{ - /* - * WT and WP aren't always available in the host PAT. Treat - * them as UC and UC- respectively. Everything else should be - * there. 
- */ - switch (mtrr) - { - case MTRR_TYPE_WRTHROUGH: - return MTRR_TYPE_UNCACHABLE; - case MTRR_TYPE_WRPROT: - return MTRR_TYPE_UC_MINUS; - default: - BUG(); - } -} - -static void build_mtrr2protval(void) -{ - int i; - u64 pat; - - for (i = 0; i < 8; i++) - mtrr2protval[i] = MTRR2PROTVAL_INVALID; - - /* Ignore the invalid MTRR types. */ - mtrr2protval[2] = 0; - mtrr2protval[3] = 0; - - /* - * Use host PAT value to figure out the mapping from guest MTRR - * values to nested page table PAT/PCD/PWT values. We do not - * want to change the host PAT value every time we enter the - * guest. - */ - rdmsrl(MSR_IA32_CR_PAT, pat); - for (i = 0; i < 8; i++) { - u8 mtrr = pat >> (8 * i); - - if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID) - mtrr2protval[mtrr] = __cm_idx2pte(i); - } - - for (i = 0; i < 8; i++) { - if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) { - u8 fallback = fallback_mtrr_type(i); - mtrr2protval[i] = mtrr2protval[fallback]; - BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID); - } - } -} - static __init int svm_hardware_setup(void) { int cpu; @@ -990,7 +932,6 @@ static __init int svm_hardware_setup(void) } else kvm_disable_tdp(); - build_mtrr2protval(); return 0; err: @@ -1145,43 +1086,6 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) return target_tsc - tsc; } -static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat) -{ - struct kvm_vcpu *vcpu = &svm->vcpu; - - /* Unlike Intel, AMD takes the guest's CR0.CD into account. - * - * AMD does not have IPAT. To emulate it for the case of guests - * with no assigned devices, just set everything to WB. If guests - * have assigned devices, however, we cannot force WB for RAM - * pages only, so use the guest PAT directly. - */ - if (!kvm_arch_has_assigned_device(vcpu->kvm)) - *g_pat = 0x0606060606060606; - else - *g_pat = vcpu->arch.pat; -} - -static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) -{ - u8 mtrr; - - /* - * 1. MMIO: trust guest MTRR, so same as item 3. - * 2. No passthrough: always map as WB, and force guest PAT to WB as well - * 3. Passthrough: can't guarantee the result, try to trust guest. - */ - if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm)) - return 0; - - if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED) && - kvm_read_cr0(vcpu) & X86_CR0_CD) - return _PAGE_NOCACHE; - - mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn); - return mtrr2protval[mtrr]; -} - static void init_vmcb(struct vcpu_svm *svm, bool init_event) { struct vmcb_control_area *control = &svm->vmcb->control; @@ -1278,7 +1182,6 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event) clr_cr_intercept(svm, INTERCEPT_CR3_READ); clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); save->g_pat = svm->vcpu.arch.pat; - svm_set_guest_pat(svm, &save->g_pat); save->cr3 = 0; save->cr4 = 0; } @@ -1673,10 +1576,13 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) if (!vcpu->fpu_active) cr0 |= X86_CR0_TS; - - /* These are emulated via page tables. 
*/ - cr0 &= ~(X86_CR0_CD | X86_CR0_NW); - + /* + * re-enable caching here because the QEMU bios + * does not do it - this results in some delay at + * reboot + */ + if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) + cr0 &= ~(X86_CR0_CD | X86_CR0_NW); svm->vmcb->save.cr0 = cr0; mark_dirty(svm->vmcb, VMCB_CR); update_cr0_intercept(svm); @@ -3351,16 +3257,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_VM_IGNNE: vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); break; - case MSR_IA32_CR_PAT: - if (npt_enabled) { - if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) - return 1; - vcpu->arch.pat = data; - svm_set_guest_pat(svm, &svm->vmcb->save.g_pat); - mark_dirty(svm->vmcb, VMCB_NPT); - break; - } - /* fall through */ default: return kvm_set_msr_common(vcpu, msr); } @@ -4195,6 +4091,11 @@ static bool svm_has_high_real_mode_segbase(void) return true; } +static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) +{ + return 0; +} + static void svm_cpuid_update(struct kvm_vcpu *vcpu) { } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 64076740251e..06ef4908ba61 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -8617,17 +8617,22 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) u64 ipat = 0; /* For VT-d and EPT combination - * 1. MMIO: guest may want to apply WC, trust it. + * 1. MMIO: always map as UC * 2. EPT with VT-d: * a. VT-d without snooping control feature: can't guarantee the - * result, try to trust guest. So the same as item 1. + * result, try to trust guest. * b. VT-d with snooping control feature: snooping control feature of * VT-d engine can guarantee the cache correctness. Just set it * to WB to keep consistent with host. So the same as item 3. * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep * consistent with host MTRR */ - if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) { + if (is_mmio) { + cache = MTRR_TYPE_UNCACHABLE; + goto exit; + } + + if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { ipat = VMX_EPT_IPAT_BIT; cache = MTRR_TYPE_WRBACK; goto exit; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 991466bf8dee..92511d4b7236 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1708,8 +1708,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) vcpu->pvclock_set_guest_stopped_request = false; } - pvclock_flags |= PVCLOCK_COUNTS_FROM_ZERO; - /* If the host uses TSC clocksource, then it is stable */ if (use_master_clock) pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; @@ -2007,8 +2005,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) &vcpu->requests); ka->boot_vcpu_runs_old_kvmclock = tmp; - - ka->kvmclock_offset = -get_kernel_ns(); } vcpu->arch.time = data; diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 30564e2752d3..df48430c279b 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1132,7 +1132,7 @@ void mark_rodata_ro(void) * has been zapped already via cleanup_highmem(). 
*/ all_end = roundup((unsigned long)_brk_end, PMD_SIZE); - set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT); + set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT); rodata_test(); diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 1db84c0758b7..6a28ded74211 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -705,6 +705,70 @@ out: } /* + * Iterate the EFI memory map in reverse order because the regions + * will be mapped top-down. The end result is the same as if we had + * mapped things forward, but doesn't require us to change the + * existing implementation of efi_map_region(). + */ +static inline void *efi_map_next_entry_reverse(void *entry) +{ + /* Initial call */ + if (!entry) + return memmap.map_end - memmap.desc_size; + + entry -= memmap.desc_size; + if (entry < memmap.map) + return NULL; + + return entry; +} + +/* + * efi_map_next_entry - Return the next EFI memory map descriptor + * @entry: Previous EFI memory map descriptor + * + * This is a helper function to iterate over the EFI memory map, which + * we do in different orders depending on the current configuration. + * + * To begin traversing the memory map @entry must be %NULL. + * + * Returns %NULL when we reach the end of the memory map. + */ +static void *efi_map_next_entry(void *entry) +{ + if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) { + /* + * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE + * config table feature requires us to map all entries + * in the same order as they appear in the EFI memory + * map. That is to say, entry N must have a lower + * virtual address than entry N+1. This is because the + * firmware toolchain leaves relative references in + * the code/data sections, which are split and become + * separate EFI memory regions. Mapping things + * out-of-order leads to the firmware accessing + * unmapped addresses. + * + * Since we need to map things this way whether or not + * the kernel actually makes use of + * EFI_PROPERTIES_TABLE, let's just switch to this + * scheme by default for 64-bit. + */ + return efi_map_next_entry_reverse(entry); + } + + /* Initial call */ + if (!entry) + return memmap.map; + + entry += memmap.desc_size; + if (entry >= memmap.map_end) + return NULL; + + return entry; +} + +/* * Map the efi memory ranges of the runtime services and update new_mmap with * virtual addresses. */ @@ -714,7 +778,8 @@ static void * __init efi_map_regions(int *count, int *pg_shift) unsigned long left = 0; efi_memory_desc_t *md; - for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { + p = NULL; + while ((p = efi_map_next_entry(p))) { md = p; if (!(md->attribute & EFI_MEMORY_RUNTIME)) { #ifdef CONFIG_X86_64 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 30d12afe52ed..993b7a71386d 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -33,6 +33,10 @@ #include <linux/memblock.h> #include <linux/edd.h> +#ifdef CONFIG_KEXEC_CORE +#include <linux/kexec.h> +#endif + #include <xen/xen.h> #include <xen/events.h> #include <xen/interface/xen.h> @@ -1077,6 +1081,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) /* Fast syscall setup is all done in hypercalls, so these are all ignored. Stub them out here to stop Xen console noise. 
*/ + break; default: if (!pmu_msr_write(msr, low, high, &ret)) @@ -1807,6 +1812,21 @@ static struct notifier_block xen_hvm_cpu_notifier = { .notifier_call = xen_hvm_cpu_notify, }; +#ifdef CONFIG_KEXEC_CORE +static void xen_hvm_shutdown(void) +{ + native_machine_shutdown(); + if (kexec_in_progress) + xen_reboot(SHUTDOWN_soft_reset); +} + +static void xen_hvm_crash_shutdown(struct pt_regs *regs) +{ + native_machine_crash_shutdown(regs); + xen_reboot(SHUTDOWN_soft_reset); +} +#endif + static void __init xen_hvm_guest_init(void) { if (xen_pv_domain()) @@ -1826,6 +1846,10 @@ static void __init xen_hvm_guest_init(void) x86_init.irqs.intr_init = xen_init_IRQ; xen_hvm_init_time_ops(); xen_hvm_init_mmu_ops(); +#ifdef CONFIG_KEXEC_CORE + machine_ops.shutdown = xen_hvm_shutdown; + machine_ops.crash_shutdown = xen_hvm_crash_shutdown; +#endif } #endif diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index bfc08b13044b..660b3cfef234 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -112,6 +112,15 @@ static unsigned long *p2m_identity; static pte_t *p2m_missing_pte; static pte_t *p2m_identity_pte; +/* + * Hint at last populated PFN. + * + * Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack + * can avoid scanning the whole P2M (which may be sized to account for + * hotplugged memory). + */ +static unsigned long xen_p2m_last_pfn; + static inline unsigned p2m_top_index(unsigned long pfn) { BUG_ON(pfn >= MAX_P2M_PFN); @@ -270,7 +279,7 @@ void xen_setup_mfn_list_list(void) else HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = virt_to_mfn(p2m_top_mfn); - HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn; + HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn; HYPERVISOR_shared_info->arch.p2m_generation = 0; HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr; HYPERVISOR_shared_info->arch.p2m_cr3 = @@ -406,6 +415,8 @@ void __init xen_vmalloc_p2m_tree(void) static struct vm_struct vm; unsigned long p2m_limit; + xen_p2m_last_pfn = xen_max_p2m_pfn; + p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE; vm.flags = VM_ALLOC; vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit), @@ -608,6 +619,12 @@ static bool alloc_p2m(unsigned long pfn) free_p2m_page(p2m); } + /* Expanded the p2m? */ + if (pfn > xen_p2m_last_pfn) { + xen_p2m_last_pfn = pfn; + HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn; + } + return true; } diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index f5ef6746d47a..1c30e4ab1022 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -548,7 +548,7 @@ static unsigned long __init xen_get_max_pages(void) { unsigned long max_pages, limit; domid_t domid = DOMID_SELF; - int ret; + long ret; limit = xen_get_pages_limit(); max_pages = limit; @@ -798,7 +798,7 @@ char * __init xen_memory_setup(void) xen_ignore_unusable(); /* Make sure the Xen-supplied memory map is well-ordered. 
*/ - sanitize_e820_map(xen_e820_map, xen_e820_map_entries, + sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map), &xen_e820_map_entries); max_pages = xen_get_max_pages(); diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 63c223dff5f1..b56855a1382a 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -28,4 +28,5 @@ generic-y += statfs.h generic-y += termios.h generic-y += topology.h generic-y += trace_clock.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c index 1e28ddb656b8..8764c241e5bb 100644 --- a/block/blk-mq-cpumap.c +++ b/block/blk-mq-cpumap.c @@ -31,7 +31,8 @@ static int get_first_sibling(unsigned int cpu) return cpu; } -int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) +int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, + const struct cpumask *online_mask) { unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; cpumask_var_t cpus; @@ -41,7 +42,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) cpumask_clear(cpus); nr_cpus = nr_uniq_cpus = 0; - for_each_online_cpu(i) { + for_each_cpu(i, online_mask) { nr_cpus++; first_sibling = get_first_sibling(i); if (!cpumask_test_cpu(first_sibling, cpus)) @@ -51,7 +52,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) queue = 0; for_each_possible_cpu(i) { - if (!cpu_online(i)) { + if (!cpumask_test_cpu(i, online_mask)) { map[i] = 0; continue; } @@ -95,7 +96,7 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set) if (!map) return NULL; - if (!blk_mq_update_queue_map(map, set->nr_hw_queues)) + if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask)) return map; kfree(map); diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 279c5d674edf..788fffd9b409 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -229,8 +229,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) unsigned int i, first = 1; ssize_t ret = 0; - blk_mq_disable_hotplug(); - for_each_cpu(i, hctx->cpumask) { if (first) ret += sprintf(ret + page, "%u", i); @@ -240,8 +238,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) first = 0; } - blk_mq_enable_hotplug(); - ret += sprintf(ret + page, "\n"); return ret; } @@ -343,7 +339,7 @@ static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) struct blk_mq_ctx *ctx; int i; - if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) + if (!hctx->nr_ctx) return; hctx_for_each_ctx(hctx, ctx, i) @@ -358,7 +354,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) struct blk_mq_ctx *ctx; int i, ret; - if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) + if (!hctx->nr_ctx) return 0; ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num); @@ -381,6 +377,8 @@ void blk_mq_unregister_disk(struct gendisk *disk) struct blk_mq_ctx *ctx; int i, j; + blk_mq_disable_hotplug(); + queue_for_each_hw_ctx(q, hctx, i) { blk_mq_unregister_hctx(hctx); @@ -395,6 +393,9 @@ void blk_mq_unregister_disk(struct gendisk *disk) kobject_put(&q->mq_kobj); kobject_put(&disk_to_dev(disk)->kobj); + + q->mq_sysfs_init_done = false; + blk_mq_enable_hotplug(); } static void blk_mq_sysfs_init(struct request_queue *q) @@ -425,27 +426,30 @@ int blk_mq_register_disk(struct gendisk *disk) struct blk_mq_hw_ctx *hctx; int ret, i; + blk_mq_disable_hotplug(); + blk_mq_sysfs_init(q); ret = kobject_add(&q->mq_kobj, 
kobject_get(&dev->kobj), "%s", "mq"); if (ret < 0) - return ret; + goto out; kobject_uevent(&q->mq_kobj, KOBJ_ADD); queue_for_each_hw_ctx(q, hctx, i) { - hctx->flags |= BLK_MQ_F_SYSFS_UP; ret = blk_mq_register_hctx(hctx); if (ret) break; } - if (ret) { + if (ret) blk_mq_unregister_disk(disk); - return ret; - } + else + q->mq_sysfs_init_done = true; +out: + blk_mq_enable_hotplug(); - return 0; + return ret; } EXPORT_SYMBOL_GPL(blk_mq_register_disk); @@ -454,6 +458,9 @@ void blk_mq_sysfs_unregister(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; + if (!q->mq_sysfs_init_done) + return; + queue_for_each_hw_ctx(q, hctx, i) blk_mq_unregister_hctx(hctx); } @@ -463,6 +470,9 @@ int blk_mq_sysfs_register(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i, ret = 0; + if (!q->mq_sysfs_init_done) + return ret; + queue_for_each_hw_ctx(q, hctx, i) { ret = blk_mq_register_hctx(hctx); if (ret) diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 9115c6d59948..ed96474d75cb 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -471,17 +471,30 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, } EXPORT_SYMBOL(blk_mq_all_tag_busy_iter); -void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, +void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, void *priv) { - struct blk_mq_tags *tags = hctx->tags; + struct blk_mq_hw_ctx *hctx; + int i; + + + queue_for_each_hw_ctx(q, hctx, i) { + struct blk_mq_tags *tags = hctx->tags; + + /* + * If not software queues are currently mapped to this + * hardware queue, there's nothing to check + */ + if (!blk_mq_hw_queue_mapped(hctx)) + continue; + + if (tags->nr_reserved_tags) + bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true); + bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv, + false); + } - if (tags->nr_reserved_tags) - bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true); - bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv, - false); } -EXPORT_SYMBOL(blk_mq_tag_busy_iter); static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt) { diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 9eb2cf4f01cb..d468a79f2c4a 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -58,6 +58,8 @@ extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); +void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, + void *priv); enum { BLK_MQ_TAG_CACHE_MIN = 1, diff --git a/block/blk-mq.c b/block/blk-mq.c index f2d67b4047a0..7785ae96267a 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -393,14 +393,16 @@ void __blk_mq_complete_request(struct request *rq) * Ends all I/O on a request. It does not handle partial completions. * The actual completion happens out-of-order, through a IPI handler. 
**/ -void blk_mq_complete_request(struct request *rq) +void blk_mq_complete_request(struct request *rq, int error) { struct request_queue *q = rq->q; if (unlikely(blk_should_fake_timeout(q))) return; - if (!blk_mark_rq_complete(rq)) + if (!blk_mark_rq_complete(rq)) { + rq->errors = error; __blk_mq_complete_request(rq); + } } EXPORT_SYMBOL(blk_mq_complete_request); @@ -616,10 +618,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, * If a request wasn't started before the queue was * marked dying, kill it here or it'll go unnoticed. */ - if (unlikely(blk_queue_dying(rq->q))) { - rq->errors = -EIO; - blk_mq_complete_request(rq); - } + if (unlikely(blk_queue_dying(rq->q))) + blk_mq_complete_request(rq, -EIO); return; } if (rq->cmd_flags & REQ_NO_TIMEOUT) @@ -641,24 +641,16 @@ static void blk_mq_rq_timer(unsigned long priv) .next = 0, .next_set = 0, }; - struct blk_mq_hw_ctx *hctx; int i; - queue_for_each_hw_ctx(q, hctx, i) { - /* - * If not software queues are currently mapped to this - * hardware queue, there's nothing to check - */ - if (!blk_mq_hw_queue_mapped(hctx)) - continue; - - blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data); - } + blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data); if (data.next_set) { data.next = blk_rq_timeout(round_jiffies_up(data.next)); mod_timer(&q->timeout, data.next); } else { + struct blk_mq_hw_ctx *hctx; + queue_for_each_hw_ctx(q, hctx, i) { /* the hctx may be unmapped, so check it here */ if (blk_mq_hw_queue_mapped(hctx)) @@ -1789,13 +1781,19 @@ static void blk_mq_init_cpu_queues(struct request_queue *q, } } -static void blk_mq_map_swqueue(struct request_queue *q) +static void blk_mq_map_swqueue(struct request_queue *q, + const struct cpumask *online_mask) { unsigned int i; struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx; struct blk_mq_tag_set *set = q->tag_set; + /* + * Avoid others reading imcomplete hctx->cpumask through sysfs + */ + mutex_lock(&q->sysfs_lock); + queue_for_each_hw_ctx(q, hctx, i) { cpumask_clear(hctx->cpumask); hctx->nr_ctx = 0; @@ -1806,16 +1804,17 @@ static void blk_mq_map_swqueue(struct request_queue *q) */ queue_for_each_ctx(q, ctx, i) { /* If the cpu isn't online, the cpu is mapped to first hctx */ - if (!cpu_online(i)) + if (!cpumask_test_cpu(i, online_mask)) continue; hctx = q->mq_ops->map_queue(q, i); cpumask_set_cpu(i, hctx->cpumask); - cpumask_set_cpu(i, hctx->tags->cpumask); ctx->index_hw = hctx->nr_ctx; hctx->ctxs[hctx->nr_ctx++] = ctx; } + mutex_unlock(&q->sysfs_lock); + queue_for_each_hw_ctx(q, hctx, i) { struct blk_mq_ctxmap *map = &hctx->ctx_map; @@ -1851,6 +1850,14 @@ static void blk_mq_map_swqueue(struct request_queue *q) hctx->next_cpu = cpumask_first(hctx->cpumask); hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; } + + queue_for_each_ctx(q, ctx, i) { + if (!cpumask_test_cpu(i, online_mask)) + continue; + + hctx = q->mq_ops->map_queue(q, i); + cpumask_set_cpu(i, hctx->tags->cpumask); + } } static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set) @@ -1918,6 +1925,9 @@ void blk_mq_release(struct request_queue *q) kfree(hctx); } + kfree(q->mq_map); + q->mq_map = NULL; + kfree(q->queue_hw_ctx); /* ctx kobj stays in queue_ctx */ @@ -2027,13 +2037,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, if (blk_mq_init_hw_queues(q, set)) goto err_hctxs; + get_online_cpus(); mutex_lock(&all_q_mutex); - list_add_tail(&q->all_q_node, &all_q_list); - mutex_unlock(&all_q_mutex); + list_add_tail(&q->all_q_node, &all_q_list); blk_mq_add_queue_tag_set(set, q); + 
blk_mq_map_swqueue(q, cpu_online_mask); - blk_mq_map_swqueue(q); + mutex_unlock(&all_q_mutex); + put_online_cpus(); return q; @@ -2057,30 +2069,27 @@ void blk_mq_free_queue(struct request_queue *q) { struct blk_mq_tag_set *set = q->tag_set; + mutex_lock(&all_q_mutex); + list_del_init(&q->all_q_node); + mutex_unlock(&all_q_mutex); + blk_mq_del_queue_tag_set(q); blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); blk_mq_free_hw_queues(q, set); percpu_ref_exit(&q->mq_usage_counter); - - kfree(q->mq_map); - - q->mq_map = NULL; - - mutex_lock(&all_q_mutex); - list_del_init(&q->all_q_node); - mutex_unlock(&all_q_mutex); } /* Basically redo blk_mq_init_queue with queue frozen */ -static void blk_mq_queue_reinit(struct request_queue *q) +static void blk_mq_queue_reinit(struct request_queue *q, + const struct cpumask *online_mask) { WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth)); blk_mq_sysfs_unregister(q); - blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues); + blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask); /* * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe @@ -2088,7 +2097,7 @@ static void blk_mq_queue_reinit(struct request_queue *q) * involves free and re-allocate memory, worthy doing?) */ - blk_mq_map_swqueue(q); + blk_mq_map_swqueue(q, online_mask); blk_mq_sysfs_register(q); } @@ -2097,16 +2106,43 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, unsigned long action, void *hcpu) { struct request_queue *q; + int cpu = (unsigned long)hcpu; + /* + * New online cpumask which is going to be set in this hotplug event. + * Declare this cpumasks as global as cpu-hotplug operation is invoked + * one-by-one and dynamically allocating this could result in a failure. + */ + static struct cpumask online_new; /* - * Before new mappings are established, hotadded cpu might already - * start handling requests. This doesn't break anything as we map - * offline CPUs to first hardware queue. We will re-init the queue - * below to get optimal settings. + * Before hotadded cpu starts handling requests, new mappings must + * be established. Otherwise, these requests in hw queue might + * never be dispatched. + * + * For example, there is a single hw queue (hctx) and two CPU queues + * (ctx0 for CPU0, and ctx1 for CPU1). + * + * Now CPU1 is just onlined and a request is inserted into + * ctx1->rq_list and set bit0 in pending bitmap as ctx1->index_hw is + * still zero. + * + * And then while running hw queue, flush_busy_ctxs() finds bit0 is + * set in pending bitmap and tries to retrieve requests in + * hctx->ctxs[0]->rq_list. But htx->ctxs[0] is a pointer to ctx0, + * so the request in ctx1->rq_list is ignored. 
*/ - if (action != CPU_DEAD && action != CPU_DEAD_FROZEN && - action != CPU_ONLINE && action != CPU_ONLINE_FROZEN) + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_DEAD: + case CPU_UP_CANCELED: + cpumask_copy(&online_new, cpu_online_mask); + break; + case CPU_UP_PREPARE: + cpumask_copy(&online_new, cpu_online_mask); + cpumask_set_cpu(cpu, &online_new); + break; + default: return NOTIFY_OK; + } mutex_lock(&all_q_mutex); @@ -2130,7 +2166,7 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, } list_for_each_entry(q, &all_q_list, all_q_node) - blk_mq_queue_reinit(q); + blk_mq_queue_reinit(q, &online_new); list_for_each_entry(q, &all_q_list, all_q_node) blk_mq_unfreeze_queue(q); diff --git a/block/blk-mq.h b/block/blk-mq.h index 6a48c4c0d8a2..f4fea7964910 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -51,7 +51,8 @@ void blk_mq_disable_hotplug(void); * CPU -> queue mappings */ extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); -extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); +extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, + const struct cpumask *online_mask); extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int); /* diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index 6d88dd15c98d..197096632412 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -332,10 +332,6 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) srlen = cert->raw_serial_size; q = cert->raw_serial; } - if (srlen > 1 && *q == 0) { - srlen--; - q++; - } ret = -ENOMEM; desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 2614a839c60d..42c66b64c12c 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1044,8 +1044,10 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) goto err_exit; mutex_lock(&ec->mutex); + result = -ENODATA; list_for_each_entry(handler, &ec->list, node) { if (value == handler->query_bit) { + result = 0; q->handler = acpi_ec_get_query_handler(handler); ec_dbg_evt("Query(0x%02x) scheduled", q->handler->query_bit); diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 6da0f9beab19..c9336751e5e3 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -372,6 +372,7 @@ static int acpi_isa_register_gsi(struct pci_dev *dev) /* Interrupt Line values above 0xF are forbidden */ if (dev->irq > 0 && (dev->irq <= 0xF) && + acpi_isa_irq_available(dev->irq) && (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) { dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n", pin_name(dev->pin), dev->irq); diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index 3b4ea98e3ea0..7c8408b946ca 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -498,8 +498,7 @@ int __init acpi_irq_penalty_init(void) PIRQ_PENALTY_PCI_POSSIBLE; } } - /* Add a penalty for the SCI */ - acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING; + return 0; } @@ -553,6 +552,13 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) irq = link->irq.possible[i]; } } + if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) { + printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. " + "Try pci=noacpi or acpi=off\n", + acpi_device_name(link->device), + acpi_device_bid(link->device)); + return -ENODEV; + } /* Attempt to enable the link device at this IRQ. 
*/ if (acpi_pci_link_set(link, irq)) { @@ -821,6 +827,12 @@ void acpi_penalize_isa_irq(int irq, int active) } } +bool acpi_isa_irq_available(int irq) +{ + return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) || + acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS); +} + /* * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 28cd75c535b0..7ae7cd990fbf 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -892,10 +892,17 @@ static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev) u32 microvolt[3] = {0}; int count, ret; - count = of_property_count_u32_elems(opp->np, "opp-microvolt"); - if (!count) + /* Missing property isn't a problem, but an invalid entry is */ + if (!of_find_property(opp->np, "opp-microvolt", NULL)) return 0; + count = of_property_count_u32_elems(opp->np, "opp-microvolt"); + if (count < 0) { + dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n", + __func__, count); + return count; + } + /* There can be one or three elements here */ if (count != 1 && count != 3) { dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n", @@ -1063,7 +1070,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add); * share a common logic which is isolated here. * * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modifcation was done OR modification was + * copy operation, returns 0 if no modification was done OR modification was * successful. * * Locking: The internal device_opp and opp structures are RCU protected. @@ -1151,7 +1158,7 @@ unlock: * mutex locking or synchronize_rcu() blocking calls cannot be used. * * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modifcation was done OR modification was + * copy operation, returns 0 if no modification was done OR modification was * successful. */ int dev_pm_opp_enable(struct device *dev, unsigned long freq) @@ -1177,7 +1184,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable); * mutex locking or synchronize_rcu() blocking calls cannot be used. * * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modifcation was done OR modification was + * copy operation, returns 0 if no modification was done OR modification was * successful. 
*/ int dev_pm_opp_disable(struct device *dev, unsigned long freq) diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index f42f2bac6466..4c55cfbad19e 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -32,8 +32,7 @@ static DEFINE_MUTEX(regmap_debugfs_early_lock); /* Calculate the length of a fixed format */ static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size) { - snprintf(buf, buf_size, "%x", max_val); - return strlen(buf); + return snprintf(NULL, 0, "%x", max_val); } static ssize_t regmap_name_read_file(struct file *file, @@ -432,7 +431,7 @@ static ssize_t regmap_access_read_file(struct file *file, /* If we're in the region the user is trying to read */ if (p >= *ppos) { /* ...but not beyond it */ - if (buf_pos >= count - 1 - tot_len) + if (buf_pos + tot_len + 1 >= count) break; /* Format the register */ diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f9889b6bc02c..674f800a3b57 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1486,17 +1486,16 @@ static void loop_handle_cmd(struct loop_cmd *cmd) { const bool write = cmd->rq->cmd_flags & REQ_WRITE; struct loop_device *lo = cmd->rq->q->queuedata; - int ret = -EIO; + int ret = 0; - if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) + if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { + ret = -EIO; goto failed; + } ret = do_req_filebacked(lo, cmd->rq); - failed: - if (ret) - cmd->rq->errors = -EIO; - blk_mq_complete_request(cmd->rq); + blk_mq_complete_request(cmd->rq, ret ? -EIO : 0); } static void loop_queue_write_work(struct work_struct *work) diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index a295b98c6bae..1c9e4fe5aa44 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -289,7 +289,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd) case NULL_IRQ_SOFTIRQ: switch (queue_mode) { case NULL_Q_MQ: - blk_mq_complete_request(cmd->rq); + blk_mq_complete_request(cmd->rq, cmd->rq->errors); break; case NULL_Q_RQ: blk_complete_request(cmd->rq); diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index b97fc3fe0916..6f04771f1019 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -618,16 +618,15 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, spin_unlock_irqrestore(req->q->queue_lock, flags); return; } + if (req->cmd_type == REQ_TYPE_DRV_PRIV) { if (cmd_rq->ctx == CMD_CTX_CANCELLED) - req->errors = -EINTR; - else - req->errors = status; + status = -EINTR; } else { - req->errors = nvme_error_status(status); + status = nvme_error_status(status); } - } else - req->errors = 0; + } + if (req->cmd_type == REQ_TYPE_DRV_PRIV) { u32 result = le32_to_cpup(&cqe->result); req->special = (void *)(uintptr_t)result; @@ -650,7 +649,7 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, } nvme_free_iod(nvmeq->dev, iod); - blk_mq_complete_request(req); + blk_mq_complete_request(req, status); } /* length is in bytes. gfp flags indicates whether we may sleep. 
*/ @@ -863,8 +862,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, if (ns && ns->ms && !blk_integrity_rq(req)) { if (!(ns->pi_type && ns->ms == 8) && req->cmd_type != REQ_TYPE_DRV_PRIV) { - req->errors = -EFAULT; - blk_mq_complete_request(req); + blk_mq_complete_request(req, -EFAULT); return BLK_MQ_RQ_QUEUE_OK; } } @@ -2439,6 +2437,22 @@ static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn) list_sort(NULL, &dev->namespaces, ns_cmp); } +static void nvme_set_irq_hints(struct nvme_dev *dev) +{ + struct nvme_queue *nvmeq; + int i; + + for (i = 0; i < dev->online_queues; i++) { + nvmeq = dev->queues[i]; + + if (!nvmeq->tags || !(*nvmeq->tags)) + continue; + + irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, + blk_mq_tags_cpumask(*nvmeq->tags)); + } +} + static void nvme_dev_scan(struct work_struct *work) { struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work); @@ -2450,6 +2464,7 @@ static void nvme_dev_scan(struct work_struct *work) return; nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn)); kfree(ctrl); + nvme_set_irq_hints(dev); } /* @@ -2953,22 +2968,6 @@ static const struct file_operations nvme_dev_fops = { .compat_ioctl = nvme_dev_ioctl, }; -static void nvme_set_irq_hints(struct nvme_dev *dev) -{ - struct nvme_queue *nvmeq; - int i; - - for (i = 0; i < dev->online_queues; i++) { - nvmeq = dev->queues[i]; - - if (!nvmeq->tags || !(*nvmeq->tags)) - continue; - - irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, - blk_mq_tags_cpumask(*nvmeq->tags)); - } -} - static int nvme_dev_start(struct nvme_dev *dev) { int result; @@ -3010,8 +3009,6 @@ static int nvme_dev_start(struct nvme_dev *dev) if (result) goto free_tags; - nvme_set_irq_hints(dev); - dev->event_limit = 1; return result; @@ -3062,7 +3059,6 @@ static int nvme_dev_resume(struct nvme_dev *dev) } else { nvme_unfreeze_queues(dev); nvme_dev_add(dev); - nvme_set_irq_hints(dev); } return 0; } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index e93899cc6f60..6ca35495a5be 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -144,7 +144,7 @@ static void virtblk_done(struct virtqueue *vq) do { virtqueue_disable_cb(vq); while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { - blk_mq_complete_request(vbr->req); + blk_mq_complete_request(vbr->req, vbr->req->errors); req_done = true; } if (unlikely(virtqueue_is_broken(vq))) diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index deb3f001791f..767657565de6 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -212,6 +212,9 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref, static int xen_blkif_disconnect(struct xen_blkif *blkif) { + struct pending_req *req, *n; + int i = 0, j; + if (blkif->xenblkd) { kthread_stop(blkif->xenblkd); wake_up(&blkif->shutdown_wq); @@ -238,13 +241,28 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) /* Remove all persistent grants and the cache of ballooned pages. 
*/ xen_blkbk_free_caches(blkif); + /* Check that there is no request in use */ + list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { + list_del(&req->free_list); + + for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) + kfree(req->segments[j]); + + for (j = 0; j < MAX_INDIRECT_PAGES; j++) + kfree(req->indirect_pages[j]); + + kfree(req); + i++; + } + + WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); + blkif->nr_ring_pages = 0; + return 0; } static void xen_blkif_free(struct xen_blkif *blkif) { - struct pending_req *req, *n; - int i = 0, j; xen_blkif_disconnect(blkif); xen_vbd_free(&blkif->vbd); @@ -257,22 +275,6 @@ static void xen_blkif_free(struct xen_blkif *blkif) BUG_ON(!list_empty(&blkif->free_pages)); BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); - /* Check that there is no request in use */ - list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { - list_del(&req->free_list); - - for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) - kfree(req->segments[j]); - - for (j = 0; j < MAX_INDIRECT_PAGES; j++) - kfree(req->indirect_pages[j]); - - kfree(req); - i++; - } - - WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); - kmem_cache_free(xen_blkif_cachep, blkif); } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 0823a96902f8..611170896b8c 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1142,6 +1142,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; + int error; spin_lock_irqsave(&info->io_lock, flags); @@ -1182,37 +1183,37 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) continue; } - req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; + error = (bret->status == BLKIF_RSP_OKAY) ? 
0 : -EIO; switch (bret->operation) { case BLKIF_OP_DISCARD: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { struct request_queue *rq = info->rq; printk(KERN_WARNING "blkfront: %s: %s op failed\n", info->gd->disk_name, op_name(bret->operation)); - req->errors = -EOPNOTSUPP; + error = -EOPNOTSUPP; info->feature_discard = 0; info->feature_secdiscard = 0; queue_flag_clear(QUEUE_FLAG_DISCARD, rq); queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq); } - blk_mq_complete_request(req); + blk_mq_complete_request(req, error); break; case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_WRITE_BARRIER: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { printk(KERN_WARNING "blkfront: %s: %s op failed\n", info->gd->disk_name, op_name(bret->operation)); - req->errors = -EOPNOTSUPP; + error = -EOPNOTSUPP; } if (unlikely(bret->status == BLKIF_RSP_ERROR && info->shadow[id].req.u.rw.nr_segments == 0)) { printk(KERN_WARNING "blkfront: %s: empty %s op failed\n", info->gd->disk_name, op_name(bret->operation)); - req->errors = -EOPNOTSUPP; + error = -EOPNOTSUPP; } - if (unlikely(req->errors)) { - if (req->errors == -EOPNOTSUPP) - req->errors = 0; + if (unlikely(error)) { + if (error == -EOPNOTSUPP) + error = 0; info->feature_flush = 0; xlvbd_flush(info); } @@ -1223,7 +1224,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " "request: %x\n", bret->status); - blk_mq_complete_request(req); + blk_mq_complete_request(req, error); break; default: BUG(); diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 1a82f3a17681..0ebca8ba7bc4 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -36,7 +36,6 @@ config ARM_CCI400_PORT_CTRL config ARM_CCI500_PMU bool "ARM CCI500 PMU support" - default y depends on (ARM && CPU_V7) || ARM64 depends on PERF_EVENTS select ARM_CCI_PMU diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c index 7c1e1f58e2da..2fe37f708dc7 100644 --- a/drivers/clk/samsung/clk-cpu.c +++ b/drivers/clk/samsung/clk-cpu.c @@ -164,7 +164,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, * the values for DIV_COPY and DIV_HPM dividers need not be set. */ div0 = cfg_data->div0; - if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) { + if (cpuclk->flags & CLK_CPU_HAS_DIV1) { div1 = cfg_data->div1; if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK) div1 = readl(base + E4210_DIV_CPU1) & @@ -185,7 +185,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1; WARN_ON(alt_div >= MAX_DIV); - if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { + if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) { /* * In Exynos4210, ATB clock parent is also mout_core. So * ATB clock also needs to be mantained at safe speed. 
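
(A side note on the clk-cpu.c hunks in this file: the CLK_CPU_* flags appear to be bit masks rather than bit numbers, so test_bit(), which takes a bit number, would probe the wrong bit; the plain "flags & MASK" test the hunks switch to is the intended check. A small userspace sketch of the pitfall; the flag value and the test_bit() stand-in here are assumptions for illustration, not the kernel definitions:)

	#include <stdio.h>

	#define CLK_CPU_NEEDS_DEBUG_ALT_DIV (1 << 1)   /* assumed mask value for illustration */

	/* stand-in for the kernel's test_bit(nr, addr): tests bit *number* nr */
	static int test_bit_like(unsigned int nr, const unsigned long *addr)
	{
		return (*addr >> nr) & 1;
	}

	int main(void)
	{
		unsigned long flags = CLK_CPU_NEEDS_DEBUG_ALT_DIV;   /* only bit 1 set */

		/* passing the mask (2) as a bit number probes bit 2: wrong answer */
		printf("test_bit(mask, &flags) = %d\n",
		       test_bit_like(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &flags));   /* prints 0 */

		/* plain mask test, as used after this change */
		printf("flags & mask           = %d\n",
		       !!(flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV));              /* prints 1 */
		return 0;
	}
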
@@ -206,7 +206,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, writel(div0, base + E4210_DIV_CPU0); wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL); - if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) { + if (cpuclk->flags & CLK_CPU_HAS_DIV1) { writel(div1, base + E4210_DIV_CPU1); wait_until_divider_stable(base + E4210_DIV_STAT_CPU1, DIV_MASK_ALL); @@ -225,7 +225,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata, unsigned long mux_reg; /* find out the divider values to use for clock data */ - if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { + if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) { while ((cfg_data->prate * 1000) != ndata->new_rate) { if (cfg_data->prate == 0) return -EINVAL; @@ -240,7 +240,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata, writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU); wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1); - if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { + if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) { div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK); div_mask |= E4210_DIV0_ATB_MASK; } diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c index 676ee8f6d813..8831e1a05367 100644 --- a/drivers/clk/ti/clk-3xxx.c +++ b/drivers/clk/ti/clk-3xxx.c @@ -374,7 +374,6 @@ static struct ti_dt_clk omap3xxx_clks[] = { DT_CLK(NULL, "gpio2_ick", "gpio2_ick"), DT_CLK(NULL, "wdt3_ick", "wdt3_ick"), DT_CLK(NULL, "uart3_ick", "uart3_ick"), - DT_CLK(NULL, "uart4_ick", "uart4_ick"), DT_CLK(NULL, "gpt9_ick", "gpt9_ick"), DT_CLK(NULL, "gpt8_ick", "gpt8_ick"), DT_CLK(NULL, "gpt7_ick", "gpt7_ick"), @@ -519,6 +518,7 @@ static struct ti_dt_clk am35xx_clks[] = { static struct ti_dt_clk omap36xx_clks[] = { DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"), DT_CLK(NULL, "uart4_fck", "uart4_fck"), + DT_CLK(NULL, "uart4_ick", "uart4_ick"), { .node_name = NULL }, }; diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c index 9b5b289e6334..a911d7de3377 100644 --- a/drivers/clk/ti/clk-7xx.c +++ b/drivers/clk/ti/clk-7xx.c @@ -18,7 +18,6 @@ #include "clock.h" -#define DRA7_DPLL_ABE_DEFFREQ 180633600 #define DRA7_DPLL_GMAC_DEFFREQ 1000000000 #define DRA7_DPLL_USB_DEFFREQ 960000000 @@ -313,27 +312,12 @@ static struct ti_dt_clk dra7xx_clks[] = { int __init dra7xx_dt_clk_init(void) { int rc; - struct clk *abe_dpll_mux, *sys_clkin2, *dpll_ck, *hdcp_ck; + struct clk *dpll_ck, *hdcp_ck; ti_dt_clocks_register(dra7xx_clks); omap2_clk_disable_autoidle_all(); - abe_dpll_mux = clk_get_sys(NULL, "abe_dpll_sys_clk_mux"); - sys_clkin2 = clk_get_sys(NULL, "sys_clkin2"); - dpll_ck = clk_get_sys(NULL, "dpll_abe_ck"); - - rc = clk_set_parent(abe_dpll_mux, sys_clkin2); - if (!rc) - rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ); - if (rc) - pr_err("%s: failed to configure ABE DPLL!\n", __func__); - - dpll_ck = clk_get_sys(NULL, "dpll_abe_m2x2_ck"); - rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ * 2); - if (rc) - pr_err("%s: failed to configure ABE DPLL m2x2!\n", __func__); - dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck"); rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ); if (rc) diff --git a/drivers/clk/ti/clkt_dflt.c b/drivers/clk/ti/clkt_dflt.c index 90d7d8a21c49..1ddc288fce4e 100644 --- a/drivers/clk/ti/clkt_dflt.c +++ b/drivers/clk/ti/clkt_dflt.c @@ -222,7 +222,7 @@ int omap2_dflt_clk_enable(struct clk_hw *hw) } } - if (unlikely(!clk->enable_reg)) { + if (unlikely(IS_ERR(clk->enable_reg))) { pr_err("%s: %s missing 
enable_reg\n", __func__, clk_hw_get_name(hw)); ret = -EINVAL; @@ -264,7 +264,7 @@ void omap2_dflt_clk_disable(struct clk_hw *hw) u32 v; clk = to_clk_hw_omap(hw); - if (!clk->enable_reg) { + if (IS_ERR(clk->enable_reg)) { /* * 'independent' here refers to a clock which is not * controlled by its parent. diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c index bb2c2b050964..d3c1742ded1a 100644 --- a/drivers/clocksource/rockchip_timer.c +++ b/drivers/clocksource/rockchip_timer.c @@ -148,7 +148,7 @@ static void __init rk_timer_init(struct device_node *np) bc_timer.freq = clk_get_rate(timer_clk); irq = irq_of_parse_and_map(np, 0); - if (irq == NO_IRQ) { + if (!irq) { pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME); return; } diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c index edacf3902e10..1cea08cf603e 100644 --- a/drivers/clocksource/timer-keystone.c +++ b/drivers/clocksource/timer-keystone.c @@ -152,7 +152,7 @@ static void __init keystone_timer_init(struct device_node *np) int irq, error; irq = irq_of_parse_and_map(np, 0); - if (irq == NO_IRQ) { + if (!irq) { pr_err("%s: failed to map interrupts\n", __func__); return; } diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 798277227de7..cec1ee2d2f74 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -149,6 +149,9 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf) { struct acpi_cpufreq_data *data = policy->driver_data; + if (unlikely(!data)) + return -ENODEV; + return cpufreq_show_cpus(data->freqdomain_cpus, buf); } diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index ef5ed9470de9..25c4c15103a0 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1436,8 +1436,10 @@ static void cpufreq_offline_finish(unsigned int cpu) * since this is a core component, and is essential for the * subsequent light-weight ->init() to succeed. 
*/ - if (cpufreq_driver->exit) + if (cpufreq_driver->exit) { cpufreq_driver->exit(policy); + policy->freq_table = NULL; + } } /** diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 3927ed9fdbd5..ca848cc6a8fd 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -492,7 +492,7 @@ struct devfreq *devfreq_add_device(struct device *dev, if (err) { put_device(&devfreq->dev); mutex_unlock(&devfreq->lock); - goto err_dev; + goto err_out; } mutex_unlock(&devfreq->lock); @@ -518,7 +518,6 @@ struct devfreq *devfreq_add_device(struct device *dev, err_init: list_del(&devfreq->node); device_unregister(&devfreq->dev); -err_dev: kfree(devfreq); err_out: return ERR_PTR(err); @@ -795,8 +794,10 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr, ret = PTR_ERR(governor); goto out; } - if (df->governor == governor) + if (df->governor == governor) { + ret = 0; goto out; + } if (df->governor) { ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL); diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index a165b4bfd330..dd24375b76dd 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -455,6 +455,15 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan, return desc; } +void at_xdmac_init_used_desc(struct at_xdmac_desc *desc) +{ + memset(&desc->lld, 0, sizeof(desc->lld)); + INIT_LIST_HEAD(&desc->descs_list); + desc->direction = DMA_TRANS_NONE; + desc->xfer_size = 0; + desc->active_xfer = false; +} + /* Call must be protected by lock. */ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) { @@ -466,7 +475,7 @@ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) desc = list_first_entry(&atchan->free_descs_list, struct at_xdmac_desc, desc_node); list_del(&desc->desc_node); - desc->active_xfer = false; + at_xdmac_init_used_desc(desc); } return desc; @@ -875,14 +884,14 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan, if (xt->src_inc) { if (xt->src_sgl) - chan_cc |= AT_XDMAC_CC_SAM_UBS_DS_AM; + chan_cc |= AT_XDMAC_CC_SAM_UBS_AM; else chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM; } if (xt->dst_inc) { if (xt->dst_sgl) - chan_cc |= AT_XDMAC_CC_DAM_UBS_DS_AM; + chan_cc |= AT_XDMAC_CC_DAM_UBS_AM; else chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM; } diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 3ff284c8e3d5..09479d4be4db 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -554,10 +554,18 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) mutex_lock(&dma_list_mutex); if (chan->client_count == 0) { + struct dma_device *device = chan->device; + + dma_cap_set(DMA_PRIVATE, device->cap_mask); + device->privatecnt++; err = dma_chan_get(chan); - if (err) + if (err) { pr_debug("%s: failed to get %s: (%d)\n", __func__, dma_chan_name(chan), err); + chan = NULL; + if (--device->privatecnt == 0) + dma_cap_clear(DMA_PRIVATE, device->cap_mask); + } } else chan = NULL; diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index cf1c87fa1edd..bedce038c6e2 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) INIT_LIST_HEAD(&dw->dma.channels); for (i = 0; i < nr_channels; i++) { struct dw_dma_chan *dwc = &dw->chan[i]; - int r = nr_channels - i - 1; dwc->chan.device = &dw->dma; dma_cookie_init(&dwc->chan); @@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data 
*pdata) /* 7 is highest priority & 0 is lowest. */ if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) - dwc->priority = r; + dwc->priority = nr_channels - i - 1; else dwc->priority = i; @@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) /* Hardware configuration */ if (autocfg) { unsigned int dwc_params; + unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; void __iomem *addr = chip->regs + r * sizeof(u32); dwc_params = dma_read_byaddr(addr, DWC_PARAMS); diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 18c14e1f1414..48d6d9e94f67 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -355,23 +355,23 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c) struct idma64_desc *desc = idma64c->desc; struct idma64_hw_desc *hw; size_t bytes = desc->length; - u64 llp; - u32 ctlhi; + u64 llp = channel_readq(idma64c, LLP); + u32 ctlhi = channel_readl(idma64c, CTL_HI); unsigned int i = 0; - llp = channel_readq(idma64c, LLP); do { hw = &desc->hw[i]; - } while ((hw->llp != llp) && (++i < desc->ndesc)); + if (hw->llp == llp) + break; + bytes -= hw->len; + } while (++i < desc->ndesc); if (!i) return bytes; - do { - bytes -= desc->hw[--i].len; - } while (i); + /* The current chunk is not fully transfered yet */ + bytes += desc->hw[--i].len; - ctlhi = channel_readl(idma64c, CTL_HI); return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi); } diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 5cb61ce01036..fc4156afa070 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -473,8 +473,10 @@ static void pxad_free_phy(struct pxad_chan *chan) return; /* clear the channel mapping in DRCMR */ - reg = pxad_drcmr(chan->drcmr); - writel_relaxed(0, chan->phy->base + reg); + if (chan->drcmr <= DRCMR_CHLNUM) { + reg = pxad_drcmr(chan->drcmr); + writel_relaxed(0, chan->phy->base + reg); + } spin_lock_irqsave(&pdev->phy_lock, flags); for (i = 0; i < 32; i++) @@ -516,8 +518,10 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned) "%s(); phy=%p(%d) misaligned=%d\n", __func__, phy, phy->idx, misaligned); - reg = pxad_drcmr(phy->vchan->drcmr); - writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); + if (phy->vchan->drcmr <= DRCMR_CHLNUM) { + reg = pxad_drcmr(phy->vchan->drcmr); + writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); + } dalgn = phy_readl_relaxed(phy, DALGN); if (misaligned) @@ -887,6 +891,7 @@ pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd, struct dma_async_tx_descriptor *tx; struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc); + INIT_LIST_HEAD(&vd->node); tx = vchan_tx_prep(vc, vd, tx_flags); tx->tx_submit = pxad_tx_submit; dev_dbg(&chan->vc.chan.dev->device, @@ -910,14 +915,18 @@ static void pxad_get_config(struct pxad_chan *chan, width = chan->cfg.src_addr_width; dev_addr = chan->cfg.src_addr; *dev_src = dev_addr; - *dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC; + *dcmd |= PXA_DCMD_INCTRGADDR; + if (chan->drcmr <= DRCMR_CHLNUM) + *dcmd |= PXA_DCMD_FLOWSRC; } if (dir == DMA_MEM_TO_DEV) { maxburst = chan->cfg.dst_maxburst; width = chan->cfg.dst_addr_width; dev_addr = chan->cfg.dst_addr; *dev_dst = dev_addr; - *dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG; + *dcmd |= PXA_DCMD_INCSRCADDR; + if (chan->drcmr <= DRCMR_CHLNUM) + *dcmd |= PXA_DCMD_FLOWTRG; } if (dir == DMA_MEM_TO_MEM) *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR | @@ -1177,6 +1186,16 @@ static unsigned int pxad_residue(struct pxad_chan *chan, else curr = phy_readl_relaxed(chan->phy, DTADR); + /* 
+ * curr has to be actually read before checking descriptor + * completion, so that a curr inside a status updater + * descriptor implies the following test returns true, and + * preventing reordering of curr load and the test. + */ + rmb(); + if (is_desc_completed(vd)) + goto out; + for (i = 0; i < sw_desc->nb_desc - 1; i++) { hw_desc = sw_desc->hw_desc[i]; if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR) diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c index a1a500d96ff2..1661d518224a 100644 --- a/drivers/dma/sun4i-dma.c +++ b/drivers/dma/sun4i-dma.c @@ -599,13 +599,13 @@ get_next_cyclic_promise(struct sun4i_dma_contract *contract) static void sun4i_dma_free_contract(struct virt_dma_desc *vd) { struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd); - struct sun4i_dma_promise *promise; + struct sun4i_dma_promise *promise, *tmp; /* Free all the demands and completed demands */ - list_for_each_entry(promise, &contract->demands, list) + list_for_each_entry_safe(promise, tmp, &contract->demands, list) kfree(promise); - list_for_each_entry(promise, &contract->completed_demands, list) + list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list) kfree(promise); kfree(contract); diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index b23e8d52d126..8d57b1b12e41 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -59,7 +59,6 @@ #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070 #define XGENE_DMA_RING_BLK_MEM_RDY 0xD074 #define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF -#define XGENE_DMA_RING_DESC_CNT(v) (((v) & 0x0001FFFE) >> 1) #define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num)) #define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v)) #define XGENE_DMA_RING_CMD_OFFSET 0x2C @@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt) return flyby_type[src_cnt]; } -static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring) -{ - u32 __iomem *cmd_base = ring->cmd_base; - u32 ring_state = ioread32(&cmd_base[1]); - - return XGENE_DMA_RING_DESC_CNT(ring_state); -} - static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len, dma_addr_t *paddr) { @@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan, dma_pool_free(chan->desc_pool, desc, desc->tx.phys); } -static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, - struct xgene_dma_desc_sw *desc_sw) +static void xgene_chan_xfer_request(struct xgene_dma_chan *chan, + struct xgene_dma_desc_sw *desc_sw) { + struct xgene_dma_ring *ring = &chan->tx_ring; struct xgene_dma_desc_hw *desc_hw; - /* Check if can push more descriptor to hw for execution */ - if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2)) - return -EBUSY; - /* Get hw descriptor from DMA tx ring */ desc_hw = &ring->desc_hw[ring->head]; @@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw)); } + /* Increment the pending transaction count */ + chan->pending += ((desc_sw->flags & + XGENE_DMA_FLAG_64B_DESC) ? 2 : 1); + /* Notify the hw that we have descriptor ready for execution */ iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ? 
2 : 1, ring->cmd); - - return 0; } /** @@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) { struct xgene_dma_desc_sw *desc_sw, *_desc_sw; - int ret; /* * If the list of pending descriptors is empty, then we @@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) if (chan->pending >= chan->max_outstanding) return; - ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw); - if (ret) - return; + xgene_chan_xfer_request(chan, desc_sw); /* * Delete this element from ld pending queue and append it to * ld running queue */ list_move_tail(&desc_sw->node, &chan->ld_running); - - /* Increment the pending transaction count */ - chan->pending++; } } @@ -821,7 +805,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan) * Decrement the pending transaction count * as we have processed one */ - chan->pending--; + chan->pending -= ((desc_sw->flags & + XGENE_DMA_FLAG_64B_DESC) ? 2 : 1); /* * Delete this node from ld running queue and append it to @@ -1421,15 +1406,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan, struct xgene_dma_ring *ring, enum xgene_dma_ring_cfgsize cfgsize) { + int ret; + /* Setup DMA ring descriptor variables */ ring->pdma = chan->pdma; ring->cfgsize = cfgsize; ring->num = chan->pdma->ring_num++; ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num); - ring->size = xgene_dma_get_ring_size(chan, cfgsize); - if (ring->size <= 0) - return ring->size; + ret = xgene_dma_get_ring_size(chan, cfgsize); + if (ret <= 0) + return ret; + ring->size = ret; /* Allocate memory for DMA ring descriptor */ ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, @@ -1482,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan) tx_ring->id, tx_ring->num, tx_ring->desc_vaddr); /* Set the max outstanding request possible to this channel */ - chan->max_outstanding = rx_ring->slots; + chan->max_outstanding = tx_ring->slots; return ret; } diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c index 39915a6b7986..c017fcd8e07c 100644 --- a/drivers/dma/zx296702_dma.c +++ b/drivers/dma/zx296702_dma.c @@ -739,7 +739,7 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec, struct dma_chan *chan; struct zx_dma_chan *c; - if (request > d->dma_requests) + if (request >= d->dma_requests) return NULL; chan = dma_get_any_slave_channel(&d->slave); diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index e29560e6b40b..950c87f5d279 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -13,6 +13,7 @@ */ #include <linux/efi.h> +#include <linux/sort.h> #include <asm/efi.h> #include "efistub.h" @@ -305,6 +306,44 @@ fail: */ #define EFI_RT_VIRTUAL_BASE 0x40000000 +static int cmp_mem_desc(const void *l, const void *r) +{ + const efi_memory_desc_t *left = l, *right = r; + + return (left->phys_addr > right->phys_addr) ? 1 : -1; +} + +/* + * Returns whether region @left ends exactly where region @right starts, + * or false if either argument is NULL. 
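+ *
+ * For example (illustrative numbers): with 4 KiB EFI pages, a region at
+ * 0x40000000 covering 0x200 pages ends at 0x40200000, so it is adjacent
+ * to a region whose phys_addr is 0x40200000.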
+ */ +static bool regions_are_adjacent(efi_memory_desc_t *left, + efi_memory_desc_t *right) +{ + u64 left_end; + + if (left == NULL || right == NULL) + return false; + + left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE; + + return left_end == right->phys_addr; +} + +/* + * Returns whether region @left and region @right have compatible memory type + * mapping attributes, and are both EFI_MEMORY_RUNTIME regions. + */ +static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left, + efi_memory_desc_t *right) +{ + static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT | + EFI_MEMORY_WC | EFI_MEMORY_UC | + EFI_MEMORY_RUNTIME; + + return ((left->attribute ^ right->attribute) & mem_type_mask) == 0; +} + /* * efi_get_virtmap() - create a virtual mapping for the EFI memory map * @@ -317,33 +356,52 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, int *count) { u64 efi_virt_base = EFI_RT_VIRTUAL_BASE; - efi_memory_desc_t *out = runtime_map; + efi_memory_desc_t *in, *prev = NULL, *out = runtime_map; int l; - for (l = 0; l < map_size; l += desc_size) { - efi_memory_desc_t *in = (void *)memory_map + l; + /* + * To work around potential issues with the Properties Table feature + * introduced in UEFI 2.5, which may split PE/COFF executable images + * in memory into several RuntimeServicesCode and RuntimeServicesData + * regions, we need to preserve the relative offsets between adjacent + * EFI_MEMORY_RUNTIME regions with the same memory type attributes. + * The easiest way to find adjacent regions is to sort the memory map + * before traversing it. + */ + sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL); + + for (l = 0; l < map_size; l += desc_size, prev = in) { u64 paddr, size; + in = (void *)memory_map + l; if (!(in->attribute & EFI_MEMORY_RUNTIME)) continue; + paddr = in->phys_addr; + size = in->num_pages * EFI_PAGE_SIZE; + /* * Make the mapping compatible with 64k pages: this allows * a 4k page size kernel to kexec a 64k page size kernel and * vice versa. */ - paddr = round_down(in->phys_addr, SZ_64K); - size = round_up(in->num_pages * EFI_PAGE_SIZE + - in->phys_addr - paddr, SZ_64K); - - /* - * Avoid wasting memory on PTEs by choosing a virtual base that - * is compatible with section mappings if this region has the - * appropriate size and physical alignment. (Sections are 2 MB - * on 4k granule kernels) - */ - if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M) - efi_virt_base = round_up(efi_virt_base, SZ_2M); + if (!regions_are_adjacent(prev, in) || + !regions_have_compatible_memory_type_attrs(prev, in)) { + + paddr = round_down(in->phys_addr, SZ_64K); + size += in->phys_addr - paddr; + + /* + * Avoid wasting memory on PTEs by choosing a virtual + * base that is compatible with section mappings if this + * region has the appropriate size and physical + * alignment. 
(Sections are 2 MB on 4k granule kernels) + */ + if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M) + efi_virt_base = round_up(efi_virt_base, SZ_2M); + else + efi_virt_base = round_up(efi_virt_base, SZ_64K); + } in->virt_addr = efi_virt_base + in->phys_addr - paddr; efi_virt_base += size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 1c3fc99c5465..8e995148f56e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -208,44 +208,6 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device, return ret; } -static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd, - cgs_handle_t *handle) -{ - CGS_FUNC_ADEV; - int r; - uint32_t dma_handle; - struct drm_gem_object *obj; - struct amdgpu_bo *bo; - struct drm_device *dev = adev->ddev; - struct drm_file *file_priv = NULL, *priv; - - mutex_lock(&dev->struct_mutex); - list_for_each_entry(priv, &dev->filelist, lhead) { - rcu_read_lock(); - if (priv->pid == get_pid(task_pid(current))) - file_priv = priv; - rcu_read_unlock(); - if (file_priv) - break; - } - mutex_unlock(&dev->struct_mutex); - r = dev->driver->prime_fd_to_handle(dev, - file_priv, dmabuf_fd, - &dma_handle); - spin_lock(&file_priv->table_lock); - - /* Check if we currently have a reference on the object */ - obj = idr_find(&file_priv->object_idr, dma_handle); - if (obj == NULL) { - spin_unlock(&file_priv->table_lock); - return -EINVAL; - } - spin_unlock(&file_priv->table_lock); - bo = gem_to_amdgpu_bo(obj); - *handle = (cgs_handle_t)bo; - return 0; -} - static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle) { struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; @@ -810,7 +772,6 @@ static const struct cgs_ops amdgpu_cgs_ops = { }; static const struct cgs_os_ops amdgpu_cgs_os_ops = { - amdgpu_cgs_import_gpu_mem, amdgpu_cgs_add_irq_source, amdgpu_cgs_irq_get, amdgpu_cgs_irq_put diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 749420f1ea6f..cb3c274edb0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -156,7 +156,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) uint64_t *chunk_array_user; uint64_t *chunk_array; struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - unsigned size, i; + unsigned size; + int i; int ret; if (cs->in.num_chunks == 0) diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index cd6edc40c9cd..1e0bba29e167 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -1279,8 +1279,7 @@ amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action) amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); } if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - amdgpu_atombios_encoder_setup_dig_transmitter(encoder, - ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); + amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level); if (ext_encoder) amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 774528ab8704..fab5471d25d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1262,6 +1262,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, addr = 
RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); + /* reset addr and status */ + WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); + + if (!addr && !status) + return 0; + dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", entry->src_id, entry->src_data); dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", @@ -1269,8 +1275,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client); - /* reset addr and status */ - WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 9a07742620d0..7bc9e9fcf3d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1262,6 +1262,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); + /* reset addr and status */ + WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); + + if (!addr && !status) + return 0; + dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", entry->src_id, entry->src_data); dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", @@ -1269,8 +1275,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client); - /* reset addr and status */ - WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); return 0; } diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h index 488642f08267..3b47ae313e36 100644 --- a/drivers/gpu/drm/amd/include/cgs_linux.h +++ b/drivers/gpu/drm/amd/include/cgs_linux.h @@ -27,19 +27,6 @@ #include "cgs_common.h" /** - * cgs_import_gpu_mem() - Import dmabuf handle - * @cgs_device: opaque device handle - * @dmabuf_fd: DMABuf file descriptor - * @handle: memory handle (output) - * - * Must be called in the process context that dmabuf_fd belongs to. 
- * - * Return: 0 on success, -errno otherwise - */ -typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd, - cgs_handle_t *handle); - -/** * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources * @private_data: private data provided to cgs_add_irq_source * @src_id: interrupt source ID @@ -114,16 +101,12 @@ typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type); typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type); struct cgs_os_ops { - cgs_import_gpu_mem_t import_gpu_mem; - /* IRQ handling */ cgs_add_irq_source_t add_irq_source; cgs_irq_get_t irq_get; cgs_irq_put_t irq_put; }; -#define cgs_import_gpu_mem(dev,dmabuf_fd,handle) \ - CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle) #define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \ CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \ private_data) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index e23df5fd3836..bf27a07dbce3 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -53,8 +53,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int offset, int size, u8 *bytes); -static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_branch *mstb); +static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb); static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *port); @@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) struct drm_dp_mst_port *port, *tmp; bool wake_tx = false; - cancel_work_sync(&mstb->mgr->work); - /* * destroy all ports - don't need lock * as there are no more references to the mst branch @@ -863,29 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref) { struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); struct drm_dp_mst_topology_mgr *mgr = port->mgr; + if (!port->input) { port->vcpi.num_slots = 0; kfree(port->cached_edid); - /* we can't destroy the connector here, as - we might be holding the mode_config.mutex - from an EDID retrieval */ + /* + * The only time we don't have a connector + * on an output port is if the connector init + * fails. 
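+	 * In that case the port was never exposed to userspace, so it is
+	 * torn down synchronously below; only ports that did get a
+	 * connector are deferred to destroy_connector_work.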
+ */ if (port->connector) { + /* we can't destroy the connector here, as + * we might be holding the mode_config.mutex + * from an EDID retrieval */ + mutex_lock(&mgr->destroy_connector_lock); list_add(&port->next, &mgr->destroy_connector_list); mutex_unlock(&mgr->destroy_connector_lock); schedule_work(&mgr->destroy_connector_work); return; } + /* no need to clean up vcpi + * as if we have no connector we never setup a vcpi */ drm_dp_port_teardown_pdt(port, port->pdt); - - if (!port->input && port->vcpi.vcpi > 0) - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); } kfree(port); - - (*mgr->cbs->hotplug)(mgr); } static void drm_dp_put_port(struct drm_dp_mst_port *port) @@ -1027,8 +1029,8 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, } } -static void build_mst_prop_path(struct drm_dp_mst_port *port, - struct drm_dp_mst_branch *mstb, +static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, + int pnum, char *proppath, size_t proppath_size) { @@ -1041,7 +1043,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port, snprintf(temp, sizeof(temp), "-%d", port_num); strlcat(proppath, temp, proppath_size); } - snprintf(temp, sizeof(temp), "-%d", port->port_num); + snprintf(temp, sizeof(temp), "-%d", pnum); strlcat(proppath, temp, proppath_size); } @@ -1105,22 +1107,32 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, drm_dp_port_teardown_pdt(port, old_pdt); ret = drm_dp_port_setup_pdt(port); - if (ret == true) { + if (ret == true) drm_dp_send_link_address(mstb->mgr, port->mstb); - port->mstb->link_address_sent = true; - } } if (created && !port->input) { char proppath[255]; - build_mst_prop_path(port, mstb, proppath, sizeof(proppath)); - port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); - if (port->port_num >= 8) { + build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); + port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); + if (!port->connector) { + /* remove it from the port list */ + mutex_lock(&mstb->mgr->lock); + list_del(&port->next); + mutex_unlock(&mstb->mgr->lock); + /* drop port list reference */ + drm_dp_put_port(port); + goto out; + } + if (port->port_num >= DP_MST_LOGICAL_PORT_0) { port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); + drm_mode_connector_set_tile_property(port->connector); } + (*mstb->mgr->cbs->register_connector)(port->connector); } +out: /* put reference to this port */ drm_dp_put_port(port); } @@ -1202,10 +1214,9 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m { struct drm_dp_mst_port *port; struct drm_dp_mst_branch *mstb_child; - if (!mstb->link_address_sent) { + if (!mstb->link_address_sent) drm_dp_send_link_address(mgr, mstb); - mstb->link_address_sent = true; - } + list_for_each_entry(port, &mstb->ports, next) { if (port->input) continue; @@ -1458,8 +1469,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, mutex_unlock(&mgr->qlock); } -static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_branch *mstb) +static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb) { int len; struct drm_dp_sideband_msg_tx *txmsg; @@ -1467,11 +1478,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); if (!txmsg) - return -ENOMEM; + return; txmsg->dst = mstb; len = build_link_address(txmsg); + mstb->link_address_sent = true; 
drm_dp_queue_down_tx(mgr, txmsg); ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); @@ -1499,11 +1511,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, } (*mgr->cbs->hotplug)(mgr); } - } else + } else { + mstb->link_address_sent = false; DRM_DEBUG_KMS("link address failed %d\n", ret); + } kfree(txmsg); - return 0; } static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, @@ -1978,6 +1991,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, DP_MST_EN | DP_UPSTREAM_IS_SRC); mutex_unlock(&mgr->lock); + flush_work(&mgr->work); + flush_work(&mgr->destroy_connector_work); } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); @@ -2263,10 +2278,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ if (port->cached_edid) edid = drm_edid_duplicate(port->cached_edid); - else + else { edid = drm_get_edid(connector, &port->aux.ddc); - - drm_mode_connector_set_tile_property(connector); + drm_mode_connector_set_tile_property(connector); + } drm_dp_put_port(port); return edid; } @@ -2671,7 +2686,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) { struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); struct drm_dp_mst_port *port; - + bool send_hotplug = false; /* * Not a regular list traverse as we have to drop the destroy * connector lock before destroying the connector, to avoid AB->BA @@ -2694,7 +2709,10 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) if (!port->input && port->vcpi.vcpi > 0) drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); kfree(port); + send_hotplug = true; } + if (send_hotplug) + (*mgr->cbs->hotplug)(mgr); } /** @@ -2747,6 +2765,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); */ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) { + flush_work(&mgr->work); flush_work(&mgr->destroy_connector_work); mutex_lock(&mgr->payload_lock); kfree(mgr->payloads); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 418d299f3b12..ca08c472311b 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -345,7 +345,11 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper) struct drm_crtc *crtc = mode_set->crtc; int ret; - if (crtc->funcs->cursor_set) { + if (crtc->funcs->cursor_set2) { + ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); + if (ret) + error = true; + } else if (crtc->funcs->cursor_set) { ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0); if (ret) error = true; diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index d734780b31c0..a18164f2f6d2 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -94,7 +94,18 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector) } #define DRM_OUTPUT_POLL_PERIOD (10*HZ) -static void __drm_kms_helper_poll_enable(struct drm_device *dev) +/** + * drm_kms_helper_poll_enable_locked - re-enable output polling. + * @dev: drm_device + * + * This function re-enables the output polling work without + * locking the mode_config mutex. + * + * This is like drm_kms_helper_poll_enable() however it is to be + * called from a context where the mode_config mutex is locked + * already. 
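+ *
+ * A minimal caller sketch (not taken verbatim from this diff):
+ *
+ *	mutex_lock(&dev->mode_config.mutex);
+ *	... adjust hotplug/poll state ...
+ *	drm_kms_helper_poll_enable_locked(dev);
+ *	mutex_unlock(&dev->mode_config.mutex);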
+ */ +void drm_kms_helper_poll_enable_locked(struct drm_device *dev) { bool poll = false; struct drm_connector *connector; @@ -113,6 +124,8 @@ static void __drm_kms_helper_poll_enable(struct drm_device *dev) if (poll) schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); } +EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked); + static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector, uint32_t maxX, uint32_t maxY, bool merge_type_bits) @@ -174,7 +187,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect /* Re-enable polling in case the global poll config changed. */ if (drm_kms_helper_poll != dev->mode_config.poll_running) - __drm_kms_helper_poll_enable(dev); + drm_kms_helper_poll_enable_locked(dev); dev->mode_config.poll_running = drm_kms_helper_poll; @@ -428,7 +441,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable); void drm_kms_helper_poll_enable(struct drm_device *dev) { mutex_lock(&dev->mode_config.mutex); - __drm_kms_helper_poll_enable(dev); + drm_kms_helper_poll_enable_locked(dev); mutex_unlock(&dev->mode_config.mutex); } EXPORT_SYMBOL(drm_kms_helper_poll_enable); diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index cbdb78ef3bac..e6cbaca821a4 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -37,7 +37,6 @@ * DECON stands for Display and Enhancement controller. */ -#define DECON_DEFAULT_FRAMERATE 60 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 #define WINDOWS_NR 2 @@ -165,16 +164,6 @@ static u32 decon_calc_clkdiv(struct decon_context *ctx, return (clkdiv < 0x100) ? clkdiv : 0xff; } -static bool decon_mode_fixup(struct exynos_drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - if (adjusted_mode->vrefresh == 0) - adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE; - - return true; -} - static void decon_commit(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; @@ -637,7 +626,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc) static const struct exynos_drm_crtc_ops decon_crtc_ops = { .enable = decon_enable, .disable = decon_disable, - .mode_fixup = decon_mode_fixup, .commit = decon_commit, .enable_vblank = decon_enable_vblank, .disable_vblank = decon_disable_vblank, diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c index d66ade0efac8..124fb9a56f02 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c @@ -1383,28 +1383,6 @@ static int exynos_dp_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int exynos_dp_suspend(struct device *dev) -{ - struct exynos_dp_device *dp = dev_get_drvdata(dev); - - exynos_dp_disable(&dp->encoder); - return 0; -} - -static int exynos_dp_resume(struct device *dev) -{ - struct exynos_dp_device *dp = dev_get_drvdata(dev); - - exynos_dp_enable(&dp->encoder); - return 0; -} -#endif - -static const struct dev_pm_ops exynos_dp_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume) -}; - static const struct of_device_id exynos_dp_match[] = { { .compatible = "samsung,exynos5-dp" }, {}, @@ -1417,7 +1395,6 @@ struct platform_driver dp_driver = { .driver = { .name = "exynos-dp", .owner = THIS_MODULE, - .pm = &exynos_dp_pm_ops, .of_match_table = exynos_dp_match, }, }; diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c 
b/drivers/gpu/drm/exynos/exynos_drm_core.c index c68a6a2a9b57..7f55ba6771c6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_core.c +++ b/drivers/gpu/drm/exynos/exynos_drm_core.c @@ -28,7 +28,6 @@ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register); int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) { @@ -39,7 +38,6 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister); int exynos_drm_device_subdrv_probe(struct drm_device *dev) { @@ -69,7 +67,6 @@ int exynos_drm_device_subdrv_probe(struct drm_device *dev) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe); int exynos_drm_device_subdrv_remove(struct drm_device *dev) { @@ -87,7 +84,6 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove); int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file) { @@ -111,7 +107,6 @@ err: } return ret; } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open); void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) { @@ -122,4 +117,3 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) subdrv->close(dev, subdrv->dev, file); } } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close); diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 0872aa2f450f..ed28823d3b35 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -41,20 +41,6 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc) exynos_crtc->ops->disable(exynos_crtc); } -static bool -exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - - if (exynos_crtc->ops->mode_fixup) - return exynos_crtc->ops->mode_fixup(exynos_crtc, mode, - adjusted_mode); - - return true; -} - static void exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) { @@ -99,7 +85,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { .enable = exynos_drm_crtc_enable, .disable = exynos_drm_crtc_disable, - .mode_fixup = exynos_drm_crtc_mode_fixup, .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, .atomic_begin = exynos_crtc_atomic_begin, .atomic_flush = exynos_crtc_atomic_flush, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 831d2e4cacf9..ae9e6b2d3758 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -304,6 +304,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, return 0; } +#ifdef CONFIG_PM_SLEEP static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state) { struct drm_connector *connector; @@ -340,6 +341,7 @@ static int exynos_drm_resume(struct drm_device *dev) return 0; } +#endif static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index b7ba21dfb696..6c717ba672db 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -82,7 +82,6 @@ struct exynos_drm_plane { * * @enable: enable the device * @disable: disable the device - * @mode_fixup: fix mode data before applying it * @commit: 
set current hw specific display mode to hw. * @enable_vblank: specific driver callback for enabling vblank interrupt. * @disable_vblank: specific driver callback for disabling vblank interrupt. @@ -103,9 +102,6 @@ struct exynos_drm_crtc; struct exynos_drm_crtc_ops { void (*enable)(struct exynos_drm_crtc *crtc); void (*disable)(struct exynos_drm_crtc *crtc); - bool (*mode_fixup)(struct exynos_drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); void (*commit)(struct exynos_drm_crtc *crtc); int (*enable_vblank)(struct exynos_drm_crtc *crtc); void (*disable_vblank)(struct exynos_drm_crtc *crtc); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 2a652359af64..dd3a5e6d58c8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -1206,23 +1206,6 @@ static struct exynos_drm_ipp_ops fimc_dst_ops = { .set_addr = fimc_dst_set_addr, }; -static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) -{ - DRM_DEBUG_KMS("enable[%d]\n", enable); - - if (enable) { - clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); - clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); - ctx->suspended = false; - } else { - clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); - clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); - ctx->suspended = true; - } - - return 0; -} - static irqreturn_t fimc_irq_handler(int irq, void *dev_id) { struct fimc_context *ctx = dev_id; @@ -1780,6 +1763,24 @@ static int fimc_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM +static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) +{ + DRM_DEBUG_KMS("enable[%d]\n", enable); + + if (enable) { + clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); + clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); + ctx->suspended = false; + } else { + clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); + clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); + ctx->suspended = true; + } + + return 0; +} + #ifdef CONFIG_PM_SLEEP static int fimc_suspend(struct device *dev) { @@ -1806,7 +1807,6 @@ static int fimc_resume(struct device *dev) } #endif -#ifdef CONFIG_PM static int fimc_runtime_suspend(struct device *dev) { struct fimc_context *ctx = get_fimc_context(dev); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 750a9e6b9e8d..3d1aba67758b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -41,7 +41,6 @@ * CPU Interface. */ -#define FIMD_DEFAULT_FRAMERATE 60 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 /* position control register for hardware window 0, 2 ~ 4.*/ @@ -377,16 +376,6 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx, return (clkdiv < 0x100) ? clkdiv : 0xff; } -static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - if (adjusted_mode->vrefresh == 0) - adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE; - - return true; -} - static void fimd_commit(struct exynos_drm_crtc *crtc) { struct fimd_context *ctx = crtc->ctx; @@ -882,13 +871,12 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable) return; val = enable ? 
DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; - writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON); + writel(val, ctx->regs + DP_MIE_CLKCON); } static const struct exynos_drm_crtc_ops fimd_crtc_ops = { .enable = fimd_enable, .disable = fimd_disable, - .mode_fixup = fimd_mode_fixup, .commit = fimd_commit, .enable_vblank = fimd_enable_vblank, .disable_vblank = fimd_disable_vblank, diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 3734c34aed16..c17efdb238a6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1059,7 +1059,6 @@ int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data, return 0; } -EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl); int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, struct drm_file *file) @@ -1230,7 +1229,6 @@ err: g2d_put_cmdlist(g2d, node); return ret; } -EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl); int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, struct drm_file *file) @@ -1293,7 +1291,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, out: return 0; } -EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl); static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index f12fbc36b120..407afedb6003 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -56,39 +56,35 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj) nr_pages = obj->size >> PAGE_SHIFT; if (!is_drm_iommu_supported(dev)) { - dma_addr_t start_addr; - unsigned int i = 0; - obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); if (!obj->pages) { DRM_ERROR("failed to allocate pages.\n"); return -ENOMEM; } + } - obj->cookie = dma_alloc_attrs(dev->dev, - obj->size, - &obj->dma_addr, GFP_KERNEL, - &obj->dma_attrs); - if (!obj->cookie) { - DRM_ERROR("failed to allocate buffer.\n"); + obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr, + GFP_KERNEL, &obj->dma_attrs); + if (!obj->cookie) { + DRM_ERROR("failed to allocate buffer.\n"); + if (obj->pages) drm_free_large(obj->pages); - return -ENOMEM; - } + return -ENOMEM; + } + + if (obj->pages) { + dma_addr_t start_addr; + unsigned int i = 0; start_addr = obj->dma_addr; while (i < nr_pages) { - obj->pages[i] = phys_to_page(start_addr); + obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev, + start_addr)); start_addr += PAGE_SIZE; i++; } } else { - obj->pages = dma_alloc_attrs(dev->dev, obj->size, - &obj->dma_addr, GFP_KERNEL, - &obj->dma_attrs); - if (!obj->pages) { - DRM_ERROR("failed to allocate buffer.\n"); - return -ENOMEM; - } + obj->pages = obj->cookie; } DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", @@ -110,15 +106,11 @@ static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj) DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", (unsigned long)obj->dma_addr, obj->size); - if (!is_drm_iommu_supported(dev)) { - dma_free_attrs(dev->dev, obj->size, obj->cookie, - (dma_addr_t)obj->dma_addr, &obj->dma_attrs); - drm_free_large(obj->pages); - } else - dma_free_attrs(dev->dev, obj->size, obj->pages, - (dma_addr_t)obj->dma_addr, &obj->dma_attrs); + dma_free_attrs(dev->dev, obj->size, obj->cookie, + (dma_addr_t)obj->dma_addr, &obj->dma_attrs); - obj->dma_addr = (dma_addr_t)NULL; + if (!is_drm_iommu_supported(dev)) + drm_free_large(obj->pages); } static int exynos_drm_gem_handle_create(struct drm_gem_object 
*obj, @@ -156,18 +148,14 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) * once dmabuf's refcount becomes 0. */ if (obj->import_attach) - goto out; - - exynos_drm_free_buf(exynos_gem_obj); - -out: - drm_gem_free_mmap_offset(obj); + drm_prime_gem_destroy(obj, exynos_gem_obj->sgt); + else + exynos_drm_free_buf(exynos_gem_obj); /* release file pointer to gem object. */ drm_gem_object_release(obj); kfree(exynos_gem_obj); - exynos_gem_obj = NULL; } unsigned long exynos_drm_gem_get_size(struct drm_device *dev, @@ -190,8 +178,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev, return exynos_gem_obj->size; } - -struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, +static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, unsigned long size) { struct exynos_drm_gem_obj *exynos_gem_obj; @@ -212,6 +199,13 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, return ERR_PTR(ret); } + ret = drm_gem_create_mmap_offset(obj); + if (ret < 0) { + drm_gem_object_release(obj); + kfree(exynos_gem_obj); + return ERR_PTR(ret); + } + DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); return exynos_gem_obj; @@ -313,7 +307,7 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev, drm_gem_object_unreference_unlocked(obj); } -int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, +static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, struct vm_area_struct *vma) { struct drm_device *drm_dev = exynos_gem_obj->base.dev; @@ -342,7 +336,8 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ struct exynos_drm_gem_obj *exynos_gem_obj; +{ + struct exynos_drm_gem_obj *exynos_gem_obj; struct drm_exynos_gem_info *args = data; struct drm_gem_object *obj; @@ -402,6 +397,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args) { struct exynos_drm_gem_obj *exynos_gem_obj; + unsigned int flags; int ret; /* @@ -413,16 +409,12 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, args->pitch = args->width * ((args->bpp + 7) / 8); args->size = args->pitch * args->height; - if (is_drm_iommu_supported(dev)) { - exynos_gem_obj = exynos_drm_gem_create(dev, - EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC, - args->size); - } else { - exynos_gem_obj = exynos_drm_gem_create(dev, - EXYNOS_BO_CONTIG | EXYNOS_BO_WC, - args->size); - } + if (is_drm_iommu_supported(dev)) + flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC; + else + flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC; + exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size); if (IS_ERR(exynos_gem_obj)) { dev_warn(dev->dev, "FB allocation failed.\n"); return PTR_ERR(exynos_gem_obj); @@ -460,14 +452,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, goto unlock; } - ret = drm_gem_create_mmap_offset(obj); - if (ret) - goto out; - *offset = drm_vma_node_offset_addr(&obj->vma_node); DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); -out: drm_gem_object_unreference(obj); unlock: mutex_unlock(&dev->struct_mutex); @@ -543,7 +530,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) err_close_vm: drm_gem_vm_close(vma); - drm_gem_free_mmap_offset(obj); return ret; } @@ -588,6 +574,8 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, if (ret < 0) goto err_free_large; + exynos_gem_obj->sgt = sgt; + if (sgt->nents == 1) { 
/* always physically continuous memory if sgt->nents is 1. */ exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index cd62f8410d1e..b62d1007c0e0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -39,6 +39,7 @@ * - this address could be physical address without IOMMU and * device address with IOMMU. * @pages: Array of backing pages. + * @sgt: Imported sg_table. * * P.S. this object would be transferred to user as kms_bo.handle so * user can access the buffer through kms_bo.handle. @@ -52,6 +53,7 @@ struct exynos_drm_gem_obj { dma_addr_t dma_addr; struct dma_attrs dma_attrs; struct page **pages; + struct sg_table *sgt; }; struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); @@ -59,10 +61,6 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); /* destroy a buffer with gem object */ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj); -/* create a private gem object and initialize it. */ -struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, - unsigned long size); - /* create a new buffer with gem object */ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, unsigned int flags, diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 425e70625388..2f5c118f4c8e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -786,6 +786,7 @@ static int rotator_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM static int rotator_clk_crtl(struct rot_context *rot, bool enable) { if (enable) { @@ -822,7 +823,6 @@ static int rotator_resume(struct device *dev) } #endif -#ifdef CONFIG_PM static int rotator_runtime_suspend(struct device *dev) { struct rot_context *rot = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 3e4be5a3becd..6ade06888432 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -462,11 +462,17 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); drm_mode_connector_set_path_property(connector, pathprop); + return connector; +} + +static void intel_dp_register_mst_connector(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_device *dev = connector->dev; drm_modeset_lock_all(dev); intel_connector_add_to_fbdev(intel_connector); drm_modeset_unlock_all(dev); drm_connector_register(&intel_connector->base); - return connector; } static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, @@ -512,6 +518,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) static struct drm_dp_mst_topology_cbs mst_cbs = { .add_connector = intel_dp_add_mst_connector, + .register_connector = intel_dp_register_mst_connector, .destroy_connector = intel_dp_destroy_mst_connector, .hotplug = intel_dp_mst_hotplug, }; diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 53c0173a39fe..b17785719598 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) /* Enable 
polling and queue hotplug re-enabling. */ if (hpd_disabled) { - drm_kms_helper_poll_enable(dev); + drm_kms_helper_poll_enable_locked(dev); mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 72e0edd7bbde..7412caedcf7f 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -484,18 +484,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); read_pointer = ring->next_context_status_buffer; - write_pointer = status_pointer & 0x07; + write_pointer = status_pointer & GEN8_CSB_PTR_MASK; if (read_pointer > write_pointer) - write_pointer += 6; + write_pointer += GEN8_CSB_ENTRIES; spin_lock(&ring->execlist_lock); while (read_pointer < write_pointer) { read_pointer++; status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + - (read_pointer % 6) * 8); + (read_pointer % GEN8_CSB_ENTRIES) * 8); status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + - (read_pointer % 6) * 8 + 4); + (read_pointer % GEN8_CSB_ENTRIES) * 8 + 4); if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) continue; @@ -521,10 +521,12 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) spin_unlock(&ring->execlist_lock); WARN(submit_contexts > 2, "More than two context complete events?\n"); - ring->next_context_status_buffer = write_pointer % 6; + ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES; I915_WRITE(RING_CONTEXT_STATUS_PTR(ring), - _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8)); + _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8, + ((u32)ring->next_context_status_buffer & + GEN8_CSB_PTR_MASK) << 8)); } static int execlists_context_queue(struct drm_i915_gem_request *request) @@ -1422,6 +1424,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; + u8 next_context_status_buffer_hw; I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); @@ -1436,7 +1439,29 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); POSTING_READ(RING_MODE_GEN7(ring)); - ring->next_context_status_buffer = 0; + + /* + * Instead of resetting the Context Status Buffer (CSB) read pointer to + * zero, we need to read the write pointer from hardware and use its + * value because "this register is power context save restored". + * Effectively, these states have been observed: + * + * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) | + * BDW | CSB regs not reset | CSB regs reset | + * CHT | CSB regs not reset | CSB regs not reset | + */ + next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring)) + & GEN8_CSB_PTR_MASK); + + /* + * When the CSB registers are reset (also after power-up / gpu reset), + * CSB write pointer is set to all 1's, which is not valid, use '5' in + * this special case, so the first element read is CSB[0]. 
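+	 *
+	 * Worked example: GEN8_CSB_PTR_MASK is 0x07 and GEN8_CSB_ENTRIES is 6
+	 * (see the intel_lrc.h hunk), so a read-back of 0x07 becomes 5, and the
+	 * IRQ handler then starts processing at (5 + 1) % 6 == 0, i.e. CSB[0].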
+ */ + if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK) + next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1); + + ring->next_context_status_buffer = next_context_status_buffer_hw; DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name); memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 64f89f9982a2..3c63bb32ad81 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -25,6 +25,8 @@ #define _INTEL_LRC_H_ #define GEN8_LR_CONTEXT_ALIGN 4096 +#define GEN8_CSB_ENTRIES 6 +#define GEN8_CSB_PTR_MASK 0x07 /* Execlists regs */ #define RING_ELSP(ring) ((ring)->mmio_base+0x230) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index af7fdb3bd663..7401cf90b0db 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -246,7 +246,8 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv, } if (power_well->data == SKL_DISP_PW_1) { - intel_prepare_ddi(dev); + if (!dev_priv->power_domains.initializing) + intel_prepare_ddi(dev); gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A); } } diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index dd845f82cc24..4649bd2ed340 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -618,7 +618,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc, adjusted_mode->hdisplay, adjusted_mode->vdisplay); - if (qcrtc->index == 0) + if (bo->is_primary == false) recreate_primary = true; if (bo->surf.stride * bo->surf.height > qdev->vram_size) { diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index c3872598b85a..65adb9c72377 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -1624,8 +1624,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode) } else atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - args.ucAction = ATOM_LCD_BLON; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + + atombios_set_backlight_level(radeon_encoder, dig->backlight_level); } break; case DRM_MODE_DPMS_STANDBY: @@ -1706,8 +1707,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); } if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - atombios_dig_transmitter_setup(encoder, - ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); + atombios_set_backlight_level(radeon_encoder, dig->backlight_level); if (ext_encoder) atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); break; diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 5e09c061847f..6cddae44fa6e 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -265,7 +265,6 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol { struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr); struct drm_device *dev = master->base.dev; - struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector; struct drm_connector *connector; @@ -286,12 +285,19 @@ static struct drm_connector 
*radeon_dp_add_mst_connector(struct drm_dp_mst_topol drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); drm_mode_connector_set_path_property(connector, pathprop); + return connector; +} + +static void radeon_dp_register_mst_connector(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; + drm_modeset_lock_all(dev); radeon_fb_add_connector(rdev, connector); drm_modeset_unlock_all(dev); drm_connector_register(connector); - return connector; } static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, @@ -324,6 +330,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) struct drm_dp_mst_topology_cbs mst_cbs = { .add_connector = radeon_dp_add_mst_connector, + .register_connector = radeon_dp_register_mst_connector, .destroy_connector = radeon_dp_destroy_mst_connector, .hotplug = radeon_dp_mst_hotplug, }; diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 7214858ffcea..1aa657fe31cb 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -48,40 +48,10 @@ struct radeon_fbdev { struct radeon_device *rdev; }; -/** - * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev. - * - * @info: fbdev info - * - * This function hides the cursor on all CRTCs used by fbdev. - */ -static int radeon_fb_helper_set_par(struct fb_info *info) -{ - int ret; - - ret = drm_fb_helper_set_par(info); - - /* XXX: with universal plane support fbdev will automatically disable - * all non-primary planes (including the cursor) - */ - if (ret == 0) { - struct drm_fb_helper *fb_helper = info->par; - int i; - - for (i = 0; i < fb_helper->crtc_count; i++) { - struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc; - - radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); - } - } - - return ret; -} - static struct fb_ops radeonfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, - .fb_set_par = radeon_fb_helper_set_par, + .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = drm_fb_helper_cfb_fillrect, .fb_copyarea = drm_fb_helper_cfb_copyarea, .fb_imageblit = drm_fb_helper_cfb_imageblit, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 5ae8f921da2a..8a76821177a6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c @@ -681,6 +681,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man, 0, 0, DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT); + if (ret) { + (void) vmw_cmdbuf_man_process(man); + ret = drm_mm_insert_node_generic(&man->mm, info->node, + info->page_size, 0, 0, + DRM_MM_SEARCH_DEFAULT, + DRM_MM_CREATE_DEFAULT); + } + spin_unlock_bh(&man->lock); info->done = !ret; diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c index 6cb89c0ebab6..1fd46859ed29 100644 --- a/drivers/hwmon/abx500.c +++ b/drivers/hwmon/abx500.c @@ -470,6 +470,7 @@ static const struct of_device_id abx500_temp_match[] = { { .compatible = "stericsson,abx500-temp" }, {}, }; +MODULE_DEVICE_TABLE(of, abx500_temp_match); #endif static struct platform_driver abx500_temp_driver = { diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c index a3dae6d0082a..82de3deeb18a 100644 --- a/drivers/hwmon/gpio-fan.c +++ b/drivers/hwmon/gpio-fan.c @@ -539,6 +539,7 @@ static const struct of_device_id of_gpio_fan_match[] = { { .compatible = "gpio-fan", }, {}, }; +MODULE_DEVICE_TABLE(of, of_gpio_fan_match); 
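/*
 * Background note: MODULE_DEVICE_TABLE(of, ...) emits the OF modalias
 * strings into the module, so udev/modprobe can autoload the driver when a
 * matching device-tree node is present. The hwmon hunks here (abx500,
 * gpio-fan, pwm-fan) each add that single missing line.
 */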
#endif /* CONFIG_OF_GPIO */ static int gpio_fan_probe(struct platform_device *pdev) diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index 2d9a712699ff..3e23003f78b0 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c @@ -323,6 +323,7 @@ static const struct of_device_id of_pwm_fan_match[] = { { .compatible = "pwm-fan", }, {}, }; +MODULE_DEVICE_TABLE(of, of_pwm_fan_match); static struct platform_driver pwm_fan_driver = { .probe = pwm_fan_probe, diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 3a3738fe016b..cd4510a63375 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -620,7 +620,7 @@ static struct cpuidle_state skl_cstates[] = { .name = "C6-SKL", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 75, + .exit_latency = 85, .target_residency = 200, .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, @@ -636,11 +636,19 @@ static struct cpuidle_state skl_cstates[] = { .name = "C8-SKL", .desc = "MWAIT 0x40", .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 174, + .exit_latency = 200, .target_residency = 800, .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { + .name = "C9-SKL", + .desc = "MWAIT 0x50", + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 480, + .target_residency = 5000, + .enter = &intel_idle, + .enter_freeze = intel_idle_freeze, }, + { .name = "C10-SKL", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 41d6911e244e..f1ccd40beae9 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -245,7 +245,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; if (MLX5_CAP_GEN(mdev, apm)) props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; - props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; if (MLX5_CAP_GEN(mdev, xrc)) props->device_cap_flags |= IB_DEVICE_XRC; props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; @@ -795,53 +794,6 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm return 0; } -static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn) -{ - struct mlx5_create_mkey_mbox_in *in; - struct mlx5_mkey_seg *seg; - struct mlx5_core_mr mr; - int err; - - in = kzalloc(sizeof(*in), GFP_KERNEL); - if (!in) - return -ENOMEM; - - seg = &in->seg; - seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA; - seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); - seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - seg->start_addr = 0; - - err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in), - NULL, NULL, NULL); - if (err) { - mlx5_ib_warn(dev, "failed to create mkey, %d\n", err); - goto err_in; - } - - kfree(in); - *key = mr.key; - - return 0; - -err_in: - kfree(in); - - return err; -} - -static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key) -{ - struct mlx5_core_mr mr; - int err; - - memset(&mr, 0, sizeof(mr)); - mr.key = key; - err = mlx5_core_destroy_mkey(dev->mdev, &mr); - if (err) - mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key); -} - static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata) @@ -867,13 +819,6 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, kfree(pd); return ERR_PTR(-EFAULT); } - } else { - err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn); 
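The intel_idle hunk above retunes the Skylake C6 and C8 exit latencies (75 to 85 us, 174 to 200 us) and adds a C9-SKL entry. Both fields are in microseconds; roughly, a cpuidle governor will only pick a state whose target_residency fits the predicted idle period and whose exit_latency respects the current latency limit. The user-space model below only illustrates that selection rule with the values visible in the hunk; it is not the kernel governor, which also weighs energy break-even and prediction history:

#include <stdio.h>

struct cstate {
    const char   *name;
    unsigned int exit_latency;       /* worst-case wakeup cost, us */
    unsigned int target_residency;   /* minimum idle time to be worthwhile, us */
};

/* Values taken from the skl_cstates entries shown above. */
static const struct cstate states[] = {
    { "C6-SKL",  85,  200  },
    { "C8-SKL",  200, 800  },
    { "C9-SKL",  480, 5000 },
};

/* Deepest state that fits the predicted idle time and the latency limit. */
static const struct cstate *pick_state(unsigned int predicted_us,
                                       unsigned int latency_req_us)
{
    const struct cstate *best = NULL;

    for (unsigned int i = 0; i < sizeof(states) / sizeof(states[0]); i++) {
        if (states[i].target_residency <= predicted_us &&
            states[i].exit_latency <= latency_req_us)
            best = &states[i];
    }
    return best;
}

int main(void)
{
    const struct cstate *s = pick_state(1000, 300);

    printf("%s\n", s ? s->name : "poll");   /* C8-SKL: C9 needs 5000 us residency */
    return 0;
}
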
- if (err) { - mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn); - kfree(pd); - return ERR_PTR(err); - } } return &pd->ibpd; @@ -884,9 +829,6 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd) struct mlx5_ib_dev *mdev = to_mdev(pd->device); struct mlx5_ib_pd *mpd = to_mpd(pd); - if (!pd->uobject) - free_pa_mkey(mdev, mpd->pa_lkey); - mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn); kfree(mpd); @@ -1245,18 +1187,10 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) struct ib_srq_init_attr attr; struct mlx5_ib_dev *dev; struct ib_cq_init_attr cq_attr = {.cqe = 1}; - u32 rsvd_lkey; int ret = 0; dev = container_of(devr, struct mlx5_ib_dev, devr); - ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey); - if (ret) { - pr_err("Failed to query special context %d\n", ret); - return ret; - } - dev->ib_dev.local_dma_lkey = rsvd_lkey; - devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); if (IS_ERR(devr->p0)) { ret = PTR_ERR(devr->p0); @@ -1418,6 +1352,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); dev->ib_dev.owner = THIS_MODULE; dev->ib_dev.node_type = RDMA_NODE_IB_CA; + dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; dev->num_ports = MLX5_CAP_GEN(mdev, num_ports); dev->ib_dev.phys_port_cnt = dev->num_ports; dev->ib_dev.num_comp_vectors = diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index bb8cda79e881..22123b79d550 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -103,7 +103,6 @@ static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibuconte struct mlx5_ib_pd { struct ib_pd ibpd; u32 pdn; - u32 pa_lkey; }; /* Use macros here so that don't have to duplicate @@ -213,7 +212,6 @@ struct mlx5_ib_qp { int uuarn; int create_type; - u32 pa_lkey; /* Store signature errors */ bool signature_en; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index c745c6c5e10d..6f521a3418e8 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -925,8 +925,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); if (err) mlx5_ib_dbg(dev, "err %d\n", err); - else - qp->pa_lkey = to_mpd(pd)->pa_lkey; } if (err) @@ -2045,7 +2043,7 @@ static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg, mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm); dseg->addr = cpu_to_be64(mfrpl->map); dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64)); - dseg->lkey = cpu_to_be32(pd->pa_lkey); + dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey); } static __be32 send_ieth(struct ib_send_wr *wr) diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index ca2873698d75..4cd5428a2399 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -80,7 +80,7 @@ enum { IPOIB_NUM_WC = 4, IPOIB_MAX_PATH_REC_QUEUE = 3, - IPOIB_MAX_MCAST_QUEUE = 3, + IPOIB_MAX_MCAST_QUEUE = 64, IPOIB_FLAG_OPER_UP = 0, IPOIB_FLAG_INITIALIZED = 1, @@ -548,6 +548,8 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter, int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid, int set_qkey); +int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast); +struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid); int ipoib_init_qp(struct net_device *dev); int 
ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 36536ce5a3e2..f74316e679d2 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1149,6 +1149,9 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) unsigned long dt; unsigned long flags; int i; + LIST_HEAD(remove_list); + struct ipoib_mcast *mcast, *tmcast; + struct net_device *dev = priv->dev; if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) return; @@ -1176,6 +1179,19 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) lockdep_is_held(&priv->lock))) != NULL) { /* was the neigh idle for two GC periods */ if (time_after(neigh_obsolete, neigh->alive)) { + u8 *mgid = neigh->daddr + 4; + + /* Is this multicast ? */ + if (*mgid == 0xff) { + mcast = __ipoib_mcast_find(dev, mgid); + + if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { + list_del(&mcast->list); + rb_erase(&mcast->rb_node, &priv->multicast_tree); + list_add_tail(&mcast->list, &remove_list); + } + } + rcu_assign_pointer(*np, rcu_dereference_protected(neigh->hnext, lockdep_is_held(&priv->lock))); @@ -1191,6 +1207,8 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) out_unlock: spin_unlock_irqrestore(&priv->lock, flags); + list_for_each_entry_safe(mcast, tmcast, &remove_list, list) + ipoib_mcast_leave(dev, mcast); } static void ipoib_reap_neigh(struct work_struct *work) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 09a1748f9d13..136cbefe00f8 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -153,7 +153,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev, return mcast; } -static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) +struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct rb_node *n = priv->multicast_tree.rb_node; @@ -508,17 +508,19 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) rec.hop_limit = priv->broadcast->mcmember.hop_limit; /* - * Historically Linux IPoIB has never properly supported SEND - * ONLY join. It emulated it by not providing all the required - * attributes, which is enough to prevent group creation and - * detect if there are full members or not. A major problem - * with supporting SEND ONLY is detecting when the group is - * auto-destroyed as IPoIB will cache the MLID.. + * Send-only IB Multicast joins do not work at the core + * IB layer yet, so we can't use them here. However, + * we are emulating an Ethernet multicast send, which + * does not require a multicast subscription and will + * still send properly. The most appropriate thing to + * do is to create the group if it doesn't exist as that + * most closely emulates the behavior, from a user space + * application perspecitive, of Ethernet multicast + * operation. For now, we do a full join, maybe later + * when the core IB layers support send only joins we + * will use them. 
*/ -#if 1 - if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) - comp_mask &= ~IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; -#else +#if 0 if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) rec.join_state = 4; #endif @@ -675,7 +677,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev) return 0; } -static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) +int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) { struct ipoib_dev_priv *priv = netdev_priv(dev); int ret = 0; diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 1ace5d83a4d7..f58ff96b6cbb 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -97,6 +97,11 @@ unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS; module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024"); +bool iser_always_reg = true; +module_param_named(always_register, iser_always_reg, bool, S_IRUGO); +MODULE_PARM_DESC(always_register, + "Always register memory, even for continuous memory regions (default:true)"); + bool iser_pi_enable = false; module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO); MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 86f6583485ef..a5edd6ede692 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -611,6 +611,7 @@ extern int iser_debug_level; extern bool iser_pi_enable; extern int iser_pi_guard; extern unsigned int iser_max_sectors; +extern bool iser_always_reg; int iser_assign_reg_ops(struct iser_device *device); diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 2493cc748db8..4c46d67d37a1 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -803,11 +803,12 @@ static int iser_reg_prot_sg(struct iscsi_iser_task *task, struct iser_data_buf *mem, struct iser_fr_desc *desc, + bool use_dma_key, struct iser_mem_reg *reg) { struct iser_device *device = task->iser_conn->ib_conn.device; - if (mem->dma_nents == 1) + if (use_dma_key) return iser_reg_dma(device, mem, reg); return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg); @@ -817,11 +818,12 @@ static int iser_reg_data_sg(struct iscsi_iser_task *task, struct iser_data_buf *mem, struct iser_fr_desc *desc, + bool use_dma_key, struct iser_mem_reg *reg) { struct iser_device *device = task->iser_conn->ib_conn.device; - if (mem->dma_nents == 1) + if (use_dma_key) return iser_reg_dma(device, mem, reg); return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg); @@ -836,14 +838,17 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, struct iser_mem_reg *reg = &task->rdma_reg[dir]; struct iser_mem_reg *data_reg; struct iser_fr_desc *desc = NULL; + bool use_dma_key; int err; err = iser_handle_unaligned_buf(task, mem, dir); if (unlikely(err)) return err; - if (mem->dma_nents != 1 || - scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) { + use_dma_key = (mem->dma_nents == 1 && !iser_always_reg && + scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL); + + if (!use_dma_key) { desc = device->reg_ops->reg_desc_get(ib_conn); reg->mem_h = desc; } @@ -853,7 +858,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, else data_reg = &task->desc.data_reg; - 
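The iSER hunks above introduce an always_register module parameter (default true) and thread a use_dma_key flag down to the registration helpers: the pre-registered global DMA key is now used only when the buffer is a single contiguous DMA segment, no T10-PI protection is in flight, and the administrator has not forced registration. A condensed user-space restatement of that decision, with simplified types and names:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the use_dma_key computation in iser_reg_rdma_mem() above. */
static bool use_dma_key(int dma_nents, bool always_reg, bool prot_normal)
{
    return dma_nents == 1 && !always_reg && prot_normal;
}

int main(void)
{
    printf("%d\n", use_dma_key(1, false, true));   /* 1: DMA key shortcut is enough */
    printf("%d\n", use_dma_key(1, true,  true));   /* 0: always_register forces an MR */
    printf("%d\n", use_dma_key(4, false, true));   /* 0: multiple segments need an MR */
    return 0;
}
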
err = iser_reg_data_sg(task, mem, desc, data_reg); + err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg); if (unlikely(err)) goto err_reg; @@ -866,7 +871,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, if (unlikely(err)) goto err_reg; - err = iser_reg_prot_sg(task, mem, desc, prot_reg); + err = iser_reg_prot_sg(task, mem, desc, + use_dma_key, prot_reg); if (unlikely(err)) goto err_reg; } diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index ae70cc1463ac..85132d867bc8 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -133,11 +133,15 @@ static int iser_create_device_ib_res(struct iser_device *device) (unsigned long)comp); } - device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_WRITE | - IB_ACCESS_REMOTE_READ); - if (IS_ERR(device->mr)) - goto dma_mr_err; + if (!iser_always_reg) { + int access = IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_REMOTE_READ; + + device->mr = ib_get_dma_mr(device->pd, access); + if (IS_ERR(device->mr)) + goto dma_mr_err; + } INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, iser_event_handler); @@ -147,7 +151,8 @@ static int iser_create_device_ib_res(struct iser_device *device) return 0; handler_err: - ib_dereg_mr(device->mr); + if (device->mr) + ib_dereg_mr(device->mr); dma_mr_err: for (i = 0; i < device->comps_used; i++) tasklet_kill(&device->comps[i].tasklet); @@ -173,7 +178,6 @@ comps_err: static void iser_free_device_ib_res(struct iser_device *device) { int i; - BUG_ON(device->mr == NULL); for (i = 0; i < device->comps_used; i++) { struct iser_comp *comp = &device->comps[i]; @@ -184,7 +188,8 @@ static void iser_free_device_ib_res(struct iser_device *device) } (void)ib_unregister_event_handler(&device->event_handler); - (void)ib_dereg_mr(device->mr); + if (device->mr) + (void)ib_dereg_mr(device->mr); ib_dealloc_pd(device->pd); kfree(device->comps); diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig index 56eb471b5576..4215b5382092 100644 --- a/drivers/input/joystick/Kconfig +++ b/drivers/input/joystick/Kconfig @@ -196,6 +196,7 @@ config JOYSTICK_TWIDJOY config JOYSTICK_ZHENHUA tristate "5-byte Zhenhua RC transmitter" select SERIO + select BITREVERSE help Say Y here if you have a Zhen Hua PPM-4CH transmitter which is supplied with a ready to fly micro electric indoor helicopters diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c index b76ac580703c..a8bc2fe170dd 100644 --- a/drivers/input/joystick/walkera0701.c +++ b/drivers/input/joystick/walkera0701.c @@ -150,7 +150,7 @@ static void walkera0701_irq_handler(void *handler_data) if (w->counter == 24) { /* full frame */ walkera0701_parse_frame(w); w->counter = NO_SYNC; - if (abs(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */ + if (abs64(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */ w->counter = 0; } else { if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE) @@ -161,7 +161,7 @@ static void walkera0701_irq_handler(void *handler_data) } else w->counter = NO_SYNC; } - } else if (abs(pulse_time - SYNC_PULSE - BIN0_PULSE) < + } else if (abs64(pulse_time - SYNC_PULSE - BIN0_PULSE) < RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. 
*/ w->counter = 0; diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c index b052afec9a11..6639b2b8528a 100644 --- a/drivers/input/keyboard/omap4-keypad.c +++ b/drivers/input/keyboard/omap4-keypad.c @@ -266,7 +266,7 @@ static int omap4_keypad_probe(struct platform_device *pdev) error = omap4_keypad_parse_dt(&pdev->dev, keypad_data); if (error) - return error; + goto err_free_keypad; res = request_mem_region(res->start, resource_size(res), pdev->name); if (!res) { diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c index 867db8a91372..e317b75357a0 100644 --- a/drivers/input/misc/pm8941-pwrkey.c +++ b/drivers/input/misc/pm8941-pwrkey.c @@ -93,7 +93,7 @@ static int pm8941_reboot_notify(struct notifier_block *nb, default: reset_type = PON_PS_HOLD_TYPE_HARD_RESET; break; - }; + } error = regmap_update_bits(pwrkey->regmap, pwrkey->baseaddr + PON_PS_HOLD_RST_CTL, diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 345df9b03aed..5adbcedcb81c 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -414,7 +414,7 @@ static int uinput_setup_device(struct uinput_device *udev, dev->id.product = user_dev->id.product; dev->id.version = user_dev->id.version; - for_each_set_bit(i, dev->absbit, ABS_CNT) { + for (i = 0; i < ABS_CNT; i++) { input_abs_set_max(dev, i, user_dev->absmax[i]); input_abs_set_min(dev, i, user_dev->absmin[i]); input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]); diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h index 73670f2aebfd..c0ec26118732 100644 --- a/drivers/input/mouse/elan_i2c.h +++ b/drivers/input/mouse/elan_i2c.h @@ -60,7 +60,7 @@ struct elan_transport_ops { int (*get_sm_version)(struct i2c_client *client, u8* ic_type, u8 *version); int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum); - int (*get_product_id)(struct i2c_client *client, u8 *id); + int (*get_product_id)(struct i2c_client *client, u16 *id); int (*get_max)(struct i2c_client *client, unsigned int *max_x, unsigned int *max_y); diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index fa945304b9a5..5e1665bbaa0b 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -40,7 +40,7 @@ #include "elan_i2c.h" #define DRIVER_NAME "elan_i2c" -#define ELAN_DRIVER_VERSION "1.6.0" +#define ELAN_DRIVER_VERSION "1.6.1" #define ETP_MAX_PRESSURE 255 #define ETP_FWIDTH_REDUCE 90 #define ETP_FINGER_WIDTH 15 @@ -76,7 +76,7 @@ struct elan_tp_data { unsigned int x_res; unsigned int y_res; - u8 product_id; + u16 product_id; u8 fw_version; u8 sm_version; u8 iap_version; @@ -98,15 +98,25 @@ static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count, u16 *signature_address) { switch (iap_version) { + case 0x00: + case 0x06: case 0x08: *validpage_count = 512; break; + case 0x03: + case 0x07: case 0x09: + case 0x0A: + case 0x0B: + case 0x0C: *validpage_count = 768; break; case 0x0D: *validpage_count = 896; break; + case 0x0E: + *validpage_count = 640; + break; default: /* unknown ic type clear value */ *validpage_count = 0; @@ -266,11 +276,10 @@ static int elan_query_device_info(struct elan_tp_data *data) error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count, &data->fw_signature_address); - if (error) { - dev_err(&data->client->dev, - "unknown iap version %d\n", data->iap_version); - return error; - } + if (error) + dev_warn(&data->client->dev, + "unexpected iap version %#04x (ic type: %#04x), 
firmware update will not work\n", + data->iap_version, data->ic_type); return 0; } @@ -486,6 +495,9 @@ static ssize_t elan_sysfs_update_fw(struct device *dev, const u8 *fw_signature; static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF}; + if (data->fw_validpage_count == 0) + return -EINVAL; + /* Look for a firmware with the product id appended. */ fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id); if (!fw_name) { diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index 683c840c9dd7..a679e56c44cd 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c @@ -276,7 +276,7 @@ static int elan_i2c_get_sm_version(struct i2c_client *client, return 0; } -static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id) +static int elan_i2c_get_product_id(struct i2c_client *client, u16 *id) { int error; u8 val[3]; @@ -287,7 +287,7 @@ static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id) return error; } - *id = val[0]; + *id = le16_to_cpup((__le16 *)val); return 0; } diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c index ff36a366b2aa..cb6aecbc1dc2 100644 --- a/drivers/input/mouse/elan_i2c_smbus.c +++ b/drivers/input/mouse/elan_i2c_smbus.c @@ -183,7 +183,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client, return 0; } -static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id) +static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id) { int error; u8 val[3]; @@ -195,7 +195,7 @@ static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id) return error; } - *id = val[1]; + *id = be16_to_cpup((__be16 *)val); return 0; } diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 994ae7886156..6025eb430c0a 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -519,18 +519,14 @@ static int synaptics_set_mode(struct psmouse *psmouse) struct synaptics_data *priv = psmouse->private; priv->mode = 0; - - if (priv->absolute_mode) { + if (priv->absolute_mode) priv->mode |= SYN_BIT_ABSOLUTE_MODE; - if (SYN_CAP_EXTENDED(priv->capabilities)) - priv->mode |= SYN_BIT_W_MODE; - } - - if (!SYN_MODE_WMODE(priv->mode) && priv->disable_gesture) + if (priv->disable_gesture) priv->mode |= SYN_BIT_DISABLE_GESTURE; - if (psmouse->rate >= 80) priv->mode |= SYN_BIT_HIGH_RATE; + if (SYN_CAP_EXTENDED(priv->capabilities)) + priv->mode |= SYN_BIT_W_MODE; if (synaptics_mode_cmd(psmouse, priv->mode)) return -1; diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c index 75516996db20..316f2c897101 100644 --- a/drivers/input/serio/libps2.c +++ b/drivers/input/serio/libps2.c @@ -212,12 +212,17 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command) * time before the ACK arrives. */ if (ps2_sendbyte(ps2dev, command & 0xff, - command == PS2_CMD_RESET_BAT ? 1000 : 200)) - goto out; + command == PS2_CMD_RESET_BAT ? 1000 : 200)) { + serio_pause_rx(ps2dev->serio); + goto out_reset_flags; + } - for (i = 0; i < send; i++) - if (ps2_sendbyte(ps2dev, param[i], 200)) - goto out; + for (i = 0; i < send; i++) { + if (ps2_sendbyte(ps2dev, param[i], 200)) { + serio_pause_rx(ps2dev->serio); + goto out_reset_flags; + } + } /* * The reset command takes a long time to execute. 
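The libps2 hunks here and just below reshape the error handling of __ps2_command: a failed ps2_sendbyte() now pauses serio RX before jumping to cleanup, the reply bytes are copied out of cmdbuf only after RX has been paused, and every path leaves through a single out_reset_flags label that clears ps2dev->flags and resumes RX. The skeleton below is a user-space sketch of that shape only, with a pthread mutex standing in for the serio port lock that serio_pause_rx()/serio_continue_rx() take and release; it is not the driver code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rx_lock = PTHREAD_MUTEX_INITIALIZER;  /* ~ serio port lock */
static unsigned int flags;

static int send_byte(int byte) { return byte < 0 ? -1 : 0; } /* fake transport */

static int do_command(const int *bytes, int n)
{
    int rc = -1;

    for (int i = 0; i < n; i++) {
        if (send_byte(bytes[i])) {
            pthread_mutex_lock(&rx_lock);   /* like serio_pause_rx() */
            goto out_reset_flags;
        }
    }

    pthread_mutex_lock(&rx_lock);
    /* ...copy the reply out of the receive buffer while RX is paused... */
    rc = 0;

out_reset_flags:
    flags = 0;                              /* single cleanup point */
    pthread_mutex_unlock(&rx_lock);         /* like serio_continue_rx() */
    return rc;
}

int main(void)
{
    const int good[] = { 1, 2 }, bad[] = { 1, -1 };

    printf("%d %d\n", do_command(good, 2), do_command(bad, 2));  /* 0 -1 */
    return 0;
}
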
@@ -234,17 +239,18 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command) !(ps2dev->flags & PS2_FLAG_CMD), timeout); } + serio_pause_rx(ps2dev->serio); + if (param) for (i = 0; i < receive; i++) param[i] = ps2dev->cmdbuf[(receive - 1) - i]; if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1)) - goto out; + goto out_reset_flags; rc = 0; - out: - serio_pause_rx(ps2dev->serio); + out_reset_flags: ps2dev->flags = 0; serio_continue_rx(ps2dev->serio); diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c index 26b45936f9fd..1e8cd6f1fe9e 100644 --- a/drivers/input/serio/parkbd.c +++ b/drivers/input/serio/parkbd.c @@ -194,6 +194,7 @@ static int __init parkbd_init(void) parkbd_port = parkbd_allocate_serio(); if (!parkbd_port) { parport_release(parkbd_dev); + parport_unregister_device(parkbd_dev); return -ENOMEM; } diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c index ff0b75813daa..8275267eac25 100644 --- a/drivers/input/touchscreen/imx6ul_tsc.c +++ b/drivers/input/touchscreen/imx6ul_tsc.c @@ -94,7 +94,7 @@ struct imx6ul_tsc { * TSC module need ADC to get the measure value. So * before config TSC, we should initialize ADC module. */ -static void imx6ul_adc_init(struct imx6ul_tsc *tsc) +static int imx6ul_adc_init(struct imx6ul_tsc *tsc) { int adc_hc = 0; int adc_gc; @@ -122,17 +122,23 @@ static void imx6ul_adc_init(struct imx6ul_tsc *tsc) timeout = wait_for_completion_timeout (&tsc->completion, ADC_TIMEOUT); - if (timeout == 0) + if (timeout == 0) { dev_err(tsc->dev, "Timeout for adc calibration\n"); + return -ETIMEDOUT; + } adc_gs = readl(tsc->adc_regs + REG_ADC_GS); - if (adc_gs & ADC_CALF) + if (adc_gs & ADC_CALF) { dev_err(tsc->dev, "ADC calibration failed\n"); + return -EINVAL; + } /* TSC need the ADC work in hardware trigger */ adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG); adc_cfg |= ADC_HARDWARE_TRIGGER; writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG); + + return 0; } /* @@ -188,11 +194,17 @@ static void imx6ul_tsc_set(struct imx6ul_tsc *tsc) writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL); } -static void imx6ul_tsc_init(struct imx6ul_tsc *tsc) +static int imx6ul_tsc_init(struct imx6ul_tsc *tsc) { - imx6ul_adc_init(tsc); + int err; + + err = imx6ul_adc_init(tsc); + if (err) + return err; imx6ul_tsc_channel_config(tsc); imx6ul_tsc_set(tsc); + + return 0; } static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc) @@ -311,9 +323,7 @@ static int imx6ul_tsc_open(struct input_dev *input_dev) return err; } - imx6ul_tsc_init(tsc); - - return 0; + return imx6ul_tsc_init(tsc); } static void imx6ul_tsc_close(struct input_dev *input_dev) @@ -337,7 +347,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) int tsc_irq; int adc_irq; - tsc = devm_kzalloc(&pdev->dev, sizeof(struct imx6ul_tsc), GFP_KERNEL); + tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL); if (!tsc) return -ENOMEM; @@ -345,7 +355,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) if (!input_dev) return -ENOMEM; - input_dev->name = "iMX6UL TouchScreen Controller"; + input_dev->name = "iMX6UL Touchscreen Controller"; input_dev->id.bustype = BUS_HOST; input_dev->open = imx6ul_tsc_open; @@ -406,7 +416,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) } adc_irq = platform_get_irq(pdev, 1); - if (adc_irq <= 0) { + if (adc_irq < 0) { dev_err(&pdev->dev, "no adc irq resource?\n"); return adc_irq; } @@ -491,7 +501,7 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev) goto out; } - 
imx6ul_tsc_init(tsc); + retval = imx6ul_tsc_init(tsc); } out: diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c index 7cce87650fc8..1fafc9f57af6 100644 --- a/drivers/input/touchscreen/mms114.c +++ b/drivers/input/touchscreen/mms114.c @@ -394,12 +394,12 @@ static struct mms114_platform_data *mms114_parse_dt(struct device *dev) if (of_property_read_u32(np, "x-size", &pdata->x_size)) { dev_err(dev, "failed to get x-size property\n"); return NULL; - }; + } if (of_property_read_u32(np, "y-size", &pdata->y_size)) { dev_err(dev, "failed to get y-size property\n"); return NULL; - }; + } of_property_read_u32(np, "contact-threshold", &pdata->contact_threshold); diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 4664c2a96c67..d9da766719c8 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -43,7 +43,7 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST endmenu config IOMMU_IOVA - bool + tristate config OF_IOMMU def_bool y diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 2d7349a3ee14..041bc1810a86 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -3215,6 +3215,8 @@ static struct iova *intel_alloc_iova(struct device *dev, /* Restrict dma_mask to the width that the iommu can handle */ dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); + /* Ensure we reserve the whole size-aligned region */ + nrpages = __roundup_pow_of_two(nrpages); if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) { /* @@ -3711,7 +3713,7 @@ static inline int iommu_devinfo_cache_init(void) static int __init iommu_init_mempool(void) { int ret; - ret = iommu_iova_cache_init(); + ret = iova_cache_get(); if (ret) return ret; @@ -3725,7 +3727,7 @@ static int __init iommu_init_mempool(void) kmem_cache_destroy(iommu_domain_cache); domain_error: - iommu_iova_cache_destroy(); + iova_cache_put(); return -ENOMEM; } @@ -3734,7 +3736,7 @@ static void __init iommu_exit_mempool(void) { kmem_cache_destroy(iommu_devinfo_cache); kmem_cache_destroy(iommu_domain_cache); - iommu_iova_cache_destroy(); + iova_cache_put(); } static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index b7c3d923f3e1..fa0adef32bd6 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -18,42 +18,9 @@ */ #include <linux/iova.h> +#include <linux/module.h> #include <linux/slab.h> -static struct kmem_cache *iommu_iova_cache; - -int iommu_iova_cache_init(void) -{ - int ret = 0; - - iommu_iova_cache = kmem_cache_create("iommu_iova", - sizeof(struct iova), - 0, - SLAB_HWCACHE_ALIGN, - NULL); - if (!iommu_iova_cache) { - pr_err("Couldn't create iova cache\n"); - ret = -ENOMEM; - } - - return ret; -} - -void iommu_iova_cache_destroy(void) -{ - kmem_cache_destroy(iommu_iova_cache); -} - -struct iova *alloc_iova_mem(void) -{ - return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC); -} - -void free_iova_mem(struct iova *iova) -{ - kmem_cache_free(iommu_iova_cache, iova); -} - void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn, unsigned long pfn_32bit) @@ -72,6 +39,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule, iovad->start_pfn = start_pfn; iovad->dma_32bit_pfn = pfn_32bit; } +EXPORT_SYMBOL_GPL(init_iova_domain); static struct rb_node * __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) @@ -120,19 +88,14 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) } } -/* Computes the padding 
size required, to make the - * the start address naturally aligned on its size +/* + * Computes the padding size required, to make the start address + * naturally aligned on the power-of-two order of its size */ -static int -iova_get_pad_size(int size, unsigned int limit_pfn) +static unsigned int +iova_get_pad_size(unsigned int size, unsigned int limit_pfn) { - unsigned int pad_size = 0; - unsigned int order = ilog2(size); - - if (order) - pad_size = (limit_pfn + 1) % (1 << order); - - return pad_size; + return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1); } static int __alloc_and_insert_iova_range(struct iova_domain *iovad, @@ -242,6 +205,57 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova) rb_insert_color(&iova->node, root); } +static struct kmem_cache *iova_cache; +static unsigned int iova_cache_users; +static DEFINE_MUTEX(iova_cache_mutex); + +struct iova *alloc_iova_mem(void) +{ + return kmem_cache_alloc(iova_cache, GFP_ATOMIC); +} +EXPORT_SYMBOL(alloc_iova_mem); + +void free_iova_mem(struct iova *iova) +{ + kmem_cache_free(iova_cache, iova); +} +EXPORT_SYMBOL(free_iova_mem); + +int iova_cache_get(void) +{ + mutex_lock(&iova_cache_mutex); + if (!iova_cache_users) { + iova_cache = kmem_cache_create( + "iommu_iova", sizeof(struct iova), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!iova_cache) { + mutex_unlock(&iova_cache_mutex); + printk(KERN_ERR "Couldn't create iova cache\n"); + return -ENOMEM; + } + } + + iova_cache_users++; + mutex_unlock(&iova_cache_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(iova_cache_get); + +void iova_cache_put(void) +{ + mutex_lock(&iova_cache_mutex); + if (WARN_ON(!iova_cache_users)) { + mutex_unlock(&iova_cache_mutex); + return; + } + iova_cache_users--; + if (!iova_cache_users) + kmem_cache_destroy(iova_cache); + mutex_unlock(&iova_cache_mutex); +} +EXPORT_SYMBOL_GPL(iova_cache_put); + /** * alloc_iova - allocates an iova * @iovad: - iova domain in question @@ -265,12 +279,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, if (!new_iova) return NULL; - /* If size aligned is set then round the size to - * to next power of two. 
- */ - if (size_aligned) - size = __roundup_pow_of_two(size); - ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, new_iova, size_aligned); @@ -281,6 +289,7 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, return new_iova; } +EXPORT_SYMBOL_GPL(alloc_iova); /** * find_iova - find's an iova for a given pfn @@ -321,6 +330,7 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); return NULL; } +EXPORT_SYMBOL_GPL(find_iova); /** * __free_iova - frees the given iova @@ -339,6 +349,7 @@ __free_iova(struct iova_domain *iovad, struct iova *iova) spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); free_iova_mem(iova); } +EXPORT_SYMBOL_GPL(__free_iova); /** * free_iova - finds and frees the iova for a given pfn @@ -356,6 +367,7 @@ free_iova(struct iova_domain *iovad, unsigned long pfn) __free_iova(iovad, iova); } +EXPORT_SYMBOL_GPL(free_iova); /** * put_iova_domain - destroys the iova doamin @@ -378,6 +390,7 @@ void put_iova_domain(struct iova_domain *iovad) } spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); } +EXPORT_SYMBOL_GPL(put_iova_domain); static int __is_range_overlap(struct rb_node *node, @@ -467,6 +480,7 @@ finish: spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); return iova; } +EXPORT_SYMBOL_GPL(reserve_iova); /** * copy_reserved_iova - copies the reserved between domains @@ -493,6 +507,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) } spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); } +EXPORT_SYMBOL_GPL(copy_reserved_iova); struct iova * split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, @@ -534,3 +549,6 @@ error: free_iova_mem(prev); return NULL; } + +MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c index cf351c637464..a7c8c9ffbafd 100644 --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c @@ -62,7 +62,7 @@ static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) dev_alias->dev_id = alias; if (pdev != dev_alias->pdev) - dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); + dev_alias->count += its_pci_msi_vec_count(pdev); return 0; } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index ac7ae2b3cb83..25ceae9f7348 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -719,6 +719,9 @@ static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids) out: spin_unlock(&lpi_lock); + if (!bitmap) + *base = *nr_ids = 0; + return bitmap; } diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index af2f16bb8a94..aeaa061f0dbf 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -320,6 +320,14 @@ static void gic_handle_shared_int(bool chained) intrmask[i] = gic_read(intrmask_reg); pending_reg += gic_reg_step; intrmask_reg += gic_reg_step; + + if (!config_enabled(CONFIG_64BIT) || mips_cm_is64) + continue; + + pending[i] |= (u64)gic_read(pending_reg) << 32; + intrmask[i] |= (u64)gic_read(intrmask_reg) << 32; + pending_reg += gic_reg_step; + intrmask_reg += gic_reg_step; } bitmap_and(pending, pending, intrmask, gic_shared_intrs); @@ -426,7 +434,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, spin_lock_irqsave(&gic_lock, flags); /* Re-route this IRQ */ - 
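Two related changes above: alloc_iova() no longer rounds the request up to a power of two itself (so intel_alloc_iova() now does nrpages = __roundup_pow_of_two(nrpages) to keep reserving the whole size-aligned region), and iova_get_pad_size() is rewritten from a modulo of the order into a mask: the padding returned is what must be shaved off the top of the window so that a top-down allocation of size pages starts on a boundary of roundup_pow_of_two(size). The small user-space check below only exercises that arithmetic; it is not kernel code:

#include <stdio.h>

static unsigned long roundup_pow_of_two_ul(unsigned long v)
{
    unsigned long p = 1;

    while (p < v)
        p <<= 1;
    return p;
}

/* Same expression as the new iova_get_pad_size(): padding needed so a
 * top-down, size-aligned range still ends at limit_pfn. */
static unsigned long pad_size(unsigned long size, unsigned long limit_pfn)
{
    return (limit_pfn + 1 - size) & (roundup_pow_of_two_ul(size) - 1);
}

int main(void)
{
    unsigned long limit_pfn = 0xfffff, size = 24;
    unsigned long pad = pad_size(size, limit_pfn);
    unsigned long start = limit_pfn + 1 - size - pad;

    /* prints pad=8 start=0xfffe0 aligned=1: start is a multiple of 32 */
    printf("pad=%lu start=%#lx aligned=%d\n", pad, start,
           start % roundup_pow_of_two_ul(size) == 0);
    return 0;
}
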
gic_map_to_vpe(irq, cpumask_first(&tmp)); + gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp))); /* Update the pcpu_masks */ for (i = 0; i < NR_CPUS; i++) @@ -599,7 +607,7 @@ static __init void gic_ipi_init_one(unsigned int intr, int cpu, GIC_SHARED_TO_HWIRQ(intr)); int i; - gic_map_to_vpe(intr, cpu); + gic_map_to_vpe(intr, mips_cm_vp_id(cpu)); for (i = 0; i < NR_CPUS; i++) clear_bit(intr, pcpu_masks[i].pcpu_mask); set_bit(intr, pcpu_masks[cpu].pcpu_mask); diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c index de36237d7c6b..051645498b53 100644 --- a/drivers/mcb/mcb-pci.c +++ b/drivers/mcb/mcb-pci.c @@ -74,7 +74,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) ret = -ENOTSUPP; dev_err(&pdev->dev, "IO mapped PCI devices are not supported\n"); - goto out_release; + goto out_iounmap; } pci_set_drvdata(pdev, priv); @@ -89,7 +89,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base); if (ret < 0) - goto out_iounmap; + goto out_mcb_bus; num_cells = ret; dev_dbg(&pdev->dev, "Found %d cells\n", num_cells); @@ -98,6 +98,8 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; +out_mcb_bus: + mcb_release_bus(priv->bus); out_iounmap: iounmap(priv->base); out_release: diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index e51de52eeb94..48b5890c28e3 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1997,7 +1997,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file) ret = bitmap_storage_alloc(&store, chunks, !bitmap->mddev->bitmap_info.external, - bitmap->cluster_slot); + mddev_is_clustered(bitmap->mddev) + ? bitmap->cluster_slot : 0); if (ret) goto err; diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c index 240c9f0e85e7..8a096456579b 100644 --- a/drivers/md/dm-cache-policy-cleaner.c +++ b/drivers/md/dm-cache-policy-cleaner.c @@ -436,7 +436,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size, static struct dm_cache_policy_type wb_policy_type = { .name = "cleaner", .version = {1, 0, 0}, - .hint_size = 0, + .hint_size = 4, .owner = THIS_MODULE, .create = wb_create }; diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index ebaa4f803eec..192bb8beeb6b 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c @@ -203,7 +203,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, return -EINVAL; } - tmp_store = kmalloc(sizeof(*tmp_store), GFP_KERNEL); + tmp_store = kzalloc(sizeof(*tmp_store), GFP_KERNEL); if (!tmp_store) { ti->error = "Exception store allocation failed"; return -ENOMEM; @@ -215,7 +215,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, else if (persistent == 'N') type = get_type("N"); else { - ti->error = "Persistent flag is not P or N"; + ti->error = "Exception store type is not P or N"; r = -EINVAL; goto bad_type; } @@ -233,7 +233,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, if (r) goto bad; - r = type->ctr(tmp_store, 0, NULL); + r = type->ctr(tmp_store, (strlen(argv[0]) > 1 ? 
&argv[0][1] : NULL)); if (r) { ti->error = "Exception store type constructor failed"; goto bad; diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h index 0b2536247cf5..fae34e7a0b1e 100644 --- a/drivers/md/dm-exception-store.h +++ b/drivers/md/dm-exception-store.h @@ -42,8 +42,7 @@ struct dm_exception_store_type { const char *name; struct module *module; - int (*ctr) (struct dm_exception_store *store, - unsigned argc, char **argv); + int (*ctr) (struct dm_exception_store *store, char *options); /* * Destroys this object when you've finished with it. @@ -123,6 +122,8 @@ struct dm_exception_store { unsigned chunk_shift; void *context; + + bool userspace_supports_overflow; }; /* diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 97e165183e79..a0901214aef5 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -329,8 +329,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size) */ if (min_region_size > (1 << 13)) { /* If not a power of 2, make it the next power of 2 */ - if (min_region_size & (min_region_size - 1)) - region_size = 1 << fls(region_size); + region_size = roundup_pow_of_two(min_region_size); DMINFO("Choosing default region size of %lu sectors", region_size); } else { diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index bf71583296f7..aeacad9be51d 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -7,6 +7,7 @@ #include "dm-exception-store.h" +#include <linux/ctype.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/vmalloc.h> @@ -843,8 +844,7 @@ static void persistent_drop_snapshot(struct dm_exception_store *store) DMWARN("write header failed"); } -static int persistent_ctr(struct dm_exception_store *store, - unsigned argc, char **argv) +static int persistent_ctr(struct dm_exception_store *store, char *options) { struct pstore *ps; @@ -873,6 +873,16 @@ static int persistent_ctr(struct dm_exception_store *store, return -ENOMEM; } + if (options) { + char overflow = toupper(options[0]); + if (overflow == 'O') + store->userspace_supports_overflow = true; + else { + DMERR("Unsupported persistent store option: %s", options); + return -EINVAL; + } + } + store->context = ps; return 0; @@ -888,7 +898,8 @@ static unsigned persistent_status(struct dm_exception_store *store, case STATUSTYPE_INFO: break; case STATUSTYPE_TABLE: - DMEMIT(" P %llu", (unsigned long long)store->chunk_size); + DMEMIT(" %s %llu", store->userspace_supports_overflow ? 
"PO" : "P", + (unsigned long long)store->chunk_size); } return sz; diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c index 1ce9a2586e41..9b7c8c8049d6 100644 --- a/drivers/md/dm-snap-transient.c +++ b/drivers/md/dm-snap-transient.c @@ -70,8 +70,7 @@ static void transient_usage(struct dm_exception_store *store, *metadata_sectors = 0; } -static int transient_ctr(struct dm_exception_store *store, - unsigned argc, char **argv) +static int transient_ctr(struct dm_exception_store *store, char *options) { struct transient_c *tc; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index c0bcd6516dfe..c06b74e91cd6 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1098,7 +1098,7 @@ static void stop_merge(struct dm_snapshot *s) } /* - * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size> + * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size> */ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) { @@ -1302,6 +1302,7 @@ static void __handover_exceptions(struct dm_snapshot *snap_src, u.store_swap = snap_dest->store; snap_dest->store = snap_src->store; + snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow; snap_src->store = u.store_swap; snap_dest->store->snap = snap_dest; @@ -1739,8 +1740,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) pe = __find_pending_exception(s, pe, chunk); if (!pe) { - s->snapshot_overflowed = 1; - DMERR("Snapshot overflowed: Unable to allocate exception."); + if (s->store->userspace_supports_overflow) { + s->snapshot_overflowed = 1; + DMERR("Snapshot overflowed: Unable to allocate exception."); + } else + __invalidate_snapshot(s, -ENOMEM); r = -EIO; goto out_unlock; } @@ -2365,7 +2369,7 @@ static struct target_type origin_target = { static struct target_type snapshot_target = { .name = "snapshot", - .version = {1, 14, 0}, + .version = {1, 15, 0}, .module = THIS_MODULE, .ctr = snapshot_ctr, .dtr = snapshot_dtr, @@ -2379,7 +2383,7 @@ static struct target_type snapshot_target = { static struct target_type merge_target = { .name = dm_snapshot_merge_target_name, - .version = {1, 3, 0}, + .version = {1, 4, 0}, .module = THIS_MODULE, .ctr = snapshot_ctr, .dtr = snapshot_dtr, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6264781dc69a..1b5c6047e4f1 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1001,6 +1001,7 @@ static void end_clone_bio(struct bio *clone) struct dm_rq_target_io *tio = info->tio; struct bio *bio = info->orig; unsigned int nr_bytes = info->orig->bi_iter.bi_size; + int error = clone->bi_error; bio_put(clone); @@ -1011,13 +1012,13 @@ static void end_clone_bio(struct bio *clone) * the remainder. */ return; - else if (bio->bi_error) { + else if (error) { /* * Don't notice the error to the upper layer yet. * The error handling decision is made by the target driver, * when the request is completed. */ - tio->error = bio->bi_error; + tio->error = error; return; } @@ -2837,8 +2838,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait) might_sleep(); - map = dm_get_live_table(md, &srcu_idx); - spin_lock(&_minor_lock); idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); set_bit(DMF_FREEING, &md->flags); @@ -2852,14 +2851,14 @@ static void __dm_destroy(struct mapped_device *md, bool wait) * do not race with internal suspend. 
*/ mutex_lock(&md->suspend_lock); + map = dm_get_live_table(md, &srcu_idx); if (!dm_suspended_md(md)) { dm_table_presuspend_targets(map); dm_table_postsuspend_targets(map); } - mutex_unlock(&md->suspend_lock); - /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ dm_put_live_table(md, srcu_idx); + mutex_unlock(&md->suspend_lock); /* * Rare, but there may be I/O requests still going to complete, diff --git a/drivers/md/md.c b/drivers/md/md.c index 4f5ecbe94ccb..c702de18207a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5409,9 +5409,13 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) * which will now never happen */ wake_up_process(mddev->sync_thread->tsk); + if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags)) + return -EBUSY; mddev_unlock(mddev); wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)); + wait_event(mddev->sb_wait, + !test_bit(MD_CHANGE_PENDING, &mddev->flags)); mddev_lock_nointr(mddev); mutex_lock(&mddev->open_mutex); @@ -8160,6 +8164,7 @@ void md_check_recovery(struct mddev *mddev) md_reap_sync_thread(mddev); clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + clear_bit(MD_CHANGE_PENDING, &mddev->flags); goto unlock; } diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index d222522c52e0..d132f06afdd1 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -470,8 +470,7 @@ static int multipath_run (struct mddev *mddev) return 0; out_free_conf: - if (conf->pool) - mempool_destroy(conf->pool); + mempool_destroy(conf->pool); kfree(conf->multipaths); kfree(conf); mddev->private = NULL; diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 63e619b2f44e..f8e5db0cb5aa 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -376,12 +376,6 @@ static int raid0_run(struct mddev *mddev) struct md_rdev *rdev; bool discard_supported = false; - rdev_for_each(rdev, mddev) { - disk_stack_limits(mddev->gendisk, rdev->bdev, - rdev->data_offset << 9); - if (blk_queue_discard(bdev_get_queue(rdev->bdev))) - discard_supported = true; - } blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); @@ -390,6 +384,12 @@ static int raid0_run(struct mddev *mddev) blk_queue_io_opt(mddev->queue, (mddev->chunk_sectors << 9) * mddev->raid_disks); + rdev_for_each(rdev, mddev) { + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->data_offset << 9); + if (blk_queue_discard(bdev_get_queue(rdev->bdev))) + discard_supported = true; + } if (!discard_supported) queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); else diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 4517f06c41ba..ddd8a5f572aa 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -881,8 +881,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) } if (bio && bio_data_dir(bio) == WRITE) { - if (bio->bi_iter.bi_sector >= - conf->mddev->curr_resync_completed) { + if (bio->bi_iter.bi_sector >= conf->next_resync) { if (conf->start_next_window == MaxSector) conf->start_next_window = conf->next_resync + @@ -1516,7 +1515,7 @@ static void close_sync(struct r1conf *conf) conf->r1buf_pool = NULL; spin_lock_irq(&conf->resync_lock); - conf->next_resync = 0; + conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE; conf->start_next_window = MaxSector; conf->current_window_requests += 
conf->next_window_requests; @@ -2383,8 +2382,8 @@ static void raid1d(struct md_thread *thread) } spin_unlock_irqrestore(&conf->device_lock, flags); while (!list_empty(&tmp)) { - r1_bio = list_first_entry(&conf->bio_end_io_list, - struct r1bio, retry_list); + r1_bio = list_first_entry(&tmp, struct r1bio, + retry_list); list_del(&r1_bio->retry_list); raid_end_bio_io(r1_bio); } @@ -2843,8 +2842,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) abort: if (conf) { - if (conf->r1bio_pool) - mempool_destroy(conf->r1bio_pool); + mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); safe_put_page(conf->tmppage); kfree(conf->poolinfo); @@ -2946,8 +2944,7 @@ static void raid1_free(struct mddev *mddev, void *priv) { struct r1conf *conf = priv; - if (conf->r1bio_pool) - mempool_destroy(conf->r1bio_pool); + mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); safe_put_page(conf->tmppage); kfree(conf->poolinfo); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 0fc33eb88855..9f69dc526f8c 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2688,8 +2688,8 @@ static void raid10d(struct md_thread *thread) } spin_unlock_irqrestore(&conf->device_lock, flags); while (!list_empty(&tmp)) { - r10_bio = list_first_entry(&conf->bio_end_io_list, - struct r10bio, retry_list); + r10_bio = list_first_entry(&tmp, struct r10bio, + retry_list); list_del(&r10_bio->retry_list); raid_end_bio_io(r10_bio); } @@ -3486,8 +3486,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n", mdname(mddev)); if (conf) { - if (conf->r10bio_pool) - mempool_destroy(conf->r10bio_pool); + mempool_destroy(conf->r10bio_pool); kfree(conf->mirrors); safe_put_page(conf->tmppage); kfree(conf); @@ -3682,8 +3681,7 @@ static int run(struct mddev *mddev) out_free_conf: md_unregister_thread(&mddev->thread); - if (conf->r10bio_pool) - mempool_destroy(conf->r10bio_pool); + mempool_destroy(conf->r10bio_pool); safe_put_page(conf->tmppage); kfree(conf->mirrors); kfree(conf); @@ -3696,8 +3694,7 @@ static void raid10_free(struct mddev *mddev, void *priv) { struct r10conf *conf = priv; - if (conf->r10bio_pool) - mempool_destroy(conf->r10bio_pool); + mempool_destroy(conf->r10bio_pool); safe_put_page(conf->tmppage); kfree(conf->mirrors); kfree(conf->mirrors_old); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 15ef2c641b2b..49bb8d3ff9be 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2271,8 +2271,7 @@ static void shrink_stripes(struct r5conf *conf) drop_one_stripe(conf)) ; - if (conf->slab_cache) - kmem_cache_destroy(conf->slab_cache); + kmem_cache_destroy(conf->slab_cache); conf->slab_cache = NULL; } @@ -3150,6 +3149,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, spin_unlock_irq(&sh->stripe_lock); if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) wake_up(&conf->wait_for_overlap); + if (bi) + s->to_read--; while (bi && bi->bi_iter.bi_sector < sh->dev[i].sector + STRIPE_SECTORS) { struct bio *nextbi = @@ -3169,6 +3170,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, */ clear_bit(R5_LOCKED, &sh->dev[i].flags); } + s->to_write = 0; + s->written = 0; if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) if (atomic_dec_and_test(&conf->pending_full_writes)) @@ -3300,7 +3303,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, */ return 0; - for (i = 0; i < s->failed; i++) { + for (i = 0; i < s->failed && i < 2; i++) { if (fdev[i]->towrite && !test_bit(R5_UPTODATE, 
&fdev[i]->flags) && !test_bit(R5_OVERWRITE, &fdev[i]->flags)) @@ -3324,7 +3327,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, sh->sector < sh->raid_conf->mddev->recovery_cp) /* reconstruct-write isn't being forced */ return 0; - for (i = 0; i < s->failed; i++) { + for (i = 0; i < s->failed && i < 2; i++) { if (s->failed_num[i] != sh->pd_idx && s->failed_num[i] != sh->qd_idx && !test_bit(R5_UPTODATE, &fdev[i]->flags) && diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 8eec887c8f70..6d7c188fb65c 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -1209,7 +1209,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) * after the host receives the enum_resp * message clients may be added or removed */ - if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS && + if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS || dev->hbm_state >= MEI_HBM_STOPPED) { dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n", dev->dev_state, dev->hbm_state); diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 0520064dc33b..a3eb20bdcd97 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -134,9 +134,11 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) int err = cmd->error; /* Flag re-tuning needed on CRC errors */ - if (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || + if ((cmd->opcode != MMC_SEND_TUNING_BLOCK && + cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) && + (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || (mrq->data && mrq->data->error == -EILSEQ) || - (mrq->stop && mrq->stop->error == -EILSEQ)) + (mrq->stop && mrq->stop->error == -EILSEQ))) mmc_retune_needed(host); if (err && cmd->retries && mmc_host_is_spi(host)) { diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index abd933b7029b..5466f25f0281 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -457,7 +457,7 @@ int mmc_of_parse(struct mmc_host *host) 0, &cd_gpio_invert); if (!ret) dev_info(host->parent, "Got CD GPIO\n"); - else if (ret != -ENOENT) + else if (ret != -ENOENT && ret != -ENOSYS) return ret; /* @@ -481,7 +481,7 @@ int mmc_of_parse(struct mmc_host *host) ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert); if (!ret) dev_info(host->parent, "Got WP GPIO\n"); - else if (ret != -ENOENT) + else if (ret != -ENOENT && ret != -ENOSYS) return ret; if (of_property_read_bool(np, "disable-wp")) diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 781e4db31767..7fb0753abe30 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -182,6 +182,7 @@ struct omap_hsmmc_host { struct clk *fclk; struct clk *dbclk; struct regulator *pbias; + bool pbias_enabled; void __iomem *base; int vqmmc_enabled; resource_size_t mapbase; @@ -328,20 +329,22 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on, return ret; } - if (!regulator_is_enabled(host->pbias)) { + if (host->pbias_enabled == 0) { ret = regulator_enable(host->pbias); if (ret) { dev_err(host->dev, "pbias reg enable fail\n"); return ret; } + host->pbias_enabled = 1; } } else { - if (regulator_is_enabled(host->pbias)) { + if (host->pbias_enabled == 1) { ret = regulator_disable(host->pbias); if (ret) { dev_err(host->dev, "pbias reg disable fail\n"); return ret; } + host->pbias_enabled = 0; } } @@ -475,7 +478,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) mmc->supply.vmmc = 
devm_regulator_get_optional(host->dev, "vmmc"); if (IS_ERR(mmc->supply.vmmc)) { ret = PTR_ERR(mmc->supply.vmmc); - if (ret != -ENODEV) + if ((ret != -ENODEV) && host->dev->of_node) return ret; dev_dbg(host->dev, "unable to get vmmc regulator %ld\n", PTR_ERR(mmc->supply.vmmc)); @@ -490,7 +493,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux"); if (IS_ERR(mmc->supply.vqmmc)) { ret = PTR_ERR(mmc->supply.vqmmc); - if (ret != -ENODEV) + if ((ret != -ENODEV) && host->dev->of_node) return ret; dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n", PTR_ERR(mmc->supply.vqmmc)); @@ -500,7 +503,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) host->pbias = devm_regulator_get_optional(host->dev, "pbias"); if (IS_ERR(host->pbias)) { ret = PTR_ERR(host->pbias); - if (ret != -ENODEV) + if ((ret != -ENODEV) && host->dev->of_node) return ret; dev_dbg(host->dev, "unable to get pbias regulator %ld\n", PTR_ERR(host->pbias)); @@ -2053,6 +2056,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev) host->base = base + pdata->reg_offset; host->power_mode = MMC_POWER_OFF; host->next_data.cookie = 1; + host->pbias_enabled = 0; host->vqmmc_enabled = 0; ret = omap_hsmmc_gpio_init(mmc, host, pdata); diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 1420f29628c7..8cadd74e8407 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -28,6 +28,7 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/mmc/host.h> +#include <linux/mmc/slot-gpio.h> #include <linux/io.h> #include <linux/regulator/consumer.h> #include <linux/gpio.h> @@ -454,12 +455,8 @@ static int pxamci_get_ro(struct mmc_host *mmc) { struct pxamci_host *host = mmc_priv(mmc); - if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) { - if (host->pdata->gpio_card_ro_invert) - return !gpio_get_value(host->pdata->gpio_card_ro); - else - return gpio_get_value(host->pdata->gpio_card_ro); - } + if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) + return mmc_gpio_get_ro(mmc); if (host->pdata && host->pdata->get_ro) return !!host->pdata->get_ro(mmc_dev(mmc)); /* @@ -551,6 +548,7 @@ static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable) static const struct mmc_host_ops pxamci_ops = { .request = pxamci_request, + .get_cd = mmc_gpio_get_cd, .get_ro = pxamci_get_ro, .set_ios = pxamci_set_ios, .enable_sdio_irq = pxamci_enable_sdio_irq, @@ -790,37 +788,31 @@ static int pxamci_probe(struct platform_device *pdev) gpio_power = host->pdata->gpio_power; } if (gpio_is_valid(gpio_power)) { - ret = gpio_request(gpio_power, "mmc card power"); + ret = devm_gpio_request(&pdev->dev, gpio_power, + "mmc card power"); if (ret) { - dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power); + dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", + gpio_power); goto out; } gpio_direction_output(gpio_power, host->pdata->gpio_power_invert); } - if (gpio_is_valid(gpio_ro)) { - ret = gpio_request(gpio_ro, "mmc card read only"); - if (ret) { - dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); - goto err_gpio_ro; - } - gpio_direction_input(gpio_ro); + if (gpio_is_valid(gpio_ro)) + ret = mmc_gpio_request_ro(mmc, gpio_ro); + if (ret) { + dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); + goto out; + } else { + mmc->caps |= host->pdata->gpio_card_ro_invert ? 
+ MMC_CAP2_RO_ACTIVE_HIGH : 0; } - if (gpio_is_valid(gpio_cd)) { - ret = gpio_request(gpio_cd, "mmc card detect"); - if (ret) { - dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd); - goto err_gpio_cd; - } - gpio_direction_input(gpio_cd); - ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, - "mmc card detect", mmc); - if (ret) { - dev_err(&pdev->dev, "failed to request card detect IRQ\n"); - goto err_request_irq; - } + if (gpio_is_valid(gpio_cd)) + ret = mmc_gpio_request_cd(mmc, gpio_cd, 0); + if (ret) { + dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd); + goto out; } if (host->pdata && host->pdata->init) @@ -835,13 +827,7 @@ static int pxamci_probe(struct platform_device *pdev) return 0; -err_request_irq: - gpio_free(gpio_cd); -err_gpio_cd: - gpio_free(gpio_ro); -err_gpio_ro: - gpio_free(gpio_power); - out: +out: if (host) { if (host->dma_chan_rx) dma_release_channel(host->dma_chan_rx); @@ -873,14 +859,6 @@ static int pxamci_remove(struct platform_device *pdev) gpio_ro = host->pdata->gpio_card_ro; gpio_power = host->pdata->gpio_power; } - if (gpio_is_valid(gpio_cd)) { - free_irq(gpio_to_irq(gpio_cd), mmc); - gpio_free(gpio_cd); - } - if (gpio_is_valid(gpio_ro)) - gpio_free(gpio_ro); - if (gpio_is_valid(gpio_power)) - gpio_free(gpio_power); if (host->vcc) regulator_put(host->vcc); diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index d1556643a41d..a0f05de5409f 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -43,6 +43,7 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = { static const struct sdhci_pltfm_data soc_data_sama5d2 = { .ops = &sdhci_at91_sama5d2_ops, + .quirks2 = SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST, }; static const struct of_device_id sdhci_at91_dt_match[] = { diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 946d37f94a31..f5edf9d3a18a 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -135,6 +135,7 @@ static int armada_38x_quirks(struct platform_device *pdev, struct sdhci_pxa *pxa = pltfm_host->priv; struct resource *res; + host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN; host->quirks |= SDHCI_QUIRK_MISSING_CAPS; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "conf-sdio3"); @@ -290,6 +291,9 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) uhs == MMC_TIMING_UHS_DDR50) { reg_val &= ~SDIO3_CONF_CLK_INV; reg_val |= SDIO3_CONF_SD_FB_CLK; + } else if (uhs == MMC_TIMING_MMC_HS) { + reg_val &= ~SDIO3_CONF_CLK_INV; + reg_val &= ~SDIO3_CONF_SD_FB_CLK; } else { reg_val |= SDIO3_CONF_CLK_INV; reg_val &= ~SDIO3_CONF_SD_FB_CLK; @@ -398,7 +402,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { ret = armada_38x_quirks(pdev, host); if (ret < 0) - goto err_clk_get; + goto err_mbus_win; ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); if (ret < 0) goto err_mbus_win; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 64b7fdbd1a9c..fbc7efdddcb5 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1160,6 +1160,8 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) host->mmc->actual_clock = 0; sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST) + mdelay(1); if (clock == 0) return; diff --git a/drivers/mmc/host/sdhci.h 
b/drivers/mmc/host/sdhci.h index 7c02ff46c8ac..9d4aa31b683a 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -412,6 +412,11 @@ struct sdhci_host { #define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14) /* Broken Clock divider zero in controller */ #define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15) +/* + * When internal clock is disabled, a delay is needed before modifying the + * SD clock frequency or enabling back the internal clock. + */ +#define SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST (1<<16) int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index a7b7a6771598..b981b8552e43 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -210,6 +210,16 @@ #define SDXC_IDMAC_DES0_CES BIT(30) /* card error summary */ #define SDXC_IDMAC_DES0_OWN BIT(31) /* 1-idma owns it, 0-host owns it */ +#define SDXC_CLK_400K 0 +#define SDXC_CLK_25M 1 +#define SDXC_CLK_50M 2 +#define SDXC_CLK_50M_DDR 3 + +struct sunxi_mmc_clk_delay { + u32 output; + u32 sample; +}; + struct sunxi_idma_des { u32 config; u32 buf_size; @@ -229,6 +239,7 @@ struct sunxi_mmc_host { struct clk *clk_mmc; struct clk *clk_sample; struct clk *clk_output; + const struct sunxi_mmc_clk_delay *clk_delays; /* irq */ spinlock_t lock; @@ -654,25 +665,19 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, /* determine delays */ if (rate <= 400000) { - oclk_dly = 180; - sclk_dly = 42; + oclk_dly = host->clk_delays[SDXC_CLK_400K].output; + sclk_dly = host->clk_delays[SDXC_CLK_400K].sample; } else if (rate <= 25000000) { - oclk_dly = 180; - sclk_dly = 75; + oclk_dly = host->clk_delays[SDXC_CLK_25M].output; + sclk_dly = host->clk_delays[SDXC_CLK_25M].sample; } else if (rate <= 50000000) { if (ios->timing == MMC_TIMING_UHS_DDR50) { - oclk_dly = 60; - sclk_dly = 120; + oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output; + sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample; } else { - oclk_dly = 90; - sclk_dly = 150; + oclk_dly = host->clk_delays[SDXC_CLK_50M].output; + sclk_dly = host->clk_delays[SDXC_CLK_50M].sample; } - } else if (rate <= 100000000) { - oclk_dly = 6; - sclk_dly = 24; - } else if (rate <= 200000000) { - oclk_dly = 3; - sclk_dly = 12; } else { return -EINVAL; } @@ -871,6 +876,7 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) static const struct of_device_id sunxi_mmc_of_match[] = { { .compatible = "allwinner,sun4i-a10-mmc", }, { .compatible = "allwinner,sun5i-a13-mmc", }, + { .compatible = "allwinner,sun9i-a80-mmc", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match); @@ -884,6 +890,20 @@ static struct mmc_host_ops sunxi_mmc_ops = { .hw_reset = sunxi_mmc_hw_reset, }; +static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = { + [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, + [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, + [SDXC_CLK_50M] = { .output = 90, .sample = 120 }, + [SDXC_CLK_50M_DDR] = { .output = 60, .sample = 120 }, +}; + +static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = { + [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, + [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, + [SDXC_CLK_50M] = { .output = 150, .sample = 120 }, + [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 }, +}; + static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, struct platform_device *pdev) { @@ -895,6 +915,11 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, else 
host->idma_des_size_bits = 16; + if (of_device_is_compatible(np, "allwinner,sun9i-a80-mmc")) + host->clk_delays = sun9i_mmc_clk_delays; + else + host->clk_delays = sunxi_mmc_clk_delays; + ret = mmc_regulator_get_supply(host->mmc); if (ret) { if (ret != -EPROBE_DEFER) diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 2426db88db36..f04445b992f5 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c @@ -879,7 +879,7 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom) oob_chunk_size); /* the last chunk */ - memcpy16_toio(&s[oob_chunk_size * sparebuf_size], + memcpy16_toio(&s[i * sparebuf_size], &d[i * oob_chunk_size], host->used_oobsize - i * oob_chunk_size); } diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index f97a58d6aae1..e7d333c162be 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c @@ -147,6 +147,10 @@ #define NFC_ECC_MODE GENMASK(15, 12) #define NFC_RANDOM_SEED GENMASK(30, 16) +/* NFC_USER_DATA helper macros */ +#define NFC_BUF_TO_USER_DATA(buf) ((buf)[0] | ((buf)[1] << 8) | \ + ((buf)[2] << 16) | ((buf)[3] << 24)) + #define NFC_DEFAULT_TIMEOUT_MS 1000 #define NFC_SRAM_SIZE 1024 @@ -646,15 +650,9 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd, offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize; /* Fill OOB data in */ - if (oob_required) { - tmp = 0xffffffff; - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp, - 4); - } else { - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, - chip->oob_poi + offset - mtd->writesize, - 4); - } + writel(NFC_BUF_TO_USER_DATA(chip->oob_poi + + layout->oobfree[i].offset), + nfc->regs + NFC_REG_USER_DATA_BASE); chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1); @@ -784,14 +782,8 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd, offset += ecc->size; /* Fill OOB data in */ - if (oob_required) { - tmp = 0xffffffff; - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp, - 4); - } else { - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, oob, - 4); - } + writel(NFC_BUF_TO_USER_DATA(oob), + nfc->regs + NFC_REG_USER_DATA_BASE); tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR | (1 << 30); @@ -1389,6 +1381,7 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc) node); nand_release(&chip->mtd); sunxi_nand_ecc_cleanup(&chip->nand.ecc); + list_del(&chip->node); } } diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index 5bbd1f094f4e..1fc23e48fe8e 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -926,6 +926,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi, goto bad; } + if (data_size > ubi->leb_size) { + ubi_err(ubi, "bad data_size"); + goto bad; + } + if (vol_type == UBI_VID_STATIC) { /* * Although from high-level point of view static volumes may diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 80bdd5b88bac..d85c19762160 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c @@ -649,6 +649,7 @@ static int init_volumes(struct ubi_device *ubi, if (ubi->corr_peb_count) ubi_err(ubi, "%d PEBs are corrupted and not used", ubi->corr_peb_count); + return -ENOSPC; } ubi->rsvd_pebs += reserved_pebs; ubi->avail_pebs -= reserved_pebs; diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 275d9fb6fe5c..eb4489f9082f 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -1601,6 +1601,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) if (ubi->corr_peb_count) ubi_err(ubi, "%d PEBs are corrupted and 
not used", ubi->corr_peb_count); + err = -ENOSPC; goto out_free; } ubi->avail_pebs -= reserved_pebs; diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index f8baa897d1a0..1f7dd927cc5e 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2051,6 +2051,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA; else reg |= PORT_CONTROL_FRAME_MODE_DSA; + reg |= PORT_CONTROL_FORWARD_UNKNOWN | + PORT_CONTROL_FORWARD_UNKNOWN_MC; } if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index b7a0f7879de2..9e59663a6ead 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -1543,7 +1543,7 @@ bfa_flash_cmd_act_check(void __iomem *pci_bar) } /* Flush FLI data fifo. */ -static u32 +static int bfa_flash_fifo_flush(void __iomem *pci_bar) { u32 i; @@ -1573,11 +1573,11 @@ bfa_flash_fifo_flush(void __iomem *pci_bar) } /* Read flash status. */ -static u32 +static int bfa_flash_status_read(void __iomem *pci_bar) { union bfa_flash_dev_status_reg dev_status; - u32 status; + int status; u32 ret_status; int i; @@ -1611,11 +1611,11 @@ bfa_flash_status_read(void __iomem *pci_bar) } /* Start flash read operation. */ -static u32 +static int bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len, char *buf) { - u32 status; + int status; /* len must be mutiple of 4 and not exceeding fifo size */ if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0) @@ -1703,7 +1703,8 @@ static enum bfa_status bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf, u32 len) { - u32 n, status; + u32 n; + int status; u32 off, l, s, residue, fifo_sz; residue = len; diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index cc2d8b4b18e3..253f8ed0537a 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -816,7 +816,7 @@ static int hip04_mac_probe(struct platform_device *pdev) struct net_device *ndev; struct hip04_priv *priv; struct resource *res; - unsigned int irq; + int irq; int ret; ndev = alloc_etherdev(sizeof(struct hip04_priv)); diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index 28df37420da9..ac02c675c59c 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h @@ -460,8 +460,8 @@ struct emac_ethtool_regs_subhdr { u32 index; }; -#define EMAC_ETHTOOL_REGS_VER 0 -#define EMAC4_ETHTOOL_REGS_VER 1 -#define EMAC4SYNC_ETHTOOL_REGS_VER 2 +#define EMAC_ETHTOOL_REGS_VER 3 +#define EMAC4_ETHTOOL_REGS_VER 4 +#define EMAC4SYNC_ETHTOOL_REGS_VER 5 #endif /* __IBM_NEWEMAC_CORE_H */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 3e0d20037675..62488a67149d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -946,6 +946,13 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, /* take the lock before we start messing with the ring */ mutex_lock(&hw->aq.arq_mutex); + if (hw->aq.arq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQRX: Admin queue not initialized.\n"); + ret_code = I40E_ERR_QUEUE_EMPTY; + goto clean_arq_element_err; + } + /* set next_to_use to head */ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); if (ntu == ntc) { @@ -1007,6 
+1014,8 @@ clean_arq_element_out: /* Set pending if needed, unlock and return */ if (pending != NULL) *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); + +clean_arq_element_err: mutex_unlock(&hw->aq.arq_mutex); if (i40e_is_nvm_update_op(&e->desc)) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 851c1a159be8..2fdf978ae6a5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2672,7 +2672,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) rx_ctx.lrxqthresh = 2; rx_ctx.crcstrip = 1; rx_ctx.l2tsel = 1; - rx_ctx.showiv = 1; + /* this controls whether VLAN is stripped from inner headers */ + rx_ctx.showiv = 0; #ifdef I40E_FCOE rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); #endif diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index f08450b90774..929d47152bf2 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -887,6 +887,13 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, /* take the lock before we start messing with the ring */ mutex_lock(&hw->aq.arq_mutex); + if (hw->aq.arq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQRX: Admin queue not initialized.\n"); + ret_code = I40E_ERR_QUEUE_EMPTY; + goto clean_arq_element_err; + } + /* set next_to_use to head */ ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK); if (ntu == ntc) { @@ -948,6 +955,8 @@ clean_arq_element_out: /* Set pending if needed, unlock and return */ if (pending != NULL) *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); + +clean_arq_element_err: mutex_unlock(&hw->aq.arq_mutex); return ret_code; diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index bd9ea0d01aae..1d4e2e054647 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -1184,10 +1184,11 @@ out: if (prot == MLX4_PROT_ETH) { /* manage the steering entry for promisc mode */ if (new_entry) - new_steering_entry(dev, port, steer, index, qp->qpn); + err = new_steering_entry(dev, port, steer, + index, qp->qpn); else - existing_steering_entry(dev, port, steer, - index, qp->qpn); + err = existing_steering_entry(dev, port, steer, + index, qp->qpn); } if (err && link && index != -1) { if (index < dev->caps.num_mgms) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index aa0d5ffe92d8..9335e5ae18cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -200,25 +200,3 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev) return err; } - -int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey) -{ - struct mlx5_cmd_query_special_contexts_mbox_in in; - struct mlx5_cmd_query_special_contexts_mbox_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); - - *rsvd_lkey = be32_to_cpu(out.resd_lkey); - - return err; -} -EXPORT_SYMBOL(mlx5_core_query_special_context); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 2b32e0c5a0b4..b4f21232019a 100644 --- 
a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -6081,7 +6081,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev; - u16 rg_saw_cnt; + int rg_saw_cnt; u32 data; static const struct ephy_info e_info_8168h_1[] = { { 0x1e, 0x0800, 0x0001 }, diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index d3c6676b3c0c..6fd4e5a5ef4a 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -67,7 +67,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, int rc; /* Stop the user from reading */ - if (pos > nvmem->size) + if (pos >= nvmem->size) return 0; if (pos + count > nvmem->size) @@ -92,7 +92,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, int rc; /* Stop the user from writing */ - if (pos > nvmem->size) + if (pos >= nvmem->size) return 0; if (pos + count > nvmem->size) @@ -825,7 +825,7 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem, return rc; /* shift bits in-place */ - if (cell->bit_offset || cell->bit_offset) + if (cell->bit_offset || cell->nbits) nvmem_shift_read_buffer_in_place(cell, buf); *len = cell->bytes; @@ -938,7 +938,7 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes); /* free the tmp buffer */ - if (cell->bit_offset) + if (cell->bit_offset || cell->nbits) kfree(buf); if (IS_ERR_VALUE(rc)) diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c index 14777dd5212d..cfa3b85064dd 100644 --- a/drivers/nvmem/sunxi_sid.c +++ b/drivers/nvmem/sunxi_sid.c @@ -103,7 +103,7 @@ static int sunxi_sid_probe(struct platform_device *pdev) struct nvmem_device *nvmem; struct regmap *regmap; struct sunxi_sid *sid; - int i, size; + int ret, i, size; char *randomness; sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL); @@ -131,6 +131,11 @@ static int sunxi_sid_probe(struct platform_device *pdev) return PTR_ERR(nvmem); randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL); + if (!randomness) { + ret = -EINVAL; + goto err_unreg_nvmem; + } + for (i = 0; i < size; i++) randomness[i] = sunxi_sid_read_byte(sid, i); @@ -140,6 +145,10 @@ static int sunxi_sid_probe(struct platform_device *pdev) platform_set_drvdata(pdev, nvmem); return 0; + +err_unreg_nvmem: + nvmem_unregister(nvmem); + return ret; } static int sunxi_sid_remove(struct platform_device *pdev) diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index dd652f2ae03d..108a3118ace7 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -299,9 +299,10 @@ static long local_pci_probe(void *_ddi) * Unbound PCI devices are always put in D0, regardless of * runtime PM status. During probe, the device is set to * active and the usage count is incremented. If the driver - * supports runtime PM, it should call pm_runtime_put_noidle() - * in its probe routine and pm_runtime_get_noresume() in its - * remove routine. + * supports runtime PM, it should call pm_runtime_put_noidle(), + * or any other runtime PM helper function decrementing the usage + * count, in its probe routine and pm_runtime_get_noresume() in + * its remove routine. */ pm_runtime_get_sync(dev); pci_dev->driver = pci_drv; diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index 47da573d0bab..7eb5859dd035 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -206,6 +206,15 @@ config PHY_HIX5HD2_SATA help Support for SATA PHY on Hisilicon hix5hd2 Soc. 
+config PHY_MT65XX_USB3 + tristate "Mediatek USB3.0 PHY Driver" + depends on ARCH_MEDIATEK && OF + select GENERIC_PHY + help + Say 'Y' here to add support for Mediatek USB3.0 PHY driver + for mt65xx SoCs. it supports two usb2.0 ports and + one usb3.0 port. + config PHY_SUN4I_USB tristate "Allwinner sunxi SoC USB PHY driver" depends on ARCH_SUNXI && HAS_IOMEM && OF @@ -371,4 +380,13 @@ config PHY_BRCMSTB_SATA Enable this to support the SATA3 PHY on 28nm Broadcom STB SoCs. Likely useful only with CONFIG_SATA_BRCMSTB enabled. +config PHY_CYGNUS_PCIE + tristate "Broadcom Cygnus PCIe PHY driver" + depends on OF && (ARCH_BCM_CYGNUS || COMPILE_TEST) + select GENERIC_PHY + default ARCH_BCM_CYGNUS + help + Enable this to support the Broadcom Cygnus PCIe PHY. + If unsure, say N. + endmenu diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index a5b18c18fc12..075db1a81aa5 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_TI_PIPE3) += phy-ti-pipe3.o obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o obj-$(CONFIG_PHY_EXYNOS5250_SATA) += phy-exynos5250-sata.o obj-$(CONFIG_PHY_HIX5HD2_SATA) += phy-hix5hd2-sata.o +obj-$(CONFIG_PHY_MT65XX_USB3) += phy-mt65xx-usb3.o obj-$(CONFIG_PHY_SUN4I_USB) += phy-sun4i-usb.o obj-$(CONFIG_PHY_SUN9I_USB) += phy-sun9i-usb.o obj-$(CONFIG_PHY_SAMSUNG_USB2) += phy-exynos-usb2.o @@ -46,3 +47,4 @@ obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-14nm.o obj-$(CONFIG_PHY_TUSB1210) += phy-tusb1210.o obj-$(CONFIG_PHY_BRCMSTB_SATA) += phy-brcmstb-sata.o obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o +obj-$(CONFIG_PHY_CYGNUS_PCIE) += phy-bcm-cygnus-pcie.o diff --git a/drivers/phy/phy-bcm-cygnus-pcie.c b/drivers/phy/phy-bcm-cygnus-pcie.c new file mode 100644 index 000000000000..7ad72b7d2b98 --- /dev/null +++ b/drivers/phy/phy-bcm-cygnus-pcie.c @@ -0,0 +1,213 @@ +/* + * Copyright (C) 2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> + +#define PCIE_CFG_OFFSET 0x00 +#define PCIE1_PHY_IDDQ_SHIFT 10 +#define PCIE0_PHY_IDDQ_SHIFT 2 + +enum cygnus_pcie_phy_id { + CYGNUS_PHY_PCIE0 = 0, + CYGNUS_PHY_PCIE1, + MAX_NUM_PHYS, +}; + +struct cygnus_pcie_phy_core; + +/** + * struct cygnus_pcie_phy - Cygnus PCIe PHY device + * @core: pointer to the Cygnus PCIe PHY core control + * @id: internal ID to identify the Cygnus PCIe PHY + * @phy: pointer to the kernel PHY device + */ +struct cygnus_pcie_phy { + struct cygnus_pcie_phy_core *core; + enum cygnus_pcie_phy_id id; + struct phy *phy; +}; + +/** + * struct cygnus_pcie_phy_core - Cygnus PCIe PHY core control + * @dev: pointer to device + * @base: base register + * @lock: mutex to protect access to individual PHYs + * @phys: pointer to Cygnus PHY device + */ +struct cygnus_pcie_phy_core { + struct device *dev; + void __iomem *base; + struct mutex lock; + struct cygnus_pcie_phy phys[MAX_NUM_PHYS]; +}; + +static int cygnus_pcie_power_config(struct cygnus_pcie_phy *phy, bool enable) +{ + struct cygnus_pcie_phy_core *core = phy->core; + unsigned shift; + u32 val; + + mutex_lock(&core->lock); + + switch (phy->id) { + case CYGNUS_PHY_PCIE0: + shift = PCIE0_PHY_IDDQ_SHIFT; + break; + + case CYGNUS_PHY_PCIE1: + shift = PCIE1_PHY_IDDQ_SHIFT; + break; + + default: + mutex_unlock(&core->lock); + dev_err(core->dev, "PCIe PHY %d invalid\n", phy->id); + return -EINVAL; + } + + if (enable) { + val = readl(core->base + PCIE_CFG_OFFSET); + val &= ~BIT(shift); + writel(val, core->base + PCIE_CFG_OFFSET); + /* + * Wait 50 ms for the PCIe Serdes to stabilize after the analog + * front end is brought up + */ + msleep(50); + } else { + val = readl(core->base + PCIE_CFG_OFFSET); + val |= BIT(shift); + writel(val, core->base + PCIE_CFG_OFFSET); + } + + mutex_unlock(&core->lock); + dev_dbg(core->dev, "PCIe PHY %d %s\n", phy->id, + enable ? 
"enabled" : "disabled"); + return 0; +} + +static int cygnus_pcie_phy_power_on(struct phy *p) +{ + struct cygnus_pcie_phy *phy = phy_get_drvdata(p); + + return cygnus_pcie_power_config(phy, true); +} + +static int cygnus_pcie_phy_power_off(struct phy *p) +{ + struct cygnus_pcie_phy *phy = phy_get_drvdata(p); + + return cygnus_pcie_power_config(phy, false); +} + +static struct phy_ops cygnus_pcie_phy_ops = { + .power_on = cygnus_pcie_phy_power_on, + .power_off = cygnus_pcie_phy_power_off, + .owner = THIS_MODULE, +}; + +static int cygnus_pcie_phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node, *child; + struct cygnus_pcie_phy_core *core; + struct phy_provider *provider; + struct resource *res; + unsigned cnt = 0; + + if (of_get_child_count(node) == 0) { + dev_err(dev, "PHY no child node\n"); + return -ENODEV; + } + + core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL); + if (!core) + return -ENOMEM; + + core->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + core->base = devm_ioremap_resource(dev, res); + if (IS_ERR(core->base)) + return PTR_ERR(core->base); + + mutex_init(&core->lock); + + for_each_available_child_of_node(node, child) { + unsigned int id; + struct cygnus_pcie_phy *p; + + if (of_property_read_u32(child, "reg", &id)) { + dev_err(dev, "missing reg property for %s\n", + child->name); + return -EINVAL; + } + + if (id >= MAX_NUM_PHYS) { + dev_err(dev, "invalid PHY id: %u\n", id); + return -EINVAL; + } + + if (core->phys[id].phy) { + dev_err(dev, "duplicated PHY id: %u\n", id); + return -EINVAL; + } + + p = &core->phys[id]; + p->phy = devm_phy_create(dev, child, &cygnus_pcie_phy_ops); + if (IS_ERR(p->phy)) { + dev_err(dev, "failed to create PHY\n"); + return PTR_ERR(p->phy); + } + + p->core = core; + p->id = id; + phy_set_drvdata(p->phy, p); + cnt++; + } + + dev_set_drvdata(dev, core); + + provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (IS_ERR(provider)) { + dev_err(dev, "failed to register PHY provider\n"); + return PTR_ERR(provider); + } + + dev_dbg(dev, "registered %u PCIe PHY(s)\n", cnt); + + return 0; +} + +static const struct of_device_id cygnus_pcie_phy_match_table[] = { + { .compatible = "brcm,cygnus-pcie-phy" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, cygnus_pcie_phy_match_table); + +static struct platform_driver cygnus_pcie_phy_driver = { + .driver = { + .name = "cygnus-pcie-phy", + .of_match_table = cygnus_pcie_phy_match_table, + }, + .probe = cygnus_pcie_phy_probe, +}; +module_platform_driver(cygnus_pcie_phy_driver); + +MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>"); +MODULE_DESCRIPTION("Broadcom Cygnus PCIe PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c index 0062027afb1e..77a2e054fdea 100644 --- a/drivers/phy/phy-berlin-sata.c +++ b/drivers/phy/phy-berlin-sata.c @@ -276,6 +276,7 @@ static const struct of_device_id phy_berlin_sata_of_match[] = { { .compatible = "marvell,berlin2q-sata-phy" }, { }, }; +MODULE_DEVICE_TABLE(of, phy_berlin_sata_of_match); static struct platform_driver phy_berlin_sata_driver = { .probe = phy_berlin_sata_probe, diff --git a/drivers/phy/phy-mt65xx-usb3.c b/drivers/phy/phy-mt65xx-usb3.c new file mode 100644 index 000000000000..f30b28bd41fe --- /dev/null +++ b/drivers/phy/phy-mt65xx-usb3.c @@ -0,0 +1,506 @@ +/* + * Copyright (c) 2015 MediaTek Inc. 
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <dt-bindings/phy/phy.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> + +/* + * for sifslv2 register, but exclude port's; + * relative to USB3_SIF2_BASE base address + */ +#define SSUSB_SIFSLV_SPLLC 0x0000 + +/* offsets of sub-segment in each port registers */ +#define SSUSB_SIFSLV_U2PHY_COM_BASE 0x0000 +#define SSUSB_SIFSLV_U3PHYD_BASE 0x0100 +#define SSUSB_USB30_PHYA_SIV_B_BASE 0x0300 +#define SSUSB_SIFSLV_U3PHYA_DA_BASE 0x0400 + +#define U3P_USBPHYACR0 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0000) +#define PA0_RG_U2PLL_FORCE_ON BIT(15) + +#define U3P_USBPHYACR2 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0008) +#define PA2_RG_SIF_U2PLL_FORCE_EN BIT(18) + +#define U3P_USBPHYACR5 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0014) +#define PA5_RG_U2_HSTX_SRCTRL GENMASK(14, 12) +#define PA5_RG_U2_HSTX_SRCTRL_VAL(x) ((0x7 & (x)) << 12) +#define PA5_RG_U2_HS_100U_U3_EN BIT(11) + +#define U3P_USBPHYACR6 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0018) +#define PA6_RG_U2_ISO_EN BIT(31) +#define PA6_RG_U2_BC11_SW_EN BIT(23) +#define PA6_RG_U2_OTG_VBUSCMP_EN BIT(20) + +#define U3P_U2PHYACR4 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0020) +#define P2C_RG_USB20_GPIO_CTL BIT(9) +#define P2C_USB20_GPIO_MODE BIT(8) +#define P2C_U2_GPIO_CTR_MSK (P2C_RG_USB20_GPIO_CTL | P2C_USB20_GPIO_MODE) + +#define U3D_U2PHYDCR0 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0060) +#define P2C_RG_SIF_U2PLL_FORCE_ON BIT(24) + +#define U3P_U2PHYDTM0 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0068) +#define P2C_FORCE_UART_EN BIT(26) +#define P2C_FORCE_DATAIN BIT(23) +#define P2C_FORCE_DM_PULLDOWN BIT(21) +#define P2C_FORCE_DP_PULLDOWN BIT(20) +#define P2C_FORCE_XCVRSEL BIT(19) +#define P2C_FORCE_SUSPENDM BIT(18) +#define P2C_FORCE_TERMSEL BIT(17) +#define P2C_RG_DATAIN GENMASK(13, 10) +#define P2C_RG_DATAIN_VAL(x) ((0xf & (x)) << 10) +#define P2C_RG_DMPULLDOWN BIT(7) +#define P2C_RG_DPPULLDOWN BIT(6) +#define P2C_RG_XCVRSEL GENMASK(5, 4) +#define P2C_RG_XCVRSEL_VAL(x) ((0x3 & (x)) << 4) +#define P2C_RG_SUSPENDM BIT(3) +#define P2C_RG_TERMSEL BIT(2) +#define P2C_DTM0_PART_MASK \ + (P2C_FORCE_DATAIN | P2C_FORCE_DM_PULLDOWN | \ + P2C_FORCE_DP_PULLDOWN | P2C_FORCE_XCVRSEL | \ + P2C_FORCE_TERMSEL | P2C_RG_DMPULLDOWN | \ + P2C_RG_DPPULLDOWN | P2C_RG_TERMSEL) + +#define U3P_U2PHYDTM1 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x006C) +#define P2C_RG_UART_EN BIT(16) +#define P2C_RG_VBUSVALID BIT(5) +#define P2C_RG_SESSEND BIT(4) +#define P2C_RG_AVALID BIT(2) + +#define U3P_U3_PHYA_REG0 (SSUSB_USB30_PHYA_SIV_B_BASE + 0x0000) +#define P3A_RG_U3_VUSB10_ON BIT(5) + +#define U3P_U3_PHYA_REG6 (SSUSB_USB30_PHYA_SIV_B_BASE + 0x0018) +#define P3A_RG_TX_EIDLE_CM GENMASK(31, 28) +#define P3A_RG_TX_EIDLE_CM_VAL(x) ((0xf & (x)) << 28) + +#define U3P_U3_PHYA_REG9 (SSUSB_USB30_PHYA_SIV_B_BASE + 0x0024) +#define P3A_RG_RX_DAC_MUX GENMASK(5, 1) +#define P3A_RG_RX_DAC_MUX_VAL(x) ((0x1f & (x)) << 1) + +#define U3P_U3PHYA_DA_REG0 
(SSUSB_SIFSLV_U3PHYA_DA_BASE + 0x0000) +#define P3A_RG_XTAL_EXT_EN_U3 GENMASK(11, 10) +#define P3A_RG_XTAL_EXT_EN_U3_VAL(x) ((0x3 & (x)) << 10) + +#define U3P_PHYD_CDR1 (SSUSB_SIFSLV_U3PHYD_BASE + 0x005c) +#define P3D_RG_CDR_BIR_LTD1 GENMASK(28, 24) +#define P3D_RG_CDR_BIR_LTD1_VAL(x) ((0x1f & (x)) << 24) +#define P3D_RG_CDR_BIR_LTD0 GENMASK(12, 8) +#define P3D_RG_CDR_BIR_LTD0_VAL(x) ((0x1f & (x)) << 8) + +#define U3P_XTALCTL3 (SSUSB_SIFSLV_SPLLC + 0x0018) +#define XC3_RG_U3_XTAL_RX_PWD BIT(9) +#define XC3_RG_U3_FRC_XTAL_RX_PWD BIT(8) + +struct mt65xx_phy_instance { + struct phy *phy; + void __iomem *port_base; + u32 index; + u8 type; +}; + +struct mt65xx_u3phy { + struct device *dev; + void __iomem *sif_base; /* include sif2, but exclude port's */ + struct clk *u3phya_ref; /* reference clock of usb3 anolog phy */ + struct mt65xx_phy_instance **phys; + int nphys; +}; + +static void phy_instance_init(struct mt65xx_u3phy *u3phy, + struct mt65xx_phy_instance *instance) +{ + void __iomem *port_base = instance->port_base; + u32 index = instance->index; + u32 tmp; + + /* switch to USB function. (system register, force ip into usb mode) */ + tmp = readl(port_base + U3P_U2PHYDTM0); + tmp &= ~P2C_FORCE_UART_EN; + tmp |= P2C_RG_XCVRSEL_VAL(1) | P2C_RG_DATAIN_VAL(0); + writel(tmp, port_base + U3P_U2PHYDTM0); + + tmp = readl(port_base + U3P_U2PHYDTM1); + tmp &= ~P2C_RG_UART_EN; + writel(tmp, port_base + U3P_U2PHYDTM1); + + if (!index) { + tmp = readl(port_base + U3P_U2PHYACR4); + tmp &= ~P2C_U2_GPIO_CTR_MSK; + writel(tmp, port_base + U3P_U2PHYACR4); + + tmp = readl(port_base + U3P_USBPHYACR2); + tmp |= PA2_RG_SIF_U2PLL_FORCE_EN; + writel(tmp, port_base + U3P_USBPHYACR2); + + tmp = readl(port_base + U3D_U2PHYDCR0); + tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON; + writel(tmp, port_base + U3D_U2PHYDCR0); + } else { + tmp = readl(port_base + U3D_U2PHYDCR0); + tmp |= P2C_RG_SIF_U2PLL_FORCE_ON; + writel(tmp, port_base + U3D_U2PHYDCR0); + + tmp = readl(port_base + U3P_U2PHYDTM0); + tmp |= P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM; + writel(tmp, port_base + U3P_U2PHYDTM0); + } + + /* DP/DM BC1.1 path Disable */ + tmp = readl(port_base + U3P_USBPHYACR6); + tmp &= ~PA6_RG_U2_BC11_SW_EN; + writel(tmp, port_base + U3P_USBPHYACR6); + + tmp = readl(port_base + U3P_U3PHYA_DA_REG0); + tmp &= ~P3A_RG_XTAL_EXT_EN_U3; + tmp |= P3A_RG_XTAL_EXT_EN_U3_VAL(2); + writel(tmp, port_base + U3P_U3PHYA_DA_REG0); + + tmp = readl(port_base + U3P_U3_PHYA_REG9); + tmp &= ~P3A_RG_RX_DAC_MUX; + tmp |= P3A_RG_RX_DAC_MUX_VAL(4); + writel(tmp, port_base + U3P_U3_PHYA_REG9); + + tmp = readl(port_base + U3P_U3_PHYA_REG6); + tmp &= ~P3A_RG_TX_EIDLE_CM; + tmp |= P3A_RG_TX_EIDLE_CM_VAL(0xe); + writel(tmp, port_base + U3P_U3_PHYA_REG6); + + tmp = readl(port_base + U3P_PHYD_CDR1); + tmp &= ~(P3D_RG_CDR_BIR_LTD0 | P3D_RG_CDR_BIR_LTD1); + tmp |= P3D_RG_CDR_BIR_LTD0_VAL(0xc) | P3D_RG_CDR_BIR_LTD1_VAL(0x3); + writel(tmp, port_base + U3P_PHYD_CDR1); + + dev_dbg(u3phy->dev, "%s(%d)\n", __func__, index); +} + +static void phy_instance_power_on(struct mt65xx_u3phy *u3phy, + struct mt65xx_phy_instance *instance) +{ + void __iomem *port_base = instance->port_base; + u32 index = instance->index; + u32 tmp; + + if (!index) { + /* Set RG_SSUSB_VUSB10_ON as 1 after VUSB10 ready */ + tmp = readl(port_base + U3P_U3_PHYA_REG0); + tmp |= P3A_RG_U3_VUSB10_ON; + writel(tmp, port_base + U3P_U3_PHYA_REG0); + } + + /* (force_suspendm=0) (let suspendm=1, enable usb 480MHz pll) */ + tmp = readl(port_base + U3P_U2PHYDTM0); + tmp &= ~(P2C_FORCE_SUSPENDM | P2C_RG_XCVRSEL); + tmp &= 
~(P2C_RG_DATAIN | P2C_DTM0_PART_MASK); + writel(tmp, port_base + U3P_U2PHYDTM0); + + /* OTG Enable */ + tmp = readl(port_base + U3P_USBPHYACR6); + tmp |= PA6_RG_U2_OTG_VBUSCMP_EN; + writel(tmp, port_base + U3P_USBPHYACR6); + + if (!index) { + tmp = readl(u3phy->sif_base + U3P_XTALCTL3); + tmp |= XC3_RG_U3_XTAL_RX_PWD | XC3_RG_U3_FRC_XTAL_RX_PWD; + writel(tmp, u3phy->sif_base + U3P_XTALCTL3); + + /* [mt8173]disable Change 100uA current from SSUSB */ + tmp = readl(port_base + U3P_USBPHYACR5); + tmp &= ~PA5_RG_U2_HS_100U_U3_EN; + writel(tmp, port_base + U3P_USBPHYACR5); + } + + tmp = readl(port_base + U3P_U2PHYDTM1); + tmp |= P2C_RG_VBUSVALID | P2C_RG_AVALID; + tmp &= ~P2C_RG_SESSEND; + writel(tmp, port_base + U3P_U2PHYDTM1); + + /* USB 2.0 slew rate calibration */ + tmp = readl(port_base + U3P_USBPHYACR5); + tmp &= ~PA5_RG_U2_HSTX_SRCTRL; + tmp |= PA5_RG_U2_HSTX_SRCTRL_VAL(4); + writel(tmp, port_base + U3P_USBPHYACR5); + + if (index) { + tmp = readl(port_base + U3D_U2PHYDCR0); + tmp |= P2C_RG_SIF_U2PLL_FORCE_ON; + writel(tmp, port_base + U3D_U2PHYDCR0); + + tmp = readl(port_base + U3P_U2PHYDTM0); + tmp |= P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM; + writel(tmp, port_base + U3P_U2PHYDTM0); + } + dev_dbg(u3phy->dev, "%s(%d)\n", __func__, index); +} + +static void phy_instance_power_off(struct mt65xx_u3phy *u3phy, + struct mt65xx_phy_instance *instance) +{ + void __iomem *port_base = instance->port_base; + u32 index = instance->index; + u32 tmp; + + tmp = readl(port_base + U3P_U2PHYDTM0); + tmp &= ~(P2C_RG_XCVRSEL | P2C_RG_DATAIN); + tmp |= P2C_FORCE_SUSPENDM; + writel(tmp, port_base + U3P_U2PHYDTM0); + + /* OTG Disable */ + tmp = readl(port_base + U3P_USBPHYACR6); + tmp &= ~PA6_RG_U2_OTG_VBUSCMP_EN; + writel(tmp, port_base + U3P_USBPHYACR6); + + if (!index) { + /* (also disable)Change 100uA current switch to USB2.0 */ + tmp = readl(port_base + U3P_USBPHYACR5); + tmp &= ~PA5_RG_U2_HS_100U_U3_EN; + writel(tmp, port_base + U3P_USBPHYACR5); + } + + /* let suspendm=0, set utmi into analog power down */ + tmp = readl(port_base + U3P_U2PHYDTM0); + tmp &= ~P2C_RG_SUSPENDM; + writel(tmp, port_base + U3P_U2PHYDTM0); + udelay(1); + + tmp = readl(port_base + U3P_U2PHYDTM1); + tmp &= ~(P2C_RG_VBUSVALID | P2C_RG_AVALID); + tmp |= P2C_RG_SESSEND; + writel(tmp, port_base + U3P_U2PHYDTM1); + + if (!index) { + tmp = readl(port_base + U3P_U3_PHYA_REG0); + tmp &= ~P3A_RG_U3_VUSB10_ON; + writel(tmp, port_base + U3P_U3_PHYA_REG0); + } else { + tmp = readl(port_base + U3D_U2PHYDCR0); + tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON; + writel(tmp, port_base + U3D_U2PHYDCR0); + } + + dev_dbg(u3phy->dev, "%s(%d)\n", __func__, index); +} + +static void phy_instance_exit(struct mt65xx_u3phy *u3phy, + struct mt65xx_phy_instance *instance) +{ + void __iomem *port_base = instance->port_base; + u32 index = instance->index; + u32 tmp; + + if (index) { + tmp = readl(port_base + U3D_U2PHYDCR0); + tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON; + writel(tmp, port_base + U3D_U2PHYDCR0); + + tmp = readl(port_base + U3P_U2PHYDTM0); + tmp &= ~P2C_FORCE_SUSPENDM; + writel(tmp, port_base + U3P_U2PHYDTM0); + } +} + +static int mt65xx_phy_init(struct phy *phy) +{ + struct mt65xx_phy_instance *instance = phy_get_drvdata(phy); + struct mt65xx_u3phy *u3phy = dev_get_drvdata(phy->dev.parent); + int ret; + + ret = clk_prepare_enable(u3phy->u3phya_ref); + if (ret) { + dev_err(u3phy->dev, "failed to enable u3phya_ref\n"); + return ret; + } + + phy_instance_init(u3phy, instance); + return 0; +} + +static int mt65xx_phy_power_on(struct phy *phy) +{ + struct 
mt65xx_phy_instance *instance = phy_get_drvdata(phy); + struct mt65xx_u3phy *u3phy = dev_get_drvdata(phy->dev.parent); + + phy_instance_power_on(u3phy, instance); + return 0; +} + +static int mt65xx_phy_power_off(struct phy *phy) +{ + struct mt65xx_phy_instance *instance = phy_get_drvdata(phy); + struct mt65xx_u3phy *u3phy = dev_get_drvdata(phy->dev.parent); + + phy_instance_power_off(u3phy, instance); + return 0; +} + +static int mt65xx_phy_exit(struct phy *phy) +{ + struct mt65xx_phy_instance *instance = phy_get_drvdata(phy); + struct mt65xx_u3phy *u3phy = dev_get_drvdata(phy->dev.parent); + + phy_instance_exit(u3phy, instance); + clk_disable_unprepare(u3phy->u3phya_ref); + return 0; +} + +static struct phy *mt65xx_phy_xlate(struct device *dev, + struct of_phandle_args *args) +{ + struct mt65xx_u3phy *u3phy = dev_get_drvdata(dev); + struct mt65xx_phy_instance *instance = NULL; + struct device_node *phy_np = args->np; + int index; + + + if (args->args_count != 1) { + dev_err(dev, "invalid number of cells in 'phy' property\n"); + return ERR_PTR(-EINVAL); + } + + for (index = 0; index < u3phy->nphys; index++) + if (phy_np == u3phy->phys[index]->phy->dev.of_node) { + instance = u3phy->phys[index]; + break; + } + + if (!instance) { + dev_err(dev, "failed to find appropriate phy\n"); + return ERR_PTR(-EINVAL); + } + + instance->type = args->args[0]; + + if (!(instance->type == PHY_TYPE_USB2 || + instance->type == PHY_TYPE_USB3)) { + dev_err(dev, "unsupported device type: %d\n", instance->type); + return ERR_PTR(-EINVAL); + } + + return instance->phy; +} + +static struct phy_ops mt65xx_u3phy_ops = { + .init = mt65xx_phy_init, + .exit = mt65xx_phy_exit, + .power_on = mt65xx_phy_power_on, + .power_off = mt65xx_phy_power_off, + .owner = THIS_MODULE, +}; + +static int mt65xx_u3phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct device_node *child_np; + struct phy_provider *provider; + struct resource *sif_res; + struct mt65xx_u3phy *u3phy; + struct resource res; + int port; + + u3phy = devm_kzalloc(dev, sizeof(*u3phy), GFP_KERNEL); + if (!u3phy) + return -ENOMEM; + + u3phy->nphys = of_get_child_count(np); + u3phy->phys = devm_kcalloc(dev, u3phy->nphys, + sizeof(*u3phy->phys), GFP_KERNEL); + if (!u3phy->phys) + return -ENOMEM; + + u3phy->dev = dev; + platform_set_drvdata(pdev, u3phy); + + sif_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + u3phy->sif_base = devm_ioremap_resource(dev, sif_res); + if (IS_ERR(u3phy->sif_base)) { + dev_err(dev, "failed to remap sif regs\n"); + return PTR_ERR(u3phy->sif_base); + } + + u3phy->u3phya_ref = devm_clk_get(dev, "u3phya_ref"); + if (IS_ERR(u3phy->u3phya_ref)) { + dev_err(dev, "error to get u3phya_ref\n"); + return PTR_ERR(u3phy->u3phya_ref); + } + + port = 0; + for_each_child_of_node(np, child_np) { + struct mt65xx_phy_instance *instance; + struct phy *phy; + int retval; + + instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL); + if (!instance) + return -ENOMEM; + + u3phy->phys[port] = instance; + + phy = devm_phy_create(dev, child_np, &mt65xx_u3phy_ops); + if (IS_ERR(phy)) { + dev_err(dev, "failed to create phy\n"); + return PTR_ERR(phy); + } + + retval = of_address_to_resource(child_np, 0, &res); + if (retval) { + dev_err(dev, "failed to get address resource(id-%d)\n", + port); + return retval; + } + + instance->port_base = devm_ioremap_resource(&phy->dev, &res); + if (IS_ERR(instance->port_base)) { + dev_err(dev, "failed to remap phy regs\n"); + return 
PTR_ERR(instance->port_base); + } + + instance->phy = phy; + instance->index = port; + phy_set_drvdata(phy, instance); + port++; + } + + provider = devm_of_phy_provider_register(dev, mt65xx_phy_xlate); + + return PTR_ERR_OR_ZERO(provider); +} + +static const struct of_device_id mt65xx_u3phy_id_table[] = { + { .compatible = "mediatek,mt8173-u3phy", }, + { }, +}; +MODULE_DEVICE_TABLE(of, mt65xx_u3phy_id_table); + +static struct platform_driver mt65xx_u3phy_driver = { + .probe = mt65xx_u3phy_probe, + .driver = { + .name = "mt65xx-u3phy", + .of_match_table = mt65xx_u3phy_id_table, + }, +}; + +module_platform_driver(mt65xx_u3phy_driver); + +MODULE_AUTHOR("Chunfeng Yun <chunfeng.yun@mediatek.com>"); +MODULE_DESCRIPTION("mt65xx USB PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c index 49a1ed0cef56..107cb57c3513 100644 --- a/drivers/phy/phy-qcom-ufs.c +++ b/drivers/phy/phy-qcom-ufs.c @@ -432,6 +432,7 @@ out_disable_src: out: return ret; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk); static int ufs_qcom_phy_disable_vreg(struct phy *phy, @@ -474,6 +475,7 @@ void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy) phy->is_ref_clk_enabled = false; } } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk); #define UFS_REF_CLK_EN (1 << 5) @@ -517,11 +519,13 @@ void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy) { ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true); } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk); void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy) { ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false); } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk); /* Turn ON M-PHY RMMI interface clocks */ int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy) @@ -550,6 +554,7 @@ int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy) out: return ret; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk); /* Turn OFF M-PHY RMMI interface clocks */ void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy) @@ -562,6 +567,7 @@ void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy) phy->is_iface_clk_enabled = false; } } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk); int ufs_qcom_phy_start_serdes(struct phy *generic_phy) { @@ -578,6 +584,7 @@ int ufs_qcom_phy_start_serdes(struct phy *generic_phy) return ret; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes); int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes) { @@ -595,6 +602,7 @@ int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes) return ret; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable); void ufs_qcom_phy_save_controller_version(struct phy *generic_phy, u8 major, u16 minor, u16 step) @@ -605,6 +613,7 @@ void ufs_qcom_phy_save_controller_version(struct phy *generic_phy, ufs_qcom_phy->host_ctrl_rev_minor = minor; ufs_qcom_phy->host_ctrl_rev_step = step; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version); int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B) { @@ -625,6 +634,7 @@ int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B) return ret; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy); int ufs_qcom_phy_remove(struct phy *generic_phy, struct ufs_qcom_phy *ufs_qcom_phy) @@ -662,6 +672,7 @@ int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy) return ufs_qcom_phy->phy_spec_ops-> is_physical_coding_sublayer_ready(ufs_qcom_phy); } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready); int ufs_qcom_phy_power_on(struct phy *generic_phy) { diff --git 
a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c index 5a5c073e72fe..91d6f342c565 100644 --- a/drivers/phy/phy-rockchip-usb.c +++ b/drivers/phy/phy-rockchip-usb.c @@ -98,6 +98,7 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev) struct device_node *child; struct regmap *grf; unsigned int reg_offset; + int err; grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); if (IS_ERR(grf)) { @@ -129,6 +130,11 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev) return PTR_ERR(rk_phy->phy); } phy_set_drvdata(rk_phy->phy, rk_phy); + + /* only power up usb phy when it use, so disable it when init*/ + err = rockchip_usb_phy_power(rk_phy, 1); + if (err) + return err; } phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); diff --git a/drivers/phy/phy-samsung-usb2.c b/drivers/phy/phy-samsung-usb2.c index f278a9c547e1..1d22d93b552d 100644 --- a/drivers/phy/phy-samsung-usb2.c +++ b/drivers/phy/phy-samsung-usb2.c @@ -27,6 +27,13 @@ static int samsung_usb2_phy_power_on(struct phy *phy) dev_dbg(drv->dev, "Request to power_on \"%s\" usb phy\n", inst->cfg->label); + + if (drv->vbus) { + ret = regulator_enable(drv->vbus); + if (ret) + goto err_regulator; + } + ret = clk_prepare_enable(drv->clk); if (ret) goto err_main_clk; @@ -48,6 +55,9 @@ err_power_on: err_instance_clk: clk_disable_unprepare(drv->clk); err_main_clk: + if (drv->vbus) + regulator_disable(drv->vbus); +err_regulator: return ret; } @@ -55,7 +65,7 @@ static int samsung_usb2_phy_power_off(struct phy *phy) { struct samsung_usb2_phy_instance *inst = phy_get_drvdata(phy); struct samsung_usb2_phy_driver *drv = inst->drv; - int ret; + int ret = 0; dev_dbg(drv->dev, "Request to power_off \"%s\" usb phy\n", inst->cfg->label); @@ -68,7 +78,10 @@ static int samsung_usb2_phy_power_off(struct phy *phy) } clk_disable_unprepare(drv->ref_clk); clk_disable_unprepare(drv->clk); - return 0; + if (drv->vbus) + ret = regulator_disable(drv->vbus); + + return ret; } static const struct phy_ops samsung_usb2_phy_ops = { @@ -203,6 +216,14 @@ static int samsung_usb2_phy_probe(struct platform_device *pdev) return ret; } + drv->vbus = devm_regulator_get(dev, "vbus"); + if (IS_ERR(drv->vbus)) { + ret = PTR_ERR(drv->vbus); + if (ret == -EPROBE_DEFER) + return ret; + drv->vbus = NULL; + } + for (i = 0; i < drv->cfg->num_phys; i++) { char *label = drv->cfg->phys[i].label; struct samsung_usb2_phy_instance *p = &drv->instances[i]; diff --git a/drivers/phy/phy-samsung-usb2.h b/drivers/phy/phy-samsung-usb2.h index 44bead9b8f34..6563e7ca0ac4 100644 --- a/drivers/phy/phy-samsung-usb2.h +++ b/drivers/phy/phy-samsung-usb2.h @@ -17,6 +17,7 @@ #include <linux/device.h> #include <linux/regmap.h> #include <linux/spinlock.h> +#include <linux/regulator/consumer.h> #define KHZ 1000 #define MHZ (KHZ * KHZ) @@ -37,6 +38,7 @@ struct samsung_usb2_phy_driver { const struct samsung_usb2_phy_config *cfg; struct clk *clk; struct clk *ref_clk; + struct regulator *vbus; unsigned long ref_rate; u32 ref_reg_val; struct device *dev; diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c index 731b395d6e6a..b12964b70625 100644 --- a/drivers/phy/phy-sun4i-usb.c +++ b/drivers/phy/phy-sun4i-usb.c @@ -551,19 +551,15 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev) if (IS_ERR(data->base)) return PTR_ERR(data->base); - data->id_det_gpio = devm_gpiod_get(dev, "usb0_id_det", GPIOD_IN); - if (IS_ERR(data->id_det_gpio)) { - if (PTR_ERR(data->id_det_gpio) == -EPROBE_DEFER) - return -EPROBE_DEFER; - 
data->id_det_gpio = NULL; - } - - data->vbus_det_gpio = devm_gpiod_get(dev, "usb0_vbus_det", GPIOD_IN); - if (IS_ERR(data->vbus_det_gpio)) { - if (PTR_ERR(data->vbus_det_gpio) == -EPROBE_DEFER) - return -EPROBE_DEFER; - data->vbus_det_gpio = NULL; - } + data->id_det_gpio = devm_gpiod_get_optional(dev, "usb0_id_det", + GPIOD_IN); + if (IS_ERR(data->id_det_gpio)) + return PTR_ERR(data->id_det_gpio); + + data->vbus_det_gpio = devm_gpiod_get_optional(dev, "usb0_vbus_det", + GPIOD_IN); + if (IS_ERR(data->vbus_det_gpio)) + return PTR_ERR(data->vbus_det_gpio); if (of_find_property(np, "usb0_vbus_power-supply", NULL)) { data->vbus_power_supply = devm_power_supply_get_by_phandle(dev, diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 01bf3476a791..a9567af7cec0 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -192,9 +192,9 @@ static const struct regulator_desc axp22x_regulators[] = { AXP_DESC(AXP22X, DCDC3, "dcdc3", "vin3", 600, 1860, 20, AXP22X_DCDC3_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)), AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20, - AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)), + AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(4)), AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50, - AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(4)), + AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)), /* secondary switchable output of DCDC1 */ AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", "dcdc1", 1600, 3400, 100, AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(7)), diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 7849187d91ae..8a34f6acc801 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1403,6 +1403,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) return 0; } + /* Did the lookup explicitly defer for us? */ + if (ret == -EPROBE_DEFER) + return ret; + if (have_full_constraints()) { r = dummy_regulator_rdev; } else { diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index add419d6ff34..a56a7b243e91 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -212,6 +212,17 @@ static const struct file_operations twa_fops = { .llseek = noop_llseek, }; +/* + * The controllers use an inline buffer instead of a mapped SGL for small, + * single entry buffers. Note that we treat a zero-length transfer like + * a mapped SGL. 
+ */ +static bool twa_command_mapped(struct scsi_cmnd *cmd) +{ + return scsi_sg_count(cmd) != 1 || + scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH; +} + /* This function will complete an aen request from the isr */ static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id) { @@ -1339,7 +1350,8 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance) } /* Now complete the io */ - scsi_dma_unmap(cmd); + if (twa_command_mapped(cmd)) + scsi_dma_unmap(cmd); cmd->scsi_done(cmd); tw_dev->state[request_id] = TW_S_COMPLETED; twa_free_request_id(tw_dev, request_id); @@ -1582,7 +1594,8 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev) struct scsi_cmnd *cmd = tw_dev->srb[i]; cmd->result = (DID_RESET << 16); - scsi_dma_unmap(cmd); + if (twa_command_mapped(cmd)) + scsi_dma_unmap(cmd); cmd->scsi_done(cmd); } } @@ -1765,12 +1778,14 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); switch (retval) { case SCSI_MLQUEUE_HOST_BUSY: - scsi_dma_unmap(SCpnt); + if (twa_command_mapped(SCpnt)) + scsi_dma_unmap(SCpnt); twa_free_request_id(tw_dev, request_id); break; case 1: SCpnt->result = (DID_ERROR << 16); - scsi_dma_unmap(SCpnt); + if (twa_command_mapped(SCpnt)) + scsi_dma_unmap(SCpnt); done(SCpnt); tw_dev->state[request_id] = TW_S_COMPLETED; twa_free_request_id(tw_dev, request_id); @@ -1831,8 +1846,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, /* Map sglist from scsi layer to cmd packet */ if (scsi_sg_count(srb)) { - if ((scsi_sg_count(srb) == 1) && - (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) { + if (!twa_command_mapped(srb)) { if (srb->sc_data_direction == DMA_TO_DEVICE || srb->sc_data_direction == DMA_BIDIRECTIONAL) scsi_sg_copy_to_buffer(srb, @@ -1905,7 +1919,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re { struct scsi_cmnd *cmd = tw_dev->srb[request_id]; - if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH && + if (!twa_command_mapped(cmd) && (cmd->sc_data_direction == DMA_FROM_DEVICE || cmd->sc_data_direction == DMA_BIDIRECTIONAL)) { if (scsi_sg_count(cmd) == 1) { diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 33c74d3436c9..6bffd91b973a 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -976,13 +976,13 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) wake_up(&conn->ehwait); } -static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) +static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) { struct iscsi_nopout hdr; struct iscsi_task *task; if (!rhdr && conn->ping_task) - return; + return -EINVAL; memset(&hdr, 0, sizeof(struct iscsi_nopout)); hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE; @@ -996,13 +996,16 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) hdr.ttt = RESERVED_ITT; task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); - if (!task) + if (!task) { iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); - else if (!rhdr) { + return -EIO; + } else if (!rhdr) { /* only track our nops */ conn->ping_task = task; conn->last_ping = jiffies; } + + return 0; } static int iscsi_nop_out_rsp(struct iscsi_task *task, @@ -2092,8 +2095,10 @@ static void iscsi_check_transport_timeouts(unsigned long data) if (time_before_eq(last_recv + recv_timeout, jiffies)) { /* send a ping to try to provoke some traffic */ ISCSI_DBG_CONN(conn, 
"Sending nopout as ping\n"); - iscsi_send_nopout(conn, NULL); - next_timeout = conn->last_ping + (conn->ping_timeout * HZ); + if (iscsi_send_nopout(conn, NULL)) + next_timeout = jiffies + (1 * HZ); + else + next_timeout = conn->last_ping + (conn->ping_timeout * HZ); } else next_timeout = last_recv + recv_timeout; diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index edb044a7b56d..0a2168e69bbc 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -111,7 +111,7 @@ static struct scsi_device_handler *scsi_dh_lookup(const char *name) dh = __scsi_dh_lookup(name); if (!dh) { - request_module(name); + request_module("scsi_dh_%s", name); dh = __scsi_dh_lookup(name); } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index cbfc5990052b..126a48c6431e 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1957,7 +1957,7 @@ static int scsi_mq_prep_fn(struct request *req) static void scsi_mq_done(struct scsi_cmnd *cmd) { trace_scsi_dispatch_cmd_done(cmd); - blk_mq_complete_request(cmd->request); + blk_mq_complete_request(cmd->request, cmd->request->errors); } static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 3cf9faa6cc3f..a85d863d4a44 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -992,11 +992,12 @@ static int davinci_spi_probe(struct platform_device *pdev) goto free_master; } - dspi->irq = platform_get_irq(pdev, 0); - if (dspi->irq <= 0) { + ret = platform_get_irq(pdev, 0); + if (ret == 0) ret = -EINVAL; + if (ret < 0) goto free_master; - } + dspi->irq = ret; ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq, dummy_thread_fn, 0, dev_name(&pdev->dev), dspi); diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c index 4299cf45f947..5e1f16c36b49 100644 --- a/drivers/staging/speakup/fakekey.c +++ b/drivers/staging/speakup/fakekey.c @@ -81,6 +81,7 @@ void speakup_fake_down_arrow(void) __this_cpu_write(reporting_keystroke, true); input_report_key(virt_keyboard, KEY_DOWN, PRESSED); input_report_key(virt_keyboard, KEY_DOWN, RELEASED); + input_sync(virt_keyboard); __this_cpu_write(reporting_keystroke, false); /* reenable preemption */ diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c index 7ff96270c933..e570ff084add 100644 --- a/drivers/thermal/power_allocator.c +++ b/drivers/thermal/power_allocator.c @@ -144,6 +144,16 @@ static void estimate_pid_constants(struct thermal_zone_device *tz, switch_on_temp = 0; temperature_threshold = control_temp - switch_on_temp; + /* + * estimate_pid_constants() tries to find appropriate default + * values for thermal zones that don't provide them. If a + * system integrator has configured a thermal zone with two + * passive trip points at the same temperature, that person + * hasn't put any effort to set up the thermal zone properly + * so just give up. 
+ */ + if (!temperature_threshold) + return; if (!tz->tzp->k_po || force) tz->tzp->k_po = int_to_frac(sustainable_power) / diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 20932cc9c8f7..b09023b07169 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -343,8 +343,7 @@ static void n_tty_packet_mode_flush(struct tty_struct *tty) spin_lock_irqsave(&tty->ctrl_lock, flags); tty->ctrl_status |= TIOCPKT_FLUSHREAD; spin_unlock_irqrestore(&tty->ctrl_lock, flags); - if (waitqueue_active(&tty->link->read_wait)) - wake_up_interruptible(&tty->link->read_wait); + wake_up_interruptible(&tty->link->read_wait); } } @@ -1382,8 +1381,7 @@ handle_newline: put_tty_queue(c, ldata); smp_store_release(&ldata->canon_head, ldata->read_head); kill_fasync(&tty->fasync, SIGIO, POLL_IN); - if (waitqueue_active(&tty->read_wait)) - wake_up_interruptible_poll(&tty->read_wait, POLLIN); + wake_up_interruptible_poll(&tty->read_wait, POLLIN); return 0; } } @@ -1667,8 +1665,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp, if ((read_cnt(ldata) >= ldata->minimum_to_wake) || L_EXTPROC(tty)) { kill_fasync(&tty->fasync, SIGIO, POLL_IN); - if (waitqueue_active(&tty->read_wait)) - wake_up_interruptible_poll(&tty->read_wait, POLLIN); + wake_up_interruptible_poll(&tty->read_wait, POLLIN); } } @@ -1887,10 +1884,8 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) } /* The termios change make the tty ready for I/O */ - if (waitqueue_active(&tty->write_wait)) - wake_up_interruptible(&tty->write_wait); - if (waitqueue_active(&tty->read_wait)) - wake_up_interruptible(&tty->read_wait); + wake_up_interruptible(&tty->write_wait); + wake_up_interruptible(&tty->read_wait); } /** diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index b1e0ba3e525b..0bbf34035d6a 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -261,6 +261,14 @@ configured less than Maximum supported fifo bytes */ UART_FCR7_64BYTE, .flags = UART_CAP_FIFO, }, + [PORT_RT2880] = { + .name = "Palmchip BK-3103", + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .rxtrig_bytes = {1, 4, 8, 14}, + .flags = UART_CAP_FIFO, + }, }; /* Uart divisor latch read */ diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 5ca5cf3e9359..538ea03bc101 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -2786,7 +2786,7 @@ static int atmel_serial_probe(struct platform_device *pdev) ret = atmel_init_gpios(port, &pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "Failed to initialize GPIOs."); - goto err; + goto err_clear_bit; } ret = atmel_init_port(port, pdev); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index fe3d41cc8416..d0388a071ba1 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -1631,12 +1631,12 @@ imx_console_write(struct console *co, const char *s, unsigned int count) int locked = 1; int retval; - retval = clk_prepare_enable(sport->clk_per); + retval = clk_enable(sport->clk_per); if (retval) return; - retval = clk_prepare_enable(sport->clk_ipg); + retval = clk_enable(sport->clk_ipg); if (retval) { - clk_disable_unprepare(sport->clk_per); + clk_disable(sport->clk_per); return; } @@ -1675,8 +1675,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count) if (locked) spin_unlock_irqrestore(&sport->port.lock, flags); - clk_disable_unprepare(sport->clk_ipg); - 
clk_disable_unprepare(sport->clk_per); + clk_disable(sport->clk_ipg); + clk_disable(sport->clk_per); } /* @@ -1777,7 +1777,15 @@ imx_console_setup(struct console *co, char *options) retval = uart_set_options(&sport->port, co, baud, parity, bits, flow); - clk_disable_unprepare(sport->clk_ipg); + clk_disable(sport->clk_ipg); + if (retval) { + clk_unprepare(sport->clk_ipg); + goto error_console; + } + + retval = clk_prepare(sport->clk_per); + if (retval) + clk_disable_unprepare(sport->clk_ipg); error_console: return retval; diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index 5a3fa8913880..a660ab181cca 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -242,7 +242,10 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld) atomic_inc(&buf->priority); mutex_lock(&buf->lock); - while ((next = buf->head->next) != NULL) { + /* paired w/ release in __tty_buffer_request_room; ensures there are + * no pending memory accesses to the freed buffer + */ + while ((next = smp_load_acquire(&buf->head->next)) != NULL) { tty_buffer_free(port, buf->head); buf->head = next; } @@ -290,7 +293,10 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size, if (n != NULL) { n->flags = flags; buf->tail = n; - b->commit = b->used; + /* paired w/ acquire in flush_to_ldisc(); ensures + * flush_to_ldisc() sees buffer data. + */ + smp_store_release(&b->commit, b->used); /* paired w/ acquire in flush_to_ldisc(); ensures the * latest commit value can be read before the head is * advanced to the next buffer @@ -393,7 +399,10 @@ void tty_schedule_flip(struct tty_port *port) { struct tty_bufhead *buf = &port->buf; - buf->tail->commit = buf->tail->used; + /* paired w/ acquire in flush_to_ldisc(); ensures + * flush_to_ldisc() sees buffer data. + */ + smp_store_release(&buf->tail->commit, buf->tail->used); schedule_work(&buf->work); } EXPORT_SYMBOL(tty_schedule_flip); @@ -467,7 +476,7 @@ static void flush_to_ldisc(struct work_struct *work) struct tty_struct *tty; struct tty_ldisc *disc; - tty = port->itty; + tty = READ_ONCE(port->itty); if (tty == NULL) return; @@ -491,7 +500,10 @@ static void flush_to_ldisc(struct work_struct *work) * is advancing to the next buffer */ next = smp_load_acquire(&head->next); - count = head->commit - head->read; + /* paired w/ release in __tty_buffer_request_room() or in + * tty_buffer_flush(); ensures we see the committed buffer data + */ + count = smp_load_acquire(&head->commit) - head->read; if (!count) { if (next == NULL) { check_other_closed(tty); diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 02785d844354..2eefaa6e3e3a 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -2128,8 +2128,24 @@ retry_open: if (!noctty && current->signal->leader && !current->signal->tty && - tty->session == NULL) - __proc_set_tty(tty); + tty->session == NULL) { + /* + * Don't let a process that only has write access to the tty + * obtain the privileges associated with having a tty as + * controlling terminal (being able to reopen it with full + * access through /dev/tty, being able to perform pushback). + * Many distributions set the group of all ttys to "tty" and + * grant write-only access to all terminals for setgid tty + * binaries, which should not imply full privileges on all ttys. + * + * This could theoretically break old code that performs open() + * on a write-only file descriptor. In that case, it might be + * necessary to also permit this if + * inode_permission(inode, MAY_READ) == 0. 
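
The tty_buffer.c hunks above pair smp_store_release() on the commit index with smp_load_acquire() in flush_to_ldisc(), so a consumer that observes the new commit value is also guaranteed to see the buffer data written before it. A minimal sketch of that producer/consumer pairing; the demo_* names are illustrative, not the tty code's own:

#include <linux/compiler.h>
#include <linux/string.h>
#include <asm/barrier.h>	/* smp_store_release() / smp_load_acquire() */

struct demo_flip_buf {
	char	data[256];
	size_t	commit;		/* published length, read by the consumer */
};

/* producer: write the payload, then publish its length with release semantics */
static void demo_commit(struct demo_flip_buf *b, const char *src, size_t n)
{
	memcpy(b->data, src, n);		/* ordinary stores ... */
	smp_store_release(&b->commit, n);	/* ... ordered before the publish */
}

/* consumer: the acquire load orders the payload reads after the length read */
static size_t demo_consume(struct demo_flip_buf *b, char *dst)
{
	size_t n = smp_load_acquire(&b->commit);

	memcpy(dst, b->data, n);	/* sees everything written before the commit */
	return n;
}
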
+ */ + if (filp->f_mode & FMODE_READ) + __proc_set_tty(tty); + } spin_unlock_irq(¤t->sighand->siglock); read_unlock(&tasklist_lock); tty_unlock(tty); @@ -2418,7 +2434,7 @@ static int fionbio(struct file *file, int __user *p) * Takes ->siglock() when updating signal->tty */ -static int tiocsctty(struct tty_struct *tty, int arg) +static int tiocsctty(struct tty_struct *tty, struct file *file, int arg) { int ret = 0; @@ -2452,6 +2468,13 @@ static int tiocsctty(struct tty_struct *tty, int arg) goto unlock; } } + + /* See the comment in tty_open(). */ + if ((file->f_mode & FMODE_READ) == 0 && !capable(CAP_SYS_ADMIN)) { + ret = -EPERM; + goto unlock; + } + proc_set_tty(tty); unlock: read_unlock(&tasklist_lock); @@ -2844,7 +2867,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) no_tty(); return 0; case TIOCSCTTY: - return tiocsctty(tty, arg); + return tiocsctty(tty, file, arg); case TIOCGPGRP: return tiocgpgrp(tty, real_tty, p); case TIOCSPGRP: @@ -3151,13 +3174,18 @@ struct class *tty_class; static int tty_cdev_add(struct tty_driver *driver, dev_t dev, unsigned int index, unsigned int count) { + int err; + /* init here, since reused cdevs cause crashes */ driver->cdevs[index] = cdev_alloc(); if (!driver->cdevs[index]) return -ENOMEM; - cdev_init(driver->cdevs[index], &tty_fops); + driver->cdevs[index]->ops = &tty_fops; driver->cdevs[index]->owner = driver->owner; - return cdev_add(driver->cdevs[index], dev, count); + err = cdev_add(driver->cdevs[index], dev, count); + if (err) + kobject_put(&driver->cdevs[index]->kobj); + return err; } /** diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index d8926c6cd2a8..d5c57f1e98fd 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile @@ -27,7 +27,6 @@ obj-$(CONFIG_USB_R8A66597_HCD) += host/ obj-$(CONFIG_USB_HWA_HCD) += host/ obj-$(CONFIG_USB_IMX21_HCD) += host/ obj-$(CONFIG_USB_FSL_MPH_DR_OF) += host/ -obj-$(CONFIG_USB_FUSBH200_HCD) += host/ obj-$(CONFIG_USB_FOTG210_HCD) += host/ obj-$(CONFIG_USB_MAX3421_HCD) += host/ diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index b9ddf0c1ffe5..7caff020106e 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -853,6 +853,10 @@ int usb_get_bos_descriptor(struct usb_device *dev) dev->bos->ss_cap = (struct usb_ss_cap_descriptor *)buffer; break; + case USB_SSP_CAP_TYPE: + dev->bos->ssp_cap = + (struct usb_ssp_cap_descriptor *)buffer; + break; case CONTAINER_ID_TYPE: dev->bos->ss_id = (struct usb_ss_container_id_descriptor *)buffer; diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 6b5063e7943f..56593a9a8726 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -296,6 +296,10 @@ static int usb_probe_interface(struct device *dev) if (udev->authorized == 0) { dev_err(&intf->dev, "Device is not authorized for usage\n"); return error; + } else if (intf->authorized == 0) { + dev_err(&intf->dev, "Interface %d is not authorized for usage\n", + intf->altsetting->desc.bInterfaceNumber); + return error; } id = usb_match_dynamic_id(intf, driver); @@ -417,12 +421,10 @@ static int usb_unbind_interface(struct device *dev) if (ep->streams == 0) continue; if (j == 0) { - eps = kmalloc(USB_MAXENDPOINTS * sizeof(void *), + eps = kmalloc_array(USB_MAXENDPOINTS, sizeof(void *), GFP_KERNEL); - if (!eps) { - dev_warn(dev, "oom, leaking streams\n"); + if (!eps) break; - } } eps[j++] = ep; } @@ -508,6 +510,10 @@ int usb_driver_claim_interface(struct usb_driver *driver, if (dev->driver) return -EBUSY; + /* reject claim if 
interface is not authorized */ + if (!iface->authorized) + return -ENODEV; + udev = interface_to_usbdev(iface); dev->driver = &driver->drvwrap.driver; diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 4d64e5c499e1..1c102d60cd9f 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -131,7 +131,7 @@ static inline int is_root_hub(struct usb_device *udev) /* usb 3.0 root hub device descriptor */ static const u8 usb3_rh_dev_descriptor[18] = { 0x12, /* __u8 bLength; */ - 0x01, /* __u8 bDescriptorType; Device */ + USB_DT_DEVICE, /* __u8 bDescriptorType; Device */ 0x00, 0x03, /* __le16 bcdUSB; v3.0 */ 0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */ @@ -152,7 +152,7 @@ static const u8 usb3_rh_dev_descriptor[18] = { /* usb 2.5 (wireless USB 1.0) root hub device descriptor */ static const u8 usb25_rh_dev_descriptor[18] = { 0x12, /* __u8 bLength; */ - 0x01, /* __u8 bDescriptorType; Device */ + USB_DT_DEVICE, /* __u8 bDescriptorType; Device */ 0x50, 0x02, /* __le16 bcdUSB; v2.5 */ 0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */ @@ -173,7 +173,7 @@ static const u8 usb25_rh_dev_descriptor[18] = { /* usb 2.0 root hub device descriptor */ static const u8 usb2_rh_dev_descriptor[18] = { 0x12, /* __u8 bLength; */ - 0x01, /* __u8 bDescriptorType; Device */ + USB_DT_DEVICE, /* __u8 bDescriptorType; Device */ 0x00, 0x02, /* __le16 bcdUSB; v2.0 */ 0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */ @@ -196,7 +196,7 @@ static const u8 usb2_rh_dev_descriptor[18] = { /* usb 1.1 root hub device descriptor */ static const u8 usb11_rh_dev_descriptor[18] = { 0x12, /* __u8 bLength; */ - 0x01, /* __u8 bDescriptorType; Device */ + USB_DT_DEVICE, /* __u8 bDescriptorType; Device */ 0x10, 0x01, /* __le16 bcdUSB; v1.1 */ 0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */ @@ -223,7 +223,7 @@ static const u8 fs_rh_config_descriptor[] = { /* one configuration */ 0x09, /* __u8 bLength; */ - 0x02, /* __u8 bDescriptorType; Configuration */ + USB_DT_CONFIG, /* __u8 bDescriptorType; Configuration */ 0x19, 0x00, /* __le16 wTotalLength; */ 0x01, /* __u8 bNumInterfaces; (1) */ 0x01, /* __u8 bConfigurationValue; */ @@ -248,7 +248,7 @@ static const u8 fs_rh_config_descriptor[] = { /* one interface */ 0x09, /* __u8 if_bLength; */ - 0x04, /* __u8 if_bDescriptorType; Interface */ + USB_DT_INTERFACE, /* __u8 if_bDescriptorType; Interface */ 0x00, /* __u8 if_bInterfaceNumber; */ 0x00, /* __u8 if_bAlternateSetting; */ 0x01, /* __u8 if_bNumEndpoints; */ @@ -259,7 +259,7 @@ static const u8 fs_rh_config_descriptor[] = { /* one endpoint (status change endpoint) */ 0x07, /* __u8 ep_bLength; */ - 0x05, /* __u8 ep_bDescriptorType; Endpoint */ + USB_DT_ENDPOINT, /* __u8 ep_bDescriptorType; Endpoint */ 0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */ 0x03, /* __u8 ep_bmAttributes; Interrupt */ 0x02, 0x00, /* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8) */ @@ -270,7 +270,7 @@ static const u8 hs_rh_config_descriptor[] = { /* one configuration */ 0x09, /* __u8 bLength; */ - 0x02, /* __u8 bDescriptorType; Configuration */ + USB_DT_CONFIG, /* __u8 bDescriptorType; Configuration */ 0x19, 0x00, /* __le16 wTotalLength; */ 0x01, /* __u8 bNumInterfaces; (1) */ 0x01, /* __u8 bConfigurationValue; */ @@ -295,7 +295,7 @@ static const u8 hs_rh_config_descriptor[] = { /* one interface */ 0x09, /* __u8 if_bLength; */ - 0x04, /* __u8 if_bDescriptorType; Interface */ + USB_DT_INTERFACE, /* __u8 if_bDescriptorType; Interface */ 0x00, /* __u8 if_bInterfaceNumber; */ 0x00, /* __u8 if_bAlternateSetting; */ 0x01, /* __u8 if_bNumEndpoints; */ @@ -306,7 
+306,7 @@ static const u8 hs_rh_config_descriptor[] = { /* one endpoint (status change endpoint) */ 0x07, /* __u8 ep_bLength; */ - 0x05, /* __u8 ep_bDescriptorType; Endpoint */ + USB_DT_ENDPOINT, /* __u8 ep_bDescriptorType; Endpoint */ 0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */ 0x03, /* __u8 ep_bmAttributes; Interrupt */ /* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8) @@ -318,7 +318,7 @@ static const u8 hs_rh_config_descriptor[] = { static const u8 ss_rh_config_descriptor[] = { /* one configuration */ 0x09, /* __u8 bLength; */ - 0x02, /* __u8 bDescriptorType; Configuration */ + USB_DT_CONFIG, /* __u8 bDescriptorType; Configuration */ 0x1f, 0x00, /* __le16 wTotalLength; */ 0x01, /* __u8 bNumInterfaces; (1) */ 0x01, /* __u8 bConfigurationValue; */ @@ -332,7 +332,7 @@ static const u8 ss_rh_config_descriptor[] = { /* one interface */ 0x09, /* __u8 if_bLength; */ - 0x04, /* __u8 if_bDescriptorType; Interface */ + USB_DT_INTERFACE, /* __u8 if_bDescriptorType; Interface */ 0x00, /* __u8 if_bInterfaceNumber; */ 0x00, /* __u8 if_bAlternateSetting; */ 0x01, /* __u8 if_bNumEndpoints; */ @@ -343,7 +343,7 @@ static const u8 ss_rh_config_descriptor[] = { /* one endpoint (status change endpoint) */ 0x07, /* __u8 ep_bLength; */ - 0x05, /* __u8 ep_bDescriptorType; Endpoint */ + USB_DT_ENDPOINT, /* __u8 ep_bDescriptorType; Endpoint */ 0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */ 0x03, /* __u8 ep_bmAttributes; Interrupt */ /* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8) @@ -353,7 +353,8 @@ static const u8 ss_rh_config_descriptor[] = { /* one SuperSpeed endpoint companion descriptor */ 0x06, /* __u8 ss_bLength */ - 0x30, /* __u8 ss_bDescriptorType; SuperSpeed EP Companion */ + USB_DT_SS_ENDPOINT_COMP, /* __u8 ss_bDescriptorType; SuperSpeed EP */ + /* Companion */ 0x00, /* __u8 ss_bMaxBurst; allows 1 TX between ACKs */ 0x00, /* __u8 ss_bmAttributes; 1 packet per service interval */ 0x02, 0x00 /* __le16 ss_wBytesPerInterval; 15 bits for max 15 ports */ @@ -555,6 +556,7 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb) switch (wValue & 0xff00) { case USB_DT_DEVICE << 8: switch (hcd->speed) { + case HCD_USB31: case HCD_USB3: bufp = usb3_rh_dev_descriptor; break; @@ -576,6 +578,7 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb) break; case USB_DT_CONFIG << 8: switch (hcd->speed) { + case HCD_USB31: case HCD_USB3: bufp = ss_rh_config_descriptor; len = sizeof ss_rh_config_descriptor; @@ -854,10 +857,10 @@ static ssize_t authorized_default_show(struct device *dev, { struct usb_device *rh_usb_dev = to_usb_device(dev); struct usb_bus *usb_bus = rh_usb_dev->bus; - struct usb_hcd *usb_hcd; + struct usb_hcd *hcd; - usb_hcd = bus_to_hcd(usb_bus); - return snprintf(buf, PAGE_SIZE, "%u\n", usb_hcd->authorized_default); + hcd = bus_to_hcd(usb_bus); + return snprintf(buf, PAGE_SIZE, "%u\n", !!HCD_DEV_AUTHORIZED(hcd)); } static ssize_t authorized_default_store(struct device *dev, @@ -868,12 +871,16 @@ static ssize_t authorized_default_store(struct device *dev, unsigned val; struct usb_device *rh_usb_dev = to_usb_device(dev); struct usb_bus *usb_bus = rh_usb_dev->bus; - struct usb_hcd *usb_hcd; + struct usb_hcd *hcd; - usb_hcd = bus_to_hcd(usb_bus); + hcd = bus_to_hcd(usb_bus); result = sscanf(buf, "%u\n", &val); if (result == 1) { - usb_hcd->authorized_default = val ? 
1 : 0; + if (val) + set_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags); + else + clear_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags); + result = size; } else { result = -EINVAL; @@ -882,9 +889,53 @@ static ssize_t authorized_default_store(struct device *dev, } static DEVICE_ATTR_RW(authorized_default); +/* + * interface_authorized_default_show - show default authorization status + * for USB interfaces + * + * note: interface_authorized_default is the default value + * for initializing the authorized attribute of interfaces + */ +static ssize_t interface_authorized_default_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb_device *usb_dev = to_usb_device(dev); + struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus); + + return sprintf(buf, "%u\n", !!HCD_INTF_AUTHORIZED(hcd)); +} + +/* + * interface_authorized_default_store - store default authorization status + * for USB interfaces + * + * note: interface_authorized_default is the default value + * for initializing the authorized attribute of interfaces + */ +static ssize_t interface_authorized_default_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct usb_device *usb_dev = to_usb_device(dev); + struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus); + int rc = count; + bool val; + + if (strtobool(buf, &val) != 0) + return -EINVAL; + + if (val) + set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags); + else + clear_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags); + + return rc; +} +static DEVICE_ATTR_RW(interface_authorized_default); + /* Group all the USB bus attributes */ static struct attribute *usb_bus_attrs[] = { &dev_attr_authorized_default.attr, + &dev_attr_interface_authorized_default.attr, NULL, }; @@ -2676,12 +2727,22 @@ int usb_add_hcd(struct usb_hcd *hcd, dev_info(hcd->self.controller, "%s\n", hcd->product_desc); /* Keep old behaviour if authorized_default is not in [0, 1]. */ - if (authorized_default < 0 || authorized_default > 1) - hcd->authorized_default = hcd->wireless ? 0 : 1; - else - hcd->authorized_default = authorized_default; + if (authorized_default < 0 || authorized_default > 1) { + if (hcd->wireless) + clear_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags); + else + set_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags); + } else { + if (authorized_default) + set_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags); + else + clear_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags); + } set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); + /* per default all interfaces are authorized */ + set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags); + /* HC is in reset state, but accessible. Now do the one-time init, * bottom up so that hcds can customize the root hubs before hub_wq * starts talking to them. (Note, bus id is assigned early too.) @@ -2717,6 +2778,7 @@ int usb_add_hcd(struct usb_hcd *hcd, rhdev->speed = USB_SPEED_WIRELESS; break; case HCD_USB3: + case HCD_USB31: rhdev->speed = USB_SPEED_SUPER; break; default: diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 431839bd291f..bdeadc112d29 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1070,7 +1070,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) * for HUB_POST_RESET, but it's easier not to. 
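
The hcd.c hunks above replace the old hcd->authorized_default field with bits in hcd->flags; HCD_DEV_AUTHORIZED() and HCD_INTF_AUTHORIZED() are presumably thin tests of those bits (their real definitions live in the USB hcd header and are not shown here). A sketch of the idiom with hypothetical demo names, including the "!!" used by the show handlers to collapse the tested bit to 0 or 1:

#include <linux/bitops.h>

/* hypothetical stand-ins for HCD_FLAG_DEV_AUTHORIZED / HCD_DEV_AUTHORIZED() */
#define DEMO_FLAG_DEV_AUTHORIZED	4
#define DEMO_DEV_AUTHORIZED(flagsp)	test_bit(DEMO_FLAG_DEV_AUTHORIZED, (flagsp))

static void demo_set_default(unsigned long *flags, bool authorized)
{
	if (authorized)
		set_bit(DEMO_FLAG_DEV_AUTHORIZED, flags);
	else
		clear_bit(DEMO_FLAG_DEV_AUTHORIZED, flags);
}

static int demo_show_default(unsigned long *flags)
{
	return !!DEMO_DEV_AUTHORIZED(flags);	/* always prints 0 or 1 */
}
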
*/ if (type == HUB_INIT) { - unsigned delay = hub_power_on_good_delay(hub); + delay = hub_power_on_good_delay(hub); hub_power_on(hub, false); INIT_DELAYED_WORK(&hub->init_work, hub_init_func2); @@ -1404,7 +1404,6 @@ static int hub_configure(struct usb_hub *hub, /* FIXME for USB 3.0, skip for now */ if ((wHubCharacteristics & HUB_CHAR_COMPOUND) && !(hub_is_superspeed(hdev))) { - int i; char portstr[USB_MAXCHILDREN + 1]; for (i = 0; i < maxchild; i++) @@ -2240,39 +2239,49 @@ static int usb_enumerate_device_otg(struct usb_device *udev) && udev->parent == udev->bus->root_hub) { struct usb_otg_descriptor *desc = NULL; struct usb_bus *bus = udev->bus; + unsigned port1 = udev->portnum; /* descriptor may appear anywhere in config */ - if (__usb_get_extra_descriptor(udev->rawdescriptors[0], - le16_to_cpu(udev->config[0].desc.wTotalLength), - USB_DT_OTG, (void **) &desc) == 0) { - if (desc->bmAttributes & USB_OTG_HNP) { - unsigned port1 = udev->portnum; + err = __usb_get_extra_descriptor(udev->rawdescriptors[0], + le16_to_cpu(udev->config[0].desc.wTotalLength), + USB_DT_OTG, (void **) &desc); + if (err || !(desc->bmAttributes & USB_OTG_HNP)) + return 0; - dev_info(&udev->dev, - "Dual-Role OTG device on %sHNP port\n", - (port1 == bus->otg_port) - ? "" : "non-"); - - /* enable HNP before suspend, it's simpler */ - if (port1 == bus->otg_port) - bus->b_hnp_enable = 1; - err = usb_control_msg(udev, - usb_sndctrlpipe(udev, 0), - USB_REQ_SET_FEATURE, 0, - bus->b_hnp_enable - ? USB_DEVICE_B_HNP_ENABLE - : USB_DEVICE_A_ALT_HNP_SUPPORT, - 0, NULL, 0, USB_CTRL_SET_TIMEOUT); - if (err < 0) { - /* OTG MESSAGE: report errors here, - * customize to match your product. - */ - dev_info(&udev->dev, - "can't set HNP mode: %d\n", - err); - bus->b_hnp_enable = 0; - } + dev_info(&udev->dev, "Dual-Role OTG device on %sHNP port\n", + (port1 == bus->otg_port) ? "" : "non-"); + + /* enable HNP before suspend, it's simpler */ + if (port1 == bus->otg_port) { + bus->b_hnp_enable = 1; + err = usb_control_msg(udev, + usb_sndctrlpipe(udev, 0), + USB_REQ_SET_FEATURE, 0, + USB_DEVICE_B_HNP_ENABLE, + 0, NULL, 0, + USB_CTRL_SET_TIMEOUT); + if (err < 0) { + /* + * OTG MESSAGE: report errors here, + * customize to match your product. + */ + dev_err(&udev->dev, "can't set HNP mode: %d\n", + err); + bus->b_hnp_enable = 0; } + } else if (desc->bLength == sizeof + (struct usb_otg_descriptor)) { + /* Set a_alt_hnp_support for legacy otg device */ + err = usb_control_msg(udev, + usb_sndctrlpipe(udev, 0), + USB_REQ_SET_FEATURE, 0, + USB_DEVICE_A_ALT_HNP_SUPPORT, + 0, NULL, 0, + USB_CTRL_SET_TIMEOUT); + if (err < 0) + dev_err(&udev->dev, + "set a_alt_hnp_support failed: %d\n", + err); } } #endif @@ -4222,7 +4231,7 @@ static int hub_enable_device(struct usb_device *udev) * but it is still necessary to lock the port. 
*/ static int -hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, +hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, int retry_counter) { struct usb_device *hdev = hub->hdev; @@ -4526,7 +4535,7 @@ fail: } static void -check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1) +check_highspeed(struct usb_hub *hub, struct usb_device *udev, int port1) { struct usb_qualifier_descriptor *qual; int status; @@ -4534,11 +4543,11 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1) if (udev->quirks & USB_QUIRK_DEVICE_QUALIFIER) return; - qual = kmalloc (sizeof *qual, GFP_KERNEL); + qual = kmalloc(sizeof *qual, GFP_KERNEL); if (qual == NULL) return; - status = usb_get_descriptor (udev, USB_DT_DEVICE_QUALIFIER, 0, + status = usb_get_descriptor(udev, USB_DT_DEVICE_QUALIFIER, 0, qual, sizeof *qual); if (status == sizeof *qual) { dev_info(&udev->dev, "not running at top speed; " @@ -4554,7 +4563,7 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1) } static unsigned -hub_power_remaining (struct usb_hub *hub) +hub_power_remaining(struct usb_hub *hub) { struct usb_device *hdev = hub->hdev; int remaining; @@ -4741,7 +4750,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0200 && udev->speed == USB_SPEED_FULL && highspeed_hubs != 0) - check_highspeed (hub, udev, port1); + check_highspeed(hub, udev, port1); /* Store the parent's children[] pointer. At this point * udev becomes globally accessible, although presumably @@ -5115,7 +5124,7 @@ static const struct usb_device_id hub_id_table[] = { { } /* Terminating entry */ }; -MODULE_DEVICE_TABLE (usb, hub_id_table); +MODULE_DEVICE_TABLE(usb, hub_id_table); static struct usb_driver hub_driver = { .name = "hub", @@ -5227,7 +5236,7 @@ static int descriptors_changed(struct usb_device *udev, changed = 1; break; } - if (memcmp (buf, udev->rawdescriptors[index], old_length) + if (memcmp(buf, udev->rawdescriptors[index], old_length) != 0) { dev_dbg(&udev->dev, "config index %d changed (#%d)\n", index, diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index f368d2053da5..8e641b5893ed 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1387,8 +1387,6 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate) * new altsetting. 
*/ if (manual) { - int i; - for (i = 0; i < alt->desc.bNumEndpoints; i++) { epaddr = alt->endpoint[i].desc.bEndpointAddress; pipe = __create_pipe(dev, @@ -1555,6 +1553,44 @@ static void usb_release_interface(struct device *dev) kfree(intf); } +/* + * usb_deauthorize_interface - deauthorize an USB interface + * + * @intf: USB interface structure + */ +void usb_deauthorize_interface(struct usb_interface *intf) +{ + struct device *dev = &intf->dev; + + device_lock(dev->parent); + + if (intf->authorized) { + device_lock(dev); + intf->authorized = 0; + device_unlock(dev); + + usb_forced_unbind_intf(intf); + } + + device_unlock(dev->parent); +} + +/* + * usb_authorize_interface - authorize an USB interface + * + * @intf: USB interface structure + */ +void usb_authorize_interface(struct usb_interface *intf) +{ + struct device *dev = &intf->dev; + + if (!intf->authorized) { + device_lock(dev); + intf->authorized = 1; /* authorize interface */ + device_unlock(dev); + } +} + static int usb_if_uevent(struct device *dev, struct kobj_uevent_env *env) { struct usb_device *usb_dev; @@ -1807,6 +1843,7 @@ free_interfaces: intfc = cp->intf_cache[i]; intf->altsetting = intfc->altsetting; intf->num_altsetting = intfc->num_altsetting; + intf->authorized = !!HCD_INTF_AUTHORIZED(hcd); kref_get(&intfc->ref); alt = usb_altnum_to_altsetting(intf, 0); diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index d85abfed84cc..f5a381945db2 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -54,6 +54,13 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Logitech ConferenceCam CC3000e */ + { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT }, + + /* Logitech PTZ Pro Camera */ + { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Logitech Quickcam Fusion */ { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -78,6 +85,12 @@ static const struct usb_device_id usb_quirk_list[] = { /* Philips PSC805 audio device */ { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Plantronic Audio 655 DSP */ + { USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME }, + + /* Plantronic Audio 648 USB */ + { USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Artisman Watchdog Dongle */ { USB_DEVICE(0x04b4, 0x0526), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index cfc68c11c3f5..d9ec2de6c4cf 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -957,6 +957,41 @@ static ssize_t supports_autosuspend_show(struct device *dev, } static DEVICE_ATTR_RO(supports_autosuspend); +/* + * interface_authorized_show - show authorization status of an USB interface + * 1 is authorized, 0 is deauthorized + */ +static ssize_t interface_authorized_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb_interface *intf = to_usb_interface(dev); + + return sprintf(buf, "%u\n", intf->authorized); +} + +/* + * interface_authorized_store - authorize or deauthorize an USB interface + */ +static ssize_t interface_authorized_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct usb_interface *intf = to_usb_interface(dev); + bool val; + + if 
(strtobool(buf, &val) != 0) + return -EINVAL; + + if (val) + usb_authorize_interface(intf); + else + usb_deauthorize_interface(intf); + + return count; +} +static struct device_attribute dev_attr_interface_authorized = + __ATTR(authorized, S_IRUGO | S_IWUSR, + interface_authorized_show, interface_authorized_store); + static struct attribute *intf_attrs[] = { &dev_attr_bInterfaceNumber.attr, &dev_attr_bAlternateSetting.attr, @@ -966,6 +1001,7 @@ static struct attribute *intf_attrs[] = { &dev_attr_bInterfaceProtocol.attr, &dev_attr_modalias.attr, &dev_attr_supports_autosuspend.attr, + &dev_attr_interface_authorized.attr, NULL, }; static struct attribute_group intf_attr_grp = { diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 8d5b2f4113cd..f8bbd0b6d9fe 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -510,7 +510,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent, if (root_hub) /* Root hub always ok [and always wired] */ dev->authorized = 1; else { - dev->authorized = usb_hcd->authorized_default; + dev->authorized = !!HCD_DEV_AUTHORIZED(usb_hcd); dev->wusb = usb_bus_is_wusb(bus) ? 1 : 0; } return dev; diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index 457255a3306a..05b5e17abf92 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -27,6 +27,8 @@ extern void usb_release_interface_cache(struct kref *ref); extern void usb_disable_device(struct usb_device *dev, int skip_ep0); extern int usb_deauthorize_device(struct usb_device *); extern int usb_authorize_device(struct usb_device *); +extern void usb_deauthorize_interface(struct usb_interface *); +extern void usb_authorize_interface(struct usb_interface *); extern void usb_detect_quirks(struct usb_device *udev); extern void usb_detect_interface_quirks(struct usb_device *udev); extern int usb_remove_device(struct usb_device *udev); diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index d1b81539d632..d6199507f861 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c @@ -159,8 +159,10 @@ static int ep_bd_list_alloc(struct bdc_ep *ep) bd_table->start_bd = dma_pool_alloc(bdc->bd_table_pool, GFP_ATOMIC, &dma); - if (!bd_table->start_bd) + if (!bd_table->start_bd) { + kfree(bd_table); goto fail; + } bd_table->dma = dma; diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 079991e283e9..3bb08870148f 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -348,16 +348,6 @@ config USB_ISP1362_HCD To compile this driver as a module, choose M here: the module will be called isp1362-hcd. -config USB_FUSBH200_HCD - tristate "FUSBH200 HCD support" - depends on USB - ---help--- - Faraday FUSBH200 is designed to meet USB2.0 EHCI specification - with minor modification. - - To compile this driver as a module, choose M here: the - module will be called fusbh200-hcd. 
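
In the bdc_ep.c hunk above, bd_table has just been allocated and is not yet linked into the endpoint's list when dma_pool_alloc() fails, so the common fail path cannot release it; the added kfree() frees it on the spot. A minimal sketch of that unwind pattern, with demo_* names that are illustrative rather than the driver's own:

#include <linux/dmapool.h>
#include <linux/slab.h>

struct demo_table {
	void		*start;
	dma_addr_t	dma;
};

static struct demo_table *demo_table_alloc(struct dma_pool *pool)
{
	struct demo_table *t = kzalloc(sizeof(*t), GFP_ATOMIC);
	dma_addr_t dma;

	if (!t)
		return NULL;

	t->start = dma_pool_alloc(pool, GFP_ATOMIC, &dma);
	if (!t->start) {
		kfree(t);	/* nothing else owns 't' yet, so free it here */
		return NULL;
	}

	t->dma = dma;
	return t;	/* only fully constructed tables reach the caller */
}
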
- config USB_FOTG210_HCD tristate "FOTG210 HCD support" depends on USB diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile index 754efaa8ccf8..e7558abc994d 100644 --- a/drivers/usb/host/Makefile +++ b/drivers/usb/host/Makefile @@ -28,9 +28,6 @@ ifneq ($(CONFIG_USB), ) obj-$(CONFIG_PCI) += pci-quirks.o endif -obj-$(CONFIG_USB_XHCI_PCI) += xhci-pci.o -obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o - obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o obj-$(CONFIG_USB_EHCI_PCI) += ehci-pci.o obj-$(CONFIG_USB_EHCI_HCD_PLATFORM) += ehci-platform.o @@ -65,6 +62,8 @@ obj-$(CONFIG_USB_OHCI_HCD_PXA27X) += ohci-pxa27x.o obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o obj-$(CONFIG_USB_FHCI_HCD) += fhci.o obj-$(CONFIG_USB_XHCI_HCD) += xhci-hcd.o +obj-$(CONFIG_USB_XHCI_PCI) += xhci-pci.o +obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o @@ -75,6 +74,5 @@ obj-$(CONFIG_USB_FSL_MPH_DR_OF) += fsl-mph-dr-of.o obj-$(CONFIG_USB_EHCI_FSL) += ehci-fsl.o obj-$(CONFIG_USB_HCD_BCMA) += bcma-hcd.o obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o -obj-$(CONFIG_USB_FUSBH200_HCD) += fusbh200-hcd.o obj-$(CONFIG_USB_FOTG210_HCD) += fotg210-hcd.o obj-$(CONFIG_USB_MAX3421_HCD) += max3421-hcd.o diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c index 275c92e53a59..c4f84c81de01 100644 --- a/drivers/usb/host/ehci-msm.c +++ b/drivers/usb/host/ehci-msm.c @@ -80,12 +80,12 @@ static int ehci_msm_probe(struct platform_device *pdev) return -ENOMEM; } - hcd->irq = platform_get_irq(pdev, 0); - if (hcd->irq < 0) { + ret = platform_get_irq(pdev, 0); + if (ret < 0) { dev_err(&pdev->dev, "Unable to get IRQ resource\n"); - ret = hcd->irq; goto put_hcd; } + hcd->irq = ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c index bfcbb9aa8816..ee8d5faa0194 100644 --- a/drivers/usb/host/ehci-orion.c +++ b/drivers/usb/host/ehci-orion.c @@ -224,7 +224,8 @@ static int ehci_orion_drv_probe(struct platform_device *pdev) priv->phy = devm_phy_optional_get(&pdev->dev, "usb"); if (IS_ERR(priv->phy)) { err = PTR_ERR(priv->phy); - goto err_phy_get; + if (err != -ENOSYS) + goto err_phy_get; } else { err = phy_init(priv->phy); if (err) diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c index 5c3c08598682..bd7082f297bb 100644 --- a/drivers/usb/host/ehci-platform.c +++ b/drivers/usb/host/ehci-platform.c @@ -19,6 +19,7 @@ * * Licensed under the GNU/GPL. See COPYING for details. */ +#include <linux/acpi.h> #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/err.h> @@ -162,8 +163,10 @@ static int ehci_platform_probe(struct platform_device *dev) err = dma_coerce_mask_and_coherent(&dev->dev, pdata->dma_mask_64 ? 
DMA_BIT_MASK(64) : DMA_BIT_MASK(32)); - if (err) + if (err) { + dev_err(&dev->dev, "Error: DMA mask configuration failed\n"); return err; + } irq = platform_get_irq(dev, 0); if (irq < 0) { @@ -385,6 +388,12 @@ static const struct of_device_id vt8500_ehci_ids[] = { }; MODULE_DEVICE_TABLE(of, vt8500_ehci_ids); +static const struct acpi_device_id ehci_acpi_match[] = { + { "PNP0D20", 0 }, /* EHCI controller without debug */ + { } +}; +MODULE_DEVICE_TABLE(acpi, ehci_acpi_match); + static const struct platform_device_id ehci_platform_table[] = { { "ehci-platform", 0 }, { } @@ -403,6 +412,7 @@ static struct platform_driver ehci_platform_driver = { .name = "ehci-platform", .pm = &ehci_platform_pm_ops, .of_match_table = vt8500_ehci_ids, + .acpi_match_table = ACPI_PTR(ehci_acpi_match), } }; diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c index 34e14746b92e..3c4e5253955c 100644 --- a/drivers/usb/host/ehci-spear.c +++ b/drivers/usb/host/ehci-spear.c @@ -149,6 +149,7 @@ static const struct of_device_id spear_ehci_id_table[] = { { .compatible = "st,spear600-ehci", }, { }, }; +MODULE_DEVICE_TABLE(of, spear_ehci_id_table); static struct platform_driver spear_ehci_hcd_driver = { .probe = spear_ehci_hcd_drv_probe, diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c index 000ed80ab592..787f4e3d16d8 100644 --- a/drivers/usb/host/fotg210-hcd.c +++ b/drivers/usb/host/fotg210-hcd.c @@ -1,5 +1,4 @@ -/* - * Faraday FOTG210 EHCI-like driver +/* Faraday FOTG210 EHCI-like driver * * Copyright (c) 2013 Faraday Technology Corporation * @@ -50,32 +49,29 @@ #include <asm/irq.h> #include <asm/unaligned.h> -/*-------------------------------------------------------------------------*/ #define DRIVER_AUTHOR "Yuan-Hsin Chen" #define DRIVER_DESC "FOTG210 Host Controller (EHCI) Driver" - -static const char hcd_name[] = "fotg210_hcd"; +static const char hcd_name[] = "fotg210_hcd"; #undef FOTG210_URB_TRACE - #define FOTG210_STATS /* magic numbers that can affect system performance */ -#define FOTG210_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */ -#define FOTG210_TUNE_RL_HS 4 /* nak throttle; see 4.9 */ -#define FOTG210_TUNE_RL_TT 0 -#define FOTG210_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */ -#define FOTG210_TUNE_MULT_TT 1 -/* - * Some drivers think it's safe to schedule isochronous transfers more than - * 256 ms into the future (partly as a result of an old bug in the scheduling +#define FOTG210_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */ +#define FOTG210_TUNE_RL_HS 4 /* nak throttle; see 4.9 */ +#define FOTG210_TUNE_RL_TT 0 +#define FOTG210_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */ +#define FOTG210_TUNE_MULT_TT 1 + +/* Some drivers think it's safe to schedule isochronous transfers more than 256 + * ms into the future (partly as a result of an old bug in the scheduling * code). In an attempt to avoid trouble, we will use a minimum scheduling * length of 512 frames instead of 256. 
*/ -#define FOTG210_TUNE_FLS 1 /* (medium) 512-frame schedule */ +#define FOTG210_TUNE_FLS 1 /* (medium) 512-frame schedule */ /* Initial IRQ latency: faster than hw default */ -static int log2_irq_thresh; /* 0 to 6 */ +static int log2_irq_thresh; /* 0 to 6 */ module_param(log2_irq_thresh, int, S_IRUGO); MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes"); @@ -89,66 +85,57 @@ static unsigned int hird; module_param(hird, int, S_IRUGO); MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us"); -#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT) +#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT) #include "fotg210.h" -/*-------------------------------------------------------------------------*/ - #define fotg210_dbg(fotg210, fmt, args...) \ - dev_dbg(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args) + dev_dbg(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args) #define fotg210_err(fotg210, fmt, args...) \ - dev_err(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args) + dev_err(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args) #define fotg210_info(fotg210, fmt, args...) \ - dev_info(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args) + dev_info(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args) #define fotg210_warn(fotg210, fmt, args...) \ - dev_warn(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args) + dev_warn(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args) -/* check the values in the HCSPARAMS register - * (host controller _Structural_ parameters) - * see EHCI spec, Table 2-4 for each value +/* check the values in the HCSPARAMS register (host controller _Structural_ + * parameters) see EHCI spec, Table 2-4 for each value */ static void dbg_hcs_params(struct fotg210_hcd *fotg210, char *label) { - u32 params = fotg210_readl(fotg210, &fotg210->caps->hcs_params); + u32 params = fotg210_readl(fotg210, &fotg210->caps->hcs_params); - fotg210_dbg(fotg210, - "%s hcs_params 0x%x ports=%d\n", - label, params, - HCS_N_PORTS(params) - ); + fotg210_dbg(fotg210, "%s hcs_params 0x%x ports=%d\n", label, params, + HCS_N_PORTS(params)); } -/* check the values in the HCCPARAMS register - * (host controller _Capability_ parameters) - * see EHCI Spec, Table 2-5 for each value - * */ +/* check the values in the HCCPARAMS register (host controller _Capability_ + * parameters) see EHCI Spec, Table 2-5 for each value + */ static void dbg_hcc_params(struct fotg210_hcd *fotg210, char *label) { - u32 params = fotg210_readl(fotg210, &fotg210->caps->hcc_params); + u32 params = fotg210_readl(fotg210, &fotg210->caps->hcc_params); - fotg210_dbg(fotg210, - "%s hcc_params %04x uframes %s%s\n", - label, - params, - HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024", - HCC_CANPARK(params) ? " park" : ""); + fotg210_dbg(fotg210, "%s hcc_params %04x uframes %s%s\n", label, + params, + HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024", + HCC_CANPARK(params) ? 
" park" : ""); } static void __maybe_unused dbg_qtd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd) { fotg210_dbg(fotg210, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd, - hc32_to_cpup(fotg210, &qtd->hw_next), - hc32_to_cpup(fotg210, &qtd->hw_alt_next), - hc32_to_cpup(fotg210, &qtd->hw_token), - hc32_to_cpup(fotg210, &qtd->hw_buf[0])); + hc32_to_cpup(fotg210, &qtd->hw_next), + hc32_to_cpup(fotg210, &qtd->hw_alt_next), + hc32_to_cpup(fotg210, &qtd->hw_token), + hc32_to_cpup(fotg210, &qtd->hw_buf[0])); if (qtd->hw_buf[1]) fotg210_dbg(fotg210, " p1=%08x p2=%08x p3=%08x p4=%08x\n", - hc32_to_cpup(fotg210, &qtd->hw_buf[1]), - hc32_to_cpup(fotg210, &qtd->hw_buf[2]), - hc32_to_cpup(fotg210, &qtd->hw_buf[3]), - hc32_to_cpup(fotg210, &qtd->hw_buf[4])); + hc32_to_cpup(fotg210, &qtd->hw_buf[1]), + hc32_to_cpup(fotg210, &qtd->hw_buf[2]), + hc32_to_cpup(fotg210, &qtd->hw_buf[3]), + hc32_to_cpup(fotg210, &qtd->hw_buf[4])); } static void __maybe_unused @@ -156,101 +143,100 @@ dbg_qh(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qh *qh) { struct fotg210_qh_hw *hw = qh->hw; - fotg210_dbg(fotg210, "%s qh %p n%08x info %x %x qtd %x\n", label, - qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current); + fotg210_dbg(fotg210, "%s qh %p n%08x info %x %x qtd %x\n", label, qh, + hw->hw_next, hw->hw_info1, hw->hw_info2, + hw->hw_current); + dbg_qtd("overlay", fotg210, (struct fotg210_qtd *) &hw->hw_qtd_next); } static void __maybe_unused dbg_itd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_itd *itd) { - fotg210_dbg(fotg210, "%s[%d] itd %p, next %08x, urb %p\n", - label, itd->frame, itd, hc32_to_cpu(fotg210, itd->hw_next), - itd->urb); + fotg210_dbg(fotg210, "%s[%d] itd %p, next %08x, urb %p\n", label, + itd->frame, itd, hc32_to_cpu(fotg210, itd->hw_next), + itd->urb); + fotg210_dbg(fotg210, - " trans: %08x %08x %08x %08x %08x %08x %08x %08x\n", - hc32_to_cpu(fotg210, itd->hw_transaction[0]), - hc32_to_cpu(fotg210, itd->hw_transaction[1]), - hc32_to_cpu(fotg210, itd->hw_transaction[2]), - hc32_to_cpu(fotg210, itd->hw_transaction[3]), - hc32_to_cpu(fotg210, itd->hw_transaction[4]), - hc32_to_cpu(fotg210, itd->hw_transaction[5]), - hc32_to_cpu(fotg210, itd->hw_transaction[6]), - hc32_to_cpu(fotg210, itd->hw_transaction[7])); + " trans: %08x %08x %08x %08x %08x %08x %08x %08x\n", + hc32_to_cpu(fotg210, itd->hw_transaction[0]), + hc32_to_cpu(fotg210, itd->hw_transaction[1]), + hc32_to_cpu(fotg210, itd->hw_transaction[2]), + hc32_to_cpu(fotg210, itd->hw_transaction[3]), + hc32_to_cpu(fotg210, itd->hw_transaction[4]), + hc32_to_cpu(fotg210, itd->hw_transaction[5]), + hc32_to_cpu(fotg210, itd->hw_transaction[6]), + hc32_to_cpu(fotg210, itd->hw_transaction[7])); + fotg210_dbg(fotg210, - " buf: %08x %08x %08x %08x %08x %08x %08x\n", - hc32_to_cpu(fotg210, itd->hw_bufp[0]), - hc32_to_cpu(fotg210, itd->hw_bufp[1]), - hc32_to_cpu(fotg210, itd->hw_bufp[2]), - hc32_to_cpu(fotg210, itd->hw_bufp[3]), - hc32_to_cpu(fotg210, itd->hw_bufp[4]), - hc32_to_cpu(fotg210, itd->hw_bufp[5]), - hc32_to_cpu(fotg210, itd->hw_bufp[6])); + " buf: %08x %08x %08x %08x %08x %08x %08x\n", + hc32_to_cpu(fotg210, itd->hw_bufp[0]), + hc32_to_cpu(fotg210, itd->hw_bufp[1]), + hc32_to_cpu(fotg210, itd->hw_bufp[2]), + hc32_to_cpu(fotg210, itd->hw_bufp[3]), + hc32_to_cpu(fotg210, itd->hw_bufp[4]), + hc32_to_cpu(fotg210, itd->hw_bufp[5]), + hc32_to_cpu(fotg210, itd->hw_bufp[6])); + fotg210_dbg(fotg210, " index: %d %d %d %d %d %d %d %d\n", - itd->index[0], itd->index[1], itd->index[2], - 
itd->index[3], itd->index[4], itd->index[5], - itd->index[6], itd->index[7]); + itd->index[0], itd->index[1], itd->index[2], + itd->index[3], itd->index[4], itd->index[5], + itd->index[6], itd->index[7]); } static int __maybe_unused dbg_status_buf(char *buf, unsigned len, const char *label, u32 status) { - return scnprintf(buf, len, - "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s", - label, label[0] ? " " : "", status, - (status & STS_ASS) ? " Async" : "", - (status & STS_PSS) ? " Periodic" : "", - (status & STS_RECL) ? " Recl" : "", - (status & STS_HALT) ? " Halt" : "", - (status & STS_IAA) ? " IAA" : "", - (status & STS_FATAL) ? " FATAL" : "", - (status & STS_FLR) ? " FLR" : "", - (status & STS_PCD) ? " PCD" : "", - (status & STS_ERR) ? " ERR" : "", - (status & STS_INT) ? " INT" : "" - ); + return scnprintf(buf, len, "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s", + label, label[0] ? " " : "", status, + (status & STS_ASS) ? " Async" : "", + (status & STS_PSS) ? " Periodic" : "", + (status & STS_RECL) ? " Recl" : "", + (status & STS_HALT) ? " Halt" : "", + (status & STS_IAA) ? " IAA" : "", + (status & STS_FATAL) ? " FATAL" : "", + (status & STS_FLR) ? " FLR" : "", + (status & STS_PCD) ? " PCD" : "", + (status & STS_ERR) ? " ERR" : "", + (status & STS_INT) ? " INT" : ""); } static int __maybe_unused dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable) { - return scnprintf(buf, len, - "%s%sintrenable %02x%s%s%s%s%s%s", - label, label[0] ? " " : "", enable, - (enable & STS_IAA) ? " IAA" : "", - (enable & STS_FATAL) ? " FATAL" : "", - (enable & STS_FLR) ? " FLR" : "", - (enable & STS_PCD) ? " PCD" : "", - (enable & STS_ERR) ? " ERR" : "", - (enable & STS_INT) ? " INT" : "" - ); + return scnprintf(buf, len, "%s%sintrenable %02x%s%s%s%s%s%s", + label, label[0] ? " " : "", enable, + (enable & STS_IAA) ? " IAA" : "", + (enable & STS_FATAL) ? " FATAL" : "", + (enable & STS_FLR) ? " FLR" : "", + (enable & STS_PCD) ? " PCD" : "", + (enable & STS_ERR) ? " ERR" : "", + (enable & STS_INT) ? " INT" : ""); } static const char *const fls_strings[] = { "1024", "512", "256", "??" }; -static int -dbg_command_buf(char *buf, unsigned len, const char *label, u32 command) +static int dbg_command_buf(char *buf, unsigned len, const char *label, + u32 command) { return scnprintf(buf, len, - "%s%scommand %07x %s=%d ithresh=%d%s%s%s " - "period=%s%s %s", - label, label[0] ? " " : "", command, - (command & CMD_PARK) ? " park" : "(park)", - CMD_PARK_CNT(command), - (command >> 16) & 0x3f, - (command & CMD_IAAD) ? " IAAD" : "", - (command & CMD_ASE) ? " Async" : "", - (command & CMD_PSE) ? " Periodic" : "", - fls_strings[(command >> 2) & 0x3], - (command & CMD_RESET) ? " Reset" : "", - (command & CMD_RUN) ? "RUN" : "HALT" - ); -} - -static char -*dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status) -{ - char *sig; + "%s%scommand %07x %s=%d ithresh=%d%s%s%s period=%s%s %s", + label, label[0] ? " " : "", command, + (command & CMD_PARK) ? " park" : "(park)", + CMD_PARK_CNT(command), + (command >> 16) & 0x3f, + (command & CMD_IAAD) ? " IAAD" : "", + (command & CMD_ASE) ? " Async" : "", + (command & CMD_PSE) ? " Periodic" : "", + fls_strings[(command >> 2) & 0x3], + (command & CMD_RESET) ? " Reset" : "", + (command & CMD_RUN) ? 
"RUN" : "HALT"); +} + +static char *dbg_port_buf(char *buf, unsigned len, const char *label, int port, + u32 status) +{ + char *sig; /* signaling state */ switch (status & (3 << 10)) { @@ -268,44 +254,41 @@ static char break; } - scnprintf(buf, len, - "%s%sport:%d status %06x %d " - "sig=%s%s%s%s%s%s%s%s", - label, label[0] ? " " : "", port, status, - status>>25,/*device address */ - sig, - (status & PORT_RESET) ? " RESET" : "", - (status & PORT_SUSPEND) ? " SUSPEND" : "", - (status & PORT_RESUME) ? " RESUME" : "", - (status & PORT_PEC) ? " PEC" : "", - (status & PORT_PE) ? " PE" : "", - (status & PORT_CSC) ? " CSC" : "", - (status & PORT_CONNECT) ? " CONNECT" : ""); + scnprintf(buf, len, "%s%sport:%d status %06x %d sig=%s%s%s%s%s%s%s%s", + label, label[0] ? " " : "", port, status, + status >> 25, /*device address */ + sig, + (status & PORT_RESET) ? " RESET" : "", + (status & PORT_SUSPEND) ? " SUSPEND" : "", + (status & PORT_RESUME) ? " RESUME" : "", + (status & PORT_PEC) ? " PEC" : "", + (status & PORT_PE) ? " PE" : "", + (status & PORT_CSC) ? " CSC" : "", + (status & PORT_CONNECT) ? " CONNECT" : ""); + return buf; } /* functions have the "wrong" filename when they're output... */ -#define dbg_status(fotg210, label, status) { \ - char _buf[80]; \ - dbg_status_buf(_buf, sizeof(_buf), label, status); \ - fotg210_dbg(fotg210, "%s\n", _buf); \ +#define dbg_status(fotg210, label, status) { \ + char _buf[80]; \ + dbg_status_buf(_buf, sizeof(_buf), label, status); \ + fotg210_dbg(fotg210, "%s\n", _buf); \ } -#define dbg_cmd(fotg210, label, command) { \ - char _buf[80]; \ - dbg_command_buf(_buf, sizeof(_buf), label, command); \ - fotg210_dbg(fotg210, "%s\n", _buf); \ +#define dbg_cmd(fotg210, label, command) { \ + char _buf[80]; \ + dbg_command_buf(_buf, sizeof(_buf), label, command); \ + fotg210_dbg(fotg210, "%s\n", _buf); \ } -#define dbg_port(fotg210, label, port, status) { \ - char _buf[80]; \ - fotg210_dbg(fotg210, "%s\n", dbg_port_buf(_buf, sizeof(_buf), label, port, status) ); \ +#define dbg_port(fotg210, label, port, status) { \ + char _buf[80]; \ + fotg210_dbg(fotg210, "%s\n", \ + dbg_port_buf(_buf, sizeof(_buf), label, port, status));\ } -/*-------------------------------------------------------------------------*/ - /* troubleshooting help: expose state in debugfs */ - static int debug_async_open(struct inode *, struct file *); static int debug_periodic_open(struct inode *, struct file *); static int debug_registers_open(struct inode *, struct file *); @@ -347,17 +330,22 @@ struct debug_buffer { size_t alloc_size; }; -#define speed_char(info1)({ char tmp; \ - switch (info1 & (3 << 12)) { \ - case QH_FULL_SPEED: \ - tmp = 'f'; break; \ - case QH_LOW_SPEED: \ - tmp = 'l'; break; \ - case QH_HIGH_SPEED: \ - tmp = 'h'; break; \ - default: \ - tmp = '?'; break; \ - } tmp; }) +static inline char speed_char(u32 scratch) +{ + switch (scratch & (3 << 12)) { + case QH_FULL_SPEED: + return 'f'; + + case QH_LOW_SPEED: + return 'l'; + + case QH_HIGH_SPEED: + return 'h'; + + default: + return '?'; + } +} static inline char token_mark(struct fotg210_hcd *fotg210, __hc32 token) { @@ -373,33 +361,29 @@ static inline char token_mark(struct fotg210_hcd *fotg210, __hc32 token) return '/'; } -static void qh_lines( - struct fotg210_hcd *fotg210, - struct fotg210_qh *qh, - char **nextp, - unsigned *sizep -) -{ - u32 scratch; - u32 hw_curr; - struct fotg210_qtd *td; - unsigned temp; - unsigned size = *sizep; - char *next = *nextp; - char mark; - __le32 list_end = FOTG210_LIST_END(fotg210); - struct 
fotg210_qh_hw *hw = qh->hw; - - if (hw->hw_qtd_next == list_end) /* NEC does this */ +static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh, + char **nextp, unsigned *sizep) +{ + u32 scratch; + u32 hw_curr; + struct fotg210_qtd *td; + unsigned temp; + unsigned size = *sizep; + char *next = *nextp; + char mark; + __le32 list_end = FOTG210_LIST_END(fotg210); + struct fotg210_qh_hw *hw = qh->hw; + + if (hw->hw_qtd_next == list_end) /* NEC does this */ mark = '@'; else mark = token_mark(fotg210, hw->hw_token); - if (mark == '/') { /* qh_alt_next controls qh advance? */ - if ((hw->hw_alt_next & QTD_MASK(fotg210)) - == fotg210->async->hw->hw_alt_next) - mark = '#'; /* blocked */ + if (mark == '/') { /* qh_alt_next controls qh advance? */ + if ((hw->hw_alt_next & QTD_MASK(fotg210)) == + fotg210->async->hw->hw_alt_next) + mark = '#'; /* blocked */ else if (hw->hw_alt_next == list_end) - mark = '.'; /* use hw_qtd_next */ + mark = '.'; /* use hw_qtd_next */ /* else alt_next points to some other qtd */ } scratch = hc32_to_cpup(fotg210, &hw->hw_info1); @@ -462,6 +446,7 @@ static void qh_lines( temp = snprintf(next, size, "\n"); if (size < temp) temp = size; + size -= temp; next += temp; @@ -472,12 +457,12 @@ done: static ssize_t fill_async_buffer(struct debug_buffer *buf) { - struct usb_hcd *hcd; - struct fotg210_hcd *fotg210; - unsigned long flags; - unsigned temp, size; - char *next; - struct fotg210_qh *qh; + struct usb_hcd *hcd; + struct fotg210_hcd *fotg210; + unsigned long flags; + unsigned temp, size; + char *next; + struct fotg210_qh *qh; hcd = bus_to_hcd(buf->bus); fotg210 = hcd_to_fotg210(hcd); @@ -492,7 +477,7 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf) */ spin_lock_irqsave(&fotg210->lock, flags); for (qh = fotg210->async->qh_next.qh; size > 0 && qh; - qh = qh->qh_next.qh) + qh = qh->qh_next.qh) qh_lines(fotg210, qh, &next, &size); if (fotg210->async_unlink && size > 0) { temp = scnprintf(next, size, "\nunlink =\n"); @@ -508,21 +493,50 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf) return strlen(buf->output_buf); } +/* count tds, get ep direction */ +static unsigned output_buf_tds_dir(char *buf, struct fotg210_hcd *fotg210, + struct fotg210_qh_hw *hw, struct fotg210_qh *qh, unsigned size) +{ + u32 scratch = hc32_to_cpup(fotg210, &hw->hw_info1); + struct fotg210_qtd *qtd; + char *type = ""; + unsigned temp = 0; + + /* count tds, get ep direction */ + list_for_each_entry(qtd, &qh->qtd_list, qtd_list) { + temp++; + switch ((hc32_to_cpu(fotg210, qtd->hw_token) >> 8) & 0x03) { + case 0: + type = "out"; + continue; + case 1: + type = "in"; + continue; + } + } + + return scnprintf(buf, size, "(%c%d ep%d%s [%d/%d] q%d p%d)", + speed_char(scratch), scratch & 0x007f, + (scratch >> 8) & 0x000f, type, qh->usecs, + qh->c_usecs, temp, (scratch >> 16) & 0x7ff); +} + #define DBG_SCHED_LIMIT 64 static ssize_t fill_periodic_buffer(struct debug_buffer *buf) { - struct usb_hcd *hcd; - struct fotg210_hcd *fotg210; - unsigned long flags; - union fotg210_shadow p, *seen; - unsigned temp, size, seen_count; - char *next; - unsigned i; - __hc32 tag; - - seen = kmalloc(DBG_SCHED_LIMIT * sizeof(*seen), GFP_ATOMIC); + struct usb_hcd *hcd; + struct fotg210_hcd *fotg210; + unsigned long flags; + union fotg210_shadow p, *seen; + unsigned temp, size, seen_count; + char *next; + unsigned i; + __hc32 tag; + + seen = kmalloc_array(DBG_SCHED_LIMIT, sizeof(*seen), GFP_ATOMIC); if (!seen) return 0; + seen_count = 0; hcd = bus_to_hcd(buf->bus); @@ -542,6 +556,7 @@ static ssize_t 
fill_periodic_buffer(struct debug_buffer *buf) p = fotg210->pshadow[i]; if (likely(!p.ptr)) continue; + tag = Q_NEXT_TYPE(fotg210, fotg210->periodic[i]); temp = scnprintf(next, size, "%4d: ", i); @@ -569,7 +584,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf) continue; if (p.qh->qh_next.ptr) { temp = scnprintf(next, size, - " ..."); + " ..."); size -= temp; next += temp; } @@ -577,38 +592,9 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf) } /* show more info the first time around */ if (temp == seen_count) { - u32 scratch = hc32_to_cpup(fotg210, - &hw->hw_info1); - struct fotg210_qtd *qtd; - char *type = ""; - - /* count tds, get ep direction */ - temp = 0; - list_for_each_entry(qtd, - &p.qh->qtd_list, - qtd_list) { - temp++; - switch (0x03 & (hc32_to_cpu( - fotg210, - qtd->hw_token) >> 8)) { - case 0: - type = "out"; - continue; - case 1: - type = "in"; - continue; - } - } - - temp = scnprintf(next, size, - "(%c%d ep%d%s " - "[%d/%d] q%d p%d)", - speed_char(scratch), - scratch & 0x007f, - (scratch >> 8) & 0x000f, type, - p.qh->usecs, p.qh->c_usecs, - temp, - 0x7ff & (scratch >> 16)); + temp = output_buf_tds_dir(next, + fotg210, hw, + p.qh, size); if (seen_count < DBG_SCHED_LIMIT) seen[seen_count++].qh = p.qh; @@ -619,14 +605,14 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf) break; case Q_TYPE_FSTN: temp = scnprintf(next, size, - " fstn-%8x/%p", p.fstn->hw_prev, - p.fstn); + " fstn-%8x/%p", + p.fstn->hw_prev, p.fstn); tag = Q_NEXT_TYPE(fotg210, p.fstn->hw_next); p = p.fstn->fstn_next; break; case Q_TYPE_ITD: temp = scnprintf(next, size, - " itd/%p", p.itd); + " itd/%p", p.itd); tag = Q_NEXT_TYPE(fotg210, p.itd->hw_next); p = p.itd->itd_next; break; @@ -663,13 +649,13 @@ static const char *rh_state_string(struct fotg210_hcd *fotg210) static ssize_t fill_registers_buffer(struct debug_buffer *buf) { - struct usb_hcd *hcd; - struct fotg210_hcd *fotg210; - unsigned long flags; - unsigned temp, size, i; - char *next, scratch[80]; - static const char fmt[] = "%*s\n"; - static const char label[] = ""; + struct usb_hcd *hcd; + struct fotg210_hcd *fotg210; + unsigned long flags; + unsigned temp, size, i; + char *next, scratch[80]; + static const char fmt[] = "%*s\n"; + static const char label[] = ""; hcd = bus_to_hcd(buf->bus); fotg210 = hcd_to_fotg210(hcd); @@ -680,26 +666,26 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf) if (!HCD_HW_ACCESSIBLE(hcd)) { size = scnprintf(next, size, - "bus %s, device %s\n" - "%s\n" - "SUSPENDED(no register access)\n", - hcd->self.controller->bus->name, - dev_name(hcd->self.controller), - hcd->product_desc); + "bus %s, device %s\n" + "%s\n" + "SUSPENDED(no register access)\n", + hcd->self.controller->bus->name, + dev_name(hcd->self.controller), + hcd->product_desc); goto done; } /* Capability Registers */ i = HC_VERSION(fotg210, fotg210_readl(fotg210, - &fotg210->caps->hc_capbase)); + &fotg210->caps->hc_capbase)); temp = scnprintf(next, size, - "bus %s, device %s\n" - "%s\n" - "EHCI %x.%02x, rh state %s\n", - hcd->self.controller->bus->name, - dev_name(hcd->self.controller), - hcd->product_desc, - i >> 8, i & 0x0ff, rh_state_string(fotg210)); + "bus %s, device %s\n" + "%s\n" + "EHCI %x.%02x, rh state %s\n", + hcd->self.controller->bus->name, + dev_name(hcd->self.controller), + hcd->product_desc, + i >> 8, i & 0x0ff, rh_state_string(fotg210)); size -= temp; next += temp; @@ -747,14 +733,14 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf) #ifdef FOTG210_STATS temp = scnprintf(next, 
size, - "irq normal %ld err %ld iaa %ld(lost %ld)\n", - fotg210->stats.normal, fotg210->stats.error, fotg210->stats.iaa, - fotg210->stats.lost_iaa); + "irq normal %ld err %ld iaa %ld(lost %ld)\n", + fotg210->stats.normal, fotg210->stats.error, + fotg210->stats.iaa, fotg210->stats.lost_iaa); size -= temp; next += temp; temp = scnprintf(next, size, "complete %ld unlink %ld\n", - fotg210->stats.complete, fotg210->stats.unlink); + fotg210->stats.complete, fotg210->stats.unlink); size -= temp; next += temp; #endif @@ -765,8 +751,8 @@ done: return buf->alloc_size - size; } -static struct debug_buffer *alloc_buffer(struct usb_bus *bus, - ssize_t (*fill_func)(struct debug_buffer *)) +static struct debug_buffer +*alloc_buffer(struct usb_bus *bus, ssize_t (*fill_func)(struct debug_buffer *)) { struct debug_buffer *buf; @@ -806,7 +792,7 @@ out: } static ssize_t debug_output(struct file *file, char __user *user_buf, - size_t len, loff_t *offset) + size_t len, loff_t *offset) { struct debug_buffer *buf = file->private_data; int ret = 0; @@ -822,7 +808,7 @@ static ssize_t debug_output(struct file *file, char __user *user_buf, mutex_unlock(&buf->mutex); ret = simple_read_from_buffer(user_buf, len, offset, - buf->output_buf, buf->count); + buf->output_buf, buf->count); out: return ret; @@ -850,6 +836,7 @@ static int debug_async_open(struct inode *inode, struct file *file) static int debug_periodic_open(struct inode *inode, struct file *file) { struct debug_buffer *buf; + buf = alloc_buffer(inode->i_private, fill_periodic_buffer); if (!buf) return -ENOMEM; @@ -862,7 +849,7 @@ static int debug_periodic_open(struct inode *inode, struct file *file) static int debug_registers_open(struct inode *inode, struct file *file) { file->private_data = alloc_buffer(inode->i_private, - fill_registers_buffer); + fill_registers_buffer); return file->private_data ? 0 : -ENOMEM; } @@ -872,20 +859,20 @@ static inline void create_debug_files(struct fotg210_hcd *fotg210) struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self; fotg210->debug_dir = debugfs_create_dir(bus->bus_name, - fotg210_debug_root); + fotg210_debug_root); if (!fotg210->debug_dir) return; if (!debugfs_create_file("async", S_IRUGO, fotg210->debug_dir, bus, - &debug_async_fops)) + &debug_async_fops)) goto file_error; if (!debugfs_create_file("periodic", S_IRUGO, fotg210->debug_dir, bus, - &debug_periodic_fops)) + &debug_periodic_fops)) goto file_error; if (!debugfs_create_file("registers", S_IRUGO, fotg210->debug_dir, bus, - &debug_registers_fops)) + &debug_registers_fops)) goto file_error; return; @@ -899,10 +886,7 @@ static inline void remove_debug_files(struct fotg210_hcd *fotg210) debugfs_remove_recursive(fotg210->debug_dir); } -/*-------------------------------------------------------------------------*/ - -/* - * handshake - spin reading hc until handshake completes or fails +/* handshake - spin reading hc until handshake completes or fails * @ptr: address of hc register to be read * @mask: bits to look at in result of read * @done: value of those bits when handshake succeeds @@ -919,9 +903,9 @@ static inline void remove_debug_files(struct fotg210_hcd *fotg210) * bridge shutdown: shutting down the bridge before the devices using it. 
*/ static int handshake(struct fotg210_hcd *fotg210, void __iomem *ptr, - u32 mask, u32 done, int usec) + u32 mask, u32 done, int usec) { - u32 result; + u32 result; do { result = fotg210_readl(fotg210, ptr); @@ -936,13 +920,12 @@ static int handshake(struct fotg210_hcd *fotg210, void __iomem *ptr, return -ETIMEDOUT; } -/* - * Force HC to halt state from unknown (EHCI spec section 2.3). +/* Force HC to halt state from unknown (EHCI spec section 2.3). * Must be called with interrupts enabled and the lock not held. */ static int fotg210_halt(struct fotg210_hcd *fotg210) { - u32 temp; + u32 temp; spin_lock_irq(&fotg210->lock); @@ -962,20 +945,20 @@ static int fotg210_halt(struct fotg210_hcd *fotg210) synchronize_irq(fotg210_to_hcd(fotg210)->irq); return handshake(fotg210, &fotg210->regs->status, - STS_HALT, STS_HALT, 16 * 125); + STS_HALT, STS_HALT, 16 * 125); } -/* - * Reset a non-running (STS_HALT == 1) controller. +/* Reset a non-running (STS_HALT == 1) controller. * Must be called with interrupts enabled and the lock not held. */ static int fotg210_reset(struct fotg210_hcd *fotg210) { - int retval; - u32 command = fotg210_readl(fotg210, &fotg210->regs->command); + int retval; + u32 command = fotg210_readl(fotg210, &fotg210->regs->command); /* If the EHCI debug controller is active, special care must be - * taken before and after a host controller reset */ + * taken before and after a host controller reset + */ if (fotg210->debug && !dbgp_reset_prep(fotg210_to_hcd(fotg210))) fotg210->debug = NULL; @@ -985,7 +968,7 @@ static int fotg210_reset(struct fotg210_hcd *fotg210) fotg210->rh_state = FOTG210_RH_HALTED; fotg210->next_statechange = jiffies; retval = handshake(fotg210, &fotg210->regs->command, - CMD_RESET, 0, 250 * 1000); + CMD_RESET, 0, 250 * 1000); if (retval) return retval; @@ -998,13 +981,12 @@ static int fotg210_reset(struct fotg210_hcd *fotg210) return retval; } -/* - * Idle the controller (turn off the schedules). +/* Idle the controller (turn off the schedules). * Must be called with interrupts enabled and the lock not held. */ static void fotg210_quiesce(struct fotg210_hcd *fotg210) { - u32 temp; + u32 temp; if (fotg210->rh_state != FOTG210_RH_RUNNING) return; @@ -1012,7 +994,7 @@ static void fotg210_quiesce(struct fotg210_hcd *fotg210) /* wait for any schedule enables/disables to take effect */ temp = (fotg210->command << 10) & (STS_ASS | STS_PSS); handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, temp, - 16 * 125); + 16 * 125); /* then disable anything that's still active */ spin_lock_irq(&fotg210->lock); @@ -1022,11 +1004,9 @@ static void fotg210_quiesce(struct fotg210_hcd *fotg210) /* hardware can take 16 microframes to turn off ... 
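Two recurring constants in fotg210_quiesce() above are worth spelling out. First, an EHCI microframe is 125 us, so the "16 * 125" handshake budget is the 2 ms worst case for a schedule to actually stop. Second, the "(fotg210->command << 10)" expression works because, in the EHCI register layout this driver follows, the schedule-enable bits in USBCMD sit exactly ten positions below the matching status bits in USBSTS:

        /* illustrative only, assuming the usual EHCI bit positions */
        #define CMD_PSE         (1 << 4)        /* periodic schedule enable */
        #define CMD_ASE         (1 << 5)        /* async schedule enable    */
        #define STS_PSS         (1 << 14)       /* periodic schedule status */
        #define STS_ASS         (1 << 15)       /* async schedule status    */
        /* so (command << 10) & (STS_ASS | STS_PSS) yields the status
         * pattern that handshake() should wait for */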
*/ handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, 0, - 16 * 125); + 16 * 125); } -/*-------------------------------------------------------------------------*/ - static void end_unlink_async(struct fotg210_hcd *fotg210); static void unlink_empty_async(struct fotg210_hcd *fotg210); static void fotg210_work(struct fotg210_hcd *fotg210); @@ -1034,8 +1014,6 @@ static void start_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh); static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh); -/*-------------------------------------------------------------------------*/ - /* Set a bit in the USBCMD register */ static void fotg210_set_command_bit(struct fotg210_hcd *fotg210, u32 bit) { @@ -1056,10 +1034,7 @@ static void fotg210_clear_command_bit(struct fotg210_hcd *fotg210, u32 bit) fotg210_readl(fotg210, &fotg210->regs->command); } -/*-------------------------------------------------------------------------*/ - -/* - * EHCI timer support... Now using hrtimers. +/* EHCI timer support... Now using hrtimers. * * Lots of different events are triggered from fotg210->hrtimer. Whenever * the timer routine runs, it checks each possible event; events that are @@ -1081,8 +1056,7 @@ static void fotg210_clear_command_bit(struct fotg210_hcd *fotg210, u32 bit) * allow for an expiration range of 1 ms. */ -/* - * Delay lengths for the hrtimer event types. +/* Delay lengths for the hrtimer event types. * Keep this list sorted by delay length, in the same order as * the event types indexed by enum fotg210_hrtimer_event in fotg210.h. */ @@ -1103,7 +1077,7 @@ static unsigned event_delays_ns[] = { static void fotg210_enable_event(struct fotg210_hcd *fotg210, unsigned event, bool resched) { - ktime_t *timeout = &fotg210->hr_timeouts[event]; + ktime_t *timeout = &fotg210->hr_timeouts[event]; if (resched) *timeout = ktime_add(ktime_get(), @@ -1122,7 +1096,7 @@ static void fotg210_enable_event(struct fotg210_hcd *fotg210, unsigned event, /* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */ static void fotg210_poll_ASS(struct fotg210_hcd *fotg210) { - unsigned actual, want; + unsigned actual, want; /* Don't enable anything if the controller isn't running (e.g., died) */ if (fotg210->rh_state != FOTG210_RH_RUNNING) @@ -1136,7 +1110,7 @@ static void fotg210_poll_ASS(struct fotg210_hcd *fotg210) /* Poll again later, but give up after about 20 ms */ if (fotg210->ASS_poll_count++ < 20) { fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_ASS, - true); + true); return; } fotg210_dbg(fotg210, "Waited too long for the async schedule status (%x/%x), giving up\n", @@ -1154,8 +1128,8 @@ static void fotg210_poll_ASS(struct fotg210_hcd *fotg210) /* Turn off the schedule after a while */ fotg210_enable_event(fotg210, - FOTG210_HRTIMER_DISABLE_ASYNC, - true); + FOTG210_HRTIMER_DISABLE_ASYNC, + true); } } } @@ -1170,7 +1144,7 @@ static void fotg210_disable_ASE(struct fotg210_hcd *fotg210) /* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */ static void fotg210_poll_PSS(struct fotg210_hcd *fotg210) { - unsigned actual, want; + unsigned actual, want; /* Don't do anything if the controller isn't running (e.g., died) */ if (fotg210->rh_state != FOTG210_RH_RUNNING) @@ -1184,7 +1158,7 @@ static void fotg210_poll_PSS(struct fotg210_hcd *fotg210) /* Poll again later, but give up after about 20 ms */ if (fotg210->PSS_poll_count++ < 20) { fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_PSS, - true); + true); return; } fotg210_dbg(fotg210, "Waited too long for the 
periodic schedule status (%x/%x), giving up\n", @@ -1202,8 +1176,8 @@ static void fotg210_poll_PSS(struct fotg210_hcd *fotg210) /* Turn off the schedule after a while */ fotg210_enable_event(fotg210, - FOTG210_HRTIMER_DISABLE_PERIODIC, - true); + FOTG210_HRTIMER_DISABLE_PERIODIC, + true); } } } @@ -1224,7 +1198,7 @@ static void fotg210_handle_controller_death(struct fotg210_hcd *fotg210) if (fotg210->died_poll_count++ < 5) { /* Try again later */ fotg210_enable_event(fotg210, - FOTG210_HRTIMER_POLL_DEAD, true); + FOTG210_HRTIMER_POLL_DEAD, true); return; } fotg210_warn(fotg210, "Waited too long for the controller to stop, giving up\n"); @@ -1243,7 +1217,7 @@ static void fotg210_handle_controller_death(struct fotg210_hcd *fotg210) /* Handle unlinked interrupt QHs once they are gone from the hardware */ static void fotg210_handle_intr_unlinks(struct fotg210_hcd *fotg210) { - bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING); + bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING); /* * Process all the QHs on the intr_unlink list that were added @@ -1254,7 +1228,7 @@ static void fotg210_handle_intr_unlinks(struct fotg210_hcd *fotg210) */ fotg210->intr_unlinking = true; while (fotg210->intr_unlink) { - struct fotg210_qh *qh = fotg210->intr_unlink; + struct fotg210_qh *qh = fotg210->intr_unlink; if (!stopped && qh->unlink_cycle == fotg210->intr_unlink_cycle) break; @@ -1266,7 +1240,7 @@ static void fotg210_handle_intr_unlinks(struct fotg210_hcd *fotg210) /* Handle remaining entries later */ if (fotg210->intr_unlink) { fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR, - true); + true); ++fotg210->intr_unlink_cycle; } fotg210->intr_unlinking = false; @@ -1288,7 +1262,7 @@ static void start_free_itds(struct fotg210_hcd *fotg210) /* Wait for controller to stop using old iTDs and siTDs */ static void end_free_itds(struct fotg210_hcd *fotg210) { - struct fotg210_itd *itd, *n; + struct fotg210_itd *itd, *n; if (fotg210->rh_state < FOTG210_RH_RUNNING) fotg210->last_itd_to_free = NULL; @@ -1339,7 +1313,7 @@ static void fotg210_iaa_watchdog(struct fotg210_hcd *fotg210) if ((status & STS_IAA) || !(cmd & CMD_IAAD)) { COUNT(fotg210->stats.lost_iaa); fotg210_writel(fotg210, STS_IAA, - &fotg210->regs->status); + &fotg210->regs->status); } fotg210_dbg(fotg210, "IAA watchdog: status %x cmd %x\n", @@ -1355,7 +1329,7 @@ static void turn_on_io_watchdog(struct fotg210_hcd *fotg210) /* Not needed if the controller isn't running or it's already enabled */ if (fotg210->rh_state != FOTG210_RH_RUNNING || (fotg210->enabled_hrtimer_events & - BIT(FOTG210_HRTIMER_IO_WATCHDOG))) + BIT(FOTG210_HRTIMER_IO_WATCHDOG))) return; /* @@ -1365,12 +1339,11 @@ static void turn_on_io_watchdog(struct fotg210_hcd *fotg210) if (fotg210->isoc_count > 0 || (fotg210->need_io_watchdog && fotg210->async_count + fotg210->intr_count > 0)) fotg210_enable_event(fotg210, FOTG210_HRTIMER_IO_WATCHDOG, - true); + true); } -/* - * Handler functions for the hrtimer event types. +/* Handler functions for the hrtimer event types. * Keep this array in the same order as the event types indexed by * enum fotg210_hrtimer_event in fotg210.h. 
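A simplified sketch of the dispatch loop this machinery feeds; event_handlers[] and FOTG210_HRTIMER_NUM_EVENTS stand in for the driver's real handler table and enum bound:

        /* hedged sketch of the hrtimer callback's core loop */
        ktime_t now = ktime_get();
        unsigned long events = fotg210->enabled_hrtimer_events;
        unsigned e;

        fotg210->enabled_hrtimer_events = 0;

        for_each_set_bit(e, &events, FOTG210_HRTIMER_NUM_EVENTS) {
                if (ktime_compare(now, fotg210->hr_timeouts[e]) >= 0)
                        event_handlers[e](fotg210);     /* deadline passed */
                else
                        fotg210_enable_event(fotg210, e, false); /* re-arm */
        }

Events whose deadlines have not yet passed are simply re-enabled, which re-arms the timer for the earliest remaining deadline.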
*/ @@ -1391,10 +1364,10 @@ static enum hrtimer_restart fotg210_hrtimer_func(struct hrtimer *t) { struct fotg210_hcd *fotg210 = container_of(t, struct fotg210_hcd, hrtimer); - ktime_t now; - unsigned long events; - unsigned long flags; - unsigned e; + ktime_t now; + unsigned long events; + unsigned long flags; + unsigned e; spin_lock_irqsave(&fotg210->lock, flags); @@ -1418,50 +1391,37 @@ static enum hrtimer_restart fotg210_hrtimer_func(struct hrtimer *t) return HRTIMER_NORESTART; } -/*-------------------------------------------------------------------------*/ - -#define fotg210_bus_suspend NULL -#define fotg210_bus_resume NULL +#define fotg210_bus_suspend NULL +#define fotg210_bus_resume NULL -/*-------------------------------------------------------------------------*/ - -static int check_reset_complete( - struct fotg210_hcd *fotg210, - int index, - u32 __iomem *status_reg, - int port_status -) { +static int check_reset_complete(struct fotg210_hcd *fotg210, int index, + u32 __iomem *status_reg, int port_status) +{ if (!(port_status & PORT_CONNECT)) return port_status; /* if reset finished and it's still not enabled -- handoff */ - if (!(port_status & PORT_PE)) { + if (!(port_status & PORT_PE)) /* with integrated TT, there's nobody to hand it to! */ - fotg210_dbg(fotg210, - "Failed to enable port %d on root hub TT\n", - index+1); - return port_status; - } else { + fotg210_dbg(fotg210, "Failed to enable port %d on root hub TT\n", + index + 1); + else fotg210_dbg(fotg210, "port %d reset complete, port enabled\n", - index + 1); - } + index + 1); return port_status; } -/*-------------------------------------------------------------------------*/ - /* build "status change" packet (one or two bytes) from HC registers */ -static int -fotg210_hub_status_data(struct usb_hcd *hcd, char *buf) +static int fotg210_hub_status_data(struct usb_hcd *hcd, char *buf) { - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); - u32 temp, status; - u32 mask; - int retval = 1; - unsigned long flags; + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + u32 temp, status; + u32 mask; + int retval = 1; + unsigned long flags; /* init status to no-changes */ buf[0] = 0; @@ -1488,9 +1448,9 @@ fotg210_hub_status_data(struct usb_hcd *hcd, char *buf) * controller by the user. */ - if ((temp & mask) != 0 || test_bit(0, &fotg210->port_c_suspend) - || (fotg210->reset_done[0] && time_after_eq( - jiffies, fotg210->reset_done[0]))) { + if ((temp & mask) != 0 || test_bit(0, &fotg210->port_c_suspend) || + (fotg210->reset_done[0] && + time_after_eq(jiffies, fotg210->reset_done[0]))) { buf[0] |= 1 << 1; status = STS_PCD; } @@ -1499,15 +1459,11 @@ fotg210_hub_status_data(struct usb_hcd *hcd, char *buf) return status ? 
retval : 0; } -/*-------------------------------------------------------------------------*/ - -static void -fotg210_hub_descriptor( - struct fotg210_hcd *fotg210, - struct usb_hub_descriptor *desc -) { - int ports = HCS_N_PORTS(fotg210->hcs_params); - u16 temp; +static void fotg210_hub_descriptor(struct fotg210_hcd *fotg210, + struct usb_hub_descriptor *desc) +{ + int ports = HCS_N_PORTS(fotg210->hcs_params); + u16 temp; desc->bDescriptorType = USB_DT_HUB; desc->bPwrOn2PwrGood = 10; /* fotg210 1.0, 2.3.9 says 20ms max */ @@ -1526,23 +1482,16 @@ fotg210_hub_descriptor( desc->wHubCharacteristics = cpu_to_le16(temp); } -/*-------------------------------------------------------------------------*/ - -static int fotg210_hub_control( - struct usb_hcd *hcd, - u16 typeReq, - u16 wValue, - u16 wIndex, - char *buf, - u16 wLength -) { - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); - int ports = HCS_N_PORTS(fotg210->hcs_params); - u32 __iomem *status_reg = &fotg210->regs->port_status; - u32 temp, temp1, status; - unsigned long flags; - int retval = 0; - unsigned selector; +static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, + u16 wIndex, char *buf, u16 wLength) +{ + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + int ports = HCS_N_PORTS(fotg210->hcs_params); + u32 __iomem *status_reg = &fotg210->regs->port_status; + u32 temp, temp1, status; + unsigned long flags; + int retval = 0; + unsigned selector; /* * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR. @@ -1605,7 +1554,7 @@ static int fotg210_hub_control( break; case USB_PORT_FEAT_C_OVER_CURRENT: fotg210_writel(fotg210, temp | OTGISR_OVC, - &fotg210->regs->otgisr); + &fotg210->regs->otgisr); break; case USB_PORT_FEAT_C_RESET: /* GetPortStatus clears reset */ @@ -1617,7 +1566,7 @@ static int fotg210_hub_control( break; case GetHubDescriptor: fotg210_hub_descriptor(fotg210, (struct usb_hub_descriptor *) - buf); + buf); break; case GetHubStatus: /* no hub-wide feature/status flags */ @@ -1663,16 +1612,16 @@ static int fotg210_hub_control( /* stop resume signaling */ temp = fotg210_readl(fotg210, status_reg); - fotg210_writel(fotg210, - temp & ~(PORT_RWC_BITS | PORT_RESUME), - status_reg); + fotg210_writel(fotg210, temp & + ~(PORT_RWC_BITS | PORT_RESUME), + status_reg); clear_bit(wIndex, &fotg210->resuming_ports); retval = handshake(fotg210, status_reg, - PORT_RESUME, 0, 2000 /* 2msec */); + PORT_RESUME, 0, 2000);/* 2ms */ if (retval != 0) { fotg210_err(fotg210, - "port %d resume error %d\n", - wIndex + 1, retval); + "port %d resume error %d\n", + wIndex + 1, retval); goto error; } temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10)); @@ -1680,17 +1629,16 @@ static int fotg210_hub_control( } /* whoever resets must GetPortStatus to complete it!! */ - if ((temp & PORT_RESET) - && time_after_eq(jiffies, - fotg210->reset_done[wIndex])) { + if ((temp & PORT_RESET) && time_after_eq(jiffies, + fotg210->reset_done[wIndex])) { status |= USB_PORT_STAT_C_RESET << 16; fotg210->reset_done[wIndex] = 0; clear_bit(wIndex, &fotg210->resuming_ports); /* force reset to complete */ fotg210_writel(fotg210, - temp & ~(PORT_RWC_BITS | PORT_RESET), - status_reg); + temp & ~(PORT_RWC_BITS | PORT_RESET), + status_reg); /* REVISIT: some hardware needs 550+ usec to clear * this bit; seems too long to spin routinely... 
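One detail worth calling out in these port register writes: PORT_RWC_BITS covers the write-one-to-clear change bits, so every read-modify-write masks them off to avoid clearing a change event as a side effect. Finishing a port reset then looks like this (a sketch of the sequence used just below):

        /* hedged sketch: clear PORT_RESET without touching the RW1C
         * change bits, then poll until the controller deasserts it */
        temp = fotg210_readl(fotg210, status_reg);
        fotg210_writel(fotg210, temp & ~(PORT_RWC_BITS | PORT_RESET),
                        status_reg);
        retval = handshake(fotg210, status_reg, PORT_RESET, 0, 1000);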
*/ @@ -1698,7 +1646,7 @@ static int fotg210_hub_control( PORT_RESET, 0, 1000); if (retval != 0) { fotg210_err(fotg210, "port %d reset error %d\n", - wIndex + 1, retval); + wIndex + 1, retval); goto error; } @@ -1718,7 +1666,7 @@ static int fotg210_hub_control( temp &= ~PORT_RWC_BITS; fotg210_writel(fotg210, temp, status_reg); fotg210_dbg(fotg210, "port %d --> companion\n", - wIndex + 1); + wIndex + 1); temp = fotg210_readl(fotg210, status_reg); } @@ -1788,7 +1736,7 @@ static int fotg210_hub_control( * mode if we have hostpc feature */ fotg210_writel(fotg210, temp | PORT_SUSPEND, - status_reg); + status_reg); set_bit(wIndex, &fotg210->suspended_ports); break; case USB_PORT_FEAT_RESET: @@ -1866,9 +1814,8 @@ static int __maybe_unused fotg210_port_handed_over(struct usb_hcd *hcd, { return 0; } -/*-------------------------------------------------------------------------*/ -/* - * There's basically three types of memory: + +/* There's basically three types of memory: * - data used only by the HCD ... kmalloc is fine * - async and periodic schedules, shared by HC and HCD ... these * need to use dma_pool or dma_alloc_coherent @@ -1878,12 +1825,9 @@ static int __maybe_unused fotg210_port_handed_over(struct usb_hcd *hcd, * No memory seen by this driver is pageable. */ -/*-------------------------------------------------------------------------*/ - /* Allocate the key transfer structures from the previously allocated pool */ - static inline void fotg210_qtd_init(struct fotg210_hcd *fotg210, - struct fotg210_qtd *qtd, dma_addr_t dma) + struct fotg210_qtd *qtd, dma_addr_t dma) { memset(qtd, 0, sizeof(*qtd)); qtd->qtd_dma = dma; @@ -1894,10 +1838,10 @@ static inline void fotg210_qtd_init(struct fotg210_hcd *fotg210, } static struct fotg210_qtd *fotg210_qtd_alloc(struct fotg210_hcd *fotg210, - gfp_t flags) + gfp_t flags) { - struct fotg210_qtd *qtd; - dma_addr_t dma; + struct fotg210_qtd *qtd; + dma_addr_t dma; qtd = dma_pool_alloc(fotg210->qtd_pool, flags, &dma); if (qtd != NULL) @@ -1907,7 +1851,7 @@ static struct fotg210_qtd *fotg210_qtd_alloc(struct fotg210_hcd *fotg210, } static inline void fotg210_qtd_free(struct fotg210_hcd *fotg210, - struct fotg210_qtd *qtd) + struct fotg210_qtd *qtd) { dma_pool_free(fotg210->qtd_pool, qtd, qtd->qtd_dma); } @@ -1927,10 +1871,10 @@ static void qh_destroy(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) } static struct fotg210_qh *fotg210_qh_alloc(struct fotg210_hcd *fotg210, - gfp_t flags) + gfp_t flags) { - struct fotg210_qh *qh; - dma_addr_t dma; + struct fotg210_qh *qh; + dma_addr_t dma; qh = kzalloc(sizeof(*qh), GFP_ATOMIC); if (!qh) @@ -1958,8 +1902,6 @@ fail: return NULL; } -/*-------------------------------------------------------------------------*/ - /* The queue heads and transfer descriptors are managed from pools tied * to each of the "per device" structures. * This is the initialisation and cleanup code. 
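For the memory "shared by HC and HCD" mentioned above, the driver relies on the DMA pool API. A self-contained sketch of the lifecycle; the device pointer, pool name, size and alignment here are placeholders, not the driver's real values:

        #include <linux/device.h>
        #include <linux/dmapool.h>

        /* hedged sketch: round trip through a DMA pool */
        static int example_pool_roundtrip(struct device *dev)
        {
                struct dma_pool *pool;
                dma_addr_t dma;
                void *vaddr;

                pool = dma_pool_create("example_qtd", dev, 96, 32, 0);
                if (!pool)
                        return -ENOMEM;

                vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
                if (vaddr)
                        dma_pool_free(pool, vaddr, dma);

                /* dma_pool_destroy() accepts NULL, which is why the
                 * cleanup hunk below can drop its "if (pool)" guards */
                dma_pool_destroy(pool);
                return 0;
        }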
@@ -1976,23 +1918,19 @@ static void fotg210_mem_cleanup(struct fotg210_hcd *fotg210) fotg210->dummy = NULL; /* DMA consistent memory and pools */ - if (fotg210->qtd_pool) - dma_pool_destroy(fotg210->qtd_pool); + dma_pool_destroy(fotg210->qtd_pool); fotg210->qtd_pool = NULL; - if (fotg210->qh_pool) { - dma_pool_destroy(fotg210->qh_pool); - fotg210->qh_pool = NULL; - } + dma_pool_destroy(fotg210->qh_pool); + fotg210->qh_pool = NULL; - if (fotg210->itd_pool) - dma_pool_destroy(fotg210->itd_pool); + dma_pool_destroy(fotg210->itd_pool); fotg210->itd_pool = NULL; if (fotg210->periodic) dma_free_coherent(fotg210_to_hcd(fotg210)->self.controller, - fotg210->periodic_size * sizeof(u32), - fotg210->periodic, fotg210->periodic_dma); + fotg210->periodic_size * sizeof(u32), + fotg210->periodic, fotg210->periodic_dma); fotg210->periodic = NULL; /* shadow periodic table */ @@ -2039,8 +1977,8 @@ static int fotg210_mem_init(struct fotg210_hcd *fotg210, gfp_t flags) /* Hardware periodic table */ fotg210->periodic = (__le32 *) dma_alloc_coherent(fotg210_to_hcd(fotg210)->self.controller, - fotg210->periodic_size * sizeof(__le32), - &fotg210->periodic_dma, 0); + fotg210->periodic_size * sizeof(__le32), + &fotg210->periodic_dma, 0); if (fotg210->periodic == NULL) goto fail; @@ -2049,7 +1987,7 @@ static int fotg210_mem_init(struct fotg210_hcd *fotg210, gfp_t flags) /* software shadow of hardware table */ fotg210->pshadow = kcalloc(fotg210->periodic_size, sizeof(void *), - flags); + flags); if (fotg210->pshadow != NULL) return 0; @@ -2058,9 +1996,7 @@ fail: fotg210_mem_cleanup(fotg210); return -ENOMEM; } -/*-------------------------------------------------------------------------*/ -/* - * EHCI hardware queue manipulation ... the core. QH/QTD manipulation. +/* EHCI hardware queue manipulation ... the core. QH/QTD manipulation. * * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd" * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned @@ -2077,16 +2013,12 @@ fail: * buffer low/full speed data so the host collects it at high speed. */ -/*-------------------------------------------------------------------------*/ - /* fill a qtd, returning how much of the buffer we were able to queue up */ - -static int -qtd_fill(struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd, dma_addr_t buf, - size_t len, int token, int maxpacket) +static int qtd_fill(struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd, + dma_addr_t buf, size_t len, int token, int maxpacket) { - int i, count; - u64 addr = buf; + int i, count; + u64 addr = buf; /* one buffer entry per 4K ... first might be short or unaligned */ qtd->hw_buf[0] = cpu_to_hc32(fotg210, (u32)addr); @@ -2121,11 +2053,8 @@ qtd_fill(struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd, dma_addr_t buf, return count; } -/*-------------------------------------------------------------------------*/ - -static inline void -qh_update(struct fotg210_hcd *fotg210, struct fotg210_qh *qh, - struct fotg210_qtd *qtd) +static inline void qh_update(struct fotg210_hcd *fotg210, + struct fotg210_qh *qh, struct fotg210_qtd *qtd) { struct fotg210_qh_hw *hw = qh->hw; @@ -2141,7 +2070,7 @@ qh_update(struct fotg210_hcd *fotg210, struct fotg210_qh *qh, * ever clear it. 
*/ if (!(hw->hw_info1 & cpu_to_hc32(fotg210, QH_TOGGLE_CTL))) { - unsigned is_out, epnum; + unsigned is_out, epnum; is_out = qh->is_out; epnum = (hc32_to_cpup(fotg210, &hw->hw_info1) >> 8) & 0x0f; @@ -2158,8 +2087,7 @@ qh_update(struct fotg210_hcd *fotg210, struct fotg210_qh *qh, * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault * recovery (including urb dequeue) would need software changes to a QH... */ -static void -qh_refresh(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) +static void qh_refresh(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) { struct fotg210_qtd *qtd; @@ -2185,16 +2113,14 @@ qh_refresh(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) qh_update(fotg210, qh, qtd); } -/*-------------------------------------------------------------------------*/ - static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh); static void fotg210_clear_tt_buffer_complete(struct usb_hcd *hcd, struct usb_host_endpoint *ep) { - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); - struct fotg210_qh *qh = ep->hcpriv; - unsigned long flags; + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + struct fotg210_qh *qh = ep->hcpriv; + unsigned long flags; spin_lock_irqsave(&fotg210->lock, flags); qh->clearing_tt = 0; @@ -2205,8 +2131,7 @@ static void fotg210_clear_tt_buffer_complete(struct usb_hcd *hcd, } static void fotg210_clear_tt_buffer(struct fotg210_hcd *fotg210, - struct fotg210_qh *qh, - struct urb *urb, u32 token) + struct fotg210_qh *qh, struct urb *urb, u32 token) { /* If an async split transaction gets an error or is unlinked, @@ -2217,27 +2142,24 @@ static void fotg210_clear_tt_buffer(struct fotg210_hcd *fotg210, */ if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) { struct usb_device *tt = urb->dev->tt->hub; + dev_dbg(&tt->dev, - "clear tt buffer port %d, a%d ep%d t%08x\n", - urb->dev->ttport, urb->dev->devnum, - usb_pipeendpoint(urb->pipe), token); + "clear tt buffer port %d, a%d ep%d t%08x\n", + urb->dev->ttport, urb->dev->devnum, + usb_pipeendpoint(urb->pipe), token); if (urb->dev->tt->hub != - fotg210_to_hcd(fotg210)->self.root_hub) { + fotg210_to_hcd(fotg210)->self.root_hub) { if (usb_hub_clear_tt_buffer(urb) == 0) qh->clearing_tt = 1; } } } -static int qtd_copy_status( - struct fotg210_hcd *fotg210, - struct urb *urb, - size_t length, - u32 token -) +static int qtd_copy_status(struct fotg210_hcd *fotg210, struct urb *urb, + size_t length, u32 token) { - int status = -EINPROGRESS; + int status = -EINPROGRESS; /* count IN/OUT bytes, not SETUP (even short packets) */ if (likely(QTD_PID(token) != 2)) @@ -2274,32 +2196,32 @@ static int qtd_copy_status( } else if (token & QTD_STS_XACT) { /* timeout, bad CRC, wrong PID, etc */ fotg210_dbg(fotg210, "devpath %s ep%d%s 3strikes\n", - urb->dev->devpath, - usb_pipeendpoint(urb->pipe), - usb_pipein(urb->pipe) ? "in" : "out"); + urb->dev->devpath, + usb_pipeendpoint(urb->pipe), + usb_pipein(urb->pipe) ? "in" : "out"); status = -EPROTO; } else { /* unknown */ status = -EPROTO; } fotg210_dbg(fotg210, - "dev%d ep%d%s qtd token %08x --> status %d\n", - usb_pipedevice(urb->pipe), - usb_pipeendpoint(urb->pipe), - usb_pipein(urb->pipe) ? "in" : "out", - token, status); + "dev%d ep%d%s qtd token %08x --> status %d\n", + usb_pipedevice(urb->pipe), + usb_pipeendpoint(urb->pipe), + usb_pipein(urb->pipe) ? 
"in" : "out", + token, status); } return status; } -static void -fotg210_urb_done(struct fotg210_hcd *fotg210, struct urb *urb, int status) +static void fotg210_urb_done(struct fotg210_hcd *fotg210, struct urb *urb, + int status) __releases(fotg210->lock) __acquires(fotg210->lock) { if (likely(urb->hcpriv != NULL)) { - struct fotg210_qh *qh = (struct fotg210_qh *) urb->hcpriv; + struct fotg210_qh *qh = (struct fotg210_qh *) urb->hcpriv; /* S-mask in a QH means it's an interrupt urb */ if ((qh->hw->hw_info2 & cpu_to_hc32(fotg210, QH_SMASK)) != 0) { @@ -2320,12 +2242,12 @@ __acquires(fotg210->lock) #ifdef FOTG210_URB_TRACE fotg210_dbg(fotg210, - "%s %s urb %p ep%d%s status %d len %d/%d\n", - __func__, urb->dev->devpath, urb, - usb_pipeendpoint(urb->pipe), - usb_pipein(urb->pipe) ? "in" : "out", - status, - urb->actual_length, urb->transfer_buffer_length); + "%s %s urb %p ep%d%s status %d len %d/%d\n", + __func__, urb->dev->devpath, urb, + usb_pipeendpoint(urb->pipe), + usb_pipein(urb->pipe) ? "in" : "out", + status, + urb->actual_length, urb->transfer_buffer_length); #endif /* complete() can reenter this HCD */ @@ -2337,21 +2259,20 @@ __acquires(fotg210->lock) static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh); -/* - * Process and free completed qtds for a qh, returning URBs to drivers. +/* Process and free completed qtds for a qh, returning URBs to drivers. * Chases up to qh->hw_current. Returns number of completions called, * indicating how much "real" work we did. */ -static unsigned -qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) +static unsigned qh_completions(struct fotg210_hcd *fotg210, + struct fotg210_qh *qh) { - struct fotg210_qtd *last, *end = qh->dummy; - struct list_head *entry, *tmp; - int last_status; - int stopped; - unsigned count = 0; - u8 state; - struct fotg210_qh_hw *hw = qh->hw; + struct fotg210_qtd *last, *end = qh->dummy; + struct list_head *entry, *tmp; + int last_status; + int stopped; + unsigned count = 0; + u8 state; + struct fotg210_qh_hw *hw = qh->hw; if (unlikely(list_empty(&qh->qtd_list))) return count; @@ -2370,7 +2291,7 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) qh->qh_state = QH_STATE_COMPLETING; stopped = (state == QH_STATE_IDLE); - rescan: +rescan: last = NULL; last_status = -EINPROGRESS; qh->needs_rescan = 0; @@ -2381,9 +2302,9 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) * if queue is stopped, handles unlinks. */ list_for_each_safe(entry, tmp, &qh->qtd_list) { - struct fotg210_qtd *qtd; - struct urb *urb; - u32 token = 0; + struct fotg210_qtd *qtd; + struct urb *urb; + u32 token = 0; qtd = list_entry(entry, struct fotg210_qtd, qtd_list); urb = qtd->urb; @@ -2392,7 +2313,7 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) if (last) { if (likely(last->urb != urb)) { fotg210_urb_done(fotg210, last->urb, - last_status); + last_status); count++; last_status = -EINPROGRESS; } @@ -2409,20 +2330,17 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) token = hc32_to_cpu(fotg210, qtd->hw_token); /* always clean up qtds the hc de-activated */ - retry_xacterr: +retry_xacterr: if ((token & QTD_STS_ACTIVE) == 0) { /* Report Data Buffer Error: non-fatal but useful */ if (token & QTD_STS_DBE) fotg210_dbg(fotg210, "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n", - urb, - usb_endpoint_num(&urb->ep->desc), + urb, usb_endpoint_num(&urb->ep->desc), usb_endpoint_dir_in(&urb->ep->desc) ? 
"in" : "out", - urb->transfer_buffer_length, - qtd, - qh); + urb->transfer_buffer_length, qtd, qh); /* on STALL, error, and short reads this urb must * complete and all its qtds must be recycled. @@ -2433,12 +2351,14 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) * reach the software xacterr limit */ if ((token & QTD_STS_XACT) && - QTD_CERR(token) == 0 && - ++qh->xacterrs < QH_XACTERR_MAX && - !urb->unlinked) { + QTD_CERR(token) == 0 && + ++qh->xacterrs < QH_XACTERR_MAX && + !urb->unlinked) { fotg210_dbg(fotg210, - "detected XactErr len %zu/%zu retry %d\n", - qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs); + "detected XactErr len %zu/%zu retry %d\n", + qtd->length - QTD_LENGTH(token), + qtd->length, + qh->xacterrs); /* reset the token in the qtd and the * qh overlay (which still contains @@ -2466,9 +2386,9 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) * URB_SHORT_NOT_OK was set so the driver submitting * the urbs could clean it up. */ - } else if (IS_SHORT_READ(token) - && !(qtd->hw_alt_next - & FOTG210_LIST_END(fotg210))) { + } else if (IS_SHORT_READ(token) && + !(qtd->hw_alt_next & + FOTG210_LIST_END(fotg210))) { stopped = 1; } @@ -2492,9 +2412,9 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) continue; /* qh unlinked; token in overlay may be most current */ - if (state == QH_STATE_IDLE - && cpu_to_hc32(fotg210, qtd->qtd_dma) - == hw->hw_current) { + if (state == QH_STATE_IDLE && + cpu_to_hc32(fotg210, qtd->qtd_dma) + == hw->hw_current) { token = hc32_to_cpu(fotg210, hw->hw_token); /* An unlink may leave an incomplete @@ -2502,7 +2422,7 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) * We have to clear it. */ fotg210_clear_tt_buffer(fotg210, qh, urb, - token); + token); } } @@ -2516,9 +2436,9 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) if (last_status == -EINPROGRESS) { last_status = qtd_copy_status(fotg210, urb, qtd->length, token); - if (last_status == -EREMOTEIO - && (qtd->hw_alt_next - & FOTG210_LIST_END(fotg210))) + if (last_status == -EREMOTEIO && + (qtd->hw_alt_next & + FOTG210_LIST_END(fotg210))) last_status = -EINPROGRESS; /* As part of low/full-speed endpoint-halt processing @@ -2537,7 +2457,7 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) */ if (last_status != -EPIPE) fotg210_clear_tt_buffer(fotg210, qh, - urb, token); + urb, token); } } @@ -2615,26 +2535,21 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) return count; } -/*-------------------------------------------------------------------------*/ - /* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */ #define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) /* ... and packet size, for any kind of endpoint descriptor */ #define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) -/* - * reverse of qh_urb_transaction: free a list of TDs. +/* reverse of qh_urb_transaction: free a list of TDs. * used for cleanup after errors, before HC sees an URB's TDs. 
*/ -static void qtd_list_free( - struct fotg210_hcd *fotg210, - struct urb *urb, - struct list_head *qtd_list -) { - struct list_head *entry, *temp; +static void qtd_list_free(struct fotg210_hcd *fotg210, struct urb *urb, + struct list_head *qtd_list) +{ + struct list_head *entry, *temp; list_for_each_safe(entry, temp, qtd_list) { - struct fotg210_qtd *qtd; + struct fotg210_qtd *qtd; qtd = list_entry(entry, struct fotg210_qtd, qtd_list); list_del(&qtd->qtd_list); @@ -2642,23 +2557,18 @@ static void qtd_list_free( } } -/* - * create a list of filled qtds for this URB; won't link into qh. +/* create a list of filled qtds for this URB; won't link into qh. */ -static struct list_head * -qh_urb_transaction( - struct fotg210_hcd *fotg210, - struct urb *urb, - struct list_head *head, - gfp_t flags -) { - struct fotg210_qtd *qtd, *qtd_prev; - dma_addr_t buf; - int len, this_sg_len, maxpacket; - int is_input; - u32 token; - int i; - struct scatterlist *sg; +static struct list_head *qh_urb_transaction(struct fotg210_hcd *fotg210, + struct urb *urb, struct list_head *head, gfp_t flags) +{ + struct fotg210_qtd *qtd, *qtd_prev; + dma_addr_t buf; + int len, this_sg_len, maxpacket; + int is_input; + u32 token; + int i; + struct scatterlist *sg; /* * URBs map to sequences of QTDs: one logical transaction @@ -2768,8 +2678,8 @@ qh_urb_transaction( * have the alt_next mechanism keep the queue running after the * last data qtd (the only one, for control and most other cases). */ - if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 - || usb_pipecontrol(urb->pipe))) + if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 || + usb_pipecontrol(urb->pipe))) qtd->hw_alt_next = FOTG210_LIST_END(fotg210); /* @@ -2778,7 +2688,7 @@ qh_urb_transaction( * (zero length). */ if (likely(urb->transfer_buffer_length != 0)) { - int one_more = 0; + int one_more = 0; if (usb_pipecontrol(urb->pipe)) { one_more = 1; @@ -2813,9 +2723,7 @@ cleanup: return NULL; } -/*-------------------------------------------------------------------------*/ -/* - * Would be best to create all qh's from config descriptors, +/* Would be best to create all qh's from config descriptors, * when each interface/altsetting is established. Unlink * any previous qh and cancel its urbs first; endpoints are * implicitly reset then (data toggle too). @@ -2823,26 +2731,22 @@ cleanup: */ -/* - * Each QH holds a qtd list; a QH is used for everything except iso. +/* Each QH holds a qtd list; a QH is used for everything except iso. * * For interrupt urbs, the scheduler must set the microframe scheduling * mask(s) each time the QH gets scheduled. For highspeed, that's * just one microframe in the s-mask. For split interrupt transactions * there are additional complications: c-mask, maybe FSTNs. 
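To make the s-mask concrete: for a high-speed interrupt QH the scheduler ends up setting a single bit of the S-mask byte in hw_info2, one bit per chosen microframe. A hedged fragment of what that assignment looks like; qh_schedule() further down performs the real version once it has picked "uframe":

        /* e.g. uframe == 3: the QH is polled in microframe 3 of each
         * frame it is linked into */
        hw->hw_info2 |= cpu_to_hc32(fotg210, 1 << uframe);

Split transactions additionally need a C-mask for the complete-split microframes, which is what the c_maskp plumbing in check_intr_schedule() is for.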
*/ -static struct fotg210_qh * -qh_make( - struct fotg210_hcd *fotg210, - struct urb *urb, - gfp_t flags -) { - struct fotg210_qh *qh = fotg210_qh_alloc(fotg210, flags); - u32 info1 = 0, info2 = 0; - int is_input, type; - int maxp = 0; - struct usb_tt *tt = urb->dev->tt; - struct fotg210_qh_hw *hw; +static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb, + gfp_t flags) +{ + struct fotg210_qh *qh = fotg210_qh_alloc(fotg210, flags); + u32 info1 = 0, info2 = 0; + int is_input, type; + int maxp = 0; + struct usb_tt *tt = urb->dev->tt; + struct fotg210_qh_hw *hw; if (!qh) return qh; @@ -2862,7 +2766,7 @@ qh_make( */ if (max_packet(maxp) > 1024) { fotg210_dbg(fotg210, "bogus qh maxpacket %d\n", - max_packet(maxp)); + max_packet(maxp)); goto done; } @@ -2896,7 +2800,7 @@ qh_make( urb->interval = qh->period << 3; } } else { - int think_time; + int think_time; /* gap is f(FS/LS transfer times) */ qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed, @@ -2986,7 +2890,7 @@ qh_make( break; default: fotg210_dbg(fotg210, "bogus dev %p speed %d\n", urb->dev, - urb->dev->speed); + urb->dev->speed); done: qh_destroy(fotg210, qh); return NULL; @@ -3005,8 +2909,6 @@ done: return qh; } -/*-------------------------------------------------------------------------*/ - static void enable_async(struct fotg210_hcd *fotg210) { if (fotg210->async_count++) @@ -3036,8 +2938,8 @@ static void disable_async(struct fotg210_hcd *fotg210) static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) { - __hc32 dma = QH_NEXT(fotg210, qh->qh_dma); - struct fotg210_qh *head; + __hc32 dma = QH_NEXT(fotg210, qh->qh_dma); + struct fotg210_qh *head; /* Don't link a QH if there's a Clear-TT-Buffer pending */ if (unlikely(qh->clearing_tt)) @@ -3064,24 +2966,17 @@ static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) enable_async(fotg210); } -/*-------------------------------------------------------------------------*/ - -/* - * For control/bulk/interrupt, return QH with these TDs appended. +/* For control/bulk/interrupt, return QH with these TDs appended. * Allocates and initializes the QH if necessary. * Returns null if it can't allocate a QH it needs to. * If the QH has TDs (urbs) already, that's great. */ -static struct fotg210_qh *qh_append_tds( - struct fotg210_hcd *fotg210, - struct urb *urb, - struct list_head *qtd_list, - int epnum, - void **ptr -) +static struct fotg210_qh *qh_append_tds(struct fotg210_hcd *fotg210, + struct urb *urb, struct list_head *qtd_list, + int epnum, void **ptr) { - struct fotg210_qh *qh = NULL; - __hc32 qh_addr_mask = cpu_to_hc32(fotg210, 0x7f); + struct fotg210_qh *qh = NULL; + __hc32 qh_addr_mask = cpu_to_hc32(fotg210, 0x7f); qh = (struct fotg210_qh *) *ptr; if (unlikely(qh == NULL)) { @@ -3090,7 +2985,7 @@ static struct fotg210_qh *qh_append_tds( *ptr = qh; } if (likely(qh != NULL)) { - struct fotg210_qtd *qtd; + struct fotg210_qtd *qtd; if (unlikely(list_empty(qtd_list))) qtd = NULL; @@ -3109,9 +3004,9 @@ static struct fotg210_qh *qh_append_tds( * only hc or qh_refresh() ever modify the overlay. */ if (likely(qtd != NULL)) { - struct fotg210_qtd *dummy; - dma_addr_t dma; - __hc32 token; + struct fotg210_qtd *dummy; + dma_addr_t dma; + __hc32 token; /* to avoid racing the HC, use the dummy td instead of * the first td of our list (becomes new dummy). 
both @@ -3150,32 +3045,28 @@ static struct fotg210_qh *qh_append_tds( return qh; } -/*-------------------------------------------------------------------------*/ - -static int -submit_async( - struct fotg210_hcd *fotg210, - struct urb *urb, - struct list_head *qtd_list, - gfp_t mem_flags -) { - int epnum; - unsigned long flags; - struct fotg210_qh *qh = NULL; - int rc; +static int submit_async(struct fotg210_hcd *fotg210, struct urb *urb, + struct list_head *qtd_list, gfp_t mem_flags) +{ + int epnum; + unsigned long flags; + struct fotg210_qh *qh = NULL; + int rc; epnum = urb->ep->desc.bEndpointAddress; #ifdef FOTG210_URB_TRACE { struct fotg210_qtd *qtd; + qtd = list_entry(qtd_list->next, struct fotg210_qtd, qtd_list); fotg210_dbg(fotg210, - "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n", - __func__, urb->dev->devpath, urb, - epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out", - urb->transfer_buffer_length, - qtd, urb->ep->hcpriv); + "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n", + __func__, urb->dev->devpath, urb, + epnum & 0x0f, (epnum & USB_DIR_IN) + ? "in" : "out", + urb->transfer_buffer_length, + qtd, urb->ep->hcpriv); } #endif @@ -3200,19 +3091,17 @@ submit_async( */ if (likely(qh->qh_state == QH_STATE_IDLE)) qh_link_async(fotg210, qh); - done: +done: spin_unlock_irqrestore(&fotg210->lock, flags); if (unlikely(qh == NULL)) qtd_list_free(fotg210, urb, qtd_list); return rc; } -/*-------------------------------------------------------------------------*/ - static void single_unlink_async(struct fotg210_hcd *fotg210, - struct fotg210_qh *qh) + struct fotg210_qh *qh) { - struct fotg210_qh *prev; + struct fotg210_qh *prev; /* Add to the end of the list of QHs waiting for the next IAAD */ qh->qh_state = QH_STATE_UNLINK; @@ -3260,7 +3149,7 @@ static void start_iaa_cycle(struct fotg210_hcd *fotg210, bool nested) &fotg210->regs->command); fotg210_readl(fotg210, &fotg210->regs->command); fotg210_enable_event(fotg210, FOTG210_HRTIMER_IAA_WATCHDOG, - true); + true); } } @@ -3268,10 +3157,10 @@ static void start_iaa_cycle(struct fotg210_hcd *fotg210, bool nested) static void end_unlink_async(struct fotg210_hcd *fotg210) { - struct fotg210_qh *qh; + struct fotg210_qh *qh; /* Process the idle QHs */ - restart: +restart: fotg210->async_unlinking = true; while (fotg210->async_iaa) { qh = fotg210->async_iaa; @@ -3326,7 +3215,7 @@ static void unlink_empty_async(struct fotg210_hcd *fotg210) /* QHs that haven't been empty for long enough will be handled later */ if (check_unlinks_later) { fotg210_enable_event(fotg210, FOTG210_HRTIMER_ASYNC_UNLINKS, - true); + true); ++fotg210->async_unlink_cycle; } } @@ -3335,7 +3224,7 @@ static void unlink_empty_async(struct fotg210_hcd *fotg210) /* caller must own fotg210->lock */ static void start_unlink_async(struct fotg210_hcd *fotg210, - struct fotg210_qh *qh) + struct fotg210_qh *qh) { /* * If the QH isn't linked then there's nothing we can do @@ -3352,18 +3241,16 @@ static void start_unlink_async(struct fotg210_hcd *fotg210, start_iaa_cycle(fotg210, false); } -/*-------------------------------------------------------------------------*/ - static void scan_async(struct fotg210_hcd *fotg210) { - struct fotg210_qh *qh; - bool check_unlinks_later = false; + struct fotg210_qh *qh; + bool check_unlinks_later = false; fotg210->qh_scan_next = fotg210->async->qh_next.qh; while (fotg210->qh_scan_next) { qh = fotg210->qh_scan_next; fotg210->qh_scan_next = qh->qh_next.qh; - rescan: +rescan: /* clean any finished work for this qh */ if (!list_empty(&qh->qtd_list)) { int temp; 
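The async-unlink path above (single_unlink_async(), start_iaa_cycle(), end_unlink_async()) follows the EHCI "Interrupt on Async Advance" handshake: a QH removed from the async list may still be cached by the controller, so the driver rings the IAAD doorbell and only recycles the QH once the controller acknowledges. The doorbell step is essentially what start_iaa_cycle() above does:

        /* ring the doorbell and arm a watchdog in case STS_IAA never
         * arrives; end_unlink_async() runs when it does */
        fotg210_writel(fotg210, fotg210->command | CMD_IAAD,
                        &fotg210->regs->command);
        fotg210_readl(fotg210, &fotg210->regs->command); /* flush write */
        fotg210_enable_event(fotg210, FOTG210_HRTIMER_IAA_WATCHDOG, true);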
@@ -3395,15 +3282,13 @@ static void scan_async(struct fotg210_hcd *fotg210) */ if (check_unlinks_later && fotg210->rh_state == FOTG210_RH_RUNNING && !(fotg210->enabled_hrtimer_events & - BIT(FOTG210_HRTIMER_ASYNC_UNLINKS))) { + BIT(FOTG210_HRTIMER_ASYNC_UNLINKS))) { fotg210_enable_event(fotg210, - FOTG210_HRTIMER_ASYNC_UNLINKS, true); + FOTG210_HRTIMER_ASYNC_UNLINKS, true); ++fotg210->async_unlink_cycle; } } -/*-------------------------------------------------------------------------*/ -/* - * EHCI scheduled transaction support: interrupt, iso, split iso +/* EHCI scheduled transaction support: interrupt, iso, split iso * These are called "periodic" transactions in the EHCI spec. * * Note that for interrupt transfers, the QH/QTD manipulation is shared @@ -3414,19 +3299,14 @@ static void scan_async(struct fotg210_hcd *fotg210) * It keeps track of every ITD (or SITD) that's linked, and holds enough * pre-calculated schedule data to make appending to the queue be quick. */ - static int fotg210_get_frame(struct usb_hcd *hcd); -/*-------------------------------------------------------------------------*/ - -/* - * periodic_next_shadow - return "next" pointer on shadow list +/* periodic_next_shadow - return "next" pointer on shadow list * @periodic: host pointer to qh/itd * @tag: hardware tag for type of this record */ -static union fotg210_shadow * -periodic_next_shadow(struct fotg210_hcd *fotg210, - union fotg210_shadow *periodic, __hc32 tag) +static union fotg210_shadow *periodic_next_shadow(struct fotg210_hcd *fotg210, + union fotg210_shadow *periodic, __hc32 tag) { switch (hc32_to_cpu(fotg210, tag)) { case Q_TYPE_QH: @@ -3438,9 +3318,8 @@ periodic_next_shadow(struct fotg210_hcd *fotg210, } } -static __hc32 * -shadow_next_periodic(struct fotg210_hcd *fotg210, - union fotg210_shadow *periodic, __hc32 tag) +static __hc32 *shadow_next_periodic(struct fotg210_hcd *fotg210, + union fotg210_shadow *periodic, __hc32 tag) { switch (hc32_to_cpu(fotg210, tag)) { /* our fotg210_shadow.qh is actually software part */ @@ -3454,11 +3333,11 @@ shadow_next_periodic(struct fotg210_hcd *fotg210, /* caller must hold fotg210->lock */ static void periodic_unlink(struct fotg210_hcd *fotg210, unsigned frame, - void *ptr) + void *ptr) { - union fotg210_shadow *prev_p = &fotg210->pshadow[frame]; - __hc32 *hw_p = &fotg210->periodic[frame]; - union fotg210_shadow here = *prev_p; + union fotg210_shadow *prev_p = &fotg210->pshadow[frame]; + __hc32 *hw_p = &fotg210->periodic[frame]; + union fotg210_shadow here = *prev_p; /* find predecessor of "ptr"; hw and shadow lists are in sync */ while (here.ptr && here.ptr != ptr) { @@ -3479,17 +3358,17 @@ static void periodic_unlink(struct fotg210_hcd *fotg210, unsigned frame, Q_NEXT_TYPE(fotg210, *hw_p)); *hw_p = *shadow_next_periodic(fotg210, &here, - Q_NEXT_TYPE(fotg210, *hw_p)); + Q_NEXT_TYPE(fotg210, *hw_p)); } /* how many of the uframe's 125 usecs are allocated? 
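periodic_usecs() below answers that question by walking the frame's shadow list and summing the per-QH/per-ITD budgets for the chosen microframe. Callers compare the total against fotg210->uframe_periodic_max, which in the EHCI stack defaults to the spec's 80 % ceiling, i.e. 100 of the 125 us in a microframe; exceeding it is treated as a scheduling bug:

        /* from the function below: an overrun means the scheduler
         * booked more than the allowed share of the microframe */
        if (usecs > fotg210->uframe_periodic_max)       /* typically 100 us */
                fotg210_err(fotg210, "uframe %d sched overrun: %d usecs\n",
                                frame * 8 + uframe, usecs);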
*/ -static unsigned short -periodic_usecs(struct fotg210_hcd *fotg210, unsigned frame, unsigned uframe) +static unsigned short periodic_usecs(struct fotg210_hcd *fotg210, + unsigned frame, unsigned uframe) { - __hc32 *hw_p = &fotg210->periodic[frame]; - union fotg210_shadow *q = &fotg210->pshadow[frame]; - unsigned usecs = 0; - struct fotg210_qh_hw *hw; + __hc32 *hw_p = &fotg210->periodic[frame]; + union fotg210_shadow *q = &fotg210->pshadow[frame]; + unsigned usecs = 0; + struct fotg210_qh_hw *hw; while (q->ptr) { switch (hc32_to_cpu(fotg210, Q_NEXT_TYPE(fotg210, *hw_p))) { @@ -3526,12 +3405,10 @@ periodic_usecs(struct fotg210_hcd *fotg210, unsigned frame, unsigned uframe) } if (usecs > fotg210->uframe_periodic_max) fotg210_err(fotg210, "uframe %d sched overrun: %d usecs\n", - frame * 8 + uframe, usecs); + frame * 8 + uframe, usecs); return usecs; } -/*-------------------------------------------------------------------------*/ - static int same_tt(struct usb_device *dev1, struct usb_device *dev2) { if (!dev1->tt || !dev2->tt) @@ -3548,13 +3425,8 @@ static int same_tt(struct usb_device *dev1, struct usb_device *dev2) * for a periodic transfer starting at the specified frame, using * all the uframes in the mask. */ -static int tt_no_collision( - struct fotg210_hcd *fotg210, - unsigned period, - struct usb_device *dev, - unsigned frame, - u32 uf_mask -) +static int tt_no_collision(struct fotg210_hcd *fotg210, unsigned period, + struct usb_device *dev, unsigned frame, u32 uf_mask) { if (period == 0) /* error */ return 0; @@ -3564,9 +3436,9 @@ static int tt_no_collision( * calling convention doesn't make that distinction. */ for (; frame < fotg210->periodic_size; frame += period) { - union fotg210_shadow here; - __hc32 type; - struct fotg210_qh_hw *hw; + union fotg210_shadow here; + __hc32 type; + struct fotg210_qh_hw *hw; here = fotg210->pshadow[frame]; type = Q_NEXT_TYPE(fotg210, fotg210->periodic[frame]); @@ -3579,7 +3451,7 @@ static int tt_no_collision( case Q_TYPE_QH: hw = here.qh->hw; if (same_tt(dev, here.qh->dev)) { - u32 mask; + u32 mask; mask = hc32_to_cpu(fotg210, hw->hw_info2); @@ -3594,8 +3466,8 @@ static int tt_no_collision( /* case Q_TYPE_FSTN: */ default: fotg210_dbg(fotg210, - "periodic frame %d bogus type %d\n", - frame, type); + "periodic frame %d bogus type %d\n", + frame, type); } /* collision or error */ @@ -3607,8 +3479,6 @@ static int tt_no_collision( return 1; } -/*-------------------------------------------------------------------------*/ - static void enable_periodic(struct fotg210_hcd *fotg210) { if (fotg210->periodic_count++) @@ -3632,8 +3502,6 @@ static void disable_periodic(struct fotg210_hcd *fotg210) fotg210_poll_PSS(fotg210); } -/*-------------------------------------------------------------------------*/ - /* periodic schedule slots have iso tds (normal or split) first, then a * sparse tree for active interrupt transfers. 
* @@ -3642,24 +3510,24 @@ static void disable_periodic(struct fotg210_hcd *fotg210) */ static void qh_link_periodic(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) { - unsigned i; - unsigned period = qh->period; + unsigned i; + unsigned period = qh->period; dev_dbg(&qh->dev->dev, - "link qh%d-%04x/%p start %d [%d/%d us]\n", - period, hc32_to_cpup(fotg210, &qh->hw->hw_info2) - & (QH_CMASK | QH_SMASK), - qh, qh->start, qh->usecs, qh->c_usecs); + "link qh%d-%04x/%p start %d [%d/%d us]\n", period, + hc32_to_cpup(fotg210, &qh->hw->hw_info2) & + (QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs, + qh->c_usecs); /* high bandwidth, or otherwise every microframe */ if (period == 0) period = 1; for (i = qh->start; i < fotg210->periodic_size; i += period) { - union fotg210_shadow *prev = &fotg210->pshadow[i]; - __hc32 *hw_p = &fotg210->periodic[i]; - union fotg210_shadow here = *prev; - __hc32 type = 0; + union fotg210_shadow *prev = &fotg210->pshadow[i]; + __hc32 *hw_p = &fotg210->periodic[i]; + union fotg210_shadow here = *prev; + __hc32 type = 0; /* skip the iso nodes at list head */ while (here.ptr) { @@ -3707,10 +3575,10 @@ static void qh_link_periodic(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) } static void qh_unlink_periodic(struct fotg210_hcd *fotg210, - struct fotg210_qh *qh) + struct fotg210_qh *qh) { - unsigned i; - unsigned period; + unsigned i; + unsigned period; /* * If qh is for a low/full-speed device, simply unlinking it @@ -3741,10 +3609,10 @@ static void qh_unlink_periodic(struct fotg210_hcd *fotg210, : (qh->usecs * 8); dev_dbg(&qh->dev->dev, - "unlink qh%d-%04x/%p start %d [%d/%d us]\n", - qh->period, - hc32_to_cpup(fotg210, &qh->hw->hw_info2) & - (QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs, qh->c_usecs); + "unlink qh%d-%04x/%p start %d [%d/%d us]\n", + qh->period, hc32_to_cpup(fotg210, &qh->hw->hw_info2) & + (QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs, + qh->c_usecs); /* qh->qh_next still "live" to HC */ qh->qh_state = QH_STATE_UNLINK; @@ -3757,7 +3625,7 @@ static void qh_unlink_periodic(struct fotg210_hcd *fotg210, } static void start_unlink_intr(struct fotg210_hcd *fotg210, - struct fotg210_qh *qh) + struct fotg210_qh *qh) { /* If the QH isn't linked then there's nothing we can do * unless we were called during a giveback, in which case @@ -3794,15 +3662,15 @@ static void start_unlink_intr(struct fotg210_hcd *fotg210, fotg210_handle_intr_unlinks(fotg210); else if (fotg210->intr_unlink == qh) { fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR, - true); + true); ++fotg210->intr_unlink_cycle; } } static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) { - struct fotg210_qh_hw *hw = qh->hw; - int rc; + struct fotg210_qh_hw *hw = qh->hw; + int rc; qh->qh_state = QH_STATE_IDLE; hw->hw_next = FOTG210_LIST_END(fotg210); @@ -3811,7 +3679,7 @@ static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) /* reschedule QH iff another request is queued */ if (!list_empty(&qh->qtd_list) && - fotg210->rh_state == FOTG210_RH_RUNNING) { + fotg210->rh_state == FOTG210_RH_RUNNING) { rc = qh_schedule(fotg210, qh); /* An error here likely indicates handshake failure @@ -3830,16 +3698,10 @@ static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) disable_periodic(fotg210); } -/*-------------------------------------------------------------------------*/ - -static int check_period( - struct fotg210_hcd *fotg210, - unsigned frame, - unsigned uframe, - unsigned period, - unsigned usecs -) { - int claimed; 
+static int check_period(struct fotg210_hcd *fotg210, unsigned frame, + unsigned uframe, unsigned period, unsigned usecs) +{ + int claimed; /* complete split running into next frame? * given FSTN support, we could sometimes check... @@ -3857,7 +3719,7 @@ static int check_period( do { for (uframe = 0; uframe < 7; uframe++) { claimed = periodic_usecs(fotg210, frame, - uframe); + uframe); if (claimed > usecs) return 0; } @@ -3876,16 +3738,11 @@ static int check_period( return 1; } -static int check_intr_schedule( - struct fotg210_hcd *fotg210, - unsigned frame, - unsigned uframe, - const struct fotg210_qh *qh, - __hc32 *c_maskp -) +static int check_intr_schedule(struct fotg210_hcd *fotg210, unsigned frame, + unsigned uframe, const struct fotg210_qh *qh, __hc32 *c_maskp) { - int retval = -ENOSPC; - u8 mask = 0; + int retval = -ENOSPC; + u8 mask = 0; if (qh->c_usecs && uframe >= 6) /* FSTN territory? */ goto done; @@ -3911,10 +3768,10 @@ static int check_intr_schedule( mask |= 1 << uframe; if (tt_no_collision(fotg210, qh->period, qh->dev, frame, mask)) { if (!check_period(fotg210, frame, uframe + qh->gap_uf + 1, - qh->period, qh->c_usecs)) + qh->period, qh->c_usecs)) goto done; if (!check_period(fotg210, frame, uframe + qh->gap_uf, - qh->period, qh->c_usecs)) + qh->period, qh->c_usecs)) goto done; retval = 0; } @@ -3927,11 +3784,11 @@ done: */ static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) { - int status; - unsigned uframe; - __hc32 c_mask; - unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */ - struct fotg210_qh_hw *hw = qh->hw; + int status; + unsigned uframe; + __hc32 c_mask; + unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */ + struct fotg210_qh_hw *hw = qh->hw; qh_refresh(fotg210, qh); hw->hw_next = FOTG210_LIST_END(fotg210); @@ -3954,7 +3811,7 @@ static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) if (status) { /* "normal" case, uframing flexible except with splits */ if (qh->period) { - int i; + int i; for (i = qh->period; status && i > 0; --i) { frame = ++fotg210->random_frame % qh->period; @@ -3971,7 +3828,7 @@ static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh) } else { frame = 0; status = check_intr_schedule(fotg210, 0, 0, qh, - &c_mask); + &c_mask); } if (status) goto done; @@ -3992,17 +3849,14 @@ done: return status; } -static int intr_submit( - struct fotg210_hcd *fotg210, - struct urb *urb, - struct list_head *qtd_list, - gfp_t mem_flags -) { - unsigned epnum; - unsigned long flags; - struct fotg210_qh *qh; - int status; - struct list_head empty; +static int intr_submit(struct fotg210_hcd *fotg210, struct urb *urb, + struct list_head *qtd_list, gfp_t mem_flags) +{ + unsigned epnum; + unsigned long flags; + struct fotg210_qh *qh; + int status; + struct list_head empty; /* get endpoint and transfer/schedule data */ epnum = urb->ep->desc.bEndpointAddress; @@ -4050,11 +3904,11 @@ done_not_linked: static void scan_intr(struct fotg210_hcd *fotg210) { - struct fotg210_qh *qh; + struct fotg210_qh *qh; list_for_each_entry_safe(qh, fotg210->qh_scan_next, - &fotg210->intr_qh_list, intr_node) { - rescan: + &fotg210->intr_qh_list, intr_node) { +rescan: /* clean any finished work for this qh */ if (!list_empty(&qh->qtd_list)) { int temp; @@ -4069,7 +3923,7 @@ static void scan_intr(struct fotg210_hcd *fotg210) temp = qh_completions(fotg210, qh); if (unlikely(qh->needs_rescan || (list_empty(&qh->qtd_list) && - qh->qh_state == QH_STATE_LINKED))) + qh->qh_state == QH_STATE_LINKED))) start_unlink_intr(fotg210, qh); 
else if (temp != 0) goto rescan; @@ -4077,12 +3931,9 @@ static void scan_intr(struct fotg210_hcd *fotg210) } } -/*-------------------------------------------------------------------------*/ - /* fotg210_iso_stream ops work with both ITD and SITD */ -static struct fotg210_iso_stream * -iso_stream_alloc(gfp_t mem_flags) +static struct fotg210_iso_stream *iso_stream_alloc(gfp_t mem_flags) { struct fotg210_iso_stream *stream; @@ -4095,20 +3946,15 @@ iso_stream_alloc(gfp_t mem_flags) return stream; } -static void -iso_stream_init( - struct fotg210_hcd *fotg210, - struct fotg210_iso_stream *stream, - struct usb_device *dev, - int pipe, - unsigned interval -) +static void iso_stream_init(struct fotg210_hcd *fotg210, + struct fotg210_iso_stream *stream, struct usb_device *dev, + int pipe, unsigned interval) { - u32 buf1; - unsigned epnum, maxp; - int is_input; - long bandwidth; - unsigned multi; + u32 buf1; + unsigned epnum, maxp; + int is_input; + long bandwidth; + unsigned multi; /* * this might be a "high bandwidth" highspeed endpoint, @@ -4153,13 +3999,13 @@ iso_stream_init( stream->maxp = maxp; } -static struct fotg210_iso_stream * -iso_stream_find(struct fotg210_hcd *fotg210, struct urb *urb) +static struct fotg210_iso_stream *iso_stream_find(struct fotg210_hcd *fotg210, + struct urb *urb) { - unsigned epnum; - struct fotg210_iso_stream *stream; + unsigned epnum; + struct fotg210_iso_stream *stream; struct usb_host_endpoint *ep; - unsigned long flags; + unsigned long flags; epnum = usb_pipeendpoint(urb->pipe); if (usb_pipein(urb->pipe)) @@ -4182,8 +4028,8 @@ iso_stream_find(struct fotg210_hcd *fotg210, struct urb *urb) /* if dev->ep[epnum] is a QH, hw is set */ } else if (unlikely(stream->hw != NULL)) { fotg210_dbg(fotg210, "dev %s ep%d%s, not iso??\n", - urb->dev->devpath, epnum, - usb_pipein(urb->pipe) ? "in" : "out"); + urb->dev->devpath, epnum, + usb_pipein(urb->pipe) ? "in" : "out"); stream = NULL; } @@ -4191,15 +4037,13 @@ iso_stream_find(struct fotg210_hcd *fotg210, struct urb *urb) return stream; } -/*-------------------------------------------------------------------------*/ - /* fotg210_iso_sched ops can be ITD-only or SITD-only */ -static struct fotg210_iso_sched * -iso_sched_alloc(unsigned packets, gfp_t mem_flags) +static struct fotg210_iso_sched *iso_sched_alloc(unsigned packets, + gfp_t mem_flags) { - struct fotg210_iso_sched *iso_sched; - int size = sizeof(*iso_sched); + struct fotg210_iso_sched *iso_sched; + int size = sizeof(*iso_sched); size += packets * sizeof(struct fotg210_iso_packet); iso_sched = kzalloc(size, mem_flags); @@ -4209,16 +4053,12 @@ iso_sched_alloc(unsigned packets, gfp_t mem_flags) return iso_sched; } -static inline void -itd_sched_init( - struct fotg210_hcd *fotg210, - struct fotg210_iso_sched *iso_sched, - struct fotg210_iso_stream *stream, - struct urb *urb -) +static inline void itd_sched_init(struct fotg210_hcd *fotg210, + struct fotg210_iso_sched *iso_sched, + struct fotg210_iso_stream *stream, struct urb *urb) { - unsigned i; - dma_addr_t dma = urb->transfer_dma; + unsigned i; + dma_addr_t dma = urb->transfer_dma; /* how many uframes are needed for these transfers */ iso_sched->span = urb->number_of_packets * stream->interval; @@ -4227,10 +4067,10 @@ itd_sched_init( * when we fit new itds into the schedule. 
*/ for (i = 0; i < urb->number_of_packets; i++) { - struct fotg210_iso_packet *uframe = &iso_sched->packet[i]; - unsigned length; - dma_addr_t buf; - u32 trans; + struct fotg210_iso_packet *uframe = &iso_sched->packet[i]; + unsigned length; + dma_addr_t buf; + u32 trans; length = urb->iso_frame_desc[i].length; buf = dma + urb->iso_frame_desc[i].offset; @@ -4251,11 +4091,8 @@ itd_sched_init( } } -static void -iso_sched_free( - struct fotg210_iso_stream *stream, - struct fotg210_iso_sched *iso_sched -) +static void iso_sched_free(struct fotg210_iso_stream *stream, + struct fotg210_iso_sched *iso_sched) { if (!iso_sched) return; @@ -4264,20 +4101,15 @@ iso_sched_free( kfree(iso_sched); } -static int -itd_urb_transaction( - struct fotg210_iso_stream *stream, - struct fotg210_hcd *fotg210, - struct urb *urb, - gfp_t mem_flags -) +static int itd_urb_transaction(struct fotg210_iso_stream *stream, + struct fotg210_hcd *fotg210, struct urb *urb, gfp_t mem_flags) { - struct fotg210_itd *itd; - dma_addr_t itd_dma; - int i; - unsigned num_itds; - struct fotg210_iso_sched *sched; - unsigned long flags; + struct fotg210_itd *itd; + dma_addr_t itd_dma; + int i; + unsigned num_itds; + struct fotg210_iso_sched *sched; + unsigned long flags; sched = iso_sched_alloc(urb->number_of_packets, mem_flags); if (unlikely(sched == NULL)) @@ -4306,7 +4138,7 @@ itd_urb_transaction( list_del(&itd->itd_list); itd_dma = itd->itd_dma; } else { - alloc_itd: +alloc_itd: spin_unlock_irqrestore(&fotg210->lock, flags); itd = dma_pool_alloc(fotg210->itd_pool, mem_flags, &itd_dma); @@ -4330,16 +4162,8 @@ itd_urb_transaction( return 0; } -/*-------------------------------------------------------------------------*/ - -static inline int -itd_slot_ok( - struct fotg210_hcd *fotg210, - u32 mod, - u32 uframe, - u8 usecs, - u32 period -) +static inline int itd_slot_ok(struct fotg210_hcd *fotg210, u32 mod, u32 uframe, + u8 usecs, u32 period) { uframe %= period; do { @@ -4354,8 +4178,7 @@ itd_slot_ok( return 1; } -/* - * This scheduler plans almost as far into the future as it has actual +/* This scheduler plans almost as far into the future as it has actual * periodic schedule slots. (Affected by TUNE_FLS, which defaults to * "as small as possible" to be cache-friendlier.) That limits the size * transfers you can stream reliably; avoid more than 64 msec per urb. @@ -4365,19 +4188,15 @@ itd_slot_ok( * given FOTG210_TUNE_FLS and the slop). Or, write a smarter scheduler! */ -#define SCHEDULE_SLOP 80 /* microframes */ +#define SCHEDULE_SLOP 80 /* microframes */ -static int -iso_stream_schedule( - struct fotg210_hcd *fotg210, - struct urb *urb, - struct fotg210_iso_stream *stream -) +static int iso_stream_schedule(struct fotg210_hcd *fotg210, struct urb *urb, + struct fotg210_iso_stream *stream) { - u32 now, next, start, period, span; - int status; - unsigned mod = fotg210->periodic_size << 3; - struct fotg210_iso_sched *sched = urb->hcpriv; + u32 now, next, start, period, span; + int status; + unsigned mod = fotg210->periodic_size << 3; + struct fotg210_iso_sched *sched = urb->hcpriv; period = urb->interval; span = sched->span; @@ -4396,7 +4215,7 @@ iso_stream_schedule( * slot in the schedule, implicitly assuming URB_ISO_ASAP. */ if (likely(!list_empty(&stream->td_list))) { - u32 excess; + u32 excess; /* For high speed devices, allow scheduling within the * isochronous scheduling threshold. 
For full speed devices @@ -4435,6 +4254,7 @@ iso_stream_schedule( */ else { int done = 0; + start = SCHEDULE_SLOP + (now & ~0x07); /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */ @@ -4457,15 +4277,15 @@ iso_stream_schedule( /* no room in the schedule */ if (!done) { fotg210_dbg(fotg210, "iso resched full %p (now %d max %d)\n", - urb, now, now + mod); + urb, now, now + mod); status = -ENOSPC; goto fail; } } /* Tried to schedule too far into the future? */ - if (unlikely(start - now + span - period - >= mod - 2 * SCHEDULE_SLOP)) { + if (unlikely(start - now + span - period >= + mod - 2 * SCHEDULE_SLOP)) { fotg210_dbg(fotg210, "request %p would overflow (%d+%d >= %d)\n", urb, start - now, span - period, mod - 2 * SCHEDULE_SLOP); @@ -4485,17 +4305,14 @@ iso_stream_schedule( fotg210->next_frame = now >> 3; return 0; - fail: +fail: iso_sched_free(stream, sched); urb->hcpriv = NULL; return status; } -/*-------------------------------------------------------------------------*/ - -static inline void -itd_init(struct fotg210_hcd *fotg210, struct fotg210_iso_stream *stream, - struct fotg210_itd *itd) +static inline void itd_init(struct fotg210_hcd *fotg210, + struct fotg210_iso_stream *stream, struct fotg210_itd *itd) { int i; @@ -4511,17 +4328,12 @@ itd_init(struct fotg210_hcd *fotg210, struct fotg210_iso_stream *stream, /* All other fields are filled when scheduling */ } -static inline void -itd_patch( - struct fotg210_hcd *fotg210, - struct fotg210_itd *itd, - struct fotg210_iso_sched *iso_sched, - unsigned index, - u16 uframe -) +static inline void itd_patch(struct fotg210_hcd *fotg210, + struct fotg210_itd *itd, struct fotg210_iso_sched *iso_sched, + unsigned index, u16 uframe) { - struct fotg210_iso_packet *uf = &iso_sched->packet[index]; - unsigned pg = itd->pg; + struct fotg210_iso_packet *uf = &iso_sched->packet[index]; + unsigned pg = itd->pg; uframe &= 0x07; itd->index[uframe] = index; @@ -4533,7 +4345,7 @@ itd_patch( /* iso_frame_desc[].offset must be strictly increasing */ if (unlikely(uf->cross)) { - u64 bufp = uf->bufp + 4096; + u64 bufp = uf->bufp + 4096; itd->pg = ++pg; itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, bufp & ~(u32)0); @@ -4541,13 +4353,13 @@ itd_patch( } } -static inline void -itd_link(struct fotg210_hcd *fotg210, unsigned frame, struct fotg210_itd *itd) +static inline void itd_link(struct fotg210_hcd *fotg210, unsigned frame, + struct fotg210_itd *itd) { - union fotg210_shadow *prev = &fotg210->pshadow[frame]; - __hc32 *hw_p = &fotg210->periodic[frame]; - union fotg210_shadow here = *prev; - __hc32 type = 0; + union fotg210_shadow *prev = &fotg210->pshadow[frame]; + __hc32 *hw_p = &fotg210->periodic[frame]; + union fotg210_shadow here = *prev; + __hc32 type = 0; /* skip any iso nodes which might belong to previous microframes */ while (here.ptr) { @@ -4568,17 +4380,13 @@ itd_link(struct fotg210_hcd *fotg210, unsigned frame, struct fotg210_itd *itd) } /* fit urb's itds into the selected schedule slot; activate as needed */ -static void itd_link_urb( - struct fotg210_hcd *fotg210, - struct urb *urb, - unsigned mod, - struct fotg210_iso_stream *stream -) -{ - int packet; - unsigned next_uframe, uframe, frame; - struct fotg210_iso_sched *iso_sched = urb->hcpriv; - struct fotg210_itd *itd; +static void itd_link_urb(struct fotg210_hcd *fotg210, struct urb *urb, + unsigned mod, struct fotg210_iso_stream *stream) +{ + int packet; + unsigned next_uframe, uframe, frame; + struct fotg210_iso_sched *iso_sched = urb->hcpriv; + struct fotg210_itd *itd; next_uframe = 
stream->next_uframe & (mod - 1); @@ -4621,7 +4429,7 @@ static void itd_link_urb( if (((next_uframe >> 3) != frame) || packet == urb->number_of_packets) { itd_link(fotg210, frame & (fotg210->periodic_size - 1), - itd); + itd); itd = NULL; } } @@ -4635,8 +4443,8 @@ static void itd_link_urb( enable_periodic(fotg210); } -#define ISO_ERRS (FOTG210_ISOC_BUF_ERR | FOTG210_ISOC_BABBLE |\ - FOTG210_ISOC_XACTERR) +#define ISO_ERRS (FOTG210_ISOC_BUF_ERR | FOTG210_ISOC_BABBLE |\ + FOTG210_ISOC_XACTERR) /* Process and recycle a completed ITD. Return true iff its urb completed, * and hence its completion callback probably added things to the hardware @@ -4650,14 +4458,14 @@ static void itd_link_urb( */ static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd) { - struct urb *urb = itd->urb; - struct usb_iso_packet_descriptor *desc; - u32 t; - unsigned uframe; - int urb_index = -1; - struct fotg210_iso_stream *stream = itd->stream; - struct usb_device *dev; - bool retval = false; + struct urb *urb = itd->urb; + struct usb_iso_packet_descriptor *desc; + u32 t; + unsigned uframe; + int urb_index = -1; + struct fotg210_iso_stream *stream = itd->stream; + struct usb_device *dev; + bool retval = false; /* for each uframe with a packet */ for (uframe = 0; uframe < 8; uframe++) { @@ -4702,8 +4510,8 @@ static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd) goto done; /* ASSERT: it's really the last itd for this urb - list_for_each_entry (itd, &stream->td_list, itd_list) - BUG_ON (itd->urb == urb); + * list_for_each_entry (itd, &stream->td_list, itd_list) + * BUG_ON (itd->urb == urb); */ /* give urb back to the driver; completion often (re)submits */ @@ -4740,14 +4548,12 @@ done: return retval; } -/*-------------------------------------------------------------------------*/ - static int itd_submit(struct fotg210_hcd *fotg210, struct urb *urb, - gfp_t mem_flags) + gfp_t mem_flags) { - int status = -EINVAL; - unsigned long flags; - struct fotg210_iso_stream *stream; + int status = -EINVAL; + unsigned long flags; + struct fotg210_iso_stream *stream; /* Get iso_stream head */ stream = iso_stream_find(fotg210, urb); @@ -4756,22 +4562,22 @@ static int itd_submit(struct fotg210_hcd *fotg210, struct urb *urb, return -ENOMEM; } if (unlikely(urb->interval != stream->interval && - fotg210_port_speed(fotg210, 0) == - USB_PORT_STAT_HIGH_SPEED)) { - fotg210_dbg(fotg210, "can't change iso interval %d --> %d\n", + fotg210_port_speed(fotg210, 0) == + USB_PORT_STAT_HIGH_SPEED)) { + fotg210_dbg(fotg210, "can't change iso interval %d --> %d\n", stream->interval, urb->interval); - goto done; + goto done; } #ifdef FOTG210_URB_TRACE fotg210_dbg(fotg210, - "%s %s urb %p ep%d%s len %d, %d pkts %d uframes[%p]\n", - __func__, urb->dev->devpath, urb, - usb_pipeendpoint(urb->pipe), - usb_pipein(urb->pipe) ? "in" : "out", - urb->transfer_buffer_length, - urb->number_of_packets, urb->interval, - stream); + "%s %s urb %p ep%d%s len %d, %d pkts %d uframes[%p]\n", + __func__, urb->dev->devpath, urb, + usb_pipeendpoint(urb->pipe), + usb_pipein(urb->pipe) ? 
"in" : "out", + urb->transfer_buffer_length, + urb->number_of_packets, urb->interval, + stream); #endif /* allocate ITDs w/o locking anything */ @@ -4795,19 +4601,87 @@ static int itd_submit(struct fotg210_hcd *fotg210, struct urb *urb, itd_link_urb(fotg210, urb, fotg210->periodic_size << 3, stream); else usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb); - done_not_linked: +done_not_linked: spin_unlock_irqrestore(&fotg210->lock, flags); - done: +done: return status; } -/*-------------------------------------------------------------------------*/ +static inline int scan_frame_queue(struct fotg210_hcd *fotg210, unsigned frame, + unsigned now_frame, bool live) +{ + unsigned uf; + bool modified; + union fotg210_shadow q, *q_p; + __hc32 type, *hw_p; + + /* scan each element in frame's queue for completions */ + q_p = &fotg210->pshadow[frame]; + hw_p = &fotg210->periodic[frame]; + q.ptr = q_p->ptr; + type = Q_NEXT_TYPE(fotg210, *hw_p); + modified = false; + + while (q.ptr) { + switch (hc32_to_cpu(fotg210, type)) { + case Q_TYPE_ITD: + /* If this ITD is still active, leave it for + * later processing ... check the next entry. + * No need to check for activity unless the + * frame is current. + */ + if (frame == now_frame && live) { + rmb(); + for (uf = 0; uf < 8; uf++) { + if (q.itd->hw_transaction[uf] & + ITD_ACTIVE(fotg210)) + break; + } + if (uf < 8) { + q_p = &q.itd->itd_next; + hw_p = &q.itd->hw_next; + type = Q_NEXT_TYPE(fotg210, + q.itd->hw_next); + q = *q_p; + break; + } + } + + /* Take finished ITDs out of the schedule + * and process them: recycle, maybe report + * URB completion. HC won't cache the + * pointer for much longer, if at all. + */ + *q_p = q.itd->itd_next; + *hw_p = q.itd->hw_next; + type = Q_NEXT_TYPE(fotg210, q.itd->hw_next); + wmb(); + modified = itd_complete(fotg210, q.itd); + q = *q_p; + break; + default: + fotg210_dbg(fotg210, "corrupt type %d frame %d shadow %p\n", + type, frame, q.ptr); + /* FALL THROUGH */ + case Q_TYPE_QH: + case Q_TYPE_FSTN: + /* End of the iTDs and siTDs */ + q.ptr = NULL; + break; + } + + /* assume completion callbacks modify the queue */ + if (unlikely(modified && fotg210->isoc_count > 0)) + return -EINVAL; + } + return 0; +} static void scan_isoc(struct fotg210_hcd *fotg210) { - unsigned uf, now_frame, frame; - unsigned fmask = fotg210->periodic_size - 1; - bool modified, live; + unsigned uf, now_frame, frame, ret; + unsigned fmask = fotg210->periodic_size - 1; + bool live; /* * When running, scan from last scan point up to "now" @@ -4826,69 +4700,10 @@ static void scan_isoc(struct fotg210_hcd *fotg210) frame = fotg210->next_frame; for (;;) { - union fotg210_shadow q, *q_p; - __hc32 type, *hw_p; - -restart: - /* scan each element in frame's queue for completions */ - q_p = &fotg210->pshadow[frame]; - hw_p = &fotg210->periodic[frame]; - q.ptr = q_p->ptr; - type = Q_NEXT_TYPE(fotg210, *hw_p); - modified = false; - - while (q.ptr != NULL) { - switch (hc32_to_cpu(fotg210, type)) { - case Q_TYPE_ITD: - /* If this ITD is still active, leave it for - * later processing ... check the next entry. - * No need to check for activity unless the - * frame is current. 
- */ - if (frame == now_frame && live) { - rmb(); - for (uf = 0; uf < 8; uf++) { - if (q.itd->hw_transaction[uf] & - ITD_ACTIVE(fotg210)) - break; - } - if (uf < 8) { - q_p = &q.itd->itd_next; - hw_p = &q.itd->hw_next; - type = Q_NEXT_TYPE(fotg210, - q.itd->hw_next); - q = *q_p; - break; - } - } - - /* Take finished ITDs out of the schedule - * and process them: recycle, maybe report - * URB completion. HC won't cache the - * pointer for much longer, if at all. - */ - *q_p = q.itd->itd_next; - *hw_p = q.itd->hw_next; - type = Q_NEXT_TYPE(fotg210, q.itd->hw_next); - wmb(); - modified = itd_complete(fotg210, q.itd); - q = *q_p; - break; - default: - fotg210_dbg(fotg210, "corrupt type %d frame %d shadow %p\n", - type, frame, q.ptr); - /* FALL THROUGH */ - case Q_TYPE_QH: - case Q_TYPE_FSTN: - /* End of the iTDs and siTDs */ - q.ptr = NULL; - break; - } - - /* assume completion callbacks modify the queue */ - if (unlikely(modified && fotg210->isoc_count > 0)) - goto restart; - } + ret = 1; + while (ret != 0) + ret = scan_frame_queue(fotg210, frame, + now_frame, live); /* Stop when we have reached the current frame */ if (frame == now_frame) @@ -4897,16 +4712,14 @@ restart: } fotg210->next_frame = now_frame; } -/*-------------------------------------------------------------------------*/ -/* - * Display / Set uframe_periodic_max + +/* Display / Set uframe_periodic_max */ static ssize_t show_uframe_periodic_max(struct device *dev, - struct device_attribute *attr, - char *buf) + struct device_attribute *attr, char *buf) { - struct fotg210_hcd *fotg210; - int n; + struct fotg210_hcd *fotg210; + int n; fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev))); n = scnprintf(buf, PAGE_SIZE, "%d\n", fotg210->uframe_periodic_max); @@ -4915,15 +4728,14 @@ static ssize_t show_uframe_periodic_max(struct device *dev, static ssize_t store_uframe_periodic_max(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) + struct device_attribute *attr, const char *buf, size_t count) { - struct fotg210_hcd *fotg210; - unsigned uframe_periodic_max; - unsigned frame, uframe; - unsigned short allocated_max; - unsigned long flags; - ssize_t ret; + struct fotg210_hcd *fotg210; + unsigned uframe_periodic_max; + unsigned frame, uframe; + unsigned short allocated_max; + unsigned long flags; + ssize_t ret; fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev))); if (kstrtouint(buf, 0, &uframe_periodic_max) < 0) @@ -4931,7 +4743,7 @@ static ssize_t store_uframe_periodic_max(struct device *dev, if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) { fotg210_info(fotg210, "rejecting invalid request for uframe_periodic_max=%u\n", - uframe_periodic_max); + uframe_periodic_max); return -EINVAL; } @@ -4954,22 +4766,22 @@ static ssize_t store_uframe_periodic_max(struct device *dev, for (frame = 0; frame < fotg210->periodic_size; ++frame) for (uframe = 0; uframe < 7; ++uframe) allocated_max = max(allocated_max, - periodic_usecs(fotg210, frame, uframe)); + periodic_usecs(fotg210, frame, + uframe)); if (allocated_max > uframe_periodic_max) { fotg210_info(fotg210, - "cannot decrease uframe_periodic_max because " - "periodic bandwidth is already allocated " - "(%u > %u)\n", - allocated_max, uframe_periodic_max); + "cannot decrease uframe_periodic_max because periodic bandwidth is already allocated (%u > %u)\n", + allocated_max, uframe_periodic_max); goto out_unlock; } } /* increasing is always ok */ - fotg210_info(fotg210, "setting max periodic bandwidth to %u%% (== %u usec/uframe)\n", - 100 * 
uframe_periodic_max/125, uframe_periodic_max); + fotg210_info(fotg210, + "setting max periodic bandwidth to %u%% (== %u usec/uframe)\n", + 100 * uframe_periodic_max/125, uframe_periodic_max); if (uframe_periodic_max != 100) fotg210_warn(fotg210, "max periodic bandwidth set is non-standard\n"); @@ -4987,8 +4799,8 @@ static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max, static inline int create_sysfs_files(struct fotg210_hcd *fotg210) { - struct device *controller = fotg210_to_hcd(fotg210)->self.controller; - int i = 0; + struct device *controller = fotg210_to_hcd(fotg210)->self.controller; + int i = 0; if (i) goto out; @@ -5000,12 +4812,10 @@ out: static inline void remove_sysfs_files(struct fotg210_hcd *fotg210) { - struct device *controller = fotg210_to_hcd(fotg210)->self.controller; + struct device *controller = fotg210_to_hcd(fotg210)->self.controller; device_remove_file(controller, &dev_attr_uframe_periodic_max); } -/*-------------------------------------------------------------------------*/ - /* On some systems, leaving remote wakeup enabled prevents system shutdown. * The firmware seems to think that powering off is a wakeup event! * This routine turns off remote wakeup and everything else, on all ports. @@ -5017,8 +4827,7 @@ static void fotg210_turn_off_all_ports(struct fotg210_hcd *fotg210) fotg210_writel(fotg210, PORT_RWC_BITS, status_reg); } -/* - * Halt HC, turn off all ports, and let the BIOS use the companion controllers. +/* Halt HC, turn off all ports, and let the BIOS use the companion controllers. * Must be called with interrupts enabled and the lock not held. */ static void fotg210_silence_controller(struct fotg210_hcd *fotg210) @@ -5037,7 +4846,7 @@ static void fotg210_silence_controller(struct fotg210_hcd *fotg210) */ static void fotg210_shutdown(struct usb_hcd *hcd) { - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); spin_lock_irq(&fotg210->lock); fotg210->shutdown = true; @@ -5050,10 +4859,7 @@ static void fotg210_shutdown(struct usb_hcd *hcd) hrtimer_cancel(&fotg210->hrtimer); } -/*-------------------------------------------------------------------------*/ - -/* - * fotg210_work is called from some interrupts, timers, and so on. +/* fotg210_work is called from some interrupts, timers, and so on. * it calls driver completion functions, after dropping fotg210->lock. */ static void fotg210_work(struct fotg210_hcd *fotg210) @@ -5068,7 +4874,7 @@ static void fotg210_work(struct fotg210_hcd *fotg210) } fotg210->scanning = true; - rescan: +rescan: fotg210->need_rescan = false; if (fotg210->async_count) scan_async(fotg210); @@ -5087,12 +4893,11 @@ static void fotg210_work(struct fotg210_hcd *fotg210) turn_on_io_watchdog(fotg210); } -/* - * Called when the fotg210_hcd module is removed. +/* Called when the fotg210_hcd module is removed. 
*/ static void fotg210_stop(struct usb_hcd *hcd) { - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); fotg210_dbg(fotg210, "stop\n"); @@ -5116,26 +4921,26 @@ static void fotg210_stop(struct usb_hcd *hcd) spin_unlock_irq(&fotg210->lock); fotg210_mem_cleanup(fotg210); -#ifdef FOTG210_STATS +#ifdef FOTG210_STATS fotg210_dbg(fotg210, "irq normal %ld err %ld iaa %ld (lost %ld)\n", - fotg210->stats.normal, fotg210->stats.error, fotg210->stats.iaa, - fotg210->stats.lost_iaa); + fotg210->stats.normal, fotg210->stats.error, + fotg210->stats.iaa, fotg210->stats.lost_iaa); fotg210_dbg(fotg210, "complete %ld unlink %ld\n", - fotg210->stats.complete, fotg210->stats.unlink); + fotg210->stats.complete, fotg210->stats.unlink); #endif dbg_status(fotg210, "fotg210_stop completed", - fotg210_readl(fotg210, &fotg210->regs->status)); + fotg210_readl(fotg210, &fotg210->regs->status)); } /* one-time init, only for memory state */ static int hcd_fotg210_init(struct usb_hcd *hcd) { - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); - u32 temp; - int retval; - u32 hcc_params; - struct fotg210_qh_hw *hw; + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + u32 temp; + int retval; + u32 hcc_params; + struct fotg210_qh_hw *hw; spin_lock_init(&fotg210->lock); @@ -5238,18 +5043,18 @@ static int hcd_fotg210_init(struct usb_hcd *hcd) /* start HC running; it's halted, hcd_fotg210_init() has been run (once) */ static int fotg210_run(struct usb_hcd *hcd) { - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); - u32 temp; - u32 hcc_params; + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + u32 temp; + u32 hcc_params; hcd->uses_new_polling = 1; /* EHCI spec section 4.1 */ fotg210_writel(fotg210, fotg210->periodic_dma, - &fotg210->regs->frame_list); + &fotg210->regs->frame_list); fotg210_writel(fotg210, (u32)fotg210->async->qh_dma, - &fotg210->regs->async_next); + &fotg210->regs->async_next); /* * hcc_params controls whether fotg210->regs->segment must (!!!) @@ -5292,19 +5097,19 @@ static int fotg210_run(struct usb_hcd *hcd) fotg210->rh_state = FOTG210_RH_RUNNING; /* unblock posted writes */ fotg210_readl(fotg210, &fotg210->regs->command); - msleep(5); + usleep_range(5000, 10000); up_write(&ehci_cf_port_reset_rwsem); fotg210->last_periodic_enable = ktime_get_real(); temp = HC_VERSION(fotg210, - fotg210_readl(fotg210, &fotg210->caps->hc_capbase)); + fotg210_readl(fotg210, &fotg210->caps->hc_capbase)); fotg210_info(fotg210, - "USB %x.%x started, EHCI %x.%02x\n", - ((fotg210->sbrn & 0xf0)>>4), (fotg210->sbrn & 0x0f), - temp >> 8, temp & 0xff); + "USB %x.%x started, EHCI %x.%02x\n", + ((fotg210->sbrn & 0xf0) >> 4), (fotg210->sbrn & 0x0f), + temp >> 8, temp & 0xff); fotg210_writel(fotg210, INTR_MASK, - &fotg210->regs->intr_enable); /* Turn On Interrupts */ + &fotg210->regs->intr_enable); /* Turn On Interrupts */ /* GRR this is run-once init(), being done every time the HC starts. 
* So long as they're part of class devices, we can't do it init() @@ -5322,14 +5127,14 @@ static int fotg210_setup(struct usb_hcd *hcd) int retval; fotg210->regs = (void __iomem *)fotg210->caps + - HC_LENGTH(fotg210, - fotg210_readl(fotg210, &fotg210->caps->hc_capbase)); + HC_LENGTH(fotg210, + fotg210_readl(fotg210, &fotg210->caps->hc_capbase)); dbg_hcs_params(fotg210, "reset"); dbg_hcc_params(fotg210, "reset"); /* cache this readonly data; minimize chip reads */ fotg210->hcs_params = fotg210_readl(fotg210, - &fotg210->caps->hcs_params); + &fotg210->caps->hcs_params); fotg210->sbrn = HCD_USB2; @@ -5347,13 +5152,11 @@ static int fotg210_setup(struct usb_hcd *hcd) return 0; } -/*-------------------------------------------------------------------------*/ - static irqreturn_t fotg210_irq(struct usb_hcd *hcd) { - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); - u32 status, masked_status, pcd_status = 0, cmd; - int bh; + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + u32 status, masked_status, pcd_status = 0, cmd; + int bh; spin_lock(&fotg210->lock); @@ -5373,7 +5176,7 @@ static irqreturn_t fotg210_irq(struct usb_hcd *hcd) /* Shared IRQ? */ if (!masked_status || - unlikely(fotg210->rh_state == FOTG210_RH_HALTED)) { + unlikely(fotg210->rh_state == FOTG210_RH_HALTED)) { spin_unlock(&fotg210->lock); return IRQ_NONE; } @@ -5440,7 +5243,7 @@ static irqreturn_t fotg210_irq(struct usb_hcd *hcd) if (test_bit(0, &fotg210->suspended_ports) && ((pstatus & PORT_RESUME) || - !(pstatus & PORT_SUSPEND)) && + !(pstatus & PORT_SUSPEND)) && (pstatus & PORT_PE) && fotg210->reset_done[0] == 0) { @@ -5469,7 +5272,7 @@ dead: fotg210->rh_state = FOTG210_RH_STOPPING; fotg210->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE); fotg210_writel(fotg210, fotg210->command, - &fotg210->regs->command); + &fotg210->regs->command); fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable); fotg210_handle_controller_death(fotg210); @@ -5485,10 +5288,7 @@ dead: return IRQ_HANDLED; } -/*-------------------------------------------------------------------------*/ - -/* - * non-error returns are a promise to giveback() the urb later +/* non-error returns are a promise to giveback() the urb later * we drop ownership so next owner (or urb unlink) can get it * * urb + dev is in hcd.self.controller.urb_list @@ -5499,13 +5299,11 @@ dead: * NOTE: control, bulk, and interrupt share the same code to append TDs * to a (possibly active) QH, and the same QH scanning code. 
*/ -static int fotg210_urb_enqueue( - struct usb_hcd *hcd, - struct urb *urb, - gfp_t mem_flags -) { - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); - struct list_head qtd_list; +static int fotg210_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, + gfp_t mem_flags) +{ + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + struct list_head qtd_list; INIT_LIST_HEAD(&qtd_list); @@ -5539,10 +5337,10 @@ static int fotg210_urb_enqueue( static int fotg210_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) { - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); - struct fotg210_qh *qh; - unsigned long flags; - int rc; + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + struct fotg210_qh *qh; + unsigned long flags; + int rc; spin_lock_irqsave(&fotg210->lock, flags); rc = usb_hcd_check_unlink_urb(hcd, urb, status); @@ -5603,16 +5401,14 @@ done: return rc; } -/*-------------------------------------------------------------------------*/ - /* bulk qh holds the data toggle */ -static void -fotg210_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep) +static void fotg210_endpoint_disable(struct usb_hcd *hcd, + struct usb_host_endpoint *ep) { - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); - unsigned long flags; - struct fotg210_qh *qh, *tmp; + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + unsigned long flags; + struct fotg210_qh *qh, *tmp; /* ASSERT: any requests/urbs are being unlinked */ /* ASSERT: nobody can be submitting urbs for this any more */ @@ -5627,7 +5423,7 @@ rescan: * accelerate iso completions ... so spin a while. */ if (qh->hw == NULL) { - struct fotg210_iso_stream *stream = ep->hcpriv; + struct fotg210_iso_stream *stream = ep->hcpriv; if (!list_empty(&stream->td_list)) goto idle_timeout; @@ -5671,24 +5467,24 @@ idle_timeout: * that's not our job. just leak this memory. */ fotg210_err(fotg210, "qh %p (#%02x) state %d%s\n", - qh, ep->desc.bEndpointAddress, qh->qh_state, - list_empty(&qh->qtd_list) ? "" : "(has tds)"); + qh, ep->desc.bEndpointAddress, qh->qh_state, + list_empty(&qh->qtd_list) ? 
"" : "(has tds)"); break; } - done: +done: ep->hcpriv = NULL; spin_unlock_irqrestore(&fotg210->lock, flags); } -static void -fotg210_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep) +static void fotg210_endpoint_reset(struct usb_hcd *hcd, + struct usb_host_endpoint *ep) { - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); - struct fotg210_qh *qh; - int eptype = usb_endpoint_type(&ep->desc); - int epnum = usb_endpoint_num(&ep->desc); - int is_out = usb_endpoint_dir_out(&ep->desc); - unsigned long flags; + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + struct fotg210_qh *qh; + int eptype = usb_endpoint_type(&ep->desc); + int epnum = usb_endpoint_num(&ep->desc); + int is_out = usb_endpoint_dir_out(&ep->desc); + unsigned long flags; if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT) return; @@ -5723,15 +5519,13 @@ fotg210_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep) static int fotg210_get_frame(struct usb_hcd *hcd) { - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); + return (fotg210_read_frame_index(fotg210) >> 3) % fotg210->periodic_size; } -/*-------------------------------------------------------------------------*/ - -/* - * The EHCI in ChipIdea HDRC cannot be a separate module or device, +/* The EHCI in ChipIdea HDRC cannot be a separate module or device, * because its registers (and irq) are shared between host/gadget/otg * functions and in order to facilitate role switching we cannot * give the fotg210 driver exclusive access to those. @@ -5791,7 +5585,7 @@ static void fotg210_init(struct fotg210_hcd *fotg210) u32 value; iowrite32(GMIR_MDEV_INT | GMIR_MOTG_INT | GMIR_INT_POLARITY, - &fotg210->regs->gmir); + &fotg210->regs->gmir); value = ioread32(&fotg210->regs->otgcsr); value &= ~OTGCSR_A_BUS_DROP; @@ -5808,12 +5602,12 @@ static void fotg210_init(struct fotg210_hcd *fotg210) */ static int fotg210_hcd_probe(struct platform_device *pdev) { - struct device *dev = &pdev->dev; - struct usb_hcd *hcd; - struct resource *res; - int irq; - int retval = -ENODEV; - struct fotg210_hcd *fotg210; + struct device *dev = &pdev->dev; + struct usb_hcd *hcd; + struct resource *res; + int irq; + int retval = -ENODEV; + struct fotg210_hcd *fotg210; if (usb_disabled()) return -ENODEV; @@ -5822,9 +5616,8 @@ static int fotg210_hcd_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { - dev_err(dev, - "Found HC with no IRQ. Check %s setup!\n", - dev_name(dev)); + dev_err(dev, "Found HC with no IRQ. Check %s setup!\n", + dev_name(dev)); return -ENODEV; } @@ -5883,8 +5676,8 @@ fail_create_hcd: */ static int fotg210_hcd_remove(struct platform_device *pdev) { - struct device *dev = &pdev->dev; - struct usb_hcd *hcd = dev_get_drvdata(dev); + struct device *dev = &pdev->dev; + struct usb_hcd *hcd = dev_get_drvdata(dev); if (!hcd) return 0; @@ -5914,12 +5707,12 @@ static int __init fotg210_hcd_init(void) set_bit(USB_EHCI_LOADED, &usb_hcds_loaded); if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) || test_bit(USB_OHCI_LOADED, &usb_hcds_loaded)) - pr_warn(KERN_WARNING "Warning! fotg210_hcd should always be loaded before uhci_hcd and ohci_hcd, not after\n"); + pr_warn("Warning! 
fotg210_hcd should always be loaded before uhci_hcd and ohci_hcd, not after\n"); pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd\n", - hcd_name, - sizeof(struct fotg210_qh), sizeof(struct fotg210_qtd), - sizeof(struct fotg210_itd)); + hcd_name, sizeof(struct fotg210_qh), + sizeof(struct fotg210_qtd), + sizeof(struct fotg210_itd)); fotg210_debug_root = debugfs_create_dir("fotg210", usb_debug_root); if (!fotg210_debug_root) { @@ -5932,7 +5725,6 @@ static int __init fotg210_hcd_init(void) goto clean; return retval; - platform_driver_unregister(&fotg210_hcd_driver); clean: debugfs_remove(fotg210_debug_root); fotg210_debug_root = NULL; diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h index 3bad17859cd7..b5cfa7aeb277 100644 --- a/drivers/usb/host/fotg210.h +++ b/drivers/usb/host/fotg210.h @@ -137,19 +137,25 @@ struct fotg210_hcd { /* one per controller */ /* per root hub port */ unsigned long reset_done[FOTG210_MAX_ROOT_PORTS]; - /* bit vectors (one bit per port) */ - unsigned long bus_suspended; /* which ports were - already suspended at the start of a bus suspend */ - unsigned long companion_ports; /* which ports are - dedicated to the companion controller */ - unsigned long owned_ports; /* which ports are - owned by the companion during a bus suspend */ - unsigned long port_c_suspend; /* which ports have - the change-suspend feature turned on */ - unsigned long suspended_ports; /* which ports are - suspended */ - unsigned long resuming_ports; /* which ports have - started to resume */ + /* bit vectors (one bit per port) + * which ports were already suspended at the start of a bus suspend + */ + unsigned long bus_suspended; + + /* which ports are dedicated to the companion controller */ + unsigned long companion_ports; + + /* which ports are owned by the companion during a bus suspend */ + unsigned long owned_ports; + + /* which ports have the change-suspend feature turned on */ + unsigned long port_c_suspend; + + /* which ports are suspended */ + unsigned long suspended_ports; + + /* which ports have started to resume */ + unsigned long resuming_ports; /* per-HC memory pools (could be per-bus, but ...)
*/ struct dma_pool *qh_pool; /* qh per active urb */ @@ -585,10 +591,10 @@ struct fotg210_fstn { /* Prepare the PORTSC wakeup flags during controller suspend/resume */ #define fotg210_prepare_ports_for_controller_suspend(fotg210, do_wakeup) \ - fotg210_adjust_port_wakeup_flags(fotg210, true, do_wakeup); + fotg210_adjust_port_wakeup_flags(fotg210, true, do_wakeup) #define fotg210_prepare_ports_for_controller_resume(fotg210) \ - fotg210_adjust_port_wakeup_flags(fotg210, false, false); + fotg210_adjust_port_wakeup_flags(fotg210, false, false) /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c index 534c4c5d278a..0c382652a399 100644 --- a/drivers/usb/host/fsl-mph-dr-of.c +++ b/drivers/usb/host/fsl-mph-dr-of.c @@ -351,6 +351,7 @@ static const struct of_device_id fsl_usb2_mph_dr_of_match[] = { #endif {}, }; +MODULE_DEVICE_TABLE(of, fsl_usb2_mph_dr_of_match); static struct platform_driver fsl_usb2_mph_dr_driver = { .driver = { diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c deleted file mode 100644 index 1fd8718a9f11..000000000000 --- a/drivers/usb/host/fusbh200-hcd.c +++ /dev/null @@ -1,5894 +0,0 @@ -/* - * Faraday FUSBH200 EHCI-like driver - * - * Copyright (c) 2013 Faraday Technology Corporation - * - * Author: Yuan-Hsin Chen <yhchen@faraday-tech.com> - * Feng-Hsin Chiang <john453@faraday-tech.com> - * Po-Yu Chuang <ratbert.chuang@gmail.com> - * - * Most of code borrowed from the Linux-3.7 EHCI driver - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, - * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#include <linux/module.h> -#include <linux/device.h> -#include <linux/dmapool.h> -#include <linux/kernel.h> -#include <linux/delay.h> -#include <linux/ioport.h> -#include <linux/sched.h> -#include <linux/vmalloc.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/hrtimer.h> -#include <linux/list.h> -#include <linux/interrupt.h> -#include <linux/usb.h> -#include <linux/usb/hcd.h> -#include <linux/moduleparam.h> -#include <linux/dma-mapping.h> -#include <linux/debugfs.h> -#include <linux/slab.h> -#include <linux/uaccess.h> -#include <linux/platform_device.h> - -#include <asm/byteorder.h> -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/unaligned.h> - -/*-------------------------------------------------------------------------*/ -#define DRIVER_AUTHOR "Yuan-Hsin Chen" -#define DRIVER_DESC "FUSBH200 Host Controller (EHCI) Driver" - -static const char hcd_name [] = "fusbh200_hcd"; - -#undef FUSBH200_URB_TRACE - -/* magic numbers that can affect system performance */ -#define FUSBH200_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */ -#define FUSBH200_TUNE_RL_HS 4 /* nak throttle; see 4.9 */ -#define FUSBH200_TUNE_RL_TT 0 -#define FUSBH200_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */ -#define FUSBH200_TUNE_MULT_TT 1 -/* - * Some drivers think it's safe to schedule isochronous transfers more than - * 256 ms into the future (partly as a result of an old bug in the scheduling - * code). In an attempt to avoid trouble, we will use a minimum scheduling - * length of 512 frames instead of 256. - */ -#define FUSBH200_TUNE_FLS 1 /* (medium) 512-frame schedule */ - -/* Initial IRQ latency: faster than hw default */ -static int log2_irq_thresh = 0; // 0 to 6 -module_param (log2_irq_thresh, int, S_IRUGO); -MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes"); - -/* initial park setting: slower than hw default */ -static unsigned park = 0; -module_param (park, uint, S_IRUGO); -MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets"); - -/* for link power management(LPM) feature */ -static unsigned int hird; -module_param(hird, int, S_IRUGO); -MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us"); - -#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT) - -#include "fusbh200.h" - -/*-------------------------------------------------------------------------*/ - -#define fusbh200_dbg(fusbh200, fmt, args...) \ - dev_dbg (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args ) -#define fusbh200_err(fusbh200, fmt, args...) \ - dev_err (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args ) -#define fusbh200_info(fusbh200, fmt, args...) \ - dev_info (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args ) -#define fusbh200_warn(fusbh200, fmt, args...) 
\ - dev_warn (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args ) - -/* check the values in the HCSPARAMS register - * (host controller _Structural_ parameters) - * see EHCI spec, Table 2-4 for each value - */ -static void dbg_hcs_params (struct fusbh200_hcd *fusbh200, char *label) -{ - u32 params = fusbh200_readl(fusbh200, &fusbh200->caps->hcs_params); - - fusbh200_dbg (fusbh200, - "%s hcs_params 0x%x ports=%d\n", - label, params, - HCS_N_PORTS (params) - ); -} - -/* check the values in the HCCPARAMS register - * (host controller _Capability_ parameters) - * see EHCI Spec, Table 2-5 for each value - * */ -static void dbg_hcc_params (struct fusbh200_hcd *fusbh200, char *label) -{ - u32 params = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params); - - fusbh200_dbg (fusbh200, - "%s hcc_params %04x uframes %s%s\n", - label, - params, - HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024", - HCC_CANPARK(params) ? " park" : ""); -} - -static void __maybe_unused -dbg_qtd (const char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd) -{ - fusbh200_dbg(fusbh200, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd, - hc32_to_cpup(fusbh200, &qtd->hw_next), - hc32_to_cpup(fusbh200, &qtd->hw_alt_next), - hc32_to_cpup(fusbh200, &qtd->hw_token), - hc32_to_cpup(fusbh200, &qtd->hw_buf [0])); - if (qtd->hw_buf [1]) - fusbh200_dbg(fusbh200, " p1=%08x p2=%08x p3=%08x p4=%08x\n", - hc32_to_cpup(fusbh200, &qtd->hw_buf[1]), - hc32_to_cpup(fusbh200, &qtd->hw_buf[2]), - hc32_to_cpup(fusbh200, &qtd->hw_buf[3]), - hc32_to_cpup(fusbh200, &qtd->hw_buf[4])); -} - -static void __maybe_unused -dbg_qh (const char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh) -{ - struct fusbh200_qh_hw *hw = qh->hw; - - fusbh200_dbg (fusbh200, "%s qh %p n%08x info %x %x qtd %x\n", label, - qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current); - dbg_qtd("overlay", fusbh200, (struct fusbh200_qtd *) &hw->hw_qtd_next); -} - -static void __maybe_unused -dbg_itd (const char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_itd *itd) -{ - fusbh200_dbg (fusbh200, "%s [%d] itd %p, next %08x, urb %p\n", - label, itd->frame, itd, hc32_to_cpu(fusbh200, itd->hw_next), - itd->urb); - fusbh200_dbg (fusbh200, - " trans: %08x %08x %08x %08x %08x %08x %08x %08x\n", - hc32_to_cpu(fusbh200, itd->hw_transaction[0]), - hc32_to_cpu(fusbh200, itd->hw_transaction[1]), - hc32_to_cpu(fusbh200, itd->hw_transaction[2]), - hc32_to_cpu(fusbh200, itd->hw_transaction[3]), - hc32_to_cpu(fusbh200, itd->hw_transaction[4]), - hc32_to_cpu(fusbh200, itd->hw_transaction[5]), - hc32_to_cpu(fusbh200, itd->hw_transaction[6]), - hc32_to_cpu(fusbh200, itd->hw_transaction[7])); - fusbh200_dbg (fusbh200, - " buf: %08x %08x %08x %08x %08x %08x %08x\n", - hc32_to_cpu(fusbh200, itd->hw_bufp[0]), - hc32_to_cpu(fusbh200, itd->hw_bufp[1]), - hc32_to_cpu(fusbh200, itd->hw_bufp[2]), - hc32_to_cpu(fusbh200, itd->hw_bufp[3]), - hc32_to_cpu(fusbh200, itd->hw_bufp[4]), - hc32_to_cpu(fusbh200, itd->hw_bufp[5]), - hc32_to_cpu(fusbh200, itd->hw_bufp[6])); - fusbh200_dbg (fusbh200, " index: %d %d %d %d %d %d %d %d\n", - itd->index[0], itd->index[1], itd->index[2], - itd->index[3], itd->index[4], itd->index[5], - itd->index[6], itd->index[7]); -} - -static int __maybe_unused -dbg_status_buf (char *buf, unsigned len, const char *label, u32 status) -{ - return scnprintf (buf, len, - "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s", - label, label [0] ? " " : "", status, - (status & STS_ASS) ? " Async" : "", - (status & STS_PSS) ? 
" Periodic" : "", - (status & STS_RECL) ? " Recl" : "", - (status & STS_HALT) ? " Halt" : "", - (status & STS_IAA) ? " IAA" : "", - (status & STS_FATAL) ? " FATAL" : "", - (status & STS_FLR) ? " FLR" : "", - (status & STS_PCD) ? " PCD" : "", - (status & STS_ERR) ? " ERR" : "", - (status & STS_INT) ? " INT" : "" - ); -} - -static int __maybe_unused -dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable) -{ - return scnprintf (buf, len, - "%s%sintrenable %02x%s%s%s%s%s%s", - label, label [0] ? " " : "", enable, - (enable & STS_IAA) ? " IAA" : "", - (enable & STS_FATAL) ? " FATAL" : "", - (enable & STS_FLR) ? " FLR" : "", - (enable & STS_PCD) ? " PCD" : "", - (enable & STS_ERR) ? " ERR" : "", - (enable & STS_INT) ? " INT" : "" - ); -} - -static const char *const fls_strings [] = - { "1024", "512", "256", "??" }; - -static int -dbg_command_buf (char *buf, unsigned len, const char *label, u32 command) -{ - return scnprintf (buf, len, - "%s%scommand %07x %s=%d ithresh=%d%s%s%s " - "period=%s%s %s", - label, label [0] ? " " : "", command, - (command & CMD_PARK) ? " park" : "(park)", - CMD_PARK_CNT (command), - (command >> 16) & 0x3f, - (command & CMD_IAAD) ? " IAAD" : "", - (command & CMD_ASE) ? " Async" : "", - (command & CMD_PSE) ? " Periodic" : "", - fls_strings [(command >> 2) & 0x3], - (command & CMD_RESET) ? " Reset" : "", - (command & CMD_RUN) ? "RUN" : "HALT" - ); -} - -static int -dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status) -{ - char *sig; - - /* signaling state */ - switch (status & (3 << 10)) { - case 0 << 10: sig = "se0"; break; - case 1 << 10: sig = "k"; break; /* low speed */ - case 2 << 10: sig = "j"; break; - default: sig = "?"; break; - } - - return scnprintf (buf, len, - "%s%sport:%d status %06x %d " - "sig=%s%s%s%s%s%s%s%s", - label, label [0] ? " " : "", port, status, - status>>25,/*device address */ - sig, - (status & PORT_RESET) ? " RESET" : "", - (status & PORT_SUSPEND) ? " SUSPEND" : "", - (status & PORT_RESUME) ? " RESUME" : "", - (status & PORT_PEC) ? " PEC" : "", - (status & PORT_PE) ? " PE" : "", - (status & PORT_CSC) ? " CSC" : "", - (status & PORT_CONNECT) ? " CONNECT" : ""); -} - -/* functions have the "wrong" filename when they're output... 
*/ -#define dbg_status(fusbh200, label, status) { \ - char _buf [80]; \ - dbg_status_buf (_buf, sizeof _buf, label, status); \ - fusbh200_dbg (fusbh200, "%s\n", _buf); \ -} - -#define dbg_cmd(fusbh200, label, command) { \ - char _buf [80]; \ - dbg_command_buf (_buf, sizeof _buf, label, command); \ - fusbh200_dbg (fusbh200, "%s\n", _buf); \ -} - -#define dbg_port(fusbh200, label, port, status) { \ - char _buf [80]; \ - dbg_port_buf (_buf, sizeof _buf, label, port, status); \ - fusbh200_dbg (fusbh200, "%s\n", _buf); \ -} - -/*-------------------------------------------------------------------------*/ - -/* troubleshooting help: expose state in debugfs */ - -static int debug_async_open(struct inode *, struct file *); -static int debug_periodic_open(struct inode *, struct file *); -static int debug_registers_open(struct inode *, struct file *); -static int debug_async_open(struct inode *, struct file *); - -static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*); -static int debug_close(struct inode *, struct file *); - -static const struct file_operations debug_async_fops = { - .owner = THIS_MODULE, - .open = debug_async_open, - .read = debug_output, - .release = debug_close, - .llseek = default_llseek, -}; -static const struct file_operations debug_periodic_fops = { - .owner = THIS_MODULE, - .open = debug_periodic_open, - .read = debug_output, - .release = debug_close, - .llseek = default_llseek, -}; -static const struct file_operations debug_registers_fops = { - .owner = THIS_MODULE, - .open = debug_registers_open, - .read = debug_output, - .release = debug_close, - .llseek = default_llseek, -}; - -static struct dentry *fusbh200_debug_root; - -struct debug_buffer { - ssize_t (*fill_func)(struct debug_buffer *); /* fill method */ - struct usb_bus *bus; - struct mutex mutex; /* protect filling of buffer */ - size_t count; /* number of characters filled into buffer */ - char *output_buf; - size_t alloc_size; -}; - -#define speed_char(info1) ({ char tmp; \ - switch (info1 & (3 << 12)) { \ - case QH_FULL_SPEED: tmp = 'f'; break; \ - case QH_LOW_SPEED: tmp = 'l'; break; \ - case QH_HIGH_SPEED: tmp = 'h'; break; \ - default: tmp = '?'; break; \ - } tmp; }) - -static inline char token_mark(struct fusbh200_hcd *fusbh200, __hc32 token) -{ - __u32 v = hc32_to_cpu(fusbh200, token); - - if (v & QTD_STS_ACTIVE) - return '*'; - if (v & QTD_STS_HALT) - return '-'; - if (!IS_SHORT_READ (v)) - return ' '; - /* tries to advance through hw_alt_next */ - return '/'; -} - -static void qh_lines ( - struct fusbh200_hcd *fusbh200, - struct fusbh200_qh *qh, - char **nextp, - unsigned *sizep -) -{ - u32 scratch; - u32 hw_curr; - struct fusbh200_qtd *td; - unsigned temp; - unsigned size = *sizep; - char *next = *nextp; - char mark; - __le32 list_end = FUSBH200_LIST_END(fusbh200); - struct fusbh200_qh_hw *hw = qh->hw; - - if (hw->hw_qtd_next == list_end) /* NEC does this */ - mark = '@'; - else - mark = token_mark(fusbh200, hw->hw_token); - if (mark == '/') { /* qh_alt_next controls qh advance? */ - if ((hw->hw_alt_next & QTD_MASK(fusbh200)) - == fusbh200->async->hw->hw_alt_next) - mark = '#'; /* blocked */ - else if (hw->hw_alt_next == list_end) - mark = '.'; /* use hw_qtd_next */ - /* else alt_next points to some other qtd */ - } - scratch = hc32_to_cpup(fusbh200, &hw->hw_info1); - hw_curr = (mark == '*') ? 
hc32_to_cpup(fusbh200, &hw->hw_current) : 0; - temp = scnprintf (next, size, - "qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)", - qh, scratch & 0x007f, - speed_char (scratch), - (scratch >> 8) & 0x000f, - scratch, hc32_to_cpup(fusbh200, &hw->hw_info2), - hc32_to_cpup(fusbh200, &hw->hw_token), mark, - (cpu_to_hc32(fusbh200, QTD_TOGGLE) & hw->hw_token) - ? "data1" : "data0", - (hc32_to_cpup(fusbh200, &hw->hw_alt_next) >> 1) & 0x0f); - size -= temp; - next += temp; - - /* hc may be modifying the list as we read it ... */ - list_for_each_entry(td, &qh->qtd_list, qtd_list) { - scratch = hc32_to_cpup(fusbh200, &td->hw_token); - mark = ' '; - if (hw_curr == td->qtd_dma) - mark = '*'; - else if (hw->hw_qtd_next == cpu_to_hc32(fusbh200, td->qtd_dma)) - mark = '+'; - else if (QTD_LENGTH (scratch)) { - if (td->hw_alt_next == fusbh200->async->hw->hw_alt_next) - mark = '#'; - else if (td->hw_alt_next != list_end) - mark = '/'; - } - temp = snprintf (next, size, - "\n\t%p%c%s len=%d %08x urb %p", - td, mark, ({ char *tmp; - switch ((scratch>>8)&0x03) { - case 0: tmp = "out"; break; - case 1: tmp = "in"; break; - case 2: tmp = "setup"; break; - default: tmp = "?"; break; - } tmp;}), - (scratch >> 16) & 0x7fff, - scratch, - td->urb); - if (size < temp) - temp = size; - size -= temp; - next += temp; - if (temp == size) - goto done; - } - - temp = snprintf (next, size, "\n"); - if (size < temp) - temp = size; - size -= temp; - next += temp; - -done: - *sizep = size; - *nextp = next; -} - -static ssize_t fill_async_buffer(struct debug_buffer *buf) -{ - struct usb_hcd *hcd; - struct fusbh200_hcd *fusbh200; - unsigned long flags; - unsigned temp, size; - char *next; - struct fusbh200_qh *qh; - - hcd = bus_to_hcd(buf->bus); - fusbh200 = hcd_to_fusbh200 (hcd); - next = buf->output_buf; - size = buf->alloc_size; - - *next = 0; - - /* dumps a snapshot of the async schedule. - * usually empty except for long-term bulk reads, or head. - * one QH per line, and TDs we know about - */ - spin_lock_irqsave (&fusbh200->lock, flags); - for (qh = fusbh200->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh) - qh_lines (fusbh200, qh, &next, &size); - if (fusbh200->async_unlink && size > 0) { - temp = scnprintf(next, size, "\nunlink =\n"); - size -= temp; - next += temp; - - for (qh = fusbh200->async_unlink; size > 0 && qh; - qh = qh->unlink_next) - qh_lines (fusbh200, qh, &next, &size); - } - spin_unlock_irqrestore (&fusbh200->lock, flags); - - return strlen(buf->output_buf); -} - -#define DBG_SCHED_LIMIT 64 -static ssize_t fill_periodic_buffer(struct debug_buffer *buf) -{ - struct usb_hcd *hcd; - struct fusbh200_hcd *fusbh200; - unsigned long flags; - union fusbh200_shadow p, *seen; - unsigned temp, size, seen_count; - char *next; - unsigned i; - __hc32 tag; - - seen = kmalloc(DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC); - if (!seen) - return 0; - seen_count = 0; - - hcd = bus_to_hcd(buf->bus); - fusbh200 = hcd_to_fusbh200 (hcd); - next = buf->output_buf; - size = buf->alloc_size; - - temp = scnprintf (next, size, "size = %d\n", fusbh200->periodic_size); - size -= temp; - next += temp; - - /* dump a snapshot of the periodic schedule. - * iso changes, interrupt usually doesn't. 
- */ - spin_lock_irqsave (&fusbh200->lock, flags); - for (i = 0; i < fusbh200->periodic_size; i++) { - p = fusbh200->pshadow [i]; - if (likely (!p.ptr)) - continue; - tag = Q_NEXT_TYPE(fusbh200, fusbh200->periodic [i]); - - temp = scnprintf (next, size, "%4d: ", i); - size -= temp; - next += temp; - - do { - struct fusbh200_qh_hw *hw; - - switch (hc32_to_cpu(fusbh200, tag)) { - case Q_TYPE_QH: - hw = p.qh->hw; - temp = scnprintf (next, size, " qh%d-%04x/%p", - p.qh->period, - hc32_to_cpup(fusbh200, - &hw->hw_info2) - /* uframe masks */ - & (QH_CMASK | QH_SMASK), - p.qh); - size -= temp; - next += temp; - /* don't repeat what follows this qh */ - for (temp = 0; temp < seen_count; temp++) { - if (seen [temp].ptr != p.ptr) - continue; - if (p.qh->qh_next.ptr) { - temp = scnprintf (next, size, - " ..."); - size -= temp; - next += temp; - } - break; - } - /* show more info the first time around */ - if (temp == seen_count) { - u32 scratch = hc32_to_cpup(fusbh200, - &hw->hw_info1); - struct fusbh200_qtd *qtd; - char *type = ""; - - /* count tds, get ep direction */ - temp = 0; - list_for_each_entry (qtd, - &p.qh->qtd_list, - qtd_list) { - temp++; - switch (0x03 & (hc32_to_cpu( - fusbh200, - qtd->hw_token) >> 8)) { - case 0: type = "out"; continue; - case 1: type = "in"; continue; - } - } - - temp = scnprintf (next, size, - " (%c%d ep%d%s " - "[%d/%d] q%d p%d)", - speed_char (scratch), - scratch & 0x007f, - (scratch >> 8) & 0x000f, type, - p.qh->usecs, p.qh->c_usecs, - temp, - 0x7ff & (scratch >> 16)); - - if (seen_count < DBG_SCHED_LIMIT) - seen [seen_count++].qh = p.qh; - } else - temp = 0; - tag = Q_NEXT_TYPE(fusbh200, hw->hw_next); - p = p.qh->qh_next; - break; - case Q_TYPE_FSTN: - temp = scnprintf (next, size, - " fstn-%8x/%p", p.fstn->hw_prev, - p.fstn); - tag = Q_NEXT_TYPE(fusbh200, p.fstn->hw_next); - p = p.fstn->fstn_next; - break; - case Q_TYPE_ITD: - temp = scnprintf (next, size, - " itd/%p", p.itd); - tag = Q_NEXT_TYPE(fusbh200, p.itd->hw_next); - p = p.itd->itd_next; - break; - } - size -= temp; - next += temp; - } while (p.ptr); - - temp = scnprintf (next, size, "\n"); - size -= temp; - next += temp; - } - spin_unlock_irqrestore (&fusbh200->lock, flags); - kfree (seen); - - return buf->alloc_size - size; -} -#undef DBG_SCHED_LIMIT - -static const char *rh_state_string(struct fusbh200_hcd *fusbh200) -{ - switch (fusbh200->rh_state) { - case FUSBH200_RH_HALTED: - return "halted"; - case FUSBH200_RH_SUSPENDED: - return "suspended"; - case FUSBH200_RH_RUNNING: - return "running"; - case FUSBH200_RH_STOPPING: - return "stopping"; - } - return "?"; -} - -static ssize_t fill_registers_buffer(struct debug_buffer *buf) -{ - struct usb_hcd *hcd; - struct fusbh200_hcd *fusbh200; - unsigned long flags; - unsigned temp, size, i; - char *next, scratch [80]; - static char fmt [] = "%*s\n"; - static char label [] = ""; - - hcd = bus_to_hcd(buf->bus); - fusbh200 = hcd_to_fusbh200 (hcd); - next = buf->output_buf; - size = buf->alloc_size; - - spin_lock_irqsave (&fusbh200->lock, flags); - - if (!HCD_HW_ACCESSIBLE(hcd)) { - size = scnprintf (next, size, - "bus %s, device %s\n" - "%s\n" - "SUSPENDED (no register access)\n", - hcd->self.controller->bus->name, - dev_name(hcd->self.controller), - hcd->product_desc); - goto done; - } - - /* Capability Registers */ - i = HC_VERSION(fusbh200, fusbh200_readl(fusbh200, &fusbh200->caps->hc_capbase)); - temp = scnprintf (next, size, - "bus %s, device %s\n" - "%s\n" - "EHCI %x.%02x, rh state %s\n", - hcd->self.controller->bus->name, - 
dev_name(hcd->self.controller), - hcd->product_desc, - i >> 8, i & 0x0ff, rh_state_string(fusbh200)); - size -= temp; - next += temp; - - // FIXME interpret both types of params - i = fusbh200_readl(fusbh200, &fusbh200->caps->hcs_params); - temp = scnprintf (next, size, "structural params 0x%08x\n", i); - size -= temp; - next += temp; - - i = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params); - temp = scnprintf (next, size, "capability params 0x%08x\n", i); - size -= temp; - next += temp; - - /* Operational Registers */ - temp = dbg_status_buf (scratch, sizeof scratch, label, - fusbh200_readl(fusbh200, &fusbh200->regs->status)); - temp = scnprintf (next, size, fmt, temp, scratch); - size -= temp; - next += temp; - - temp = dbg_command_buf (scratch, sizeof scratch, label, - fusbh200_readl(fusbh200, &fusbh200->regs->command)); - temp = scnprintf (next, size, fmt, temp, scratch); - size -= temp; - next += temp; - - temp = dbg_intr_buf (scratch, sizeof scratch, label, - fusbh200_readl(fusbh200, &fusbh200->regs->intr_enable)); - temp = scnprintf (next, size, fmt, temp, scratch); - size -= temp; - next += temp; - - temp = scnprintf (next, size, "uframe %04x\n", - fusbh200_read_frame_index(fusbh200)); - size -= temp; - next += temp; - - if (fusbh200->async_unlink) { - temp = scnprintf(next, size, "async unlink qh %p\n", - fusbh200->async_unlink); - size -= temp; - next += temp; - } - - temp = scnprintf (next, size, - "irq normal %ld err %ld iaa %ld (lost %ld)\n", - fusbh200->stats.normal, fusbh200->stats.error, fusbh200->stats.iaa, - fusbh200->stats.lost_iaa); - size -= temp; - next += temp; - - temp = scnprintf (next, size, "complete %ld unlink %ld\n", - fusbh200->stats.complete, fusbh200->stats.unlink); - size -= temp; - next += temp; - -done: - spin_unlock_irqrestore (&fusbh200->lock, flags); - - return buf->alloc_size - size; -} - -static struct debug_buffer *alloc_buffer(struct usb_bus *bus, - ssize_t (*fill_func)(struct debug_buffer *)) -{ - struct debug_buffer *buf; - - buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL); - - if (buf) { - buf->bus = bus; - buf->fill_func = fill_func; - mutex_init(&buf->mutex); - buf->alloc_size = PAGE_SIZE; - } - - return buf; -} - -static int fill_buffer(struct debug_buffer *buf) -{ - int ret = 0; - - if (!buf->output_buf) - buf->output_buf = vmalloc(buf->alloc_size); - - if (!buf->output_buf) { - ret = -ENOMEM; - goto out; - } - - ret = buf->fill_func(buf); - - if (ret >= 0) { - buf->count = ret; - ret = 0; - } - -out: - return ret; -} - -static ssize_t debug_output(struct file *file, char __user *user_buf, - size_t len, loff_t *offset) -{ - struct debug_buffer *buf = file->private_data; - int ret = 0; - - mutex_lock(&buf->mutex); - if (buf->count == 0) { - ret = fill_buffer(buf); - if (ret != 0) { - mutex_unlock(&buf->mutex); - goto out; - } - } - mutex_unlock(&buf->mutex); - - ret = simple_read_from_buffer(user_buf, len, offset, - buf->output_buf, buf->count); - -out: - return ret; - -} - -static int debug_close(struct inode *inode, struct file *file) -{ - struct debug_buffer *buf = file->private_data; - - if (buf) { - vfree(buf->output_buf); - kfree(buf); - } - - return 0; -} -static int debug_async_open(struct inode *inode, struct file *file) -{ - file->private_data = alloc_buffer(inode->i_private, fill_async_buffer); - - return file->private_data ? 
0 : -ENOMEM; -} - -static int debug_periodic_open(struct inode *inode, struct file *file) -{ - struct debug_buffer *buf; - buf = alloc_buffer(inode->i_private, fill_periodic_buffer); - if (!buf) - return -ENOMEM; - - buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8)*PAGE_SIZE; - file->private_data = buf; - return 0; -} - -static int debug_registers_open(struct inode *inode, struct file *file) -{ - file->private_data = alloc_buffer(inode->i_private, - fill_registers_buffer); - - return file->private_data ? 0 : -ENOMEM; -} - -static inline void create_debug_files (struct fusbh200_hcd *fusbh200) -{ - struct usb_bus *bus = &fusbh200_to_hcd(fusbh200)->self; - - fusbh200->debug_dir = debugfs_create_dir(bus->bus_name, fusbh200_debug_root); - if (!fusbh200->debug_dir) - return; - - if (!debugfs_create_file("async", S_IRUGO, fusbh200->debug_dir, bus, - &debug_async_fops)) - goto file_error; - - if (!debugfs_create_file("periodic", S_IRUGO, fusbh200->debug_dir, bus, - &debug_periodic_fops)) - goto file_error; - - if (!debugfs_create_file("registers", S_IRUGO, fusbh200->debug_dir, bus, - &debug_registers_fops)) - goto file_error; - - return; - -file_error: - debugfs_remove_recursive(fusbh200->debug_dir); -} - -static inline void remove_debug_files (struct fusbh200_hcd *fusbh200) -{ - debugfs_remove_recursive(fusbh200->debug_dir); -} - -/*-------------------------------------------------------------------------*/ - -/* - * handshake - spin reading hc until handshake completes or fails - * @ptr: address of hc register to be read - * @mask: bits to look at in result of read - * @done: value of those bits when handshake succeeds - * @usec: timeout in microseconds - * - * Returns negative errno, or zero on success - * - * Success happens when the "mask" bits have the specified value (hardware - * handshake done). There are two failure modes: "usec" have passed (major - * hardware flakeout), or the register reads as all-ones (hardware removed). - * - * That last failure should_only happen in cases like physical cardbus eject - * before driver shutdown. But it also seems to be caused by bugs in cardbus - * bridge shutdown: shutting down the bridge before the devices using it. - */ -static int handshake (struct fusbh200_hcd *fusbh200, void __iomem *ptr, - u32 mask, u32 done, int usec) -{ - u32 result; - - do { - result = fusbh200_readl(fusbh200, ptr); - if (result == ~(u32)0) /* card removed */ - return -ENODEV; - result &= mask; - if (result == done) - return 0; - udelay (1); - usec--; - } while (usec > 0); - return -ETIMEDOUT; -} - -/* - * Force HC to halt state from unknown (EHCI spec section 2.3). - * Must be called with interrupts enabled and the lock not held. - */ -static int fusbh200_halt (struct fusbh200_hcd *fusbh200) -{ - u32 temp; - - spin_lock_irq(&fusbh200->lock); - - /* disable any irqs left enabled by previous code */ - fusbh200_writel(fusbh200, 0, &fusbh200->regs->intr_enable); - - /* - * This routine gets called during probe before fusbh200->command - * has been initialized, so we can't rely on its value. - */ - fusbh200->command &= ~CMD_RUN; - temp = fusbh200_readl(fusbh200, &fusbh200->regs->command); - temp &= ~(CMD_RUN | CMD_IAAD); - fusbh200_writel(fusbh200, temp, &fusbh200->regs->command); - - spin_unlock_irq(&fusbh200->lock); - synchronize_irq(fusbh200_to_hcd(fusbh200)->irq); - - return handshake(fusbh200, &fusbh200->regs->status, - STS_HALT, STS_HALT, 16 * 125); -} - -/* - * Reset a non-running (STS_HALT == 1) controller. 
- * Must be called with interrupts enabled and the lock not held. - */ -static int fusbh200_reset (struct fusbh200_hcd *fusbh200) -{ - int retval; - u32 command = fusbh200_readl(fusbh200, &fusbh200->regs->command); - - /* If the EHCI debug controller is active, special care must be - * taken before and after a host controller reset */ - if (fusbh200->debug && !dbgp_reset_prep(fusbh200_to_hcd(fusbh200))) - fusbh200->debug = NULL; - - command |= CMD_RESET; - dbg_cmd (fusbh200, "reset", command); - fusbh200_writel(fusbh200, command, &fusbh200->regs->command); - fusbh200->rh_state = FUSBH200_RH_HALTED; - fusbh200->next_statechange = jiffies; - retval = handshake (fusbh200, &fusbh200->regs->command, - CMD_RESET, 0, 250 * 1000); - - if (retval) - return retval; - - if (fusbh200->debug) - dbgp_external_startup(fusbh200_to_hcd(fusbh200)); - - fusbh200->port_c_suspend = fusbh200->suspended_ports = - fusbh200->resuming_ports = 0; - return retval; -} - -/* - * Idle the controller (turn off the schedules). - * Must be called with interrupts enabled and the lock not held. - */ -static void fusbh200_quiesce (struct fusbh200_hcd *fusbh200) -{ - u32 temp; - - if (fusbh200->rh_state != FUSBH200_RH_RUNNING) - return; - - /* wait for any schedule enables/disables to take effect */ - temp = (fusbh200->command << 10) & (STS_ASS | STS_PSS); - handshake(fusbh200, &fusbh200->regs->status, STS_ASS | STS_PSS, temp, 16 * 125); - - /* then disable anything that's still active */ - spin_lock_irq(&fusbh200->lock); - fusbh200->command &= ~(CMD_ASE | CMD_PSE); - fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command); - spin_unlock_irq(&fusbh200->lock); - - /* hardware can take 16 microframes to turn off ... */ - handshake(fusbh200, &fusbh200->regs->status, STS_ASS | STS_PSS, 0, 16 * 125); -} - -/*-------------------------------------------------------------------------*/ - -static void end_unlink_async(struct fusbh200_hcd *fusbh200); -static void unlink_empty_async(struct fusbh200_hcd *fusbh200); -static void fusbh200_work(struct fusbh200_hcd *fusbh200); -static void start_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh); -static void end_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh); - -/*-------------------------------------------------------------------------*/ - -/* Set a bit in the USBCMD register */ -static void fusbh200_set_command_bit(struct fusbh200_hcd *fusbh200, u32 bit) -{ - fusbh200->command |= bit; - fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command); - - /* unblock posted write */ - fusbh200_readl(fusbh200, &fusbh200->regs->command); -} - -/* Clear a bit in the USBCMD register */ -static void fusbh200_clear_command_bit(struct fusbh200_hcd *fusbh200, u32 bit) -{ - fusbh200->command &= ~bit; - fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command); - - /* unblock posted write */ - fusbh200_readl(fusbh200, &fusbh200->regs->command); -} - -/*-------------------------------------------------------------------------*/ - -/* - * EHCI timer support... Now using hrtimers. - * - * Lots of different events are triggered from fusbh200->hrtimer. Whenever - * the timer routine runs, it checks each possible event; events that are - * currently enabled and whose expiration time has passed get handled. - * The set of enabled events is stored as a collection of bitflags in - * fusbh200->enabled_hrtimer_events, and they are numbered in order of - * increasing delay values (ranging between 1 ms and 100 ms). 
- * - * Rather than implementing a sorted list or tree of all pending events, - * we keep track only of the lowest-numbered pending event, in - * fusbh200->next_hrtimer_event. Whenever fusbh200->hrtimer gets restarted, its - * expiration time is set to the timeout value for this event. - * - * As a result, events might not get handled right away; the actual delay - * could be anywhere up to twice the requested delay. This doesn't - * matter, because none of the events are especially time-critical. The - * ones that matter most all have a delay of 1 ms, so they will be - * handled after 2 ms at most, which is okay. In addition to this, we - * allow for an expiration range of 1 ms. - */ - -/* - * Delay lengths for the hrtimer event types. - * Keep this list sorted by delay length, in the same order as - * the event types indexed by enum fusbh200_hrtimer_event in fusbh200.h. - */ -static unsigned event_delays_ns[] = { - 1 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_POLL_ASS */ - 1 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_POLL_PSS */ - 1 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_POLL_DEAD */ - 1125 * NSEC_PER_USEC, /* FUSBH200_HRTIMER_UNLINK_INTR */ - 2 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_FREE_ITDS */ - 6 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_ASYNC_UNLINKS */ - 10 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_IAA_WATCHDOG */ - 10 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_DISABLE_PERIODIC */ - 15 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_DISABLE_ASYNC */ - 100 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_IO_WATCHDOG */ -}; - -/* Enable a pending hrtimer event */ -static void fusbh200_enable_event(struct fusbh200_hcd *fusbh200, unsigned event, - bool resched) -{ - ktime_t *timeout = &fusbh200->hr_timeouts[event]; - - if (resched) - *timeout = ktime_add(ktime_get(), - ktime_set(0, event_delays_ns[event])); - fusbh200->enabled_hrtimer_events |= (1 << event); - - /* Track only the lowest-numbered pending event */ - if (event < fusbh200->next_hrtimer_event) { - fusbh200->next_hrtimer_event = event; - hrtimer_start_range_ns(&fusbh200->hrtimer, *timeout, - NSEC_PER_MSEC, HRTIMER_MODE_ABS); - } -} - - -/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */ -static void fusbh200_poll_ASS(struct fusbh200_hcd *fusbh200) -{ - unsigned actual, want; - - /* Don't enable anything if the controller isn't running (e.g., died) */ - if (fusbh200->rh_state != FUSBH200_RH_RUNNING) - return; - - want = (fusbh200->command & CMD_ASE) ? 
STS_ASS : 0; - actual = fusbh200_readl(fusbh200, &fusbh200->regs->status) & STS_ASS; - - if (want != actual) { - - /* Poll again later, but give up after about 20 ms */ - if (fusbh200->ASS_poll_count++ < 20) { - fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_POLL_ASS, true); - return; - } - fusbh200_dbg(fusbh200, "Waited too long for the async schedule status (%x/%x), giving up\n", - want, actual); - } - fusbh200->ASS_poll_count = 0; - - /* The status is up-to-date; restart or stop the schedule as needed */ - if (want == 0) { /* Stopped */ - if (fusbh200->async_count > 0) - fusbh200_set_command_bit(fusbh200, CMD_ASE); - - } else { /* Running */ - if (fusbh200->async_count == 0) { - - /* Turn off the schedule after a while */ - fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_DISABLE_ASYNC, - true); - } - } -} - -/* Turn off the async schedule after a brief delay */ -static void fusbh200_disable_ASE(struct fusbh200_hcd *fusbh200) -{ - fusbh200_clear_command_bit(fusbh200, CMD_ASE); -} - - -/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */ -static void fusbh200_poll_PSS(struct fusbh200_hcd *fusbh200) -{ - unsigned actual, want; - - /* Don't do anything if the controller isn't running (e.g., died) */ - if (fusbh200->rh_state != FUSBH200_RH_RUNNING) - return; - - want = (fusbh200->command & CMD_PSE) ? STS_PSS : 0; - actual = fusbh200_readl(fusbh200, &fusbh200->regs->status) & STS_PSS; - - if (want != actual) { - - /* Poll again later, but give up after about 20 ms */ - if (fusbh200->PSS_poll_count++ < 20) { - fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_POLL_PSS, true); - return; - } - fusbh200_dbg(fusbh200, "Waited too long for the periodic schedule status (%x/%x), giving up\n", - want, actual); - } - fusbh200->PSS_poll_count = 0; - - /* The status is up-to-date; restart or stop the schedule as needed */ - if (want == 0) { /* Stopped */ - if (fusbh200->periodic_count > 0) - fusbh200_set_command_bit(fusbh200, CMD_PSE); - - } else { /* Running */ - if (fusbh200->periodic_count == 0) { - - /* Turn off the schedule after a while */ - fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_DISABLE_PERIODIC, - true); - } - } -} - -/* Turn off the periodic schedule after a brief delay */ -static void fusbh200_disable_PSE(struct fusbh200_hcd *fusbh200) -{ - fusbh200_clear_command_bit(fusbh200, CMD_PSE); -} - - -/* Poll the STS_HALT status bit; see when a dead controller stops */ -static void fusbh200_handle_controller_death(struct fusbh200_hcd *fusbh200) -{ - if (!(fusbh200_readl(fusbh200, &fusbh200->regs->status) & STS_HALT)) { - - /* Give up after a few milliseconds */ - if (fusbh200->died_poll_count++ < 5) { - /* Try again later */ - fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_POLL_DEAD, true); - return; - } - fusbh200_warn(fusbh200, "Waited too long for the controller to stop, giving up\n"); - } - - /* Clean up the mess */ - fusbh200->rh_state = FUSBH200_RH_HALTED; - fusbh200_writel(fusbh200, 0, &fusbh200->regs->intr_enable); - fusbh200_work(fusbh200); - end_unlink_async(fusbh200); - - /* Not in process context, so don't try to reset the controller */ -} - - -/* Handle unlinked interrupt QHs once they are gone from the hardware */ -static void fusbh200_handle_intr_unlinks(struct fusbh200_hcd *fusbh200) -{ - bool stopped = (fusbh200->rh_state < FUSBH200_RH_RUNNING); - - /* - * Process all the QHs on the intr_unlink list that were added - * before the current unlink cycle began. The list is in - * temporal order, so stop when we reach the first entry in the - * current cycle. 
But if the root hub isn't running then - * process all the QHs on the list. - */ - fusbh200->intr_unlinking = true; - while (fusbh200->intr_unlink) { - struct fusbh200_qh *qh = fusbh200->intr_unlink; - - if (!stopped && qh->unlink_cycle == fusbh200->intr_unlink_cycle) - break; - fusbh200->intr_unlink = qh->unlink_next; - qh->unlink_next = NULL; - end_unlink_intr(fusbh200, qh); - } - - /* Handle remaining entries later */ - if (fusbh200->intr_unlink) { - fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_UNLINK_INTR, true); - ++fusbh200->intr_unlink_cycle; - } - fusbh200->intr_unlinking = false; -} - - -/* Start another free-iTDs/siTDs cycle */ -static void start_free_itds(struct fusbh200_hcd *fusbh200) -{ - if (!(fusbh200->enabled_hrtimer_events & BIT(FUSBH200_HRTIMER_FREE_ITDS))) { - fusbh200->last_itd_to_free = list_entry( - fusbh200->cached_itd_list.prev, - struct fusbh200_itd, itd_list); - fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_FREE_ITDS, true); - } -} - -/* Wait for controller to stop using old iTDs and siTDs */ -static void end_free_itds(struct fusbh200_hcd *fusbh200) -{ - struct fusbh200_itd *itd, *n; - - if (fusbh200->rh_state < FUSBH200_RH_RUNNING) { - fusbh200->last_itd_to_free = NULL; - } - - list_for_each_entry_safe(itd, n, &fusbh200->cached_itd_list, itd_list) { - list_del(&itd->itd_list); - dma_pool_free(fusbh200->itd_pool, itd, itd->itd_dma); - if (itd == fusbh200->last_itd_to_free) - break; - } - - if (!list_empty(&fusbh200->cached_itd_list)) - start_free_itds(fusbh200); -} - - -/* Handle lost (or very late) IAA interrupts */ -static void fusbh200_iaa_watchdog(struct fusbh200_hcd *fusbh200) -{ - if (fusbh200->rh_state != FUSBH200_RH_RUNNING) - return; - - /* - * Lost IAA irqs wedge things badly; seen first with a vt8235. - * So we need this watchdog, but must protect it against both - * (a) SMP races against real IAA firing and retriggering, and - * (b) clean HC shutdown, when IAA watchdog was pending. - */ - if (fusbh200->async_iaa) { - u32 cmd, status; - - /* If we get here, IAA is *REALLY* late. It's barely - * conceivable that the system is so busy that CMD_IAAD - * is still legitimately set, so let's be sure it's - * clear before we read STS_IAA. (The HC should clear - * CMD_IAAD when it sets STS_IAA.) - */ - cmd = fusbh200_readl(fusbh200, &fusbh200->regs->command); - - /* - * If IAA is set here it either legitimately triggered - * after the watchdog timer expired (_way_ late, so we'll - * still count it as lost) ... or a silicon erratum: - * - VIA seems to set IAA without triggering the IRQ; - * - IAAD potentially cleared without setting IAA. - */ - status = fusbh200_readl(fusbh200, &fusbh200->regs->status); - if ((status & STS_IAA) || !(cmd & CMD_IAAD)) { - COUNT(fusbh200->stats.lost_iaa); - fusbh200_writel(fusbh200, STS_IAA, &fusbh200->regs->status); - } - - fusbh200_dbg(fusbh200, "IAA watchdog: status %x cmd %x\n", - status, cmd); - end_unlink_async(fusbh200); - } -} - - -/* Enable the I/O watchdog, if appropriate */ -static void turn_on_io_watchdog(struct fusbh200_hcd *fusbh200) -{ - /* Not needed if the controller isn't running or it's already enabled */ - if (fusbh200->rh_state != FUSBH200_RH_RUNNING || - (fusbh200->enabled_hrtimer_events & - BIT(FUSBH200_HRTIMER_IO_WATCHDOG))) - return; - - /* - * Isochronous transfers always need the watchdog. - * For other sorts we use it only if the flag is set. 
- */ - if (fusbh200->isoc_count > 0 || (fusbh200->need_io_watchdog && - fusbh200->async_count + fusbh200->intr_count > 0)) - fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_IO_WATCHDOG, true); -} - - -/* - * Handler functions for the hrtimer event types. - * Keep this array in the same order as the event types indexed by - * enum fusbh200_hrtimer_event in fusbh200.h. - */ -static void (*event_handlers[])(struct fusbh200_hcd *) = { - fusbh200_poll_ASS, /* FUSBH200_HRTIMER_POLL_ASS */ - fusbh200_poll_PSS, /* FUSBH200_HRTIMER_POLL_PSS */ - fusbh200_handle_controller_death, /* FUSBH200_HRTIMER_POLL_DEAD */ - fusbh200_handle_intr_unlinks, /* FUSBH200_HRTIMER_UNLINK_INTR */ - end_free_itds, /* FUSBH200_HRTIMER_FREE_ITDS */ - unlink_empty_async, /* FUSBH200_HRTIMER_ASYNC_UNLINKS */ - fusbh200_iaa_watchdog, /* FUSBH200_HRTIMER_IAA_WATCHDOG */ - fusbh200_disable_PSE, /* FUSBH200_HRTIMER_DISABLE_PERIODIC */ - fusbh200_disable_ASE, /* FUSBH200_HRTIMER_DISABLE_ASYNC */ - fusbh200_work, /* FUSBH200_HRTIMER_IO_WATCHDOG */ -}; - -static enum hrtimer_restart fusbh200_hrtimer_func(struct hrtimer *t) -{ - struct fusbh200_hcd *fusbh200 = container_of(t, struct fusbh200_hcd, hrtimer); - ktime_t now; - unsigned long events; - unsigned long flags; - unsigned e; - - spin_lock_irqsave(&fusbh200->lock, flags); - - events = fusbh200->enabled_hrtimer_events; - fusbh200->enabled_hrtimer_events = 0; - fusbh200->next_hrtimer_event = FUSBH200_HRTIMER_NO_EVENT; - - /* - * Check each pending event. If its time has expired, handle - * the event; otherwise re-enable it. - */ - now = ktime_get(); - for_each_set_bit(e, &events, FUSBH200_HRTIMER_NUM_EVENTS) { - if (now.tv64 >= fusbh200->hr_timeouts[e].tv64) - event_handlers[e](fusbh200); - else - fusbh200_enable_event(fusbh200, e, false); - } - - spin_unlock_irqrestore(&fusbh200->lock, flags); - return HRTIMER_NORESTART; -} - -/*-------------------------------------------------------------------------*/ - -#define fusbh200_bus_suspend NULL -#define fusbh200_bus_resume NULL - -/*-------------------------------------------------------------------------*/ - -static int check_reset_complete ( - struct fusbh200_hcd *fusbh200, - int index, - u32 __iomem *status_reg, - int port_status -) { - if (!(port_status & PORT_CONNECT)) - return port_status; - - /* if reset finished and it's still not enabled -- handoff */ - if (!(port_status & PORT_PE)) { - /* with integrated TT, there's nobody to hand it to! */ - fusbh200_dbg (fusbh200, - "Failed to enable port %d on root hub TT\n", - index+1); - return port_status; - } else { - fusbh200_dbg(fusbh200, "port %d reset complete, port enabled\n", - index + 1); - } - - return port_status; -} - -/*-------------------------------------------------------------------------*/ - - -/* build "status change" packet (one or two bytes) from HC registers */ - -static int -fusbh200_hub_status_data (struct usb_hcd *hcd, char *buf) -{ - struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd); - u32 temp, status; - u32 mask; - int retval = 1; - unsigned long flags; - - /* init status to no-changes */ - buf [0] = 0; - - /* Inform the core about resumes-in-progress by returning - * a non-zero value even if there are no status changes. - */ - status = fusbh200->resuming_ports; - - mask = PORT_CSC | PORT_PEC; - // PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND - - /* no hub change reports (bit 0) for now (power, ...) */ - - /* port N changes (bit N)? 
*/ - spin_lock_irqsave (&fusbh200->lock, flags); - - temp = fusbh200_readl(fusbh200, &fusbh200->regs->port_status); - - /* - * Return status information even for ports with OWNER set. - * Otherwise hub_wq wouldn't see the disconnect event when a - * high-speed device is switched over to the companion - * controller by the user. - */ - - if ((temp & mask) != 0 || test_bit(0, &fusbh200->port_c_suspend) - || (fusbh200->reset_done[0] && time_after_eq( - jiffies, fusbh200->reset_done[0]))) { - buf [0] |= 1 << 1; - status = STS_PCD; - } - /* FIXME autosuspend idle root hubs */ - spin_unlock_irqrestore (&fusbh200->lock, flags); - return status ? retval : 0; -} - -/*-------------------------------------------------------------------------*/ - -static void -fusbh200_hub_descriptor ( - struct fusbh200_hcd *fusbh200, - struct usb_hub_descriptor *desc -) { - int ports = HCS_N_PORTS (fusbh200->hcs_params); - u16 temp; - - desc->bDescriptorType = USB_DT_HUB; - desc->bPwrOn2PwrGood = 10; /* fusbh200 1.0, 2.3.9 says 20ms max */ - desc->bHubContrCurrent = 0; - - desc->bNbrPorts = ports; - temp = 1 + (ports / 8); - desc->bDescLength = 7 + 2 * temp; - - /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */ - memset(&desc->u.hs.DeviceRemovable[0], 0, temp); - memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp); - - temp = HUB_CHAR_INDV_PORT_OCPM; /* per-port overcurrent reporting */ - temp |= HUB_CHAR_NO_LPSM; /* no power switching */ - desc->wHubCharacteristics = cpu_to_le16(temp); -} - -/*-------------------------------------------------------------------------*/ - -static int fusbh200_hub_control ( - struct usb_hcd *hcd, - u16 typeReq, - u16 wValue, - u16 wIndex, - char *buf, - u16 wLength -) { - struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd); - int ports = HCS_N_PORTS (fusbh200->hcs_params); - u32 __iomem *status_reg = &fusbh200->regs->port_status; - u32 temp, temp1, status; - unsigned long flags; - int retval = 0; - unsigned selector; - - /* - * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR. - * HCS_INDICATOR may say we can change LEDs to off/amber/green. - * (track current state ourselves) ... blink for diagnostics, - * power, "this is the one", etc. EHCI spec supports this. - */ - - spin_lock_irqsave (&fusbh200->lock, flags); - switch (typeReq) { - case ClearHubFeature: - switch (wValue) { - case C_HUB_LOCAL_POWER: - case C_HUB_OVER_CURRENT: - /* no hub-wide feature/status flags */ - break; - default: - goto error; - } - break; - case ClearPortFeature: - if (!wIndex || wIndex > ports) - goto error; - wIndex--; - temp = fusbh200_readl(fusbh200, status_reg); - temp &= ~PORT_RWC_BITS; - - /* - * Even if OWNER is set, so the port is owned by the - * companion controller, hub_wq needs to be able to clear - * the port-change status bits (especially - * USB_PORT_STAT_C_CONNECTION). 
- */ - - switch (wValue) { - case USB_PORT_FEAT_ENABLE: - fusbh200_writel(fusbh200, temp & ~PORT_PE, status_reg); - break; - case USB_PORT_FEAT_C_ENABLE: - fusbh200_writel(fusbh200, temp | PORT_PEC, status_reg); - break; - case USB_PORT_FEAT_SUSPEND: - if (temp & PORT_RESET) - goto error; - if (!(temp & PORT_SUSPEND)) - break; - if ((temp & PORT_PE) == 0) - goto error; - - fusbh200_writel(fusbh200, temp | PORT_RESUME, status_reg); - fusbh200->reset_done[wIndex] = jiffies - + msecs_to_jiffies(USB_RESUME_TIMEOUT); - break; - case USB_PORT_FEAT_C_SUSPEND: - clear_bit(wIndex, &fusbh200->port_c_suspend); - break; - case USB_PORT_FEAT_C_CONNECTION: - fusbh200_writel(fusbh200, temp | PORT_CSC, status_reg); - break; - case USB_PORT_FEAT_C_OVER_CURRENT: - fusbh200_writel(fusbh200, temp | BMISR_OVC, &fusbh200->regs->bmisr); - break; - case USB_PORT_FEAT_C_RESET: - /* GetPortStatus clears reset */ - break; - default: - goto error; - } - fusbh200_readl(fusbh200, &fusbh200->regs->command); /* unblock posted write */ - break; - case GetHubDescriptor: - fusbh200_hub_descriptor (fusbh200, (struct usb_hub_descriptor *) - buf); - break; - case GetHubStatus: - /* no hub-wide feature/status flags */ - memset (buf, 0, 4); - //cpu_to_le32s ((u32 *) buf); - break; - case GetPortStatus: - if (!wIndex || wIndex > ports) - goto error; - wIndex--; - status = 0; - temp = fusbh200_readl(fusbh200, status_reg); - - // wPortChange bits - if (temp & PORT_CSC) - status |= USB_PORT_STAT_C_CONNECTION << 16; - if (temp & PORT_PEC) - status |= USB_PORT_STAT_C_ENABLE << 16; - - temp1 = fusbh200_readl(fusbh200, &fusbh200->regs->bmisr); - if (temp1 & BMISR_OVC) - status |= USB_PORT_STAT_C_OVERCURRENT << 16; - - /* whoever resumes must GetPortStatus to complete it!! */ - if (temp & PORT_RESUME) { - - /* Remote Wakeup received? */ - if (!fusbh200->reset_done[wIndex]) { - /* resume signaling for 20 msec */ - fusbh200->reset_done[wIndex] = jiffies - + msecs_to_jiffies(20); - /* check the port again */ - mod_timer(&fusbh200_to_hcd(fusbh200)->rh_timer, - fusbh200->reset_done[wIndex]); - } - - /* resume completed? */ - else if (time_after_eq(jiffies, - fusbh200->reset_done[wIndex])) { - clear_bit(wIndex, &fusbh200->suspended_ports); - set_bit(wIndex, &fusbh200->port_c_suspend); - fusbh200->reset_done[wIndex] = 0; - - /* stop resume signaling */ - temp = fusbh200_readl(fusbh200, status_reg); - fusbh200_writel(fusbh200, - temp & ~(PORT_RWC_BITS | PORT_RESUME), - status_reg); - clear_bit(wIndex, &fusbh200->resuming_ports); - retval = handshake(fusbh200, status_reg, - PORT_RESUME, 0, 2000 /* 2msec */); - if (retval != 0) { - fusbh200_err(fusbh200, - "port %d resume error %d\n", - wIndex + 1, retval); - goto error; - } - temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10)); - } - } - - /* whoever resets must GetPortStatus to complete it!! */ - if ((temp & PORT_RESET) - && time_after_eq(jiffies, - fusbh200->reset_done[wIndex])) { - status |= USB_PORT_STAT_C_RESET << 16; - fusbh200->reset_done [wIndex] = 0; - clear_bit(wIndex, &fusbh200->resuming_ports); - - /* force reset to complete */ - fusbh200_writel(fusbh200, temp & ~(PORT_RWC_BITS | PORT_RESET), - status_reg); - /* REVISIT: some hardware needs 550+ usec to clear - * this bit; seems too long to spin routinely... 
- */ - retval = handshake(fusbh200, status_reg, - PORT_RESET, 0, 1000); - if (retval != 0) { - fusbh200_err (fusbh200, "port %d reset error %d\n", - wIndex + 1, retval); - goto error; - } - - /* see what we found out */ - temp = check_reset_complete (fusbh200, wIndex, status_reg, - fusbh200_readl(fusbh200, status_reg)); - } - - if (!(temp & (PORT_RESUME|PORT_RESET))) { - fusbh200->reset_done[wIndex] = 0; - clear_bit(wIndex, &fusbh200->resuming_ports); - } - - /* transfer dedicated ports to the companion hc */ - if ((temp & PORT_CONNECT) && - test_bit(wIndex, &fusbh200->companion_ports)) { - temp &= ~PORT_RWC_BITS; - fusbh200_writel(fusbh200, temp, status_reg); - fusbh200_dbg(fusbh200, "port %d --> companion\n", wIndex + 1); - temp = fusbh200_readl(fusbh200, status_reg); - } - - /* - * Even if OWNER is set, there's no harm letting hub_wq - * see the wPortStatus values (they should all be 0 except - * for PORT_POWER anyway). - */ - - if (temp & PORT_CONNECT) { - status |= USB_PORT_STAT_CONNECTION; - status |= fusbh200_port_speed(fusbh200, temp); - } - if (temp & PORT_PE) - status |= USB_PORT_STAT_ENABLE; - - /* maybe the port was unsuspended without our knowledge */ - if (temp & (PORT_SUSPEND|PORT_RESUME)) { - status |= USB_PORT_STAT_SUSPEND; - } else if (test_bit(wIndex, &fusbh200->suspended_ports)) { - clear_bit(wIndex, &fusbh200->suspended_ports); - clear_bit(wIndex, &fusbh200->resuming_ports); - fusbh200->reset_done[wIndex] = 0; - if (temp & PORT_PE) - set_bit(wIndex, &fusbh200->port_c_suspend); - } - - temp1 = fusbh200_readl(fusbh200, &fusbh200->regs->bmisr); - if (temp1 & BMISR_OVC) - status |= USB_PORT_STAT_OVERCURRENT; - if (temp & PORT_RESET) - status |= USB_PORT_STAT_RESET; - if (test_bit(wIndex, &fusbh200->port_c_suspend)) - status |= USB_PORT_STAT_C_SUSPEND << 16; - - if (status & ~0xffff) /* only if wPortChange is interesting */ - dbg_port(fusbh200, "GetStatus", wIndex + 1, temp); - put_unaligned_le32(status, buf); - break; - case SetHubFeature: - switch (wValue) { - case C_HUB_LOCAL_POWER: - case C_HUB_OVER_CURRENT: - /* no hub-wide feature/status flags */ - break; - default: - goto error; - } - break; - case SetPortFeature: - selector = wIndex >> 8; - wIndex &= 0xff; - - if (!wIndex || wIndex > ports) - goto error; - wIndex--; - temp = fusbh200_readl(fusbh200, status_reg); - temp &= ~PORT_RWC_BITS; - switch (wValue) { - case USB_PORT_FEAT_SUSPEND: - if ((temp & PORT_PE) == 0 - || (temp & PORT_RESET) != 0) - goto error; - - /* After above check the port must be connected. - * Set appropriate bit thus could put phy into low power - * mode if we have hostpc feature - */ - fusbh200_writel(fusbh200, temp | PORT_SUSPEND, status_reg); - set_bit(wIndex, &fusbh200->suspended_ports); - break; - case USB_PORT_FEAT_RESET: - if (temp & PORT_RESUME) - goto error; - /* line status bits may report this as low speed, - * which can be fine if this root hub has a - * transaction translator built in. - */ - fusbh200_dbg(fusbh200, "port %d reset\n", wIndex + 1); - temp |= PORT_RESET; - temp &= ~PORT_PE; - - /* - * caller must wait, then call GetPortStatus - * usb 2.0 spec says 50 ms resets on root - */ - fusbh200->reset_done [wIndex] = jiffies - + msecs_to_jiffies (50); - fusbh200_writel(fusbh200, temp, status_reg); - break; - - /* For downstream facing ports (these): one hub port is put - * into test mode according to USB2 11.24.2.13, then the hub - * must be reset (which for root hub now means rmmod+modprobe, - * or else system reboot). 
See EHCI 2.3.9 and 4.14 for info - * about the EHCI-specific stuff. - */ - case USB_PORT_FEAT_TEST: - if (!selector || selector > 5) - goto error; - spin_unlock_irqrestore(&fusbh200->lock, flags); - fusbh200_quiesce(fusbh200); - spin_lock_irqsave(&fusbh200->lock, flags); - - /* Put all enabled ports into suspend */ - temp = fusbh200_readl(fusbh200, status_reg) & ~PORT_RWC_BITS; - if (temp & PORT_PE) - fusbh200_writel(fusbh200, temp | PORT_SUSPEND, - status_reg); - - spin_unlock_irqrestore(&fusbh200->lock, flags); - fusbh200_halt(fusbh200); - spin_lock_irqsave(&fusbh200->lock, flags); - - temp = fusbh200_readl(fusbh200, status_reg); - temp |= selector << 16; - fusbh200_writel(fusbh200, temp, status_reg); - break; - - default: - goto error; - } - fusbh200_readl(fusbh200, &fusbh200->regs->command); /* unblock posted writes */ - break; - - default: -error: - /* "stall" on error */ - retval = -EPIPE; - } - spin_unlock_irqrestore (&fusbh200->lock, flags); - return retval; -} - -static void __maybe_unused fusbh200_relinquish_port(struct usb_hcd *hcd, - int portnum) -{ - return; -} - -static int __maybe_unused fusbh200_port_handed_over(struct usb_hcd *hcd, - int portnum) -{ - return 0; -} -/*-------------------------------------------------------------------------*/ -/* - * There's basically three types of memory: - * - data used only by the HCD ... kmalloc is fine - * - async and periodic schedules, shared by HC and HCD ... these - * need to use dma_pool or dma_alloc_coherent - * - driver buffers, read/written by HC ... single shot DMA mapped - * - * There's also "register" data (e.g. PCI or SOC), which is memory mapped. - * No memory seen by this driver is pageable. - */ - -/*-------------------------------------------------------------------------*/ - -/* Allocate the key transfer structures from the previously allocated pool */ - -static inline void fusbh200_qtd_init(struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd, - dma_addr_t dma) -{ - memset (qtd, 0, sizeof *qtd); - qtd->qtd_dma = dma; - qtd->hw_token = cpu_to_hc32(fusbh200, QTD_STS_HALT); - qtd->hw_next = FUSBH200_LIST_END(fusbh200); - qtd->hw_alt_next = FUSBH200_LIST_END(fusbh200); - INIT_LIST_HEAD (&qtd->qtd_list); -} - -static struct fusbh200_qtd *fusbh200_qtd_alloc (struct fusbh200_hcd *fusbh200, gfp_t flags) -{ - struct fusbh200_qtd *qtd; - dma_addr_t dma; - - qtd = dma_pool_alloc (fusbh200->qtd_pool, flags, &dma); - if (qtd != NULL) { - fusbh200_qtd_init(fusbh200, qtd, dma); - } - return qtd; -} - -static inline void fusbh200_qtd_free (struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd) -{ - dma_pool_free (fusbh200->qtd_pool, qtd, qtd->qtd_dma); -} - - -static void qh_destroy(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh) -{ - /* clean qtds first, and know this is not linked */ - if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) { - fusbh200_dbg (fusbh200, "unused qh not empty!\n"); - BUG (); - } - if (qh->dummy) - fusbh200_qtd_free (fusbh200, qh->dummy); - dma_pool_free(fusbh200->qh_pool, qh->hw, qh->qh_dma); - kfree(qh); -} - -static struct fusbh200_qh *fusbh200_qh_alloc (struct fusbh200_hcd *fusbh200, gfp_t flags) -{ - struct fusbh200_qh *qh; - dma_addr_t dma; - - qh = kzalloc(sizeof *qh, GFP_ATOMIC); - if (!qh) - goto done; - qh->hw = (struct fusbh200_qh_hw *) - dma_pool_alloc(fusbh200->qh_pool, flags, &dma); - if (!qh->hw) - goto fail; - memset(qh->hw, 0, sizeof *qh->hw); - qh->qh_dma = dma; - // INIT_LIST_HEAD (&qh->qh_list); - INIT_LIST_HEAD (&qh->qtd_list); - - /* dummy td enables safe urb queuing */ - 
qh->dummy = fusbh200_qtd_alloc (fusbh200, flags); - if (qh->dummy == NULL) { - fusbh200_dbg (fusbh200, "no dummy td\n"); - goto fail1; - } -done: - return qh; -fail1: - dma_pool_free(fusbh200->qh_pool, qh->hw, qh->qh_dma); -fail: - kfree(qh); - return NULL; -} - -/*-------------------------------------------------------------------------*/ - -/* The queue heads and transfer descriptors are managed from pools tied - * to each of the "per device" structures. - * This is the initialisation and cleanup code. - */ - -static void fusbh200_mem_cleanup (struct fusbh200_hcd *fusbh200) -{ - if (fusbh200->async) - qh_destroy(fusbh200, fusbh200->async); - fusbh200->async = NULL; - - if (fusbh200->dummy) - qh_destroy(fusbh200, fusbh200->dummy); - fusbh200->dummy = NULL; - - /* DMA consistent memory and pools */ - if (fusbh200->qtd_pool) - dma_pool_destroy (fusbh200->qtd_pool); - fusbh200->qtd_pool = NULL; - - if (fusbh200->qh_pool) { - dma_pool_destroy (fusbh200->qh_pool); - fusbh200->qh_pool = NULL; - } - - if (fusbh200->itd_pool) - dma_pool_destroy (fusbh200->itd_pool); - fusbh200->itd_pool = NULL; - - if (fusbh200->periodic) - dma_free_coherent (fusbh200_to_hcd(fusbh200)->self.controller, - fusbh200->periodic_size * sizeof (u32), - fusbh200->periodic, fusbh200->periodic_dma); - fusbh200->periodic = NULL; - - /* shadow periodic table */ - kfree(fusbh200->pshadow); - fusbh200->pshadow = NULL; -} - -/* remember to add cleanup code (above) if you add anything here */ -static int fusbh200_mem_init (struct fusbh200_hcd *fusbh200, gfp_t flags) -{ - int i; - - /* QTDs for control/bulk/intr transfers */ - fusbh200->qtd_pool = dma_pool_create ("fusbh200_qtd", - fusbh200_to_hcd(fusbh200)->self.controller, - sizeof (struct fusbh200_qtd), - 32 /* byte alignment (for hw parts) */, - 4096 /* can't cross 4K */); - if (!fusbh200->qtd_pool) { - goto fail; - } - - /* QHs for control/bulk/intr transfers */ - fusbh200->qh_pool = dma_pool_create ("fusbh200_qh", - fusbh200_to_hcd(fusbh200)->self.controller, - sizeof(struct fusbh200_qh_hw), - 32 /* byte alignment (for hw parts) */, - 4096 /* can't cross 4K */); - if (!fusbh200->qh_pool) { - goto fail; - } - fusbh200->async = fusbh200_qh_alloc (fusbh200, flags); - if (!fusbh200->async) { - goto fail; - } - - /* ITD for high speed ISO transfers */ - fusbh200->itd_pool = dma_pool_create ("fusbh200_itd", - fusbh200_to_hcd(fusbh200)->self.controller, - sizeof (struct fusbh200_itd), - 64 /* byte alignment (for hw parts) */, - 4096 /* can't cross 4K */); - if (!fusbh200->itd_pool) { - goto fail; - } - - /* Hardware periodic table */ - fusbh200->periodic = (__le32 *) - dma_alloc_coherent (fusbh200_to_hcd(fusbh200)->self.controller, - fusbh200->periodic_size * sizeof(__le32), - &fusbh200->periodic_dma, 0); - if (fusbh200->periodic == NULL) { - goto fail; - } - - for (i = 0; i < fusbh200->periodic_size; i++) - fusbh200->periodic[i] = FUSBH200_LIST_END(fusbh200); - - /* software shadow of hardware table */ - fusbh200->pshadow = kcalloc(fusbh200->periodic_size, sizeof(void *), flags); - if (fusbh200->pshadow != NULL) - return 0; - -fail: - fusbh200_dbg (fusbh200, "couldn't init memory\n"); - fusbh200_mem_cleanup (fusbh200); - return -ENOMEM; -} -/*-------------------------------------------------------------------------*/ -/* - * EHCI hardware queue manipulation ... the core. QH/QTD manipulation. - * - * Control, bulk, and interrupt traffic all use "qh" lists. 
They list "qtd" - * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned - * buffers needed for the larger number). We use one QH per endpoint, queue - * multiple urbs (all three types) per endpoint. URBs may need several qtds. - * - * ISO traffic uses "ISO TD" (itd) records, and (along with - * interrupts) needs careful scheduling. Performance improvements can be - * an ongoing challenge. That's in "ehci-sched.c". - * - * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs, - * or otherwise through transaction translators (TTs) in USB 2.0 hubs using - * (b) special fields in qh entries or (c) split iso entries. TTs will - * buffer low/full speed data so the host collects it at high speed. - */ - -/*-------------------------------------------------------------------------*/ - -/* fill a qtd, returning how much of the buffer we were able to queue up */ - -static int -qtd_fill(struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd, dma_addr_t buf, - size_t len, int token, int maxpacket) -{ - int i, count; - u64 addr = buf; - - /* one buffer entry per 4K ... first might be short or unaligned */ - qtd->hw_buf[0] = cpu_to_hc32(fusbh200, (u32)addr); - qtd->hw_buf_hi[0] = cpu_to_hc32(fusbh200, (u32)(addr >> 32)); - count = 0x1000 - (buf & 0x0fff); /* rest of that page */ - if (likely (len < count)) /* ... iff needed */ - count = len; - else { - buf += 0x1000; - buf &= ~0x0fff; - - /* per-qtd limit: from 16K to 20K (best alignment) */ - for (i = 1; count < len && i < 5; i++) { - addr = buf; - qtd->hw_buf[i] = cpu_to_hc32(fusbh200, (u32)addr); - qtd->hw_buf_hi[i] = cpu_to_hc32(fusbh200, - (u32)(addr >> 32)); - buf += 0x1000; - if ((count + 0x1000) < len) - count += 0x1000; - else - count = len; - } - - /* short packets may only terminate transfers */ - if (count != len) - count -= (count % maxpacket); - } - qtd->hw_token = cpu_to_hc32(fusbh200, (count << 16) | token); - qtd->length = count; - - return count; -} - -/*-------------------------------------------------------------------------*/ - -static inline void -qh_update (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh, struct fusbh200_qtd *qtd) -{ - struct fusbh200_qh_hw *hw = qh->hw; - - /* writes to an active overlay are unsafe */ - BUG_ON(qh->qh_state != QH_STATE_IDLE); - - hw->hw_qtd_next = QTD_NEXT(fusbh200, qtd->qtd_dma); - hw->hw_alt_next = FUSBH200_LIST_END(fusbh200); - - /* Except for control endpoints, we make hardware maintain data - * toggle (like OHCI) ... here (re)initialize the toggle in the QH, - * and set the pseudo-toggle in udev. Only usb_clear_halt() will - * ever clear it. - */ - if (!(hw->hw_info1 & cpu_to_hc32(fusbh200, QH_TOGGLE_CTL))) { - unsigned is_out, epnum; - - is_out = qh->is_out; - epnum = (hc32_to_cpup(fusbh200, &hw->hw_info1) >> 8) & 0x0f; - if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) { - hw->hw_token &= ~cpu_to_hc32(fusbh200, QTD_TOGGLE); - usb_settoggle (qh->dev, epnum, is_out, 1); - } - } - - hw->hw_token &= cpu_to_hc32(fusbh200, QTD_TOGGLE | QTD_STS_PING); -} - -/* if it weren't for a common silicon quirk (writing the dummy into the qh - * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault - * recovery (including urb dequeue) would need software changes to a QH... 
- */ -static void -qh_refresh (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh) -{ - struct fusbh200_qtd *qtd; - - if (list_empty (&qh->qtd_list)) - qtd = qh->dummy; - else { - qtd = list_entry (qh->qtd_list.next, - struct fusbh200_qtd, qtd_list); - /* - * first qtd may already be partially processed. - * If we come here during unlink, the QH overlay region - * might have reference to the just unlinked qtd. The - * qtd is updated in qh_completions(). Update the QH - * overlay here. - */ - if (cpu_to_hc32(fusbh200, qtd->qtd_dma) == qh->hw->hw_current) { - qh->hw->hw_qtd_next = qtd->hw_next; - qtd = NULL; - } - } - - if (qtd) - qh_update (fusbh200, qh, qtd); -} - -/*-------------------------------------------------------------------------*/ - -static void qh_link_async(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh); - -static void fusbh200_clear_tt_buffer_complete(struct usb_hcd *hcd, - struct usb_host_endpoint *ep) -{ - struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd); - struct fusbh200_qh *qh = ep->hcpriv; - unsigned long flags; - - spin_lock_irqsave(&fusbh200->lock, flags); - qh->clearing_tt = 0; - if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list) - && fusbh200->rh_state == FUSBH200_RH_RUNNING) - qh_link_async(fusbh200, qh); - spin_unlock_irqrestore(&fusbh200->lock, flags); -} - -static void fusbh200_clear_tt_buffer(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh, - struct urb *urb, u32 token) -{ - - /* If an async split transaction gets an error or is unlinked, - * the TT buffer may be left in an indeterminate state. We - * have to clear the TT buffer. - * - * Note: this routine is never called for Isochronous transfers. - */ - if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) { - struct usb_device *tt = urb->dev->tt->hub; - - dev_dbg(&tt->dev, - "clear tt buffer port %d, a%d ep%d t%08x\n", - urb->dev->ttport, urb->dev->devnum, - usb_pipeendpoint(urb->pipe), token); - - if (urb->dev->tt->hub != - fusbh200_to_hcd(fusbh200)->self.root_hub) { - if (usb_hub_clear_tt_buffer(urb) == 0) - qh->clearing_tt = 1; - } - } -} - -static int qtd_copy_status ( - struct fusbh200_hcd *fusbh200, - struct urb *urb, - size_t length, - u32 token -) -{ - int status = -EINPROGRESS; - - /* count IN/OUT bytes, not SETUP (even short packets) */ - if (likely (QTD_PID (token) != 2)) - urb->actual_length += length - QTD_LENGTH (token); - - /* don't modify error codes */ - if (unlikely(urb->unlinked)) - return status; - - /* force cleanup after short read; not always an error */ - if (unlikely (IS_SHORT_READ (token))) - status = -EREMOTEIO; - - /* serious "can't proceed" faults reported by the hardware */ - if (token & QTD_STS_HALT) { - if (token & QTD_STS_BABBLE) { - /* FIXME "must" disable babbling device's port too */ - status = -EOVERFLOW; - /* CERR nonzero + halt --> stall */ - } else if (QTD_CERR(token)) { - status = -EPIPE; - - /* In theory, more than one of the following bits can be set - * since they are sticky and the transaction is retried. - * Which to test first is rather arbitrary. - */ - } else if (token & QTD_STS_MMF) { - /* fs/ls interrupt xfer missed the complete-split */ - status = -EPROTO; - } else if (token & QTD_STS_DBE) { - status = (QTD_PID (token) == 1) /* IN ? */ - ? 
-ENOSR /* hc couldn't read data */ - : -ECOMM; /* hc couldn't write data */ - } else if (token & QTD_STS_XACT) { - /* timeout, bad CRC, wrong PID, etc */ - fusbh200_dbg(fusbh200, "devpath %s ep%d%s 3strikes\n", - urb->dev->devpath, - usb_pipeendpoint(urb->pipe), - usb_pipein(urb->pipe) ? "in" : "out"); - status = -EPROTO; - } else { /* unknown */ - status = -EPROTO; - } - - fusbh200_dbg(fusbh200, - "dev%d ep%d%s qtd token %08x --> status %d\n", - usb_pipedevice (urb->pipe), - usb_pipeendpoint (urb->pipe), - usb_pipein (urb->pipe) ? "in" : "out", - token, status); - } - - return status; -} - -static void -fusbh200_urb_done(struct fusbh200_hcd *fusbh200, struct urb *urb, int status) -__releases(fusbh200->lock) -__acquires(fusbh200->lock) -{ - if (likely (urb->hcpriv != NULL)) { - struct fusbh200_qh *qh = (struct fusbh200_qh *) urb->hcpriv; - - /* S-mask in a QH means it's an interrupt urb */ - if ((qh->hw->hw_info2 & cpu_to_hc32(fusbh200, QH_SMASK)) != 0) { - - /* ... update hc-wide periodic stats (for usbfs) */ - fusbh200_to_hcd(fusbh200)->self.bandwidth_int_reqs--; - } - } - - if (unlikely(urb->unlinked)) { - COUNT(fusbh200->stats.unlink); - } else { - /* report non-error and short read status as zero */ - if (status == -EINPROGRESS || status == -EREMOTEIO) - status = 0; - COUNT(fusbh200->stats.complete); - } - -#ifdef FUSBH200_URB_TRACE - fusbh200_dbg (fusbh200, - "%s %s urb %p ep%d%s status %d len %d/%d\n", - __func__, urb->dev->devpath, urb, - usb_pipeendpoint (urb->pipe), - usb_pipein (urb->pipe) ? "in" : "out", - status, - urb->actual_length, urb->transfer_buffer_length); -#endif - - /* complete() can reenter this HCD */ - usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb); - spin_unlock (&fusbh200->lock); - usb_hcd_giveback_urb(fusbh200_to_hcd(fusbh200), urb, status); - spin_lock (&fusbh200->lock); -} - -static int qh_schedule (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh); - -/* - * Process and free completed qtds for a qh, returning URBs to drivers. - * Chases up to qh->hw_current. Returns number of completions called, - * indicating how much "real" work we did. - */ -static unsigned -qh_completions (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh) -{ - struct fusbh200_qtd *last, *end = qh->dummy; - struct list_head *entry, *tmp; - int last_status; - int stopped; - unsigned count = 0; - u8 state; - struct fusbh200_qh_hw *hw = qh->hw; - - if (unlikely (list_empty (&qh->qtd_list))) - return count; - - /* completions (or tasks on other cpus) must never clobber HALT - * till we've gone through and cleaned everything up, even when - * they add urbs to this qh's queue or mark them for unlinking. - * - * NOTE: unlinking expects to be done in queue order. - * - * It's a bug for qh->qh_state to be anything other than - * QH_STATE_IDLE, unless our caller is scan_async() or - * scan_intr(). - */ - state = qh->qh_state; - qh->qh_state = QH_STATE_COMPLETING; - stopped = (state == QH_STATE_IDLE); - - rescan: - last = NULL; - last_status = -EINPROGRESS; - qh->needs_rescan = 0; - - /* remove de-activated QTDs from front of queue. - * after faults (including short reads), cleanup this urb - * then let the queue advance. - * if queue is stopped, handles unlinks. 
- */ - list_for_each_safe (entry, tmp, &qh->qtd_list) { - struct fusbh200_qtd *qtd; - struct urb *urb; - u32 token = 0; - - qtd = list_entry (entry, struct fusbh200_qtd, qtd_list); - urb = qtd->urb; - - /* clean up any state from previous QTD ...*/ - if (last) { - if (likely (last->urb != urb)) { - fusbh200_urb_done(fusbh200, last->urb, last_status); - count++; - last_status = -EINPROGRESS; - } - fusbh200_qtd_free (fusbh200, last); - last = NULL; - } - - /* ignore urbs submitted during completions we reported */ - if (qtd == end) - break; - - /* hardware copies qtd out of qh overlay */ - rmb (); - token = hc32_to_cpu(fusbh200, qtd->hw_token); - - /* always clean up qtds the hc de-activated */ - retry_xacterr: - if ((token & QTD_STS_ACTIVE) == 0) { - - /* Report Data Buffer Error: non-fatal but useful */ - if (token & QTD_STS_DBE) - fusbh200_dbg(fusbh200, - "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n", - urb, - usb_endpoint_num(&urb->ep->desc), - usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out", - urb->transfer_buffer_length, - qtd, - qh); - - /* on STALL, error, and short reads this urb must - * complete and all its qtds must be recycled. - */ - if ((token & QTD_STS_HALT) != 0) { - - /* retry transaction errors until we - * reach the software xacterr limit - */ - if ((token & QTD_STS_XACT) && - QTD_CERR(token) == 0 && - ++qh->xacterrs < QH_XACTERR_MAX && - !urb->unlinked) { - fusbh200_dbg(fusbh200, - "detected XactErr len %zu/%zu retry %d\n", - qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs); - - /* reset the token in the qtd and the - * qh overlay (which still contains - * the qtd) so that we pick up from - * where we left off - */ - token &= ~QTD_STS_HALT; - token |= QTD_STS_ACTIVE | - (FUSBH200_TUNE_CERR << 10); - qtd->hw_token = cpu_to_hc32(fusbh200, - token); - wmb(); - hw->hw_token = cpu_to_hc32(fusbh200, - token); - goto retry_xacterr; - } - stopped = 1; - - /* magic dummy for some short reads; qh won't advance. - * that silicon quirk can kick in with this dummy too. - * - * other short reads won't stop the queue, including - * control transfers (status stage handles that) or - * most other single-qtd reads ... the queue stops if - * URB_SHORT_NOT_OK was set so the driver submitting - * the urbs could clean it up. - */ - } else if (IS_SHORT_READ (token) - && !(qtd->hw_alt_next - & FUSBH200_LIST_END(fusbh200))) { - stopped = 1; - } - - /* stop scanning when we reach qtds the hc is using */ - } else if (likely (!stopped - && fusbh200->rh_state >= FUSBH200_RH_RUNNING)) { - break; - - /* scan the whole queue for unlinks whenever it stops */ - } else { - stopped = 1; - - /* cancel everything if we halt, suspend, etc */ - if (fusbh200->rh_state < FUSBH200_RH_RUNNING) - last_status = -ESHUTDOWN; - - /* this qtd is active; skip it unless a previous qtd - * for its urb faulted, or its urb was canceled. - */ - else if (last_status == -EINPROGRESS && !urb->unlinked) - continue; - - /* qh unlinked; token in overlay may be most current */ - if (state == QH_STATE_IDLE - && cpu_to_hc32(fusbh200, qtd->qtd_dma) - == hw->hw_current) { - token = hc32_to_cpu(fusbh200, hw->hw_token); - - /* An unlink may leave an incomplete - * async transaction in the TT buffer. - * We have to clear it. - */ - fusbh200_clear_tt_buffer(fusbh200, qh, urb, token); - } - } - - /* unless we already know the urb's status, collect qtd status - * and update count of bytes transferred. 
in common short read - * cases with only one data qtd (including control transfers), - * queue processing won't halt. but with two or more qtds (for - * example, with a 32 KB transfer), when the first qtd gets a - * short read the second must be removed by hand. - */ - if (last_status == -EINPROGRESS) { - last_status = qtd_copy_status(fusbh200, urb, - qtd->length, token); - if (last_status == -EREMOTEIO - && (qtd->hw_alt_next - & FUSBH200_LIST_END(fusbh200))) - last_status = -EINPROGRESS; - - /* As part of low/full-speed endpoint-halt processing - * we must clear the TT buffer (11.17.5). - */ - if (unlikely(last_status != -EINPROGRESS && - last_status != -EREMOTEIO)) { - /* The TT's in some hubs malfunction when they - * receive this request following a STALL (they - * stop sending isochronous packets). Since a - * STALL can't leave the TT buffer in a busy - * state (if you believe Figures 11-48 - 11-51 - * in the USB 2.0 spec), we won't clear the TT - * buffer in this case. Strictly speaking this - * is a violation of the spec. - */ - if (last_status != -EPIPE) - fusbh200_clear_tt_buffer(fusbh200, qh, urb, - token); - } - } - - /* if we're removing something not at the queue head, - * patch the hardware queue pointer. - */ - if (stopped && qtd->qtd_list.prev != &qh->qtd_list) { - last = list_entry (qtd->qtd_list.prev, - struct fusbh200_qtd, qtd_list); - last->hw_next = qtd->hw_next; - } - - /* remove qtd; it's recycled after possible urb completion */ - list_del (&qtd->qtd_list); - last = qtd; - - /* reinit the xacterr counter for the next qtd */ - qh->xacterrs = 0; - } - - /* last urb's completion might still need calling */ - if (likely (last != NULL)) { - fusbh200_urb_done(fusbh200, last->urb, last_status); - count++; - fusbh200_qtd_free (fusbh200, last); - } - - /* Do we need to rescan for URBs dequeued during a giveback? */ - if (unlikely(qh->needs_rescan)) { - /* If the QH is already unlinked, do the rescan now. */ - if (state == QH_STATE_IDLE) - goto rescan; - - /* Otherwise we have to wait until the QH is fully unlinked. - * Our caller will start an unlink if qh->needs_rescan is - * set. But if an unlink has already started, nothing needs - * to be done. - */ - if (state != QH_STATE_LINKED) - qh->needs_rescan = 0; - } - - /* restore original state; caller must unlink or relink */ - qh->qh_state = state; - - /* be sure the hardware's done with the qh before refreshing - * it after fault cleanup, or recovering from silicon wrongly - * overlaying the dummy qtd (which reduces DMA chatter). - */ - if (stopped != 0 || hw->hw_qtd_next == FUSBH200_LIST_END(fusbh200)) { - switch (state) { - case QH_STATE_IDLE: - qh_refresh(fusbh200, qh); - break; - case QH_STATE_LINKED: - /* We won't refresh a QH that's linked (after the HC - * stopped the queue). That avoids a race: - * - HC reads first part of QH; - * - CPU updates that first part and the token; - * - HC reads rest of that QH, including token - * Result: HC gets an inconsistent image, and then - * DMAs to/from the wrong memory (corrupting it). - * - * That should be rare for interrupt transfers, - * except maybe high bandwidth ... - */ - - /* Tell the caller to start an unlink */ - qh->needs_rescan = 1; - break; - /* otherwise, unlink already started */ - } - } - - return count; -} - -/*-------------------------------------------------------------------------*/ - -// high bandwidth multiplier, as encoded in highspeed endpoint descriptors -#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) -// ... 
and packet size, for any kind of endpoint descriptor -#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) - -/* - * reverse of qh_urb_transaction: free a list of TDs. - * used for cleanup after errors, before HC sees an URB's TDs. - */ -static void qtd_list_free ( - struct fusbh200_hcd *fusbh200, - struct urb *urb, - struct list_head *qtd_list -) { - struct list_head *entry, *temp; - - list_for_each_safe (entry, temp, qtd_list) { - struct fusbh200_qtd *qtd; - - qtd = list_entry (entry, struct fusbh200_qtd, qtd_list); - list_del (&qtd->qtd_list); - fusbh200_qtd_free (fusbh200, qtd); - } -} - -/* - * create a list of filled qtds for this URB; won't link into qh. - */ -static struct list_head * -qh_urb_transaction ( - struct fusbh200_hcd *fusbh200, - struct urb *urb, - struct list_head *head, - gfp_t flags -) { - struct fusbh200_qtd *qtd, *qtd_prev; - dma_addr_t buf; - int len, this_sg_len, maxpacket; - int is_input; - u32 token; - int i; - struct scatterlist *sg; - - /* - * URBs map to sequences of QTDs: one logical transaction - */ - qtd = fusbh200_qtd_alloc (fusbh200, flags); - if (unlikely (!qtd)) - return NULL; - list_add_tail (&qtd->qtd_list, head); - qtd->urb = urb; - - token = QTD_STS_ACTIVE; - token |= (FUSBH200_TUNE_CERR << 10); - /* for split transactions, SplitXState initialized to zero */ - - len = urb->transfer_buffer_length; - is_input = usb_pipein (urb->pipe); - if (usb_pipecontrol (urb->pipe)) { - /* SETUP pid */ - qtd_fill(fusbh200, qtd, urb->setup_dma, - sizeof (struct usb_ctrlrequest), - token | (2 /* "setup" */ << 8), 8); - - /* ... and always at least one more pid */ - token ^= QTD_TOGGLE; - qtd_prev = qtd; - qtd = fusbh200_qtd_alloc (fusbh200, flags); - if (unlikely (!qtd)) - goto cleanup; - qtd->urb = urb; - qtd_prev->hw_next = QTD_NEXT(fusbh200, qtd->qtd_dma); - list_add_tail (&qtd->qtd_list, head); - - /* for zero length DATA stages, STATUS is always IN */ - if (len == 0) - token |= (1 /* "in" */ << 8); - } - - /* - * data transfer stage: buffer setup - */ - i = urb->num_mapped_sgs; - if (len > 0 && i > 0) { - sg = urb->sg; - buf = sg_dma_address(sg); - - /* urb->transfer_buffer_length may be smaller than the - * size of the scatterlist (or vice versa) - */ - this_sg_len = min_t(int, sg_dma_len(sg), len); - } else { - sg = NULL; - buf = urb->transfer_dma; - this_sg_len = len; - } - - if (is_input) - token |= (1 /* "in" */ << 8); - /* else it's already initted to "out" pid (0 << 8) */ - - maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input)); - - /* - * buffer gets wrapped in one or more qtds; - * last one may be "short" (including zero len) - * and may serve as a control status ack - */ - for (;;) { - int this_qtd_len; - - this_qtd_len = qtd_fill(fusbh200, qtd, buf, this_sg_len, token, - maxpacket); - this_sg_len -= this_qtd_len; - len -= this_qtd_len; - buf += this_qtd_len; - - /* - * short reads advance to a "magic" dummy instead of the next - * qtd ... that forces the queue to stop, for manual cleanup. - * (this will usually be overridden later.) 
- */ - if (is_input) - qtd->hw_alt_next = fusbh200->async->hw->hw_alt_next; - - /* qh makes control packets use qtd toggle; maybe switch it */ - if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) - token ^= QTD_TOGGLE; - - if (likely(this_sg_len <= 0)) { - if (--i <= 0 || len <= 0) - break; - sg = sg_next(sg); - buf = sg_dma_address(sg); - this_sg_len = min_t(int, sg_dma_len(sg), len); - } - - qtd_prev = qtd; - qtd = fusbh200_qtd_alloc (fusbh200, flags); - if (unlikely (!qtd)) - goto cleanup; - qtd->urb = urb; - qtd_prev->hw_next = QTD_NEXT(fusbh200, qtd->qtd_dma); - list_add_tail (&qtd->qtd_list, head); - } - - /* - * unless the caller requires manual cleanup after short reads, - * have the alt_next mechanism keep the queue running after the - * last data qtd (the only one, for control and most other cases). - */ - if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 - || usb_pipecontrol (urb->pipe))) - qtd->hw_alt_next = FUSBH200_LIST_END(fusbh200); - - /* - * control requests may need a terminating data "status" ack; - * other OUT ones may need a terminating short packet - * (zero length). - */ - if (likely (urb->transfer_buffer_length != 0)) { - int one_more = 0; - - if (usb_pipecontrol (urb->pipe)) { - one_more = 1; - token ^= 0x0100; /* "in" <--> "out" */ - token |= QTD_TOGGLE; /* force DATA1 */ - } else if (usb_pipeout(urb->pipe) - && (urb->transfer_flags & URB_ZERO_PACKET) - && !(urb->transfer_buffer_length % maxpacket)) { - one_more = 1; - } - if (one_more) { - qtd_prev = qtd; - qtd = fusbh200_qtd_alloc (fusbh200, flags); - if (unlikely (!qtd)) - goto cleanup; - qtd->urb = urb; - qtd_prev->hw_next = QTD_NEXT(fusbh200, qtd->qtd_dma); - list_add_tail (&qtd->qtd_list, head); - - /* never any data in such packets */ - qtd_fill(fusbh200, qtd, 0, 0, token, 0); - } - } - - /* by default, enable interrupt on urb completion */ - if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT))) - qtd->hw_token |= cpu_to_hc32(fusbh200, QTD_IOC); - return head; - -cleanup: - qtd_list_free (fusbh200, urb, head); - return NULL; -} - -/*-------------------------------------------------------------------------*/ - -// Would be best to create all qh's from config descriptors, -// when each interface/altsetting is established. Unlink -// any previous qh and cancel its urbs first; endpoints are -// implicitly reset then (data toggle too). -// That'd mean updating how usbcore talks to HCDs. (2.7?) - - -/* - * Each QH holds a qtd list; a QH is used for everything except iso. - * - * For interrupt urbs, the scheduler must set the microframe scheduling - * mask(s) each time the QH gets scheduled. For highspeed, that's - * just one microframe in the s-mask. For split interrupt transactions - * there are additional complications: c-mask, maybe FSTNs. - */ -static struct fusbh200_qh * -qh_make ( - struct fusbh200_hcd *fusbh200, - struct urb *urb, - gfp_t flags -) { - struct fusbh200_qh *qh = fusbh200_qh_alloc (fusbh200, flags); - u32 info1 = 0, info2 = 0; - int is_input, type; - int maxp = 0; - struct usb_tt *tt = urb->dev->tt; - struct fusbh200_qh_hw *hw; - - if (!qh) - return qh; - - /* - * init endpoint/device data for this QH - */ - info1 |= usb_pipeendpoint (urb->pipe) << 8; - info1 |= usb_pipedevice (urb->pipe) << 0; - - is_input = usb_pipein (urb->pipe); - type = usb_pipetype (urb->pipe); - maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input); - - /* 1024 byte maxpacket is a hardware ceiling. High bandwidth - * acts like up to 3KB, but is built from smaller packets. 
- */ - if (max_packet(maxp) > 1024) { - fusbh200_dbg(fusbh200, "bogus qh maxpacket %d\n", max_packet(maxp)); - goto done; - } - - /* Compute interrupt scheduling parameters just once, and save. - * - allowing for high bandwidth, how many nsec/uframe are used? - * - split transactions need a second CSPLIT uframe; same question - * - splits also need a schedule gap (for full/low speed I/O) - * - qh has a polling interval - * - * For control/bulk requests, the HC or TT handles these. - */ - if (type == PIPE_INTERRUPT) { - qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH, - is_input, 0, - hb_mult(maxp) * max_packet(maxp))); - qh->start = NO_FRAME; - - if (urb->dev->speed == USB_SPEED_HIGH) { - qh->c_usecs = 0; - qh->gap_uf = 0; - - qh->period = urb->interval >> 3; - if (qh->period == 0 && urb->interval != 1) { - /* NOTE interval 2 or 4 uframes could work. - * But interval 1 scheduling is simpler, and - * includes high bandwidth. - */ - urb->interval = 1; - } else if (qh->period > fusbh200->periodic_size) { - qh->period = fusbh200->periodic_size; - urb->interval = qh->period << 3; - } - } else { - int think_time; - - /* gap is f(FS/LS transfer times) */ - qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed, - is_input, 0, maxp) / (125 * 1000); - - /* FIXME this just approximates SPLIT/CSPLIT times */ - if (is_input) { // SPLIT, gap, CSPLIT+DATA - qh->c_usecs = qh->usecs + HS_USECS (0); - qh->usecs = HS_USECS (1); - } else { // SPLIT+DATA, gap, CSPLIT - qh->usecs += HS_USECS (1); - qh->c_usecs = HS_USECS (0); - } - - think_time = tt ? tt->think_time : 0; - qh->tt_usecs = NS_TO_US (think_time + - usb_calc_bus_time (urb->dev->speed, - is_input, 0, max_packet (maxp))); - qh->period = urb->interval; - if (qh->period > fusbh200->periodic_size) { - qh->period = fusbh200->periodic_size; - urb->interval = qh->period; - } - } - } - - /* support for tt scheduling, and access to toggles */ - qh->dev = urb->dev; - - /* using TT? */ - switch (urb->dev->speed) { - case USB_SPEED_LOW: - info1 |= QH_LOW_SPEED; - /* FALL THROUGH */ - - case USB_SPEED_FULL: - /* EPS 0 means "full" */ - if (type != PIPE_INTERRUPT) - info1 |= (FUSBH200_TUNE_RL_TT << 28); - if (type == PIPE_CONTROL) { - info1 |= QH_CONTROL_EP; /* for TT */ - info1 |= QH_TOGGLE_CTL; /* toggle from qtd */ - } - info1 |= maxp << 16; - - info2 |= (FUSBH200_TUNE_MULT_TT << 30); - - /* Some Freescale processors have an erratum in which the - * port number in the queue head was 0..N-1 instead of 1..N. - */ - if (fusbh200_has_fsl_portno_bug(fusbh200)) - info2 |= (urb->dev->ttport-1) << 23; - else - info2 |= urb->dev->ttport << 23; - - /* set the address of the TT; for TDI's integrated - * root hub tt, leave it zeroed. - */ - if (tt && tt->hub != fusbh200_to_hcd(fusbh200)->self.root_hub) - info2 |= tt->hub->devnum << 16; - - /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */ - - break; - - case USB_SPEED_HIGH: /* no TT involved */ - info1 |= QH_HIGH_SPEED; - if (type == PIPE_CONTROL) { - info1 |= (FUSBH200_TUNE_RL_HS << 28); - info1 |= 64 << 16; /* usb2 fixed maxpacket */ - info1 |= QH_TOGGLE_CTL; /* toggle from qtd */ - info2 |= (FUSBH200_TUNE_MULT_HS << 30); - } else if (type == PIPE_BULK) { - info1 |= (FUSBH200_TUNE_RL_HS << 28); - /* The USB spec says that high speed bulk endpoints - * always use 512 byte maxpacket. But some device - * vendors decided to ignore that, and MSFT is happy - * to help them do so. So now people expect to use - * such nonconformant devices with Linux too; sigh. 
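The interrupt branch of qh_make() above turns a high-speed urb->interval, given in microframes, into a polling period in frames and clamps it to the periodic schedule length. A standalone sketch of just that arithmetic, assuming the driver's default 1024-frame schedule; it is illustrative, not driver code:

#include <stdio.h>

/* mirrors the high-speed interrupt branch of qh_make(); interval is in uframes */
static unsigned hs_period(unsigned *interval, unsigned periodic_size)
{
	unsigned period = *interval >> 3;	/* 8 microframes per frame */

	if (period == 0 && *interval != 1)
		*interval = 1;			/* fall back to every uframe */
	else if (period > periodic_size) {
		period = periodic_size;		/* clamp to schedule length */
		*interval = period << 3;
	}
	return period;
}

int main(void)
{
	unsigned requested[] = { 1, 4, 8, 64, 16384 };
	unsigned i;

	for (i = 0; i < 5; i++) {
		unsigned interval = requested[i];
		unsigned period = hs_period(&interval, 1024);

		printf("interval %5u uframes -> period %4u frames (interval now %u)\n",
		       requested[i], period, interval);
	}
	return 0;
}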
- */ - info1 |= max_packet(maxp) << 16; - info2 |= (FUSBH200_TUNE_MULT_HS << 30); - } else { /* PIPE_INTERRUPT */ - info1 |= max_packet (maxp) << 16; - info2 |= hb_mult (maxp) << 30; - } - break; - default: - fusbh200_dbg(fusbh200, "bogus dev %p speed %d\n", urb->dev, - urb->dev->speed); -done: - qh_destroy(fusbh200, qh); - return NULL; - } - - /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */ - - /* init as live, toggle clear, advance to dummy */ - qh->qh_state = QH_STATE_IDLE; - hw = qh->hw; - hw->hw_info1 = cpu_to_hc32(fusbh200, info1); - hw->hw_info2 = cpu_to_hc32(fusbh200, info2); - qh->is_out = !is_input; - usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1); - qh_refresh (fusbh200, qh); - return qh; -} - -/*-------------------------------------------------------------------------*/ - -static void enable_async(struct fusbh200_hcd *fusbh200) -{ - if (fusbh200->async_count++) - return; - - /* Stop waiting to turn off the async schedule */ - fusbh200->enabled_hrtimer_events &= ~BIT(FUSBH200_HRTIMER_DISABLE_ASYNC); - - /* Don't start the schedule until ASS is 0 */ - fusbh200_poll_ASS(fusbh200); - turn_on_io_watchdog(fusbh200); -} - -static void disable_async(struct fusbh200_hcd *fusbh200) -{ - if (--fusbh200->async_count) - return; - - /* The async schedule and async_unlink list are supposed to be empty */ - WARN_ON(fusbh200->async->qh_next.qh || fusbh200->async_unlink); - - /* Don't turn off the schedule until ASS is 1 */ - fusbh200_poll_ASS(fusbh200); -} - -/* move qh (and its qtds) onto async queue; maybe enable queue. */ - -static void qh_link_async (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh) -{ - __hc32 dma = QH_NEXT(fusbh200, qh->qh_dma); - struct fusbh200_qh *head; - - /* Don't link a QH if there's a Clear-TT-Buffer pending */ - if (unlikely(qh->clearing_tt)) - return; - - WARN_ON(qh->qh_state != QH_STATE_IDLE); - - /* clear halt and/or toggle; and maybe recover from silicon quirk */ - qh_refresh(fusbh200, qh); - - /* splice right after start */ - head = fusbh200->async; - qh->qh_next = head->qh_next; - qh->hw->hw_next = head->hw->hw_next; - wmb (); - - head->qh_next.qh = qh; - head->hw->hw_next = dma; - - qh->xacterrs = 0; - qh->qh_state = QH_STATE_LINKED; - /* qtd completions reported later by interrupt */ - - enable_async(fusbh200); -} - -/*-------------------------------------------------------------------------*/ - -/* - * For control/bulk/interrupt, return QH with these TDs appended. - * Allocates and initializes the QH if necessary. - * Returns null if it can't allocate a QH it needs to. - * If the QH has TDs (urbs) already, that's great. - */ -static struct fusbh200_qh *qh_append_tds ( - struct fusbh200_hcd *fusbh200, - struct urb *urb, - struct list_head *qtd_list, - int epnum, - void **ptr -) -{ - struct fusbh200_qh *qh = NULL; - __hc32 qh_addr_mask = cpu_to_hc32(fusbh200, 0x7f); - - qh = (struct fusbh200_qh *) *ptr; - if (unlikely (qh == NULL)) { - /* can't sleep here, we have fusbh200->lock... */ - qh = qh_make (fusbh200, urb, GFP_ATOMIC); - *ptr = qh; - } - if (likely (qh != NULL)) { - struct fusbh200_qtd *qtd; - - if (unlikely (list_empty (qtd_list))) - qtd = NULL; - else - qtd = list_entry (qtd_list->next, struct fusbh200_qtd, - qtd_list); - - /* control qh may need patching ... */ - if (unlikely (epnum == 0)) { - - /* usb_reset_device() briefly reverts to address 0 */ - if (usb_pipedevice (urb->pipe) == 0) - qh->hw->hw_info1 &= ~qh_addr_mask; - } - - /* just one way to queue requests: swap with the dummy qtd. 
- * only hc or qh_refresh() ever modify the overlay. - */ - if (likely (qtd != NULL)) { - struct fusbh200_qtd *dummy; - dma_addr_t dma; - __hc32 token; - - /* to avoid racing the HC, use the dummy td instead of - * the first td of our list (becomes new dummy). both - * tds stay deactivated until we're done, when the - * HC is allowed to fetch the old dummy (4.10.2). - */ - token = qtd->hw_token; - qtd->hw_token = HALT_BIT(fusbh200); - - dummy = qh->dummy; - - dma = dummy->qtd_dma; - *dummy = *qtd; - dummy->qtd_dma = dma; - - list_del (&qtd->qtd_list); - list_add (&dummy->qtd_list, qtd_list); - list_splice_tail(qtd_list, &qh->qtd_list); - - fusbh200_qtd_init(fusbh200, qtd, qtd->qtd_dma); - qh->dummy = qtd; - - /* hc must see the new dummy at list end */ - dma = qtd->qtd_dma; - qtd = list_entry (qh->qtd_list.prev, - struct fusbh200_qtd, qtd_list); - qtd->hw_next = QTD_NEXT(fusbh200, dma); - - /* let the hc process these next qtds */ - wmb (); - dummy->hw_token = token; - - urb->hcpriv = qh; - } - } - return qh; -} - -/*-------------------------------------------------------------------------*/ - -static int -submit_async ( - struct fusbh200_hcd *fusbh200, - struct urb *urb, - struct list_head *qtd_list, - gfp_t mem_flags -) { - int epnum; - unsigned long flags; - struct fusbh200_qh *qh = NULL; - int rc; - - epnum = urb->ep->desc.bEndpointAddress; - -#ifdef FUSBH200_URB_TRACE - { - struct fusbh200_qtd *qtd; - qtd = list_entry(qtd_list->next, struct fusbh200_qtd, qtd_list); - fusbh200_dbg(fusbh200, - "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n", - __func__, urb->dev->devpath, urb, - epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out", - urb->transfer_buffer_length, - qtd, urb->ep->hcpriv); - } -#endif - - spin_lock_irqsave (&fusbh200->lock, flags); - if (unlikely(!HCD_HW_ACCESSIBLE(fusbh200_to_hcd(fusbh200)))) { - rc = -ESHUTDOWN; - goto done; - } - rc = usb_hcd_link_urb_to_ep(fusbh200_to_hcd(fusbh200), urb); - if (unlikely(rc)) - goto done; - - qh = qh_append_tds(fusbh200, urb, qtd_list, epnum, &urb->ep->hcpriv); - if (unlikely(qh == NULL)) { - usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb); - rc = -ENOMEM; - goto done; - } - - /* Control/bulk operations through TTs don't need scheduling, - * the HC and TT handle it when the TT has a buffer ready. - */ - if (likely (qh->qh_state == QH_STATE_IDLE)) - qh_link_async(fusbh200, qh); - done: - spin_unlock_irqrestore (&fusbh200->lock, flags); - if (unlikely (qh == NULL)) - qtd_list_free (fusbh200, urb, qtd_list); - return rc; -} - -/*-------------------------------------------------------------------------*/ - -static void single_unlink_async(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh) -{ - struct fusbh200_qh *prev; - - /* Add to the end of the list of QHs waiting for the next IAAD */ - qh->qh_state = QH_STATE_UNLINK; - if (fusbh200->async_unlink) - fusbh200->async_unlink_last->unlink_next = qh; - else - fusbh200->async_unlink = qh; - fusbh200->async_unlink_last = qh; - - /* Unlink it from the schedule */ - prev = fusbh200->async; - while (prev->qh_next.qh != qh) - prev = prev->qh_next.qh; - - prev->hw->hw_next = qh->hw->hw_next; - prev->qh_next = qh->qh_next; - if (fusbh200->qh_scan_next == qh) - fusbh200->qh_scan_next = qh->qh_next.qh; -} - -static void start_iaa_cycle(struct fusbh200_hcd *fusbh200, bool nested) -{ - /* - * Do nothing if an IAA cycle is already running or - * if one will be started shortly. 
- */ - if (fusbh200->async_iaa || fusbh200->async_unlinking) - return; - - /* Do all the waiting QHs at once */ - fusbh200->async_iaa = fusbh200->async_unlink; - fusbh200->async_unlink = NULL; - - /* If the controller isn't running, we don't have to wait for it */ - if (unlikely(fusbh200->rh_state < FUSBH200_RH_RUNNING)) { - if (!nested) /* Avoid recursion */ - end_unlink_async(fusbh200); - - /* Otherwise start a new IAA cycle */ - } else if (likely(fusbh200->rh_state == FUSBH200_RH_RUNNING)) { - /* Make sure the unlinks are all visible to the hardware */ - wmb(); - - fusbh200_writel(fusbh200, fusbh200->command | CMD_IAAD, - &fusbh200->regs->command); - fusbh200_readl(fusbh200, &fusbh200->regs->command); - fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_IAA_WATCHDOG, true); - } -} - -/* the async qh for the qtds being unlinked are now gone from the HC */ - -static void end_unlink_async(struct fusbh200_hcd *fusbh200) -{ - struct fusbh200_qh *qh; - - /* Process the idle QHs */ - restart: - fusbh200->async_unlinking = true; - while (fusbh200->async_iaa) { - qh = fusbh200->async_iaa; - fusbh200->async_iaa = qh->unlink_next; - qh->unlink_next = NULL; - - qh->qh_state = QH_STATE_IDLE; - qh->qh_next.qh = NULL; - - qh_completions(fusbh200, qh); - if (!list_empty(&qh->qtd_list) && - fusbh200->rh_state == FUSBH200_RH_RUNNING) - qh_link_async(fusbh200, qh); - disable_async(fusbh200); - } - fusbh200->async_unlinking = false; - - /* Start a new IAA cycle if any QHs are waiting for it */ - if (fusbh200->async_unlink) { - start_iaa_cycle(fusbh200, true); - if (unlikely(fusbh200->rh_state < FUSBH200_RH_RUNNING)) - goto restart; - } -} - -static void unlink_empty_async(struct fusbh200_hcd *fusbh200) -{ - struct fusbh200_qh *qh, *next; - bool stopped = (fusbh200->rh_state < FUSBH200_RH_RUNNING); - bool check_unlinks_later = false; - - /* Unlink all the async QHs that have been empty for a timer cycle */ - next = fusbh200->async->qh_next.qh; - while (next) { - qh = next; - next = qh->qh_next.qh; - - if (list_empty(&qh->qtd_list) && - qh->qh_state == QH_STATE_LINKED) { - if (!stopped && qh->unlink_cycle == - fusbh200->async_unlink_cycle) - check_unlinks_later = true; - else - single_unlink_async(fusbh200, qh); - } - } - - /* Start a new IAA cycle if any QHs are waiting for it */ - if (fusbh200->async_unlink) - start_iaa_cycle(fusbh200, false); - - /* QHs that haven't been empty for long enough will be handled later */ - if (check_unlinks_later) { - fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_ASYNC_UNLINKS, true); - ++fusbh200->async_unlink_cycle; - } -} - -/* makes sure the async qh will become idle */ -/* caller must own fusbh200->lock */ - -static void start_unlink_async(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh) -{ - /* - * If the QH isn't linked then there's nothing we can do - * unless we were called during a giveback, in which case - * qh_completions() has to deal with it. 
- */ - if (qh->qh_state != QH_STATE_LINKED) { - if (qh->qh_state == QH_STATE_COMPLETING) - qh->needs_rescan = 1; - return; - } - - single_unlink_async(fusbh200, qh); - start_iaa_cycle(fusbh200, false); -} - -/*-------------------------------------------------------------------------*/ - -static void scan_async (struct fusbh200_hcd *fusbh200) -{ - struct fusbh200_qh *qh; - bool check_unlinks_later = false; - - fusbh200->qh_scan_next = fusbh200->async->qh_next.qh; - while (fusbh200->qh_scan_next) { - qh = fusbh200->qh_scan_next; - fusbh200->qh_scan_next = qh->qh_next.qh; - rescan: - /* clean any finished work for this qh */ - if (!list_empty(&qh->qtd_list)) { - int temp; - - /* - * Unlinks could happen here; completion reporting - * drops the lock. That's why fusbh200->qh_scan_next - * always holds the next qh to scan; if the next qh - * gets unlinked then fusbh200->qh_scan_next is adjusted - * in single_unlink_async(). - */ - temp = qh_completions(fusbh200, qh); - if (qh->needs_rescan) { - start_unlink_async(fusbh200, qh); - } else if (list_empty(&qh->qtd_list) - && qh->qh_state == QH_STATE_LINKED) { - qh->unlink_cycle = fusbh200->async_unlink_cycle; - check_unlinks_later = true; - } else if (temp != 0) - goto rescan; - } - } - - /* - * Unlink empty entries, reducing DMA usage as well - * as HCD schedule-scanning costs. Delay for any qh - * we just scanned, there's a not-unusual case that it - * doesn't stay idle for long. - */ - if (check_unlinks_later && fusbh200->rh_state == FUSBH200_RH_RUNNING && - !(fusbh200->enabled_hrtimer_events & - BIT(FUSBH200_HRTIMER_ASYNC_UNLINKS))) { - fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_ASYNC_UNLINKS, true); - ++fusbh200->async_unlink_cycle; - } -} -/*-------------------------------------------------------------------------*/ -/* - * EHCI scheduled transaction support: interrupt, iso, split iso - * These are called "periodic" transactions in the EHCI spec. - * - * Note that for interrupt transfers, the QH/QTD manipulation is shared - * with the "asynchronous" transaction support (control/bulk transfers). - * The only real difference is in how interrupt transfers are scheduled. - * - * For ISO, we make an "iso_stream" head to serve the same role as a QH. - * It keeps track of every ITD (or SITD) that's linked, and holds enough - * pre-calculated schedule data to make appending to the queue be quick. 
- */ - -static int fusbh200_get_frame (struct usb_hcd *hcd); - -/*-------------------------------------------------------------------------*/ - -/* - * periodic_next_shadow - return "next" pointer on shadow list - * @periodic: host pointer to qh/itd - * @tag: hardware tag for type of this record - */ -static union fusbh200_shadow * -periodic_next_shadow(struct fusbh200_hcd *fusbh200, union fusbh200_shadow *periodic, - __hc32 tag) -{ - switch (hc32_to_cpu(fusbh200, tag)) { - case Q_TYPE_QH: - return &periodic->qh->qh_next; - case Q_TYPE_FSTN: - return &periodic->fstn->fstn_next; - default: - return &periodic->itd->itd_next; - } -} - -static __hc32 * -shadow_next_periodic(struct fusbh200_hcd *fusbh200, union fusbh200_shadow *periodic, - __hc32 tag) -{ - switch (hc32_to_cpu(fusbh200, tag)) { - /* our fusbh200_shadow.qh is actually software part */ - case Q_TYPE_QH: - return &periodic->qh->hw->hw_next; - /* others are hw parts */ - default: - return periodic->hw_next; - } -} - -/* caller must hold fusbh200->lock */ -static void periodic_unlink (struct fusbh200_hcd *fusbh200, unsigned frame, void *ptr) -{ - union fusbh200_shadow *prev_p = &fusbh200->pshadow[frame]; - __hc32 *hw_p = &fusbh200->periodic[frame]; - union fusbh200_shadow here = *prev_p; - - /* find predecessor of "ptr"; hw and shadow lists are in sync */ - while (here.ptr && here.ptr != ptr) { - prev_p = periodic_next_shadow(fusbh200, prev_p, - Q_NEXT_TYPE(fusbh200, *hw_p)); - hw_p = shadow_next_periodic(fusbh200, &here, - Q_NEXT_TYPE(fusbh200, *hw_p)); - here = *prev_p; - } - /* an interrupt entry (at list end) could have been shared */ - if (!here.ptr) - return; - - /* update shadow and hardware lists ... the old "next" pointers - * from ptr may still be in use, the caller updates them. - */ - *prev_p = *periodic_next_shadow(fusbh200, &here, - Q_NEXT_TYPE(fusbh200, *hw_p)); - - *hw_p = *shadow_next_periodic(fusbh200, &here, - Q_NEXT_TYPE(fusbh200, *hw_p)); -} - -/* how many of the uframe's 125 usecs are allocated? */ -static unsigned short -periodic_usecs (struct fusbh200_hcd *fusbh200, unsigned frame, unsigned uframe) -{ - __hc32 *hw_p = &fusbh200->periodic [frame]; - union fusbh200_shadow *q = &fusbh200->pshadow [frame]; - unsigned usecs = 0; - struct fusbh200_qh_hw *hw; - - while (q->ptr) { - switch (hc32_to_cpu(fusbh200, Q_NEXT_TYPE(fusbh200, *hw_p))) { - case Q_TYPE_QH: - hw = q->qh->hw; - /* is it in the S-mask? */ - if (hw->hw_info2 & cpu_to_hc32(fusbh200, 1 << uframe)) - usecs += q->qh->usecs; - /* ... or C-mask? 
*/ - if (hw->hw_info2 & cpu_to_hc32(fusbh200, - 1 << (8 + uframe))) - usecs += q->qh->c_usecs; - hw_p = &hw->hw_next; - q = &q->qh->qh_next; - break; - // case Q_TYPE_FSTN: - default: - /* for "save place" FSTNs, count the relevant INTR - * bandwidth from the previous frame - */ - if (q->fstn->hw_prev != FUSBH200_LIST_END(fusbh200)) { - fusbh200_dbg (fusbh200, "ignoring FSTN cost ...\n"); - } - hw_p = &q->fstn->hw_next; - q = &q->fstn->fstn_next; - break; - case Q_TYPE_ITD: - if (q->itd->hw_transaction[uframe]) - usecs += q->itd->stream->usecs; - hw_p = &q->itd->hw_next; - q = &q->itd->itd_next; - break; - } - } - if (usecs > fusbh200->uframe_periodic_max) - fusbh200_err (fusbh200, "uframe %d sched overrun: %d usecs\n", - frame * 8 + uframe, usecs); - return usecs; -} - -/*-------------------------------------------------------------------------*/ - -static int same_tt (struct usb_device *dev1, struct usb_device *dev2) -{ - if (!dev1->tt || !dev2->tt) - return 0; - if (dev1->tt != dev2->tt) - return 0; - if (dev1->tt->multi) - return dev1->ttport == dev2->ttport; - else - return 1; -} - -/* return true iff the device's transaction translator is available - * for a periodic transfer starting at the specified frame, using - * all the uframes in the mask. - */ -static int tt_no_collision ( - struct fusbh200_hcd *fusbh200, - unsigned period, - struct usb_device *dev, - unsigned frame, - u32 uf_mask -) -{ - if (period == 0) /* error */ - return 0; - - /* note bandwidth wastage: split never follows csplit - * (different dev or endpoint) until the next uframe. - * calling convention doesn't make that distinction. - */ - for (; frame < fusbh200->periodic_size; frame += period) { - union fusbh200_shadow here; - __hc32 type; - struct fusbh200_qh_hw *hw; - - here = fusbh200->pshadow [frame]; - type = Q_NEXT_TYPE(fusbh200, fusbh200->periodic [frame]); - while (here.ptr) { - switch (hc32_to_cpu(fusbh200, type)) { - case Q_TYPE_ITD: - type = Q_NEXT_TYPE(fusbh200, here.itd->hw_next); - here = here.itd->itd_next; - continue; - case Q_TYPE_QH: - hw = here.qh->hw; - if (same_tt (dev, here.qh->dev)) { - u32 mask; - - mask = hc32_to_cpu(fusbh200, - hw->hw_info2); - /* "knows" no gap is needed */ - mask |= mask >> 8; - if (mask & uf_mask) - break; - } - type = Q_NEXT_TYPE(fusbh200, hw->hw_next); - here = here.qh->qh_next; - continue; - // case Q_TYPE_FSTN: - default: - fusbh200_dbg (fusbh200, - "periodic frame %d bogus type %d\n", - frame, type); - } - - /* collision or error */ - return 0; - } - } - - /* no collision */ - return 1; -} - -/*-------------------------------------------------------------------------*/ - -static void enable_periodic(struct fusbh200_hcd *fusbh200) -{ - if (fusbh200->periodic_count++) - return; - - /* Stop waiting to turn off the periodic schedule */ - fusbh200->enabled_hrtimer_events &= ~BIT(FUSBH200_HRTIMER_DISABLE_PERIODIC); - - /* Don't start the schedule until PSS is 0 */ - fusbh200_poll_PSS(fusbh200); - turn_on_io_watchdog(fusbh200); -} - -static void disable_periodic(struct fusbh200_hcd *fusbh200) -{ - if (--fusbh200->periodic_count) - return; - - /* Don't turn off the schedule until PSS is 1 */ - fusbh200_poll_PSS(fusbh200); -} - -/*-------------------------------------------------------------------------*/ - -/* periodic schedule slots have iso tds (normal or split) first, then a - * sparse tree for active interrupt transfers. - * - * this just links in a qh; caller guarantees uframe masks are set right. 
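periodic_usecs() above adds up the microseconds already claimed in one microframe; check_period(), further down, admits a new transfer only while every affected microframe stays within uframe_periodic_max (100 of the 125 microseconds by default). A minimal sketch of that admission test, with the already-claimed figure passed in directly instead of walked from a shadow schedule:

#include <stdio.h>

#define UFRAME_US		125	/* a microframe is 125 us */
#define UFRAME_PERIODIC_MAX	100	/* default budget: 80% of 125 us */

/* same test as check_period(): reject once claimed exceeds budget minus need */
static int fits(int claimed, int need)
{
	return claimed <= UFRAME_PERIODIC_MAX - need;
}

int main(void)
{
	printf("claimed 60 us, need 30 us: %s\n", fits(60, 30) ? "fits" : "overrun");
	printf("claimed 90 us, need 30 us: %s\n", fits(90, 30) ? "fits" : "overrun");
	printf("default budget is %d%% of a %d us microframe\n",
	       100 * UFRAME_PERIODIC_MAX / UFRAME_US, UFRAME_US);
	return 0;
}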
- * no FSTN support (yet; fusbh200 0.96+) - */ -static void qh_link_periodic(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh) -{ - unsigned i; - unsigned period = qh->period; - - dev_dbg (&qh->dev->dev, - "link qh%d-%04x/%p start %d [%d/%d us]\n", - period, hc32_to_cpup(fusbh200, &qh->hw->hw_info2) - & (QH_CMASK | QH_SMASK), - qh, qh->start, qh->usecs, qh->c_usecs); - - /* high bandwidth, or otherwise every microframe */ - if (period == 0) - period = 1; - - for (i = qh->start; i < fusbh200->periodic_size; i += period) { - union fusbh200_shadow *prev = &fusbh200->pshadow[i]; - __hc32 *hw_p = &fusbh200->periodic[i]; - union fusbh200_shadow here = *prev; - __hc32 type = 0; - - /* skip the iso nodes at list head */ - while (here.ptr) { - type = Q_NEXT_TYPE(fusbh200, *hw_p); - if (type == cpu_to_hc32(fusbh200, Q_TYPE_QH)) - break; - prev = periodic_next_shadow(fusbh200, prev, type); - hw_p = shadow_next_periodic(fusbh200, &here, type); - here = *prev; - } - - /* sorting each branch by period (slow-->fast) - * enables sharing interior tree nodes - */ - while (here.ptr && qh != here.qh) { - if (qh->period > here.qh->period) - break; - prev = &here.qh->qh_next; - hw_p = &here.qh->hw->hw_next; - here = *prev; - } - /* link in this qh, unless some earlier pass did that */ - if (qh != here.qh) { - qh->qh_next = here; - if (here.qh) - qh->hw->hw_next = *hw_p; - wmb (); - prev->qh = qh; - *hw_p = QH_NEXT (fusbh200, qh->qh_dma); - } - } - qh->qh_state = QH_STATE_LINKED; - qh->xacterrs = 0; - - /* update per-qh bandwidth for usbfs */ - fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated += qh->period - ? ((qh->usecs + qh->c_usecs) / qh->period) - : (qh->usecs * 8); - - list_add(&qh->intr_node, &fusbh200->intr_qh_list); - - /* maybe enable periodic schedule processing */ - ++fusbh200->intr_count; - enable_periodic(fusbh200); -} - -static void qh_unlink_periodic(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh) -{ - unsigned i; - unsigned period; - - /* - * If qh is for a low/full-speed device, simply unlinking it - * could interfere with an ongoing split transaction. To unlink - * it safely would require setting the QH_INACTIVATE bit and - * waiting at least one frame, as described in EHCI 4.12.2.5. - * - * We won't bother with any of this. Instead, we assume that the - * only reason for unlinking an interrupt QH while the current URB - * is still active is to dequeue all the URBs (flush the whole - * endpoint queue). - * - * If rebalancing the periodic schedule is ever implemented, this - * approach will no longer be valid. - */ - - /* high bandwidth, or otherwise part of every microframe */ - if ((period = qh->period) == 0) - period = 1; - - for (i = qh->start; i < fusbh200->periodic_size; i += period) - periodic_unlink (fusbh200, i, qh); - - /* update per-qh bandwidth for usbfs */ - fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated -= qh->period - ? 
((qh->usecs + qh->c_usecs) / qh->period) - : (qh->usecs * 8); - - dev_dbg (&qh->dev->dev, - "unlink qh%d-%04x/%p start %d [%d/%d us]\n", - qh->period, - hc32_to_cpup(fusbh200, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK), - qh, qh->start, qh->usecs, qh->c_usecs); - - /* qh->qh_next still "live" to HC */ - qh->qh_state = QH_STATE_UNLINK; - qh->qh_next.ptr = NULL; - - if (fusbh200->qh_scan_next == qh) - fusbh200->qh_scan_next = list_entry(qh->intr_node.next, - struct fusbh200_qh, intr_node); - list_del(&qh->intr_node); -} - -static void start_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh) -{ - /* If the QH isn't linked then there's nothing we can do - * unless we were called during a giveback, in which case - * qh_completions() has to deal with it. - */ - if (qh->qh_state != QH_STATE_LINKED) { - if (qh->qh_state == QH_STATE_COMPLETING) - qh->needs_rescan = 1; - return; - } - - qh_unlink_periodic (fusbh200, qh); - - /* Make sure the unlinks are visible before starting the timer */ - wmb(); - - /* - * The EHCI spec doesn't say how long it takes the controller to - * stop accessing an unlinked interrupt QH. The timer delay is - * 9 uframes; presumably that will be long enough. - */ - qh->unlink_cycle = fusbh200->intr_unlink_cycle; - - /* New entries go at the end of the intr_unlink list */ - if (fusbh200->intr_unlink) - fusbh200->intr_unlink_last->unlink_next = qh; - else - fusbh200->intr_unlink = qh; - fusbh200->intr_unlink_last = qh; - - if (fusbh200->intr_unlinking) - ; /* Avoid recursive calls */ - else if (fusbh200->rh_state < FUSBH200_RH_RUNNING) - fusbh200_handle_intr_unlinks(fusbh200); - else if (fusbh200->intr_unlink == qh) { - fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_UNLINK_INTR, true); - ++fusbh200->intr_unlink_cycle; - } -} - -static void end_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh) -{ - struct fusbh200_qh_hw *hw = qh->hw; - int rc; - - qh->qh_state = QH_STATE_IDLE; - hw->hw_next = FUSBH200_LIST_END(fusbh200); - - qh_completions(fusbh200, qh); - - /* reschedule QH iff another request is queued */ - if (!list_empty(&qh->qtd_list) && fusbh200->rh_state == FUSBH200_RH_RUNNING) { - rc = qh_schedule(fusbh200, qh); - - /* An error here likely indicates handshake failure - * or no space left in the schedule. Neither fault - * should happen often ... - * - * FIXME kill the now-dysfunctional queued urbs - */ - if (rc != 0) - fusbh200_err(fusbh200, "can't reschedule qh %p, err %d\n", - qh, rc); - } - - /* maybe turn off periodic schedule */ - --fusbh200->intr_count; - disable_periodic(fusbh200); -} - -/*-------------------------------------------------------------------------*/ - -static int check_period ( - struct fusbh200_hcd *fusbh200, - unsigned frame, - unsigned uframe, - unsigned period, - unsigned usecs -) { - int claimed; - - /* complete split running into next frame? - * given FSTN support, we could sometimes check... - */ - if (uframe >= 8) - return 0; - - /* convert "usecs we need" to "max already claimed" */ - usecs = fusbh200->uframe_periodic_max - usecs; - - /* we "know" 2 and 4 uframe intervals were rejected; so - * for period 0, check _every_ microframe in the schedule. 
- */ - if (unlikely (period == 0)) { - do { - for (uframe = 0; uframe < 7; uframe++) { - claimed = periodic_usecs (fusbh200, frame, uframe); - if (claimed > usecs) - return 0; - } - } while ((frame += 1) < fusbh200->periodic_size); - - /* just check the specified uframe, at that period */ - } else { - do { - claimed = periodic_usecs (fusbh200, frame, uframe); - if (claimed > usecs) - return 0; - } while ((frame += period) < fusbh200->periodic_size); - } - - // success! - return 1; -} - -static int check_intr_schedule ( - struct fusbh200_hcd *fusbh200, - unsigned frame, - unsigned uframe, - const struct fusbh200_qh *qh, - __hc32 *c_maskp -) -{ - int retval = -ENOSPC; - u8 mask = 0; - - if (qh->c_usecs && uframe >= 6) /* FSTN territory? */ - goto done; - - if (!check_period (fusbh200, frame, uframe, qh->period, qh->usecs)) - goto done; - if (!qh->c_usecs) { - retval = 0; - *c_maskp = 0; - goto done; - } - - /* Make sure this tt's buffer is also available for CSPLITs. - * We pessimize a bit; probably the typical full speed case - * doesn't need the second CSPLIT. - * - * NOTE: both SPLIT and CSPLIT could be checked in just - * one smart pass... - */ - mask = 0x03 << (uframe + qh->gap_uf); - *c_maskp = cpu_to_hc32(fusbh200, mask << 8); - - mask |= 1 << uframe; - if (tt_no_collision (fusbh200, qh->period, qh->dev, frame, mask)) { - if (!check_period (fusbh200, frame, uframe + qh->gap_uf + 1, - qh->period, qh->c_usecs)) - goto done; - if (!check_period (fusbh200, frame, uframe + qh->gap_uf, - qh->period, qh->c_usecs)) - goto done; - retval = 0; - } -done: - return retval; -} - -/* "first fit" scheduling policy used the first time through, - * or when the previous schedule slot can't be re-used. - */ -static int qh_schedule(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh) -{ - int status; - unsigned uframe; - __hc32 c_mask; - unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */ - struct fusbh200_qh_hw *hw = qh->hw; - - qh_refresh(fusbh200, qh); - hw->hw_next = FUSBH200_LIST_END(fusbh200); - frame = qh->start; - - /* reuse the previous schedule slots, if we can */ - if (frame < qh->period) { - uframe = ffs(hc32_to_cpup(fusbh200, &hw->hw_info2) & QH_SMASK); - status = check_intr_schedule (fusbh200, frame, --uframe, - qh, &c_mask); - } else { - uframe = 0; - c_mask = 0; - status = -ENOSPC; - } - - /* else scan the schedule to find a group of slots such that all - * uframes have enough periodic bandwidth available. - */ - if (status) { - /* "normal" case, uframing flexible except with splits */ - if (qh->period) { - int i; - - for (i = qh->period; status && i > 0; --i) { - frame = ++fusbh200->random_frame % qh->period; - for (uframe = 0; uframe < 8; uframe++) { - status = check_intr_schedule (fusbh200, - frame, uframe, qh, - &c_mask); - if (status == 0) - break; - } - } - - /* qh->period == 0 means every uframe */ - } else { - frame = 0; - status = check_intr_schedule (fusbh200, 0, 0, qh, &c_mask); - } - if (status) - goto done; - qh->start = frame; - - /* reset S-frame and (maybe) C-frame masks */ - hw->hw_info2 &= cpu_to_hc32(fusbh200, ~(QH_CMASK | QH_SMASK)); - hw->hw_info2 |= qh->period - ? 
cpu_to_hc32(fusbh200, 1 << uframe) - : cpu_to_hc32(fusbh200, QH_SMASK); - hw->hw_info2 |= c_mask; - } else - fusbh200_dbg (fusbh200, "reused qh %p schedule\n", qh); - - /* stuff into the periodic schedule */ - qh_link_periodic(fusbh200, qh); -done: - return status; -} - -static int intr_submit ( - struct fusbh200_hcd *fusbh200, - struct urb *urb, - struct list_head *qtd_list, - gfp_t mem_flags -) { - unsigned epnum; - unsigned long flags; - struct fusbh200_qh *qh; - int status; - struct list_head empty; - - /* get endpoint and transfer/schedule data */ - epnum = urb->ep->desc.bEndpointAddress; - - spin_lock_irqsave (&fusbh200->lock, flags); - - if (unlikely(!HCD_HW_ACCESSIBLE(fusbh200_to_hcd(fusbh200)))) { - status = -ESHUTDOWN; - goto done_not_linked; - } - status = usb_hcd_link_urb_to_ep(fusbh200_to_hcd(fusbh200), urb); - if (unlikely(status)) - goto done_not_linked; - - /* get qh and force any scheduling errors */ - INIT_LIST_HEAD (&empty); - qh = qh_append_tds(fusbh200, urb, &empty, epnum, &urb->ep->hcpriv); - if (qh == NULL) { - status = -ENOMEM; - goto done; - } - if (qh->qh_state == QH_STATE_IDLE) { - if ((status = qh_schedule (fusbh200, qh)) != 0) - goto done; - } - - /* then queue the urb's tds to the qh */ - qh = qh_append_tds(fusbh200, urb, qtd_list, epnum, &urb->ep->hcpriv); - BUG_ON (qh == NULL); - - /* ... update usbfs periodic stats */ - fusbh200_to_hcd(fusbh200)->self.bandwidth_int_reqs++; - -done: - if (unlikely(status)) - usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb); -done_not_linked: - spin_unlock_irqrestore (&fusbh200->lock, flags); - if (status) - qtd_list_free (fusbh200, urb, qtd_list); - - return status; -} - -static void scan_intr(struct fusbh200_hcd *fusbh200) -{ - struct fusbh200_qh *qh; - - list_for_each_entry_safe(qh, fusbh200->qh_scan_next, &fusbh200->intr_qh_list, - intr_node) { - rescan: - /* clean any finished work for this qh */ - if (!list_empty(&qh->qtd_list)) { - int temp; - - /* - * Unlinks could happen here; completion reporting - * drops the lock. That's why fusbh200->qh_scan_next - * always holds the next qh to scan; if the next qh - * gets unlinked then fusbh200->qh_scan_next is adjusted - * in qh_unlink_periodic(). - */ - temp = qh_completions(fusbh200, qh); - if (unlikely(qh->needs_rescan || - (list_empty(&qh->qtd_list) && - qh->qh_state == QH_STATE_LINKED))) - start_unlink_intr(fusbh200, qh); - else if (temp != 0) - goto rescan; - } - } -} - -/*-------------------------------------------------------------------------*/ - -/* fusbh200_iso_stream ops work with both ITD and SITD */ - -static struct fusbh200_iso_stream * -iso_stream_alloc (gfp_t mem_flags) -{ - struct fusbh200_iso_stream *stream; - - stream = kzalloc(sizeof *stream, mem_flags); - if (likely (stream != NULL)) { - INIT_LIST_HEAD(&stream->td_list); - INIT_LIST_HEAD(&stream->free_list); - stream->next_uframe = -1; - } - return stream; -} - -static void -iso_stream_init ( - struct fusbh200_hcd *fusbh200, - struct fusbh200_iso_stream *stream, - struct usb_device *dev, - int pipe, - unsigned interval -) -{ - u32 buf1; - unsigned epnum, maxp; - int is_input; - long bandwidth; - unsigned multi; - - /* - * this might be a "high bandwidth" highspeed endpoint, - * as encoded in the ep descriptor's wMaxPacket field - */ - epnum = usb_pipeendpoint (pipe); - is_input = usb_pipein (pipe) ? 
USB_DIR_IN : 0; - maxp = usb_maxpacket(dev, pipe, !is_input); - if (is_input) { - buf1 = (1 << 11); - } else { - buf1 = 0; - } - - maxp = max_packet(maxp); - multi = hb_mult(maxp); - buf1 |= maxp; - maxp *= multi; - - stream->buf0 = cpu_to_hc32(fusbh200, (epnum << 8) | dev->devnum); - stream->buf1 = cpu_to_hc32(fusbh200, buf1); - stream->buf2 = cpu_to_hc32(fusbh200, multi); - - /* usbfs wants to report the average usecs per frame tied up - * when transfers on this endpoint are scheduled ... - */ - if (dev->speed == USB_SPEED_FULL) { - interval <<= 3; - stream->usecs = NS_TO_US(usb_calc_bus_time(dev->speed, - is_input, 1, maxp)); - stream->usecs /= 8; - } else { - stream->highspeed = 1; - stream->usecs = HS_USECS_ISO (maxp); - } - bandwidth = stream->usecs * 8; - bandwidth /= interval; - - stream->bandwidth = bandwidth; - stream->udev = dev; - stream->bEndpointAddress = is_input | epnum; - stream->interval = interval; - stream->maxp = maxp; -} - -static struct fusbh200_iso_stream * -iso_stream_find (struct fusbh200_hcd *fusbh200, struct urb *urb) -{ - unsigned epnum; - struct fusbh200_iso_stream *stream; - struct usb_host_endpoint *ep; - unsigned long flags; - - epnum = usb_pipeendpoint (urb->pipe); - if (usb_pipein(urb->pipe)) - ep = urb->dev->ep_in[epnum]; - else - ep = urb->dev->ep_out[epnum]; - - spin_lock_irqsave (&fusbh200->lock, flags); - stream = ep->hcpriv; - - if (unlikely (stream == NULL)) { - stream = iso_stream_alloc(GFP_ATOMIC); - if (likely (stream != NULL)) { - ep->hcpriv = stream; - stream->ep = ep; - iso_stream_init(fusbh200, stream, urb->dev, urb->pipe, - urb->interval); - } - - /* if dev->ep [epnum] is a QH, hw is set */ - } else if (unlikely (stream->hw != NULL)) { - fusbh200_dbg (fusbh200, "dev %s ep%d%s, not iso??\n", - urb->dev->devpath, epnum, - usb_pipein(urb->pipe) ? "in" : "out"); - stream = NULL; - } - - spin_unlock_irqrestore (&fusbh200->lock, flags); - return stream; -} - -/*-------------------------------------------------------------------------*/ - -/* fusbh200_iso_sched ops can be ITD-only or SITD-only */ - -static struct fusbh200_iso_sched * -iso_sched_alloc (unsigned packets, gfp_t mem_flags) -{ - struct fusbh200_iso_sched *iso_sched; - int size = sizeof *iso_sched; - - size += packets * sizeof (struct fusbh200_iso_packet); - iso_sched = kzalloc(size, mem_flags); - if (likely (iso_sched != NULL)) { - INIT_LIST_HEAD (&iso_sched->td_list); - } - return iso_sched; -} - -static inline void -itd_sched_init( - struct fusbh200_hcd *fusbh200, - struct fusbh200_iso_sched *iso_sched, - struct fusbh200_iso_stream *stream, - struct urb *urb -) -{ - unsigned i; - dma_addr_t dma = urb->transfer_dma; - - /* how many uframes are needed for these transfers */ - iso_sched->span = urb->number_of_packets * stream->interval; - - /* figure out per-uframe itd fields that we'll need later - * when we fit new itds into the schedule. 
- */ - for (i = 0; i < urb->number_of_packets; i++) { - struct fusbh200_iso_packet *uframe = &iso_sched->packet [i]; - unsigned length; - dma_addr_t buf; - u32 trans; - - length = urb->iso_frame_desc [i].length; - buf = dma + urb->iso_frame_desc [i].offset; - - trans = FUSBH200_ISOC_ACTIVE; - trans |= buf & 0x0fff; - if (unlikely (((i + 1) == urb->number_of_packets)) - && !(urb->transfer_flags & URB_NO_INTERRUPT)) - trans |= FUSBH200_ITD_IOC; - trans |= length << 16; - uframe->transaction = cpu_to_hc32(fusbh200, trans); - - /* might need to cross a buffer page within a uframe */ - uframe->bufp = (buf & ~(u64)0x0fff); - buf += length; - if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff)))) - uframe->cross = 1; - } -} - -static void -iso_sched_free ( - struct fusbh200_iso_stream *stream, - struct fusbh200_iso_sched *iso_sched -) -{ - if (!iso_sched) - return; - // caller must hold fusbh200->lock! - list_splice (&iso_sched->td_list, &stream->free_list); - kfree (iso_sched); -} - -static int -itd_urb_transaction ( - struct fusbh200_iso_stream *stream, - struct fusbh200_hcd *fusbh200, - struct urb *urb, - gfp_t mem_flags -) -{ - struct fusbh200_itd *itd; - dma_addr_t itd_dma; - int i; - unsigned num_itds; - struct fusbh200_iso_sched *sched; - unsigned long flags; - - sched = iso_sched_alloc (urb->number_of_packets, mem_flags); - if (unlikely (sched == NULL)) - return -ENOMEM; - - itd_sched_init(fusbh200, sched, stream, urb); - - if (urb->interval < 8) - num_itds = 1 + (sched->span + 7) / 8; - else - num_itds = urb->number_of_packets; - - /* allocate/init ITDs */ - spin_lock_irqsave (&fusbh200->lock, flags); - for (i = 0; i < num_itds; i++) { - - /* - * Use iTDs from the free list, but not iTDs that may - * still be in use by the hardware. - */ - if (likely(!list_empty(&stream->free_list))) { - itd = list_first_entry(&stream->free_list, - struct fusbh200_itd, itd_list); - if (itd->frame == fusbh200->now_frame) - goto alloc_itd; - list_del (&itd->itd_list); - itd_dma = itd->itd_dma; - } else { - alloc_itd: - spin_unlock_irqrestore (&fusbh200->lock, flags); - itd = dma_pool_alloc (fusbh200->itd_pool, mem_flags, - &itd_dma); - spin_lock_irqsave (&fusbh200->lock, flags); - if (!itd) { - iso_sched_free(stream, sched); - spin_unlock_irqrestore(&fusbh200->lock, flags); - return -ENOMEM; - } - } - - memset (itd, 0, sizeof *itd); - itd->itd_dma = itd_dma; - list_add (&itd->itd_list, &sched->td_list); - } - spin_unlock_irqrestore (&fusbh200->lock, flags); - - /* temporarily store schedule info in hcpriv */ - urb->hcpriv = sched; - urb->error_count = 0; - return 0; -} - -/*-------------------------------------------------------------------------*/ - -static inline int -itd_slot_ok ( - struct fusbh200_hcd *fusbh200, - u32 mod, - u32 uframe, - u8 usecs, - u32 period -) -{ - uframe %= period; - do { - /* can't commit more than uframe_periodic_max usec */ - if (periodic_usecs (fusbh200, uframe >> 3, uframe & 0x7) - > (fusbh200->uframe_periodic_max - usecs)) - return 0; - - /* we know urb->interval is 2^N uframes */ - uframe += period; - } while (uframe < mod); - return 1; -} - -/* - * This scheduler plans almost as far into the future as it has actual - * periodic schedule slots. (Affected by TUNE_FLS, which defaults to - * "as small as possible" to be cache-friendlier.) That limits the size - * transfers you can stream reliably; avoid more than 64 msec per urb. 
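iso_stream_schedule(), which follows, works in microframes modulo the schedule length (periodic_size << 3) and refuses URBs whose span, computed in itd_sched_init() above, would nearly cover the whole schedule. A small sketch of that bookkeeping, assuming the default 1024-frame schedule and the same 80-microframe slop; the candidate start value is made up for illustration:

#include <stdio.h>

#define PERIODIC_SIZE	1024			/* frames (driver default) */
#define MOD		(PERIODIC_SIZE << 3)	/* schedule length in uframes */
#define SCHEDULE_SLOP	80			/* uframes of slack */

int main(void)
{
	unsigned interval = 8;			/* uframes between packets */
	unsigned packets = 64;
	unsigned span = packets * interval;	/* uframes the URB will occupy */
	unsigned start = 8200;			/* a candidate start uframe */

	if (span > MOD - SCHEDULE_SLOP) {
		printf("iso request too long for the schedule\n");
		return 1;
	}
	/* periodic_size is a power of two, so wrapping is a simple mask */
	printf("span %u uframes; start %u wraps to uframe %u of %u\n",
	       span, start, start & (MOD - 1), MOD);
	return 0;
}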
- * Also avoid queue depths of less than fusbh200's worst irq latency (affected - * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter, - * and other factors); or more than about 230 msec total (for portability, - * given FUSBH200_TUNE_FLS and the slop). Or, write a smarter scheduler! - */ - -#define SCHEDULE_SLOP 80 /* microframes */ - -static int -iso_stream_schedule ( - struct fusbh200_hcd *fusbh200, - struct urb *urb, - struct fusbh200_iso_stream *stream -) -{ - u32 now, next, start, period, span; - int status; - unsigned mod = fusbh200->periodic_size << 3; - struct fusbh200_iso_sched *sched = urb->hcpriv; - - period = urb->interval; - span = sched->span; - - if (span > mod - SCHEDULE_SLOP) { - fusbh200_dbg (fusbh200, "iso request %p too long\n", urb); - status = -EFBIG; - goto fail; - } - - now = fusbh200_read_frame_index(fusbh200) & (mod - 1); - - /* Typical case: reuse current schedule, stream is still active. - * Hopefully there are no gaps from the host falling behind - * (irq delays etc), but if there are we'll take the next - * slot in the schedule, implicitly assuming URB_ISO_ASAP. - */ - if (likely (!list_empty (&stream->td_list))) { - u32 excess; - - /* For high speed devices, allow scheduling within the - * isochronous scheduling threshold. For full speed devices - * and Intel PCI-based controllers, don't (work around for - * Intel ICH9 bug). - */ - if (!stream->highspeed && fusbh200->fs_i_thresh) - next = now + fusbh200->i_thresh; - else - next = now; - - /* Fell behind (by up to twice the slop amount)? - * We decide based on the time of the last currently-scheduled - * slot, not the time of the next available slot. - */ - excess = (stream->next_uframe - period - next) & (mod - 1); - if (excess >= mod - 2 * SCHEDULE_SLOP) - start = next + excess - mod + period * - DIV_ROUND_UP(mod - excess, period); - else - start = next + excess + period; - if (start - now >= mod) { - fusbh200_dbg(fusbh200, "request %p would overflow (%d+%d >= %d)\n", - urb, start - now - period, period, - mod); - status = -EFBIG; - goto fail; - } - } - - /* need to schedule; when's the next (u)frame we could start? - * this is bigger than fusbh200->i_thresh allows; scheduling itself - * isn't free, the slop should handle reasonably slow cpus. it - * can also help high bandwidth if the dma and irq loads don't - * jump until after the queue is primed. - */ - else { - int done = 0; - start = SCHEDULE_SLOP + (now & ~0x07); - - /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */ - - /* find a uframe slot with enough bandwidth. - * Early uframes are more precious because full-speed - * iso IN transfers can't use late uframes, - * and therefore they should be allocated last. - */ - next = start; - start += period; - do { - start--; - /* check schedule: enough space? */ - if (itd_slot_ok(fusbh200, mod, start, - stream->usecs, period)) - done = 1; - } while (start > next && !done); - - /* no room in the schedule */ - if (!done) { - fusbh200_dbg(fusbh200, "iso resched full %p (now %d max %d)\n", - urb, now, now + mod); - status = -ENOSPC; - goto fail; - } - } - - /* Tried to schedule too far into the future? 
*/ - if (unlikely(start - now + span - period - >= mod - 2 * SCHEDULE_SLOP)) { - fusbh200_dbg(fusbh200, "request %p would overflow (%d+%d >= %d)\n", - urb, start - now, span - period, - mod - 2 * SCHEDULE_SLOP); - status = -EFBIG; - goto fail; - } - - stream->next_uframe = start & (mod - 1); - - /* report high speed start in uframes; full speed, in frames */ - urb->start_frame = stream->next_uframe; - if (!stream->highspeed) - urb->start_frame >>= 3; - - /* Make sure scan_isoc() sees these */ - if (fusbh200->isoc_count == 0) - fusbh200->next_frame = now >> 3; - return 0; - - fail: - iso_sched_free(stream, sched); - urb->hcpriv = NULL; - return status; -} - -/*-------------------------------------------------------------------------*/ - -static inline void -itd_init(struct fusbh200_hcd *fusbh200, struct fusbh200_iso_stream *stream, - struct fusbh200_itd *itd) -{ - int i; - - /* it's been recently zeroed */ - itd->hw_next = FUSBH200_LIST_END(fusbh200); - itd->hw_bufp [0] = stream->buf0; - itd->hw_bufp [1] = stream->buf1; - itd->hw_bufp [2] = stream->buf2; - - for (i = 0; i < 8; i++) - itd->index[i] = -1; - - /* All other fields are filled when scheduling */ -} - -static inline void -itd_patch( - struct fusbh200_hcd *fusbh200, - struct fusbh200_itd *itd, - struct fusbh200_iso_sched *iso_sched, - unsigned index, - u16 uframe -) -{ - struct fusbh200_iso_packet *uf = &iso_sched->packet [index]; - unsigned pg = itd->pg; - - // BUG_ON (pg == 6 && uf->cross); - - uframe &= 0x07; - itd->index [uframe] = index; - - itd->hw_transaction[uframe] = uf->transaction; - itd->hw_transaction[uframe] |= cpu_to_hc32(fusbh200, pg << 12); - itd->hw_bufp[pg] |= cpu_to_hc32(fusbh200, uf->bufp & ~(u32)0); - itd->hw_bufp_hi[pg] |= cpu_to_hc32(fusbh200, (u32)(uf->bufp >> 32)); - - /* iso_frame_desc[].offset must be strictly increasing */ - if (unlikely (uf->cross)) { - u64 bufp = uf->bufp + 4096; - - itd->pg = ++pg; - itd->hw_bufp[pg] |= cpu_to_hc32(fusbh200, bufp & ~(u32)0); - itd->hw_bufp_hi[pg] |= cpu_to_hc32(fusbh200, (u32)(bufp >> 32)); - } -} - -static inline void -itd_link (struct fusbh200_hcd *fusbh200, unsigned frame, struct fusbh200_itd *itd) -{ - union fusbh200_shadow *prev = &fusbh200->pshadow[frame]; - __hc32 *hw_p = &fusbh200->periodic[frame]; - union fusbh200_shadow here = *prev; - __hc32 type = 0; - - /* skip any iso nodes which might belong to previous microframes */ - while (here.ptr) { - type = Q_NEXT_TYPE(fusbh200, *hw_p); - if (type == cpu_to_hc32(fusbh200, Q_TYPE_QH)) - break; - prev = periodic_next_shadow(fusbh200, prev, type); - hw_p = shadow_next_periodic(fusbh200, &here, type); - here = *prev; - } - - itd->itd_next = here; - itd->hw_next = *hw_p; - prev->itd = itd; - itd->frame = frame; - wmb (); - *hw_p = cpu_to_hc32(fusbh200, itd->itd_dma | Q_TYPE_ITD); -} - -/* fit urb's itds into the selected schedule slot; activate as needed */ -static void itd_link_urb( - struct fusbh200_hcd *fusbh200, - struct urb *urb, - unsigned mod, - struct fusbh200_iso_stream *stream -) -{ - int packet; - unsigned next_uframe, uframe, frame; - struct fusbh200_iso_sched *iso_sched = urb->hcpriv; - struct fusbh200_itd *itd; - - next_uframe = stream->next_uframe & (mod - 1); - - if (unlikely (list_empty(&stream->td_list))) { - fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated - += stream->bandwidth; - fusbh200_dbg(fusbh200, - "schedule devp %s ep%d%s-iso period %d start %d.%d\n", - urb->dev->devpath, stream->bEndpointAddress & 0x0f, - (stream->bEndpointAddress & USB_DIR_IN) ? 
"in" : "out", - urb->interval, - next_uframe >> 3, next_uframe & 0x7); - } - - /* fill iTDs uframe by uframe */ - for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) { - if (itd == NULL) { - /* ASSERT: we have all necessary itds */ - // BUG_ON (list_empty (&iso_sched->td_list)); - - /* ASSERT: no itds for this endpoint in this uframe */ - - itd = list_entry (iso_sched->td_list.next, - struct fusbh200_itd, itd_list); - list_move_tail (&itd->itd_list, &stream->td_list); - itd->stream = stream; - itd->urb = urb; - itd_init (fusbh200, stream, itd); - } - - uframe = next_uframe & 0x07; - frame = next_uframe >> 3; - - itd_patch(fusbh200, itd, iso_sched, packet, uframe); - - next_uframe += stream->interval; - next_uframe &= mod - 1; - packet++; - - /* link completed itds into the schedule */ - if (((next_uframe >> 3) != frame) - || packet == urb->number_of_packets) { - itd_link(fusbh200, frame & (fusbh200->periodic_size - 1), itd); - itd = NULL; - } - } - stream->next_uframe = next_uframe; - - /* don't need that schedule data any more */ - iso_sched_free (stream, iso_sched); - urb->hcpriv = NULL; - - ++fusbh200->isoc_count; - enable_periodic(fusbh200); -} - -#define ISO_ERRS (FUSBH200_ISOC_BUF_ERR | FUSBH200_ISOC_BABBLE | FUSBH200_ISOC_XACTERR) - -/* Process and recycle a completed ITD. Return true iff its urb completed, - * and hence its completion callback probably added things to the hardware - * schedule. - * - * Note that we carefully avoid recycling this descriptor until after any - * completion callback runs, so that it won't be reused quickly. That is, - * assuming (a) no more than two urbs per frame on this endpoint, and also - * (b) only this endpoint's completions submit URBs. It seems some silicon - * corrupts things if you reuse completed descriptors very quickly... - */ -static bool itd_complete(struct fusbh200_hcd *fusbh200, struct fusbh200_itd *itd) -{ - struct urb *urb = itd->urb; - struct usb_iso_packet_descriptor *desc; - u32 t; - unsigned uframe; - int urb_index = -1; - struct fusbh200_iso_stream *stream = itd->stream; - struct usb_device *dev; - bool retval = false; - - /* for each uframe with a packet */ - for (uframe = 0; uframe < 8; uframe++) { - if (likely (itd->index[uframe] == -1)) - continue; - urb_index = itd->index[uframe]; - desc = &urb->iso_frame_desc [urb_index]; - - t = hc32_to_cpup(fusbh200, &itd->hw_transaction [uframe]); - itd->hw_transaction [uframe] = 0; - - /* report transfer status */ - if (unlikely (t & ISO_ERRS)) { - urb->error_count++; - if (t & FUSBH200_ISOC_BUF_ERR) - desc->status = usb_pipein (urb->pipe) - ? -ENOSR /* hc couldn't read */ - : -ECOMM; /* hc couldn't write */ - else if (t & FUSBH200_ISOC_BABBLE) - desc->status = -EOVERFLOW; - else /* (t & FUSBH200_ISOC_XACTERR) */ - desc->status = -EPROTO; - - /* HC need not update length with this error */ - if (!(t & FUSBH200_ISOC_BABBLE)) { - desc->actual_length = fusbh200_itdlen(urb, desc, t); - urb->actual_length += desc->actual_length; - } - } else if (likely ((t & FUSBH200_ISOC_ACTIVE) == 0)) { - desc->status = 0; - desc->actual_length = fusbh200_itdlen(urb, desc, t); - urb->actual_length += desc->actual_length; - } else { - /* URB was too late */ - desc->status = -EXDEV; - } - } - - /* handle completion now? 
*/ - if (likely ((urb_index + 1) != urb->number_of_packets)) - goto done; - - /* ASSERT: it's really the last itd for this urb - list_for_each_entry (itd, &stream->td_list, itd_list) - BUG_ON (itd->urb == urb); - */ - - /* give urb back to the driver; completion often (re)submits */ - dev = urb->dev; - fusbh200_urb_done(fusbh200, urb, 0); - retval = true; - urb = NULL; - - --fusbh200->isoc_count; - disable_periodic(fusbh200); - - if (unlikely(list_is_singular(&stream->td_list))) { - fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated - -= stream->bandwidth; - fusbh200_dbg(fusbh200, - "deschedule devp %s ep%d%s-iso\n", - dev->devpath, stream->bEndpointAddress & 0x0f, - (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); - } - -done: - itd->urb = NULL; - - /* Add to the end of the free list for later reuse */ - list_move_tail(&itd->itd_list, &stream->free_list); - - /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */ - if (list_empty(&stream->td_list)) { - list_splice_tail_init(&stream->free_list, - &fusbh200->cached_itd_list); - start_free_itds(fusbh200); - } - - return retval; -} - -/*-------------------------------------------------------------------------*/ - -static int itd_submit (struct fusbh200_hcd *fusbh200, struct urb *urb, - gfp_t mem_flags) -{ - int status = -EINVAL; - unsigned long flags; - struct fusbh200_iso_stream *stream; - - /* Get iso_stream head */ - stream = iso_stream_find (fusbh200, urb); - if (unlikely (stream == NULL)) { - fusbh200_dbg (fusbh200, "can't get iso stream\n"); - return -ENOMEM; - } - if (unlikely (urb->interval != stream->interval && - fusbh200_port_speed(fusbh200, 0) == USB_PORT_STAT_HIGH_SPEED)) { - fusbh200_dbg (fusbh200, "can't change iso interval %d --> %d\n", - stream->interval, urb->interval); - goto done; - } - -#ifdef FUSBH200_URB_TRACE - fusbh200_dbg (fusbh200, - "%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n", - __func__, urb->dev->devpath, urb, - usb_pipeendpoint (urb->pipe), - usb_pipein (urb->pipe) ? "in" : "out", - urb->transfer_buffer_length, - urb->number_of_packets, urb->interval, - stream); -#endif - - /* allocate ITDs w/o locking anything */ - status = itd_urb_transaction (stream, fusbh200, urb, mem_flags); - if (unlikely (status < 0)) { - fusbh200_dbg (fusbh200, "can't init itds\n"); - goto done; - } - - /* schedule ... need to lock */ - spin_lock_irqsave (&fusbh200->lock, flags); - if (unlikely(!HCD_HW_ACCESSIBLE(fusbh200_to_hcd(fusbh200)))) { - status = -ESHUTDOWN; - goto done_not_linked; - } - status = usb_hcd_link_urb_to_ep(fusbh200_to_hcd(fusbh200), urb); - if (unlikely(status)) - goto done_not_linked; - status = iso_stream_schedule(fusbh200, urb, stream); - if (likely (status == 0)) - itd_link_urb (fusbh200, urb, fusbh200->periodic_size << 3, stream); - else - usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb); - done_not_linked: - spin_unlock_irqrestore (&fusbh200->lock, flags); - done: - return status; -} - -/*-------------------------------------------------------------------------*/ - -static void scan_isoc(struct fusbh200_hcd *fusbh200) -{ - unsigned uf, now_frame, frame; - unsigned fmask = fusbh200->periodic_size - 1; - bool modified, live; - - /* - * When running, scan from last scan point up to "now" - * else clean up by scanning everything that's left. - * Touches as few pages as possible: cache-friendly. 
- */ - if (fusbh200->rh_state >= FUSBH200_RH_RUNNING) { - uf = fusbh200_read_frame_index(fusbh200); - now_frame = (uf >> 3) & fmask; - live = true; - } else { - now_frame = (fusbh200->next_frame - 1) & fmask; - live = false; - } - fusbh200->now_frame = now_frame; - - frame = fusbh200->next_frame; - for (;;) { - union fusbh200_shadow q, *q_p; - __hc32 type, *hw_p; - -restart: - /* scan each element in frame's queue for completions */ - q_p = &fusbh200->pshadow [frame]; - hw_p = &fusbh200->periodic [frame]; - q.ptr = q_p->ptr; - type = Q_NEXT_TYPE(fusbh200, *hw_p); - modified = false; - - while (q.ptr != NULL) { - switch (hc32_to_cpu(fusbh200, type)) { - case Q_TYPE_ITD: - /* If this ITD is still active, leave it for - * later processing ... check the next entry. - * No need to check for activity unless the - * frame is current. - */ - if (frame == now_frame && live) { - rmb(); - for (uf = 0; uf < 8; uf++) { - if (q.itd->hw_transaction[uf] & - ITD_ACTIVE(fusbh200)) - break; - } - if (uf < 8) { - q_p = &q.itd->itd_next; - hw_p = &q.itd->hw_next; - type = Q_NEXT_TYPE(fusbh200, - q.itd->hw_next); - q = *q_p; - break; - } - } - - /* Take finished ITDs out of the schedule - * and process them: recycle, maybe report - * URB completion. HC won't cache the - * pointer for much longer, if at all. - */ - *q_p = q.itd->itd_next; - *hw_p = q.itd->hw_next; - type = Q_NEXT_TYPE(fusbh200, q.itd->hw_next); - wmb(); - modified = itd_complete (fusbh200, q.itd); - q = *q_p; - break; - default: - fusbh200_dbg(fusbh200, "corrupt type %d frame %d shadow %p\n", - type, frame, q.ptr); - // BUG (); - /* FALL THROUGH */ - case Q_TYPE_QH: - case Q_TYPE_FSTN: - /* End of the iTDs and siTDs */ - q.ptr = NULL; - break; - } - - /* assume completion callbacks modify the queue */ - if (unlikely(modified && fusbh200->isoc_count > 0)) - goto restart; - } - - /* Stop when we have reached the current frame */ - if (frame == now_frame) - break; - frame = (frame + 1) & fmask; - } - fusbh200->next_frame = now_frame; -} -/*-------------------------------------------------------------------------*/ -/* - * Display / Set uframe_periodic_max - */ -static ssize_t show_uframe_periodic_max(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct fusbh200_hcd *fusbh200; - int n; - - fusbh200 = hcd_to_fusbh200(bus_to_hcd(dev_get_drvdata(dev))); - n = scnprintf(buf, PAGE_SIZE, "%d\n", fusbh200->uframe_periodic_max); - return n; -} - - -static ssize_t store_uframe_periodic_max(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct fusbh200_hcd *fusbh200; - unsigned uframe_periodic_max; - unsigned frame, uframe; - unsigned short allocated_max; - unsigned long flags; - ssize_t ret; - - fusbh200 = hcd_to_fusbh200(bus_to_hcd(dev_get_drvdata(dev))); - if (kstrtouint(buf, 0, &uframe_periodic_max) < 0) - return -EINVAL; - - if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) { - fusbh200_info(fusbh200, "rejecting invalid request for " - "uframe_periodic_max=%u\n", uframe_periodic_max); - return -EINVAL; - } - - ret = -EINVAL; - - /* - * lock, so that our checking does not race with possible periodic - * bandwidth allocation through submitting new urbs. - */ - spin_lock_irqsave (&fusbh200->lock, flags); - - /* - * for request to decrease max periodic bandwidth, we have to check - * every microframe in the schedule to see whether the decrease is - * possible. 
- */ - if (uframe_periodic_max < fusbh200->uframe_periodic_max) { - allocated_max = 0; - - for (frame = 0; frame < fusbh200->periodic_size; ++frame) - for (uframe = 0; uframe < 7; ++uframe) - allocated_max = max(allocated_max, - periodic_usecs (fusbh200, frame, uframe)); - - if (allocated_max > uframe_periodic_max) { - fusbh200_info(fusbh200, - "cannot decrease uframe_periodic_max because " - "periodic bandwidth is already allocated " - "(%u > %u)\n", - allocated_max, uframe_periodic_max); - goto out_unlock; - } - } - - /* increasing is always ok */ - - fusbh200_info(fusbh200, "setting max periodic bandwidth to %u%% " - "(== %u usec/uframe)\n", - 100*uframe_periodic_max/125, uframe_periodic_max); - - if (uframe_periodic_max != 100) - fusbh200_warn(fusbh200, "max periodic bandwidth set is non-standard\n"); - - fusbh200->uframe_periodic_max = uframe_periodic_max; - ret = count; - -out_unlock: - spin_unlock_irqrestore (&fusbh200->lock, flags); - return ret; -} -static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max, store_uframe_periodic_max); - - -static inline int create_sysfs_files(struct fusbh200_hcd *fusbh200) -{ - struct device *controller = fusbh200_to_hcd(fusbh200)->self.controller; - int i = 0; - - if (i) - goto out; - - i = device_create_file(controller, &dev_attr_uframe_periodic_max); -out: - return i; -} - -static inline void remove_sysfs_files(struct fusbh200_hcd *fusbh200) -{ - struct device *controller = fusbh200_to_hcd(fusbh200)->self.controller; - - device_remove_file(controller, &dev_attr_uframe_periodic_max); -} -/*-------------------------------------------------------------------------*/ - -/* On some systems, leaving remote wakeup enabled prevents system shutdown. - * The firmware seems to think that powering off is a wakeup event! - * This routine turns off remote wakeup and everything else, on all ports. - */ -static void fusbh200_turn_off_all_ports(struct fusbh200_hcd *fusbh200) -{ - u32 __iomem *status_reg = &fusbh200->regs->port_status; - - fusbh200_writel(fusbh200, PORT_RWC_BITS, status_reg); -} - -/* - * Halt HC, turn off all ports, and let the BIOS use the companion controllers. - * Must be called with interrupts enabled and the lock not held. - */ -static void fusbh200_silence_controller(struct fusbh200_hcd *fusbh200) -{ - fusbh200_halt(fusbh200); - - spin_lock_irq(&fusbh200->lock); - fusbh200->rh_state = FUSBH200_RH_HALTED; - fusbh200_turn_off_all_ports(fusbh200); - spin_unlock_irq(&fusbh200->lock); -} - -/* fusbh200_shutdown kick in for silicon on any bus (not just pci, etc). - * This forcibly disables dma and IRQs, helping kexec and other cases - * where the next system software may expect clean state. - */ -static void fusbh200_shutdown(struct usb_hcd *hcd) -{ - struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd); - - spin_lock_irq(&fusbh200->lock); - fusbh200->shutdown = true; - fusbh200->rh_state = FUSBH200_RH_STOPPING; - fusbh200->enabled_hrtimer_events = 0; - spin_unlock_irq(&fusbh200->lock); - - fusbh200_silence_controller(fusbh200); - - hrtimer_cancel(&fusbh200->hrtimer); -} - -/*-------------------------------------------------------------------------*/ - -/* - * fusbh200_work is called from some interrupts, timers, and so on. - * it calls driver completion functions, after dropping fusbh200->lock. - */ -static void fusbh200_work (struct fusbh200_hcd *fusbh200) -{ - /* another CPU may drop fusbh200->lock during a schedule scan while - * it reports urb completions. 
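As an aside to the store_uframe_periodic_max() handler above: it only accepts 100-124 usec of each 125 usec microframe (the USB 2.0 default of 80% being 100 usec) and reports the chosen value as a percentage. The user-space sketch below reproduces that range check and percentage arithmetic; the harness and helper names are invented for illustration and are not part of the driver.

/*
 * Illustrative only: mirrors the bounds check and the percentage math
 * of store_uframe_periodic_max() shown above.  Values other than 100
 * are accepted but flagged as non-standard by the driver.
 */
#include <stdio.h>
#include <stdbool.h>

static bool uframe_periodic_max_valid(unsigned int usecs)
{
        /* driver rejects anything outside 100..124 usec per uframe */
        return usecs >= 100 && usecs < 125;
}

int main(void)
{
        unsigned int req = 110;        /* e.g. "echo 110 > uframe_periodic_max" */

        if (!uframe_periodic_max_valid(req)) {
                fprintf(stderr, "rejecting invalid request %u\n", req);
                return 1;
        }
        /* same arithmetic as the driver's info message */
        printf("max periodic bandwidth %u%% (== %u usec/uframe)\n",
               100 * req / 125, req);
        if (req != 100)
                printf("note: non-standard setting, driver would warn\n");
        return 0;
}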
this flag guards against bogus - * attempts at re-entrant schedule scanning. - */ - if (fusbh200->scanning) { - fusbh200->need_rescan = true; - return; - } - fusbh200->scanning = true; - - rescan: - fusbh200->need_rescan = false; - if (fusbh200->async_count) - scan_async(fusbh200); - if (fusbh200->intr_count > 0) - scan_intr(fusbh200); - if (fusbh200->isoc_count > 0) - scan_isoc(fusbh200); - if (fusbh200->need_rescan) - goto rescan; - fusbh200->scanning = false; - - /* the IO watchdog guards against hardware or driver bugs that - * misplace IRQs, and should let us run completely without IRQs. - * such lossage has been observed on both VT6202 and VT8235. - */ - turn_on_io_watchdog(fusbh200); -} - -/* - * Called when the fusbh200_hcd module is removed. - */ -static void fusbh200_stop (struct usb_hcd *hcd) -{ - struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd); - - fusbh200_dbg (fusbh200, "stop\n"); - - /* no more interrupts ... */ - - spin_lock_irq(&fusbh200->lock); - fusbh200->enabled_hrtimer_events = 0; - spin_unlock_irq(&fusbh200->lock); - - fusbh200_quiesce(fusbh200); - fusbh200_silence_controller(fusbh200); - fusbh200_reset (fusbh200); - - hrtimer_cancel(&fusbh200->hrtimer); - remove_sysfs_files(fusbh200); - remove_debug_files (fusbh200); - - /* root hub is shut down separately (first, when possible) */ - spin_lock_irq (&fusbh200->lock); - end_free_itds(fusbh200); - spin_unlock_irq (&fusbh200->lock); - fusbh200_mem_cleanup (fusbh200); - - fusbh200_dbg(fusbh200, "irq normal %ld err %ld iaa %ld (lost %ld)\n", - fusbh200->stats.normal, fusbh200->stats.error, fusbh200->stats.iaa, - fusbh200->stats.lost_iaa); - fusbh200_dbg (fusbh200, "complete %ld unlink %ld\n", - fusbh200->stats.complete, fusbh200->stats.unlink); - - dbg_status (fusbh200, "fusbh200_stop completed", - fusbh200_readl(fusbh200, &fusbh200->regs->status)); -} - -/* one-time init, only for memory state */ -static int hcd_fusbh200_init(struct usb_hcd *hcd) -{ - struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd); - u32 temp; - int retval; - u32 hcc_params; - struct fusbh200_qh_hw *hw; - - spin_lock_init(&fusbh200->lock); - - /* - * keep io watchdog by default, those good HCDs could turn off it later - */ - fusbh200->need_io_watchdog = 1; - - hrtimer_init(&fusbh200->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - fusbh200->hrtimer.function = fusbh200_hrtimer_func; - fusbh200->next_hrtimer_event = FUSBH200_HRTIMER_NO_EVENT; - - hcc_params = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params); - - /* - * by default set standard 80% (== 100 usec/uframe) max periodic - * bandwidth as required by USB 2.0 - */ - fusbh200->uframe_periodic_max = 100; - - /* - * hw default: 1K periodic list heads, one per frame. - * periodic_size can shrink by USBCMD update if hcc_params allows. - */ - fusbh200->periodic_size = DEFAULT_I_TDPS; - INIT_LIST_HEAD(&fusbh200->intr_qh_list); - INIT_LIST_HEAD(&fusbh200->cached_itd_list); - - if (HCC_PGM_FRAMELISTLEN(hcc_params)) { - /* periodic schedule size can be smaller than default */ - switch (FUSBH200_TUNE_FLS) { - case 0: fusbh200->periodic_size = 1024; break; - case 1: fusbh200->periodic_size = 512; break; - case 2: fusbh200->periodic_size = 256; break; - default: BUG(); - } - } - if ((retval = fusbh200_mem_init(fusbh200, GFP_KERNEL)) < 0) - return retval; - - /* controllers may cache some of the periodic schedule ... */ - fusbh200->i_thresh = 2; - - /* - * dedicate a qh for the async ring head, since we couldn't unlink - * a 'real' qh without stopping the async schedule [4.8]. 
use it - * as the 'reclamation list head' too. - * its dummy is used in hw_alt_next of many tds, to prevent the qh - * from automatically advancing to the next td after short reads. - */ - fusbh200->async->qh_next.qh = NULL; - hw = fusbh200->async->hw; - hw->hw_next = QH_NEXT(fusbh200, fusbh200->async->qh_dma); - hw->hw_info1 = cpu_to_hc32(fusbh200, QH_HEAD); - hw->hw_token = cpu_to_hc32(fusbh200, QTD_STS_HALT); - hw->hw_qtd_next = FUSBH200_LIST_END(fusbh200); - fusbh200->async->qh_state = QH_STATE_LINKED; - hw->hw_alt_next = QTD_NEXT(fusbh200, fusbh200->async->dummy->qtd_dma); - - /* clear interrupt enables, set irq latency */ - if (log2_irq_thresh < 0 || log2_irq_thresh > 6) - log2_irq_thresh = 0; - temp = 1 << (16 + log2_irq_thresh); - if (HCC_CANPARK(hcc_params)) { - /* HW default park == 3, on hardware that supports it (like - * NVidia and ALI silicon), maximizes throughput on the async - * schedule by avoiding QH fetches between transfers. - * - * With fast usb storage devices and NForce2, "park" seems to - * make problems: throughput reduction (!), data errors... - */ - if (park) { - park = min(park, (unsigned) 3); - temp |= CMD_PARK; - temp |= park << 8; - } - fusbh200_dbg(fusbh200, "park %d\n", park); - } - if (HCC_PGM_FRAMELISTLEN(hcc_params)) { - /* periodic schedule size can be smaller than default */ - temp &= ~(3 << 2); - temp |= (FUSBH200_TUNE_FLS << 2); - } - fusbh200->command = temp; - - /* Accept arbitrarily long scatter-gather lists */ - if (!(hcd->driver->flags & HCD_LOCAL_MEM)) - hcd->self.sg_tablesize = ~0; - return 0; -} - -/* start HC running; it's halted, hcd_fusbh200_init() has been run (once) */ -static int fusbh200_run (struct usb_hcd *hcd) -{ - struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd); - u32 temp; - u32 hcc_params; - - hcd->uses_new_polling = 1; - - /* EHCI spec section 4.1 */ - - fusbh200_writel(fusbh200, fusbh200->periodic_dma, &fusbh200->regs->frame_list); - fusbh200_writel(fusbh200, (u32)fusbh200->async->qh_dma, &fusbh200->regs->async_next); - - /* - * hcc_params controls whether fusbh200->regs->segment must (!!!) - * be used; it constrains QH/ITD/SITD and QTD locations. - * pci_pool consistent memory always uses segment zero. - * streaming mappings for I/O buffers, like pci_map_single(), - * can return segments above 4GB, if the device allows. - * - * NOTE: the dma mask is visible through dma_supported(), so - * drivers can pass this info along ... like NETIF_F_HIGHDMA, - * Scsi_Host.highmem_io, and so forth. It's readonly to all - * host side drivers though. - */ - hcc_params = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params); - - // Philips, Intel, and maybe others need CMD_RUN before the - // root hub will detect new devices (why?); NEC doesn't - fusbh200->command &= ~(CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET); - fusbh200->command |= CMD_RUN; - fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command); - dbg_cmd (fusbh200, "init", fusbh200->command); - - /* - * Start, enabling full USB 2.0 functionality ... usb 1.1 devices - * are explicitly handed to companion controller(s), so no TT is - * involved with the root hub. (Except where one is integrated, - * and there's no companion controller unless maybe for USB OTG.) - * - * Turning on the CF flag will transfer ownership of all ports - * from the companions to the EHCI controller. If any of the - * companions are in the middle of a port reset at the time, it - * could cause trouble. Write-locking ehci_cf_port_reset_rwsem - * guarantees that no resets are in progress. 
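For reference, the USBCMD image assembled in hcd_fusbh200_init() above packs the interrupt threshold into bits 23:16, the async "park" enable and count into bit 11 and bits 9:8, and the frame-list size into bits 3:2. A minimal stand-alone sketch of that packing follows; build_usbcmd() is an invented helper, and only CMD_PARK and the bit positions are taken from the code shown.

/*
 * Sketch of the USBCMD packing done in hcd_fusbh200_init() above.
 * The caller is expected to pass log2_irq_thresh in 0..6, as the
 * driver enforces before using it.
 */
#include <stdio.h>

#define CMD_PARK        (1u << 11)        /* enable "park" on async qh */

static unsigned int build_usbcmd(unsigned int log2_irq_thresh,
                                 unsigned int park, unsigned int fls)
{
        unsigned int temp = 1u << (16 + log2_irq_thresh);        /* irq latency */

        if (park) {
                if (park > 3)
                        park = 3;                /* hardware maximum */
                temp |= CMD_PARK | park << 8;
        }
        temp &= ~(3u << 2);
        temp |= fls << 2;                /* FUSBH200_TUNE_FLS equivalent */
        return temp;
}

int main(void)
{
        /* default irq latency (1 uframe), park 3, 1024-entry frame list */
        printf("USBCMD image: 0x%08x\n", build_usbcmd(0, 3, 0));
        return 0;
}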
After we set CF, - * a short delay lets the hardware catch up; new resets shouldn't - * be started before the port switching actions could complete. - */ - down_write(&ehci_cf_port_reset_rwsem); - fusbh200->rh_state = FUSBH200_RH_RUNNING; - fusbh200_readl(fusbh200, &fusbh200->regs->command); /* unblock posted writes */ - msleep(5); - up_write(&ehci_cf_port_reset_rwsem); - fusbh200->last_periodic_enable = ktime_get_real(); - - temp = HC_VERSION(fusbh200, fusbh200_readl(fusbh200, &fusbh200->caps->hc_capbase)); - fusbh200_info (fusbh200, - "USB %x.%x started, EHCI %x.%02x\n", - ((fusbh200->sbrn & 0xf0)>>4), (fusbh200->sbrn & 0x0f), - temp >> 8, temp & 0xff); - - fusbh200_writel(fusbh200, INTR_MASK, - &fusbh200->regs->intr_enable); /* Turn On Interrupts */ - - /* GRR this is run-once init(), being done every time the HC starts. - * So long as they're part of class devices, we can't do it init() - * since the class device isn't created that early. - */ - create_debug_files(fusbh200); - create_sysfs_files(fusbh200); - - return 0; -} - -static int fusbh200_setup(struct usb_hcd *hcd) -{ - struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd); - int retval; - - fusbh200->regs = (void __iomem *)fusbh200->caps + - HC_LENGTH(fusbh200, fusbh200_readl(fusbh200, &fusbh200->caps->hc_capbase)); - dbg_hcs_params(fusbh200, "reset"); - dbg_hcc_params(fusbh200, "reset"); - - /* cache this readonly data; minimize chip reads */ - fusbh200->hcs_params = fusbh200_readl(fusbh200, &fusbh200->caps->hcs_params); - - fusbh200->sbrn = HCD_USB2; - - /* data structure init */ - retval = hcd_fusbh200_init(hcd); - if (retval) - return retval; - - retval = fusbh200_halt(fusbh200); - if (retval) - return retval; - - fusbh200_reset(fusbh200); - - return 0; -} - -/*-------------------------------------------------------------------------*/ - -static irqreturn_t fusbh200_irq (struct usb_hcd *hcd) -{ - struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd); - u32 status, masked_status, pcd_status = 0, cmd; - int bh; - - spin_lock (&fusbh200->lock); - - status = fusbh200_readl(fusbh200, &fusbh200->regs->status); - - /* e.g. cardbus physical eject */ - if (status == ~(u32) 0) { - fusbh200_dbg (fusbh200, "device removed\n"); - goto dead; - } - - /* - * We don't use STS_FLR, but some controllers don't like it to - * remain on, so mask it out along with the other status bits. - */ - masked_status = status & (INTR_MASK | STS_FLR); - - /* Shared IRQ? */ - if (!masked_status || unlikely(fusbh200->rh_state == FUSBH200_RH_HALTED)) { - spin_unlock(&fusbh200->lock); - return IRQ_NONE; - } - - /* clear (just) interrupts */ - fusbh200_writel(fusbh200, masked_status, &fusbh200->regs->status); - cmd = fusbh200_readl(fusbh200, &fusbh200->regs->command); - bh = 0; - - /* normal [4.15.1.2] or error [4.15.1.1] completion */ - if (likely ((status & (STS_INT|STS_ERR)) != 0)) { - if (likely ((status & STS_ERR) == 0)) - COUNT (fusbh200->stats.normal); - else - COUNT (fusbh200->stats.error); - bh = 1; - } - - /* complete the unlinking of some qh [4.15.2.3] */ - if (status & STS_IAA) { - - /* Turn off the IAA watchdog */ - fusbh200->enabled_hrtimer_events &= ~BIT(FUSBH200_HRTIMER_IAA_WATCHDOG); - - /* - * Mild optimization: Allow another IAAD to reset the - * hrtimer, if one occurs before the next expiration. - * In theory we could always cancel the hrtimer, but - * tests show that about half the time it will be reset - * for some other event anyway. 
- */ - if (fusbh200->next_hrtimer_event == FUSBH200_HRTIMER_IAA_WATCHDOG) - ++fusbh200->next_hrtimer_event; - - /* guard against (alleged) silicon errata */ - if (cmd & CMD_IAAD) - fusbh200_dbg(fusbh200, "IAA with IAAD still set?\n"); - if (fusbh200->async_iaa) { - COUNT(fusbh200->stats.iaa); - end_unlink_async(fusbh200); - } else - fusbh200_dbg(fusbh200, "IAA with nothing unlinked?\n"); - } - - /* remote wakeup [4.3.1] */ - if (status & STS_PCD) { - int pstatus; - u32 __iomem *status_reg = &fusbh200->regs->port_status; - - /* kick root hub later */ - pcd_status = status; - - /* resume root hub? */ - if (fusbh200->rh_state == FUSBH200_RH_SUSPENDED) - usb_hcd_resume_root_hub(hcd); - - pstatus = fusbh200_readl(fusbh200, status_reg); - - if (test_bit(0, &fusbh200->suspended_ports) && - ((pstatus & PORT_RESUME) || - !(pstatus & PORT_SUSPEND)) && - (pstatus & PORT_PE) && - fusbh200->reset_done[0] == 0) { - - /* start 20 msec resume signaling from this port, - * and make hub_wq collect PORT_STAT_C_SUSPEND to - * stop that signaling. Use 5 ms extra for safety, - * like usb_port_resume() does. - */ - fusbh200->reset_done[0] = jiffies + msecs_to_jiffies(25); - set_bit(0, &fusbh200->resuming_ports); - fusbh200_dbg (fusbh200, "port 1 remote wakeup\n"); - mod_timer(&hcd->rh_timer, fusbh200->reset_done[0]); - } - } - - /* PCI errors [4.15.2.4] */ - if (unlikely ((status & STS_FATAL) != 0)) { - fusbh200_err(fusbh200, "fatal error\n"); - dbg_cmd(fusbh200, "fatal", cmd); - dbg_status(fusbh200, "fatal", status); -dead: - usb_hc_died(hcd); - - /* Don't let the controller do anything more */ - fusbh200->shutdown = true; - fusbh200->rh_state = FUSBH200_RH_STOPPING; - fusbh200->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE); - fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command); - fusbh200_writel(fusbh200, 0, &fusbh200->regs->intr_enable); - fusbh200_handle_controller_death(fusbh200); - - /* Handle completions when the controller stops */ - bh = 0; - } - - if (bh) - fusbh200_work (fusbh200); - spin_unlock (&fusbh200->lock); - if (pcd_status) - usb_hcd_poll_rh_status(hcd); - return IRQ_HANDLED; -} - -/*-------------------------------------------------------------------------*/ - -/* - * non-error returns are a promise to giveback() the urb later - * we drop ownership so next owner (or urb unlink) can get it - * - * urb + dev is in hcd.self.controller.urb_list - * we're queueing TDs onto software and hardware lists - * - * hcd-specific init for hcpriv hasn't been done yet - * - * NOTE: control, bulk, and interrupt share the same code to append TDs - * to a (possibly active) QH, and the same QH scanning code. - */ -static int fusbh200_urb_enqueue ( - struct usb_hcd *hcd, - struct urb *urb, - gfp_t mem_flags -) { - struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd); - struct list_head qtd_list; - - INIT_LIST_HEAD (&qtd_list); - - switch (usb_pipetype (urb->pipe)) { - case PIPE_CONTROL: - /* qh_completions() code doesn't handle all the fault cases - * in multi-TD control transfers. Even 1KB is rare anyway. 
- */ - if (urb->transfer_buffer_length > (16 * 1024)) - return -EMSGSIZE; - /* FALLTHROUGH */ - /* case PIPE_BULK: */ - default: - if (!qh_urb_transaction (fusbh200, urb, &qtd_list, mem_flags)) - return -ENOMEM; - return submit_async(fusbh200, urb, &qtd_list, mem_flags); - - case PIPE_INTERRUPT: - if (!qh_urb_transaction (fusbh200, urb, &qtd_list, mem_flags)) - return -ENOMEM; - return intr_submit(fusbh200, urb, &qtd_list, mem_flags); - - case PIPE_ISOCHRONOUS: - return itd_submit (fusbh200, urb, mem_flags); - } -} - -/* remove from hardware lists - * completions normally happen asynchronously - */ - -static int fusbh200_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) -{ - struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd); - struct fusbh200_qh *qh; - unsigned long flags; - int rc; - - spin_lock_irqsave (&fusbh200->lock, flags); - rc = usb_hcd_check_unlink_urb(hcd, urb, status); - if (rc) - goto done; - - switch (usb_pipetype (urb->pipe)) { - // case PIPE_CONTROL: - // case PIPE_BULK: - default: - qh = (struct fusbh200_qh *) urb->hcpriv; - if (!qh) - break; - switch (qh->qh_state) { - case QH_STATE_LINKED: - case QH_STATE_COMPLETING: - start_unlink_async(fusbh200, qh); - break; - case QH_STATE_UNLINK: - case QH_STATE_UNLINK_WAIT: - /* already started */ - break; - case QH_STATE_IDLE: - /* QH might be waiting for a Clear-TT-Buffer */ - qh_completions(fusbh200, qh); - break; - } - break; - - case PIPE_INTERRUPT: - qh = (struct fusbh200_qh *) urb->hcpriv; - if (!qh) - break; - switch (qh->qh_state) { - case QH_STATE_LINKED: - case QH_STATE_COMPLETING: - start_unlink_intr(fusbh200, qh); - break; - case QH_STATE_IDLE: - qh_completions (fusbh200, qh); - break; - default: - fusbh200_dbg (fusbh200, "bogus qh %p state %d\n", - qh, qh->qh_state); - goto done; - } - break; - - case PIPE_ISOCHRONOUS: - // itd... - - // wait till next completion, do it then. - // completion irqs can wait up to 1024 msec, - break; - } -done: - spin_unlock_irqrestore (&fusbh200->lock, flags); - return rc; -} - -/*-------------------------------------------------------------------------*/ - -// bulk qh holds the data toggle - -static void -fusbh200_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep) -{ - struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd); - unsigned long flags; - struct fusbh200_qh *qh, *tmp; - - /* ASSERT: any requests/urbs are being unlinked */ - /* ASSERT: nobody can be submitting urbs for this any more */ - -rescan: - spin_lock_irqsave (&fusbh200->lock, flags); - qh = ep->hcpriv; - if (!qh) - goto done; - - /* endpoints can be iso streams. for now, we don't - * accelerate iso completions ... so spin a while. - */ - if (qh->hw == NULL) { - struct fusbh200_iso_stream *stream = ep->hcpriv; - - if (!list_empty(&stream->td_list)) - goto idle_timeout; - - /* BUG_ON(!list_empty(&stream->free_list)); */ - kfree(stream); - goto done; - } - - if (fusbh200->rh_state < FUSBH200_RH_RUNNING) - qh->qh_state = QH_STATE_IDLE; - switch (qh->qh_state) { - case QH_STATE_LINKED: - case QH_STATE_COMPLETING: - for (tmp = fusbh200->async->qh_next.qh; - tmp && tmp != qh; - tmp = tmp->qh_next.qh) - continue; - /* periodic qh self-unlinks on empty, and a COMPLETING qh - * may already be unlinked. - */ - if (tmp) - start_unlink_async(fusbh200, qh); - /* FALL THROUGH */ - case QH_STATE_UNLINK: /* wait for hw to finish? 
*/ - case QH_STATE_UNLINK_WAIT: -idle_timeout: - spin_unlock_irqrestore (&fusbh200->lock, flags); - schedule_timeout_uninterruptible(1); - goto rescan; - case QH_STATE_IDLE: /* fully unlinked */ - if (qh->clearing_tt) - goto idle_timeout; - if (list_empty (&qh->qtd_list)) { - qh_destroy(fusbh200, qh); - break; - } - /* else FALL THROUGH */ - default: - /* caller was supposed to have unlinked any requests; - * that's not our job. just leak this memory. - */ - fusbh200_err (fusbh200, "qh %p (#%02x) state %d%s\n", - qh, ep->desc.bEndpointAddress, qh->qh_state, - list_empty (&qh->qtd_list) ? "" : "(has tds)"); - break; - } - done: - ep->hcpriv = NULL; - spin_unlock_irqrestore (&fusbh200->lock, flags); -} - -static void -fusbh200_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep) -{ - struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd); - struct fusbh200_qh *qh; - int eptype = usb_endpoint_type(&ep->desc); - int epnum = usb_endpoint_num(&ep->desc); - int is_out = usb_endpoint_dir_out(&ep->desc); - unsigned long flags; - - if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT) - return; - - spin_lock_irqsave(&fusbh200->lock, flags); - qh = ep->hcpriv; - - /* For Bulk and Interrupt endpoints we maintain the toggle state - * in the hardware; the toggle bits in udev aren't used at all. - * When an endpoint is reset by usb_clear_halt() we must reset - * the toggle bit in the QH. - */ - if (qh) { - usb_settoggle(qh->dev, epnum, is_out, 0); - if (!list_empty(&qh->qtd_list)) { - WARN_ONCE(1, "clear_halt for a busy endpoint\n"); - } else if (qh->qh_state == QH_STATE_LINKED || - qh->qh_state == QH_STATE_COMPLETING) { - - /* The toggle value in the QH can't be updated - * while the QH is active. Unlink it now; - * re-linking will call qh_refresh(). - */ - if (eptype == USB_ENDPOINT_XFER_BULK) - start_unlink_async(fusbh200, qh); - else - start_unlink_intr(fusbh200, qh); - } - } - spin_unlock_irqrestore(&fusbh200->lock, flags); -} - -static int fusbh200_get_frame (struct usb_hcd *hcd) -{ - struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd); - return (fusbh200_read_frame_index(fusbh200) >> 3) % fusbh200->periodic_size; -} - -/*-------------------------------------------------------------------------*/ - -/* - * The EHCI in ChipIdea HDRC cannot be a separate module or device, - * because its registers (and irq) are shared between host/gadget/otg - * functions and in order to facilitate role switching we cannot - * give the fusbh200 driver exclusive access to those. 
- */ -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_AUTHOR (DRIVER_AUTHOR); -MODULE_LICENSE ("GPL"); - -static const struct hc_driver fusbh200_fusbh200_hc_driver = { - .description = hcd_name, - .product_desc = "Faraday USB2.0 Host Controller", - .hcd_priv_size = sizeof(struct fusbh200_hcd), - - /* - * generic hardware linkage - */ - .irq = fusbh200_irq, - .flags = HCD_MEMORY | HCD_USB2, - - /* - * basic lifecycle operations - */ - .reset = hcd_fusbh200_init, - .start = fusbh200_run, - .stop = fusbh200_stop, - .shutdown = fusbh200_shutdown, - - /* - * managing i/o requests and associated device resources - */ - .urb_enqueue = fusbh200_urb_enqueue, - .urb_dequeue = fusbh200_urb_dequeue, - .endpoint_disable = fusbh200_endpoint_disable, - .endpoint_reset = fusbh200_endpoint_reset, - - /* - * scheduling support - */ - .get_frame_number = fusbh200_get_frame, - - /* - * root hub support - */ - .hub_status_data = fusbh200_hub_status_data, - .hub_control = fusbh200_hub_control, - .bus_suspend = fusbh200_bus_suspend, - .bus_resume = fusbh200_bus_resume, - - .relinquish_port = fusbh200_relinquish_port, - .port_handed_over = fusbh200_port_handed_over, - - .clear_tt_buffer_complete = fusbh200_clear_tt_buffer_complete, -}; - -static void fusbh200_init(struct fusbh200_hcd *fusbh200) -{ - u32 reg; - - reg = fusbh200_readl(fusbh200, &fusbh200->regs->bmcsr); - reg |= BMCSR_INT_POLARITY; - reg &= ~BMCSR_VBUS_OFF; - fusbh200_writel(fusbh200, reg, &fusbh200->regs->bmcsr); - - reg = fusbh200_readl(fusbh200, &fusbh200->regs->bmier); - fusbh200_writel(fusbh200, reg | BMIER_OVC_EN | BMIER_VBUS_ERR_EN, - &fusbh200->regs->bmier); -} - -/** - * fusbh200_hcd_probe - initialize faraday FUSBH200 HCDs - * - * Allocates basic resources for this USB host controller, and - * then invokes the start() method for the HCD associated with it - * through the hotplug entry's driver_data. - */ -static int fusbh200_hcd_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct usb_hcd *hcd; - struct resource *res; - int irq; - int retval = -ENODEV; - struct fusbh200_hcd *fusbh200; - - if (usb_disabled()) - return -ENODEV; - - pdev->dev.power.power_state = PMSG_ON; - - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res) { - dev_err(dev, - "Found HC with no IRQ. Check %s setup!\n", - dev_name(dev)); - return -ENODEV; - } - - irq = res->start; - - hcd = usb_create_hcd(&fusbh200_fusbh200_hc_driver, dev, - dev_name(dev)); - if (!hcd) { - dev_err(dev, "failed to create hcd with err %d\n", retval); - retval = -ENOMEM; - goto fail_create_hcd; - } - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, - "Found HC with no register addr. Check %s setup!\n", - dev_name(dev)); - retval = -ENODEV; - goto fail_request_resource; - } - - hcd->rsrc_start = res->start; - hcd->rsrc_len = resource_size(res); - hcd->has_tt = 1; - - if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, - fusbh200_fusbh200_hc_driver.description)) { - dev_dbg(dev, "controller already in use\n"); - retval = -EBUSY; - goto fail_request_resource; - } - - res = platform_get_resource(pdev, IORESOURCE_IO, 0); - if (!res) { - dev_err(dev, - "Found HC with no register addr. 
Check %s setup!\n", - dev_name(dev)); - retval = -ENODEV; - goto fail_request_resource; - } - - hcd->regs = ioremap_nocache(res->start, resource_size(res)); - if (hcd->regs == NULL) { - dev_dbg(dev, "error mapping memory\n"); - retval = -EFAULT; - goto fail_ioremap; - } - - fusbh200 = hcd_to_fusbh200(hcd); - - fusbh200->caps = hcd->regs; - - retval = fusbh200_setup(hcd); - if (retval) - goto fail_add_hcd; - - fusbh200_init(fusbh200); - - retval = usb_add_hcd(hcd, irq, IRQF_SHARED); - if (retval) { - dev_err(dev, "failed to add hcd with err %d\n", retval); - goto fail_add_hcd; - } - device_wakeup_enable(hcd->self.controller); - - return retval; - -fail_add_hcd: - iounmap(hcd->regs); -fail_ioremap: - release_mem_region(hcd->rsrc_start, hcd->rsrc_len); -fail_request_resource: - usb_put_hcd(hcd); -fail_create_hcd: - dev_err(dev, "init %s fail, %d\n", dev_name(dev), retval); - return retval; -} - -/** - * fusbh200_hcd_remove - shutdown processing for EHCI HCDs - * @dev: USB Host Controller being removed - * - * Reverses the effect of fotg2xx_usb_hcd_probe(), first invoking - * the HCD's stop() method. It is always called from a thread - * context, normally "rmmod", "apmd", or something similar. - */ -static int fusbh200_hcd_remove(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct usb_hcd *hcd = dev_get_drvdata(dev); - - if (!hcd) - return 0; - - usb_remove_hcd(hcd); - iounmap(hcd->regs); - release_mem_region(hcd->rsrc_start, hcd->rsrc_len); - usb_put_hcd(hcd); - - return 0; -} - -static struct platform_driver fusbh200_hcd_fusbh200_driver = { - .driver = { - .name = "fusbh200", - }, - .probe = fusbh200_hcd_probe, - .remove = fusbh200_hcd_remove, -}; - -static int __init fusbh200_hcd_init(void) -{ - int retval = 0; - - if (usb_disabled()) - return -ENODEV; - - printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name); - set_bit(USB_EHCI_LOADED, &usb_hcds_loaded); - if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) || - test_bit(USB_OHCI_LOADED, &usb_hcds_loaded)) - printk(KERN_WARNING "Warning! 
fusbh200_hcd should always be loaded" - " before uhci_hcd and ohci_hcd, not after\n"); - - pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd\n", - hcd_name, - sizeof(struct fusbh200_qh), sizeof(struct fusbh200_qtd), - sizeof(struct fusbh200_itd)); - - fusbh200_debug_root = debugfs_create_dir("fusbh200", usb_debug_root); - if (!fusbh200_debug_root) { - retval = -ENOENT; - goto err_debug; - } - - retval = platform_driver_register(&fusbh200_hcd_fusbh200_driver); - if (retval < 0) - goto clean; - return retval; - - platform_driver_unregister(&fusbh200_hcd_fusbh200_driver); -clean: - debugfs_remove(fusbh200_debug_root); - fusbh200_debug_root = NULL; -err_debug: - clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded); - return retval; -} -module_init(fusbh200_hcd_init); - -static void __exit fusbh200_hcd_cleanup(void) -{ - platform_driver_unregister(&fusbh200_hcd_fusbh200_driver); - debugfs_remove(fusbh200_debug_root); - clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded); -} -module_exit(fusbh200_hcd_cleanup); diff --git a/drivers/usb/host/fusbh200.h b/drivers/usb/host/fusbh200.h deleted file mode 100644 index d6e5b3d4aa68..000000000000 --- a/drivers/usb/host/fusbh200.h +++ /dev/null @@ -1,675 +0,0 @@ -#ifndef __LINUX_FUSBH200_H -#define __LINUX_FUSBH200_H - -#include <linux/usb/ehci-dbgp.h> - -/* definitions used for the EHCI driver */ - -/* - * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to - * __leXX (normally) or __beXX (given FUSBH200_BIG_ENDIAN_DESC), depending on - * the host controller implementation. - * - * To facilitate the strongest possible byte-order checking from "sparse" - * and so on, we use __leXX unless that's not practical. - */ -#define __hc32 __le32 -#define __hc16 __le16 - -/* statistics can be kept for tuning/monitoring */ -struct fusbh200_stats { - /* irq usage */ - unsigned long normal; - unsigned long error; - unsigned long iaa; - unsigned long lost_iaa; - - /* termination of urbs from core */ - unsigned long complete; - unsigned long unlink; -}; - -/* fusbh200_hcd->lock guards shared data against other CPUs: - * fusbh200_hcd: async, unlink, periodic (and shadow), ... - * usb_host_endpoint: hcpriv - * fusbh200_qh: qh_next, qtd_list - * fusbh200_qtd: qtd_list - * - * Also, hold this lock when talking to HC registers or - * when updating hw_* fields in shared qh/qtd/... structures. - */ - -#define FUSBH200_MAX_ROOT_PORTS 1 /* see HCS_N_PORTS */ - -/* - * fusbh200_rh_state values of FUSBH200_RH_RUNNING or above mean that the - * controller may be doing DMA. Lower values mean there's no DMA. - */ -enum fusbh200_rh_state { - FUSBH200_RH_HALTED, - FUSBH200_RH_SUSPENDED, - FUSBH200_RH_RUNNING, - FUSBH200_RH_STOPPING -}; - -/* - * Timer events, ordered by increasing delay length. - * Always update event_delays_ns[] and event_handlers[] (defined in - * ehci-timer.c) in parallel with this list. 
- */ -enum fusbh200_hrtimer_event { - FUSBH200_HRTIMER_POLL_ASS, /* Poll for async schedule off */ - FUSBH200_HRTIMER_POLL_PSS, /* Poll for periodic schedule off */ - FUSBH200_HRTIMER_POLL_DEAD, /* Wait for dead controller to stop */ - FUSBH200_HRTIMER_UNLINK_INTR, /* Wait for interrupt QH unlink */ - FUSBH200_HRTIMER_FREE_ITDS, /* Wait for unused iTDs and siTDs */ - FUSBH200_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */ - FUSBH200_HRTIMER_IAA_WATCHDOG, /* Handle lost IAA interrupts */ - FUSBH200_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */ - FUSBH200_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */ - FUSBH200_HRTIMER_IO_WATCHDOG, /* Check for missing IRQs */ - FUSBH200_HRTIMER_NUM_EVENTS /* Must come last */ -}; -#define FUSBH200_HRTIMER_NO_EVENT 99 - -struct fusbh200_hcd { /* one per controller */ - /* timing support */ - enum fusbh200_hrtimer_event next_hrtimer_event; - unsigned enabled_hrtimer_events; - ktime_t hr_timeouts[FUSBH200_HRTIMER_NUM_EVENTS]; - struct hrtimer hrtimer; - - int PSS_poll_count; - int ASS_poll_count; - int died_poll_count; - - /* glue to PCI and HCD framework */ - struct fusbh200_caps __iomem *caps; - struct fusbh200_regs __iomem *regs; - struct ehci_dbg_port __iomem *debug; - - __u32 hcs_params; /* cached register copy */ - spinlock_t lock; - enum fusbh200_rh_state rh_state; - - /* general schedule support */ - bool scanning:1; - bool need_rescan:1; - bool intr_unlinking:1; - bool async_unlinking:1; - bool shutdown:1; - struct fusbh200_qh *qh_scan_next; - - /* async schedule support */ - struct fusbh200_qh *async; - struct fusbh200_qh *dummy; /* For AMD quirk use */ - struct fusbh200_qh *async_unlink; - struct fusbh200_qh *async_unlink_last; - struct fusbh200_qh *async_iaa; - unsigned async_unlink_cycle; - unsigned async_count; /* async activity count */ - - /* periodic schedule support */ -#define DEFAULT_I_TDPS 1024 /* some HCs can do less */ - unsigned periodic_size; - __hc32 *periodic; /* hw periodic table */ - dma_addr_t periodic_dma; - struct list_head intr_qh_list; - unsigned i_thresh; /* uframes HC might cache */ - - union fusbh200_shadow *pshadow; /* mirror hw periodic table */ - struct fusbh200_qh *intr_unlink; - struct fusbh200_qh *intr_unlink_last; - unsigned intr_unlink_cycle; - unsigned now_frame; /* frame from HC hardware */ - unsigned next_frame; /* scan periodic, start here */ - unsigned intr_count; /* intr activity count */ - unsigned isoc_count; /* isoc activity count */ - unsigned periodic_count; /* periodic activity count */ - unsigned uframe_periodic_max; /* max periodic time per uframe */ - - - /* list of itds completed while now_frame was still active */ - struct list_head cached_itd_list; - struct fusbh200_itd *last_itd_to_free; - - /* per root hub port */ - unsigned long reset_done [FUSBH200_MAX_ROOT_PORTS]; - - /* bit vectors (one bit per port) */ - unsigned long bus_suspended; /* which ports were - already suspended at the start of a bus suspend */ - unsigned long companion_ports; /* which ports are - dedicated to the companion controller */ - unsigned long owned_ports; /* which ports are - owned by the companion during a bus suspend */ - unsigned long port_c_suspend; /* which ports have - the change-suspend feature turned on */ - unsigned long suspended_ports; /* which ports are - suspended */ - unsigned long resuming_ports; /* which ports have - started to resume */ - - /* per-HC memory pools (could be per-bus, but ...) 
*/ - struct dma_pool *qh_pool; /* qh per active urb */ - struct dma_pool *qtd_pool; /* one or more per qh */ - struct dma_pool *itd_pool; /* itd per iso urb */ - - unsigned random_frame; - unsigned long next_statechange; - ktime_t last_periodic_enable; - u32 command; - - /* SILICON QUIRKS */ - unsigned need_io_watchdog:1; - unsigned fs_i_thresh:1; /* Intel iso scheduling */ - - u8 sbrn; /* packed release number */ - - /* irq statistics */ - struct fusbh200_stats stats; -# define COUNT(x) do { (x)++; } while (0) - - /* debug files */ - struct dentry *debug_dir; -}; - -/* convert between an HCD pointer and the corresponding FUSBH200_HCD */ -static inline struct fusbh200_hcd *hcd_to_fusbh200 (struct usb_hcd *hcd) -{ - return (struct fusbh200_hcd *) (hcd->hcd_priv); -} -static inline struct usb_hcd *fusbh200_to_hcd (struct fusbh200_hcd *fusbh200) -{ - return container_of ((void *) fusbh200, struct usb_hcd, hcd_priv); -} - -/*-------------------------------------------------------------------------*/ - -/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */ - -/* Section 2.2 Host Controller Capability Registers */ -struct fusbh200_caps { - /* these fields are specified as 8 and 16 bit registers, - * but some hosts can't perform 8 or 16 bit PCI accesses. - * some hosts treat caplength and hciversion as parts of a 32-bit - * register, others treat them as two separate registers, this - * affects the memory map for big endian controllers. - */ - u32 hc_capbase; -#define HC_LENGTH(fusbh200, p) (0x00ff&((p) >> /* bits 7:0 / offset 00h */ \ - (fusbh200_big_endian_capbase(fusbh200) ? 24 : 0))) -#define HC_VERSION(fusbh200, p) (0xffff&((p) >> /* bits 31:16 / offset 02h */ \ - (fusbh200_big_endian_capbase(fusbh200) ? 0 : 16))) - u32 hcs_params; /* HCSPARAMS - offset 0x4 */ -#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */ - - u32 hcc_params; /* HCCPARAMS - offset 0x8 */ -#define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */ -#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1)) /* true: periodic_size changes*/ - u8 portroute[8]; /* nibbles for routing - offset 0xC */ -}; - - -/* Section 2.3 Host Controller Operational Registers */ -struct fusbh200_regs { - - /* USBCMD: offset 0x00 */ - u32 command; - -/* EHCI 1.1 addendum */ -/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */ -#define CMD_PARK (1<<11) /* enable "park" on async qh */ -#define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */ -#define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */ -#define CMD_ASE (1<<5) /* async schedule enable */ -#define CMD_PSE (1<<4) /* periodic schedule enable */ -/* 3:2 is periodic frame list size */ -#define CMD_RESET (1<<1) /* reset HC not bus */ -#define CMD_RUN (1<<0) /* start/stop HC */ - - /* USBSTS: offset 0x04 */ - u32 status; -#define STS_ASS (1<<15) /* Async Schedule Status */ -#define STS_PSS (1<<14) /* Periodic Schedule Status */ -#define STS_RECL (1<<13) /* Reclamation */ -#define STS_HALT (1<<12) /* Not running (any reason) */ -/* some bits reserved */ - /* these STS_* flags are also intr_enable bits (USBINTR) */ -#define STS_IAA (1<<5) /* Interrupted on async advance */ -#define STS_FATAL (1<<4) /* such as some PCI access errors */ -#define STS_FLR (1<<3) /* frame list rolled over */ -#define STS_PCD (1<<2) /* port change detect */ -#define STS_ERR (1<<1) /* "error" completion (overflow, ...) */ -#define STS_INT (1<<0) /* "normal" completion (short, ...) 
*/ - - /* USBINTR: offset 0x08 */ - u32 intr_enable; - - /* FRINDEX: offset 0x0C */ - u32 frame_index; /* current microframe number */ - /* CTRLDSSEGMENT: offset 0x10 */ - u32 segment; /* address bits 63:32 if needed */ - /* PERIODICLISTBASE: offset 0x14 */ - u32 frame_list; /* points to periodic list */ - /* ASYNCLISTADDR: offset 0x18 */ - u32 async_next; /* address of next async queue head */ - - u32 reserved1; - /* PORTSC: offset 0x20 */ - u32 port_status; -/* 31:23 reserved */ -#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */ -#define PORT_RESET (1<<8) /* reset port */ -#define PORT_SUSPEND (1<<7) /* suspend port */ -#define PORT_RESUME (1<<6) /* resume it */ -#define PORT_PEC (1<<3) /* port enable change */ -#define PORT_PE (1<<2) /* port enable */ -#define PORT_CSC (1<<1) /* connect status change */ -#define PORT_CONNECT (1<<0) /* device connected */ -#define PORT_RWC_BITS (PORT_CSC | PORT_PEC) - - u32 reserved2[3]; - - /* BMCSR: offset 0x30 */ - u32 bmcsr; /* Bus Moniter Control/Status Register */ -#define BMCSR_HOST_SPD_TYP (3<<9) -#define BMCSR_VBUS_OFF (1<<4) -#define BMCSR_INT_POLARITY (1<<3) - - /* BMISR: offset 0x34 */ - u32 bmisr; /* Bus Moniter Interrupt Status Register*/ -#define BMISR_OVC (1<<1) - - /* BMIER: offset 0x38 */ - u32 bmier; /* Bus Moniter Interrupt Enable Register */ -#define BMIER_OVC_EN (1<<1) -#define BMIER_VBUS_ERR_EN (1<<0) -}; - -/*-------------------------------------------------------------------------*/ - -#define QTD_NEXT(fusbh200, dma) cpu_to_hc32(fusbh200, (u32)dma) - -/* - * EHCI Specification 0.95 Section 3.5 - * QTD: describe data transfer components (buffer, direction, ...) - * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram". - * - * These are associated only with "QH" (Queue Head) structures, - * used with control, bulk, and interrupt transfers. - */ -struct fusbh200_qtd { - /* first part defined by EHCI spec */ - __hc32 hw_next; /* see EHCI 3.5.1 */ - __hc32 hw_alt_next; /* see EHCI 3.5.2 */ - __hc32 hw_token; /* see EHCI 3.5.3 */ -#define QTD_TOGGLE (1 << 31) /* data toggle */ -#define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff) -#define QTD_IOC (1 << 15) /* interrupt on complete */ -#define QTD_CERR(tok) (((tok)>>10) & 0x3) -#define QTD_PID(tok) (((tok)>>8) & 0x3) -#define QTD_STS_ACTIVE (1 << 7) /* HC may execute this */ -#define QTD_STS_HALT (1 << 6) /* halted on error */ -#define QTD_STS_DBE (1 << 5) /* data buffer error (in HC) */ -#define QTD_STS_BABBLE (1 << 4) /* device was babbling (qtd halted) */ -#define QTD_STS_XACT (1 << 3) /* device gave illegal response */ -#define QTD_STS_MMF (1 << 2) /* incomplete split transaction */ -#define QTD_STS_STS (1 << 1) /* split transaction state */ -#define QTD_STS_PING (1 << 0) /* issue PING? 
*/ - -#define ACTIVE_BIT(fusbh200) cpu_to_hc32(fusbh200, QTD_STS_ACTIVE) -#define HALT_BIT(fusbh200) cpu_to_hc32(fusbh200, QTD_STS_HALT) -#define STATUS_BIT(fusbh200) cpu_to_hc32(fusbh200, QTD_STS_STS) - - __hc32 hw_buf [5]; /* see EHCI 3.5.4 */ - __hc32 hw_buf_hi [5]; /* Appendix B */ - - /* the rest is HCD-private */ - dma_addr_t qtd_dma; /* qtd address */ - struct list_head qtd_list; /* sw qtd list */ - struct urb *urb; /* qtd's urb */ - size_t length; /* length of buffer */ -} __attribute__ ((aligned (32))); - -/* mask NakCnt+T in qh->hw_alt_next */ -#define QTD_MASK(fusbh200) cpu_to_hc32 (fusbh200, ~0x1f) - -#define IS_SHORT_READ(token) (QTD_LENGTH (token) != 0 && QTD_PID (token) == 1) - -/*-------------------------------------------------------------------------*/ - -/* type tag from {qh,itd,fstn}->hw_next */ -#define Q_NEXT_TYPE(fusbh200,dma) ((dma) & cpu_to_hc32(fusbh200, 3 << 1)) - -/* - * Now the following defines are not converted using the - * cpu_to_le32() macro anymore, since we have to support - * "dynamic" switching between be and le support, so that the driver - * can be used on one system with SoC EHCI controller using big-endian - * descriptors as well as a normal little-endian PCI EHCI controller. - */ -/* values for that type tag */ -#define Q_TYPE_ITD (0 << 1) -#define Q_TYPE_QH (1 << 1) -#define Q_TYPE_SITD (2 << 1) -#define Q_TYPE_FSTN (3 << 1) - -/* next async queue entry, or pointer to interrupt/periodic QH */ -#define QH_NEXT(fusbh200,dma) (cpu_to_hc32(fusbh200, (((u32)dma)&~0x01f)|Q_TYPE_QH)) - -/* for periodic/async schedules and qtd lists, mark end of list */ -#define FUSBH200_LIST_END(fusbh200) cpu_to_hc32(fusbh200, 1) /* "null pointer" to hw */ - -/* - * Entries in periodic shadow table are pointers to one of four kinds - * of data structure. That's dictated by the hardware; a type tag is - * encoded in the low bits of the hardware's periodic schedule. Use - * Q_NEXT_TYPE to get the tag. - * - * For entries in the async schedule, the type tag always says "qh". - */ -union fusbh200_shadow { - struct fusbh200_qh *qh; /* Q_TYPE_QH */ - struct fusbh200_itd *itd; /* Q_TYPE_ITD */ - struct fusbh200_fstn *fstn; /* Q_TYPE_FSTN */ - __hc32 *hw_next; /* (all types) */ - void *ptr; -}; - -/*-------------------------------------------------------------------------*/ - -/* - * EHCI Specification 0.95 Section 3.6 - * QH: describes control/bulk/interrupt endpoints - * See Fig 3-7 "Queue Head Structure Layout". - * - * These appear in both the async and (for interrupt) periodic schedules. 
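The periodic-schedule macros above encode a type tag in bits 2:1 of each hardware link pointer and use bit 0 as the end-of-list marker, with the 32-byte-aligned DMA address in the remaining bits. The short sketch below shows that encoding and decoding; it assumes a little-endian host and omits the cpu_to_hc32() byte-swap the driver applies.

/*
 * Sketch of the link-pointer layout used by QH_NEXT() / Q_NEXT_TYPE()
 * above: low bit terminates the list, bits 2:1 carry the Q_TYPE_* tag,
 * and the (32-byte aligned) DMA address fills the rest.
 */
#include <stdio.h>
#include <stdint.h>

#define Q_TYPE_ITD        (0u << 1)
#define Q_TYPE_QH         (1u << 1)
#define LIST_END          1u              /* "null pointer" to the HC */

static uint32_t qh_next(uint32_t dma)
{
        return (dma & ~0x1fu) | Q_TYPE_QH;        /* as QH_NEXT() does */
}

static uint32_t q_next_type(uint32_t link)
{
        return link & (3u << 1);                  /* as Q_NEXT_TYPE() does */
}

int main(void)
{
        uint32_t link = qh_next(0x1f002040);

        printf("link 0x%08x type %s end-of-list %d\n", (unsigned)link,
               q_next_type(link) == Q_TYPE_QH ? "QH" : "other",
               (int)(link & LIST_END));
        return 0;
}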
- */ - -/* first part defined by EHCI spec */ -struct fusbh200_qh_hw { - __hc32 hw_next; /* see EHCI 3.6.1 */ - __hc32 hw_info1; /* see EHCI 3.6.2 */ -#define QH_CONTROL_EP (1 << 27) /* FS/LS control endpoint */ -#define QH_HEAD (1 << 15) /* Head of async reclamation list */ -#define QH_TOGGLE_CTL (1 << 14) /* Data toggle control */ -#define QH_HIGH_SPEED (2 << 12) /* Endpoint speed */ -#define QH_LOW_SPEED (1 << 12) -#define QH_FULL_SPEED (0 << 12) -#define QH_INACTIVATE (1 << 7) /* Inactivate on next transaction */ - __hc32 hw_info2; /* see EHCI 3.6.2 */ -#define QH_SMASK 0x000000ff -#define QH_CMASK 0x0000ff00 -#define QH_HUBADDR 0x007f0000 -#define QH_HUBPORT 0x3f800000 -#define QH_MULT 0xc0000000 - __hc32 hw_current; /* qtd list - see EHCI 3.6.4 */ - - /* qtd overlay (hardware parts of a struct fusbh200_qtd) */ - __hc32 hw_qtd_next; - __hc32 hw_alt_next; - __hc32 hw_token; - __hc32 hw_buf [5]; - __hc32 hw_buf_hi [5]; -} __attribute__ ((aligned(32))); - -struct fusbh200_qh { - struct fusbh200_qh_hw *hw; /* Must come first */ - /* the rest is HCD-private */ - dma_addr_t qh_dma; /* address of qh */ - union fusbh200_shadow qh_next; /* ptr to qh; or periodic */ - struct list_head qtd_list; /* sw qtd list */ - struct list_head intr_node; /* list of intr QHs */ - struct fusbh200_qtd *dummy; - struct fusbh200_qh *unlink_next; /* next on unlink list */ - - unsigned unlink_cycle; - - u8 needs_rescan; /* Dequeue during giveback */ - u8 qh_state; -#define QH_STATE_LINKED 1 /* HC sees this */ -#define QH_STATE_UNLINK 2 /* HC may still see this */ -#define QH_STATE_IDLE 3 /* HC doesn't see this */ -#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on unlink q */ -#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */ - - u8 xacterrs; /* XactErr retry counter */ -#define QH_XACTERR_MAX 32 /* XactErr retry limit */ - - /* periodic schedule info */ - u8 usecs; /* intr bandwidth */ - u8 gap_uf; /* uframes split/csplit gap */ - u8 c_usecs; /* ... split completion bw */ - u16 tt_usecs; /* tt downstream bandwidth */ - unsigned short period; /* polling interval */ - unsigned short start; /* where polling starts */ -#define NO_FRAME ((unsigned short)~0) /* pick new start */ - - struct usb_device *dev; /* access to TT */ - unsigned is_out:1; /* bulk or intr OUT */ - unsigned clearing_tt:1; /* Clear-TT-Buf in progress */ -}; - -/*-------------------------------------------------------------------------*/ - -/* description of one iso transaction (up to 3 KB data if highspeed) */ -struct fusbh200_iso_packet { - /* These will be copied to iTD when scheduling */ - u64 bufp; /* itd->hw_bufp{,_hi}[pg] |= */ - __hc32 transaction; /* itd->hw_transaction[i] |= */ - u8 cross; /* buf crosses pages */ - /* for full speed OUT splits */ - u32 buf1; -}; - -/* temporary schedule data for packets from iso urbs (both speeds) - * each packet is one logical usb transaction to the device (not TT), - * beginning at stream->next_uframe - */ -struct fusbh200_iso_sched { - struct list_head td_list; - unsigned span; - struct fusbh200_iso_packet packet [0]; -}; - -/* - * fusbh200_iso_stream - groups all (s)itds for this endpoint. - * acts like a qh would, if EHCI had them for ISO. 
- */ -struct fusbh200_iso_stream { - /* first field matches fusbh200_hq, but is NULL */ - struct fusbh200_qh_hw *hw; - - u8 bEndpointAddress; - u8 highspeed; - struct list_head td_list; /* queued itds */ - struct list_head free_list; /* list of unused itds */ - struct usb_device *udev; - struct usb_host_endpoint *ep; - - /* output of (re)scheduling */ - int next_uframe; - __hc32 splits; - - /* the rest is derived from the endpoint descriptor, - * trusting urb->interval == f(epdesc->bInterval) and - * including the extra info for hw_bufp[0..2] - */ - u8 usecs, c_usecs; - u16 interval; - u16 tt_usecs; - u16 maxp; - u16 raw_mask; - unsigned bandwidth; - - /* This is used to initialize iTD's hw_bufp fields */ - __hc32 buf0; - __hc32 buf1; - __hc32 buf2; - - /* this is used to initialize sITD's tt info */ - __hc32 address; -}; - -/*-------------------------------------------------------------------------*/ - -/* - * EHCI Specification 0.95 Section 3.3 - * Fig 3-4 "Isochronous Transaction Descriptor (iTD)" - * - * Schedule records for high speed iso xfers - */ -struct fusbh200_itd { - /* first part defined by EHCI spec */ - __hc32 hw_next; /* see EHCI 3.3.1 */ - __hc32 hw_transaction [8]; /* see EHCI 3.3.2 */ -#define FUSBH200_ISOC_ACTIVE (1<<31) /* activate transfer this slot */ -#define FUSBH200_ISOC_BUF_ERR (1<<30) /* Data buffer error */ -#define FUSBH200_ISOC_BABBLE (1<<29) /* babble detected */ -#define FUSBH200_ISOC_XACTERR (1<<28) /* XactErr - transaction error */ -#define FUSBH200_ITD_LENGTH(tok) (((tok)>>16) & 0x0fff) -#define FUSBH200_ITD_IOC (1 << 15) /* interrupt on complete */ - -#define ITD_ACTIVE(fusbh200) cpu_to_hc32(fusbh200, FUSBH200_ISOC_ACTIVE) - - __hc32 hw_bufp [7]; /* see EHCI 3.3.3 */ - __hc32 hw_bufp_hi [7]; /* Appendix B */ - - /* the rest is HCD-private */ - dma_addr_t itd_dma; /* for this itd */ - union fusbh200_shadow itd_next; /* ptr to periodic q entry */ - - struct urb *urb; - struct fusbh200_iso_stream *stream; /* endpoint's queue */ - struct list_head itd_list; /* list of stream's itds */ - - /* any/all hw_transactions here may be used by that urb */ - unsigned frame; /* where scheduled */ - unsigned pg; - unsigned index[8]; /* in urb->iso_frame_desc */ -} __attribute__ ((aligned (32))); - -/*-------------------------------------------------------------------------*/ - -/* - * EHCI Specification 0.96 Section 3.7 - * Periodic Frame Span Traversal Node (FSTN) - * - * Manages split interrupt transactions (using TT) that span frame boundaries - * into uframes 0/1; see 4.12.2.2. In those uframes, a "save place" FSTN - * makes the HC jump (back) to a QH to scan for fs/ls QH completions until - * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work. 
- */ -struct fusbh200_fstn { - __hc32 hw_next; /* any periodic q entry */ - __hc32 hw_prev; /* qh or FUSBH200_LIST_END */ - - /* the rest is HCD-private */ - dma_addr_t fstn_dma; - union fusbh200_shadow fstn_next; /* ptr to periodic q entry */ -} __attribute__ ((aligned (32))); - -/*-------------------------------------------------------------------------*/ - -/* Prepare the PORTSC wakeup flags during controller suspend/resume */ - -#define fusbh200_prepare_ports_for_controller_suspend(fusbh200, do_wakeup) \ - fusbh200_adjust_port_wakeup_flags(fusbh200, true, do_wakeup); - -#define fusbh200_prepare_ports_for_controller_resume(fusbh200) \ - fusbh200_adjust_port_wakeup_flags(fusbh200, false, false); - -/*-------------------------------------------------------------------------*/ - -/* - * Some EHCI controllers have a Transaction Translator built into the - * root hub. This is a non-standard feature. Each controller will need - * to add code to the following inline functions, and call them as - * needed (mostly in root hub code). - */ - -static inline unsigned int -fusbh200_get_speed(struct fusbh200_hcd *fusbh200, unsigned int portsc) -{ - return (readl(&fusbh200->regs->bmcsr) - & BMCSR_HOST_SPD_TYP) >> 9; -} - -/* Returns the speed of a device attached to a port on the root hub. */ -static inline unsigned int -fusbh200_port_speed(struct fusbh200_hcd *fusbh200, unsigned int portsc) -{ - switch (fusbh200_get_speed(fusbh200, portsc)) { - case 0: - return 0; - case 1: - return USB_PORT_STAT_LOW_SPEED; - case 2: - default: - return USB_PORT_STAT_HIGH_SPEED; - } -} - -/*-------------------------------------------------------------------------*/ - -#define fusbh200_has_fsl_portno_bug(e) (0) - -/* - * While most USB host controllers implement their registers in - * little-endian format, a minority (celleb companion chip) implement - * them in big endian format. - * - * This attempts to support either format at compile time without a - * runtime penalty, or both formats with the additional overhead - * of checking a flag bit. - * - */ - -#define fusbh200_big_endian_mmio(e) 0 -#define fusbh200_big_endian_capbase(e) 0 - -static inline unsigned int fusbh200_readl(const struct fusbh200_hcd *fusbh200, - __u32 __iomem * regs) -{ - return readl(regs); -} - -static inline void fusbh200_writel(const struct fusbh200_hcd *fusbh200, - const unsigned int val, __u32 __iomem *regs) -{ - writel(val, regs); -} - -/* cpu to fusbh200 */ -static inline __hc32 cpu_to_hc32 (const struct fusbh200_hcd *fusbh200, const u32 x) -{ - return cpu_to_le32(x); -} - -/* fusbh200 to cpu */ -static inline u32 hc32_to_cpu (const struct fusbh200_hcd *fusbh200, const __hc32 x) -{ - return le32_to_cpu(x); -} - -static inline u32 hc32_to_cpup (const struct fusbh200_hcd *fusbh200, const __hc32 *x) -{ - return le32_to_cpup(x); -} - -/*-------------------------------------------------------------------------*/ - -static inline unsigned fusbh200_read_frame_index(struct fusbh200_hcd *fusbh200) -{ - return fusbh200_readl(fusbh200, &fusbh200->regs->frame_index); -} - -#define fusbh200_itdlen(urb, desc, t) ({ \ - usb_pipein((urb)->pipe) ? 
\ - (desc)->length - FUSBH200_ITD_LENGTH(t) : \ - FUSBH200_ITD_LENGTH(t); \ -}) -/*-------------------------------------------------------------------------*/ - -#endif /* __LINUX_FUSBH200_H */ diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c index 707437c88d03..56478ed2f932 100644 --- a/drivers/usb/host/ohci-spear.c +++ b/drivers/usb/host/ohci-spear.c @@ -161,6 +161,7 @@ static const struct of_device_id spear_ohci_id_table[] = { { .compatible = "st,spear600-ohci", }, { }, }; +MODULE_DEVICE_TABLE(of, spear_ohci_id_table); /* Driver definition to register with the platform bus */ static struct platform_driver spear_ohci_hcd_driver = { diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c index a67bd5090330..0a94895a358d 100644 --- a/drivers/usb/host/u132-hcd.c +++ b/drivers/usb/host/u132-hcd.c @@ -2245,8 +2245,7 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, struct u132 *u132 = hcd_to_u132(hcd); if (irqs_disabled()) { if (__GFP_WAIT & mem_flags) { - printk(KERN_ERR "invalid context for function that migh" - "t sleep\n"); + printk(KERN_ERR "invalid context for function that might sleep\n"); return -EINVAL; } } diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c index 3a3e3eeba291..32a6f3d8deec 100644 --- a/drivers/usb/host/uhci-platform.c +++ b/drivers/usb/host/uhci-platform.c @@ -140,6 +140,7 @@ static const struct of_device_id platform_uhci_ids[] = { { .compatible = "platform-uhci", }, {} }; +MODULE_DEVICE_TABLE(of, platform_uhci_ids); static struct platform_driver uhci_platform_driver = { .probe = uhci_hcd_platform_probe, diff --git a/drivers/usb/host/whci/init.c b/drivers/usb/host/whci/init.c index d3e13b640d4b..e36372393bb1 100644 --- a/drivers/usb/host/whci/init.c +++ b/drivers/usb/host/whci/init.c @@ -175,8 +175,7 @@ void whc_clean_up(struct whc *whc) pzl_clean_up(whc); asl_clean_up(whc); - if (whc->qset_pool) - dma_pool_destroy(whc->qset_pool); + dma_pool_destroy(whc->qset_pool); len = resource_size(&whc->umc->resource); if (whc->base) diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index 2d16faefb429..74c42f722678 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c @@ -58,16 +58,17 @@ void xhci_dbg_regs(struct xhci_hcd *xhci) static void xhci_print_cap_regs(struct xhci_hcd *xhci) { u32 temp; + u32 hci_version; xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs); temp = readl(&xhci->cap_regs->hc_capbase); + hci_version = HC_VERSION(temp); xhci_dbg(xhci, "CAPLENGTH AND HCIVERSION 0x%x:\n", (unsigned int) temp); xhci_dbg(xhci, "CAPLENGTH: 0x%x\n", (unsigned int) HC_LENGTH(temp)); - xhci_dbg(xhci, "HCIVERSION: 0x%x\n", - (unsigned int) HC_VERSION(temp)); + xhci_dbg(xhci, "HCIVERSION: 0x%x\n", hci_version); temp = readl(&xhci->cap_regs->hcs_params1); xhci_dbg(xhci, "HCSPARAMS 1: 0x%x\n", @@ -108,6 +109,18 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci) temp = readl(&xhci->cap_regs->run_regs_off); xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK); + + /* xhci 1.1 controllers have the HCCPARAMS2 register */ + if (hci_version > 100) { + temp = readl(&xhci->cap_regs->hcc_params2); + xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp); + xhci_dbg(xhci, " HC %s Force save context capability", + HCC2_FSC(temp) ? "supports" : "doesn't support"); + xhci_dbg(xhci, " HC %s Large ESIT Payload Capability", + HCC2_LEC(temp) ? "supports" : "doesn't support"); + xhci_dbg(xhci, " HC %s Extended TBC capability", + HCC2_ETC(temp) ? 
"supports" : "doesn't support"); + } } static void xhci_print_command_reg(struct xhci_hcd *xhci) diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 78241b5550df..5d2d7e954bd4 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -31,13 +31,15 @@ #define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \ PORT_RC | PORT_PLC | PORT_PE) -/* USB 3.0 BOS descriptor and a capability descriptor, combined */ +/* USB 3 BOS descriptor and a capability descriptors, combined. + * Fields will be adjusted and added later in xhci_create_usb3_bos_desc() + */ static u8 usb_bos_descriptor [] = { USB_DT_BOS_SIZE, /* __u8 bLength, 5 bytes */ USB_DT_BOS, /* __u8 bDescriptorType */ 0x0F, 0x00, /* __le16 wTotalLength, 15 bytes */ 0x1, /* __u8 bNumDeviceCaps */ - /* First device capability */ + /* First device capability, SuperSpeed */ USB_DT_USB_SS_CAP_SIZE, /* __u8 bLength, 10 bytes */ USB_DT_DEVICE_CAPABILITY, /* Device Capability */ USB_SS_CAP_TYPE, /* bDevCapabilityType, SUPERSPEED_USB */ @@ -46,9 +48,108 @@ static u8 usb_bos_descriptor [] = { 0x03, /* bFunctionalitySupport, USB 3.0 speed only */ 0x00, /* bU1DevExitLat, set later. */ - 0x00, 0x00 /* __le16 bU2DevExitLat, set later. */ + 0x00, 0x00, /* __le16 bU2DevExitLat, set later. */ + /* Second device capability, SuperSpeedPlus */ + 0x0c, /* bLength 12, will be adjusted later */ + USB_DT_DEVICE_CAPABILITY, /* Device Capability */ + USB_SSP_CAP_TYPE, /* bDevCapabilityType SUPERSPEED_PLUS */ + 0x00, /* bReserved 0 */ + 0x00, 0x00, 0x00, 0x00, /* bmAttributes, get from xhci psic */ + 0x00, 0x00, /* wFunctionalitySupport */ + 0x00, 0x00, /* wReserved 0 */ + /* Sublink Speed Attributes are added in xhci_create_usb3_bos_desc() */ }; +static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, + u16 wLength) +{ + int i, ssa_count; + u32 temp; + u16 desc_size, ssp_cap_size, ssa_size = 0; + bool usb3_1 = false; + + desc_size = USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE; + ssp_cap_size = sizeof(usb_bos_descriptor) - desc_size; + + /* does xhci support USB 3.1 Enhanced SuperSpeed */ + if (xhci->usb3_rhub.min_rev >= 0x01 && xhci->usb3_rhub.psi_uid_count) { + /* two SSA entries for each unique PSI ID, one RX and one TX */ + ssa_count = xhci->usb3_rhub.psi_uid_count * 2; + ssa_size = ssa_count * sizeof(u32); + desc_size += ssp_cap_size; + usb3_1 = true; + } + memcpy(buf, &usb_bos_descriptor, min(desc_size, wLength)); + + if (usb3_1) { + /* modify bos descriptor bNumDeviceCaps and wTotalLength */ + buf[4] += 1; + put_unaligned_le16(desc_size + ssa_size, &buf[2]); + } + + if (wLength < USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE) + return wLength; + + /* Indicate whether the host has LTM support. */ + temp = readl(&xhci->cap_regs->hcc_params); + if (HCC_LTC(temp)) + buf[8] |= USB_LTM_SUPPORT; + + /* Set the U1 and U2 exit latencies. 
*/ + if ((xhci->quirks & XHCI_LPM_SUPPORT)) { + temp = readl(&xhci->cap_regs->hcs_params3); + buf[12] = HCS_U1_LATENCY(temp); + put_unaligned_le16(HCS_U2_LATENCY(temp), &buf[13]); + } + + if (usb3_1) { + u32 ssp_cap_base, bm_attrib, psi; + int offset; + + ssp_cap_base = USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE; + + if (wLength < desc_size) + return wLength; + buf[ssp_cap_base] = ssp_cap_size + ssa_size; + + /* attribute count SSAC bits 4:0 and ID count SSIC bits 8:5 */ + bm_attrib = (ssa_count - 1) & 0x1f; + bm_attrib |= (xhci->usb3_rhub.psi_uid_count - 1) << 5; + put_unaligned_le32(bm_attrib, &buf[ssp_cap_base + 4]); + + if (wLength < desc_size + ssa_size) + return wLength; + /* + * Create the Sublink Speed Attributes (SSA) array. + * The xhci PSI field and USB 3.1 SSA fields are very similar, + * but link type bits 7:6 differ for values 01b and 10b. + * xhci has also only one PSI entry for a symmetric link when + * USB 3.1 requires two SSA entries (RX and TX) for every link + */ + offset = desc_size; + for (i = 0; i < xhci->usb3_rhub.psi_count; i++) { + psi = xhci->usb3_rhub.psi[i]; + psi &= ~USB_SSP_SUBLINK_SPEED_RSVD; + if ((psi & PLT_MASK) == PLT_SYM) { + /* Symmetric, create SSA RX and TX from one PSI entry */ + put_unaligned_le32(psi, &buf[offset]); + psi |= 1 << 7; /* turn entry to TX */ + offset += 4; + if (offset >= desc_size + ssa_size) + return desc_size + ssa_size; + } else if ((psi & PLT_MASK) == PLT_ASYM_RX) { + /* Asymetric RX, flip bits 7:6 for SSA */ + psi ^= PLT_MASK; + } + put_unaligned_le32(psi, &buf[offset]); + offset += 4; + if (offset >= desc_size + ssa_size) + return desc_size + ssa_size; + } + } + /* ssa_size is 0 for other than usb 3.1 hosts */ + return desc_size + ssa_size; +} static void xhci_common_hub_descriptor(struct xhci_hcd *xhci, struct usb_hub_descriptor *desc, int ports) @@ -161,7 +262,7 @@ static void xhci_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci, struct usb_hub_descriptor *desc) { - if (hcd->speed == HCD_USB3) + if (hcd->speed >= HCD_USB3) xhci_usb3_hub_descriptor(hcd, xhci, desc); else xhci_usb2_hub_descriptor(hcd, xhci, desc); @@ -250,7 +351,7 @@ int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci, if (!xhci->devs[i]) continue; speed = xhci->devs[i]->udev->speed; - if (((speed == USB_SPEED_SUPER) == (hcd->speed == HCD_USB3)) + if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3)) && xhci->devs[i]->fake_port == port) { slot_id = i; break; @@ -339,7 +440,7 @@ static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci, u16 wIndex, __le32 __iomem *addr, u32 port_status) { /* Don't allow the USB core to disable SuperSpeed ports. 
*/ - if (hcd->speed == HCD_USB3) { + if (hcd->speed >= HCD_USB3) { xhci_dbg(xhci, "Ignoring request to disable " "SuperSpeed port.\n"); return; @@ -407,7 +508,7 @@ static int xhci_get_ports(struct usb_hcd *hcd, __le32 __iomem ***port_array) int max_ports; struct xhci_hcd *xhci = hcd_to_xhci(hcd); - if (hcd->speed == HCD_USB3) { + if (hcd->speed >= HCD_USB3) { max_ports = xhci->num_usb3_ports; *port_array = xhci->usb3_ports; } else { @@ -558,6 +659,22 @@ static void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, } } +static u32 xhci_get_ext_port_status(u32 raw_port_status, u32 port_li) +{ + u32 ext_stat = 0; + int speed_id; + + /* only support rx and tx lane counts of 1 in usb3.1 spec */ + speed_id = DEV_PORT_SPEED(raw_port_status); + ext_stat |= speed_id; /* bits 3:0, RX speed id */ + ext_stat |= speed_id << 4; /* bits 7:4, TX speed id */ + + ext_stat |= PORT_RX_LANES(port_li) << 8; /* bits 11:8 Rx lane count */ + ext_stat |= PORT_TX_LANES(port_li) << 12; /* bits 15:12 Tx lane count */ + + return ext_stat; +} + /* * Converts a raw xHCI port status into the format that external USB 2.0 or USB * 3.0 hubs use. @@ -590,7 +707,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, if ((raw_port_status & PORT_RC)) status |= USB_PORT_STAT_C_RESET << 16; /* USB3.0 only */ - if (hcd->speed == HCD_USB3) { + if (hcd->speed >= HCD_USB3) { /* Port link change with port in resume state should not be * reported to usbcore, as this is an internal state to be * handled by xhci driver. Reporting PLC to usbcore may @@ -606,13 +723,13 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, status |= USB_PORT_STAT_C_CONFIG_ERROR << 16; } - if (hcd->speed != HCD_USB3) { + if (hcd->speed < HCD_USB3) { if ((raw_port_status & PORT_PLS_MASK) == XDEV_U3 && (raw_port_status & PORT_POWER)) status |= USB_PORT_STAT_SUSPEND; } if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME && - !DEV_SUPERSPEED(raw_port_status)) { + !DEV_SUPERSPEED_ANY(raw_port_status)) { if ((raw_port_status & PORT_RESET) || !(raw_port_status & PORT_PE)) return 0xffffffff; @@ -669,7 +786,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, && (raw_port_status & PORT_POWER) && (bus_state->suspended_ports & (1 << wIndex))) { bus_state->suspended_ports &= ~(1 << wIndex); - if (hcd->speed != HCD_USB3) + if (hcd->speed < HCD_USB3) bus_state->port_c_suspend |= 1 << wIndex; } if (raw_port_status & PORT_CONNECT) { @@ -683,13 +800,13 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, if (raw_port_status & PORT_RESET) status |= USB_PORT_STAT_RESET; if (raw_port_status & PORT_POWER) { - if (hcd->speed == HCD_USB3) + if (hcd->speed >= HCD_USB3) status |= USB_SS_PORT_STAT_POWER; else status |= USB_PORT_STAT_POWER; } /* Update Port Link State */ - if (hcd->speed == HCD_USB3) { + if (hcd->speed >= HCD_USB3) { xhci_hub_report_usb3_link_state(xhci, &status, raw_port_status); /* * Verify if all USB3 Ports Have entered U0 already. @@ -734,7 +851,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, * descriptor for the USB 3.0 roothub. If not, we stall the * endpoint, like external hubs do. */ - if (hcd->speed == HCD_USB3 && + if (hcd->speed >= HCD_USB3 && (wLength < USB_DT_SS_HUB_SIZE || wValue != (USB_DT_SS_HUB << 8))) { xhci_dbg(xhci, "Wrong hub descriptor type for " @@ -748,25 +865,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, if ((wValue & 0xff00) != (USB_DT_BOS << 8)) goto error; - if (hcd->speed != HCD_USB3) + if (hcd->speed < HCD_USB3) goto error; - /* Set the U1 and U2 exit latencies. 
*/ - memcpy(buf, &usb_bos_descriptor, - USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE); - if ((xhci->quirks & XHCI_LPM_SUPPORT)) { - temp = readl(&xhci->cap_regs->hcs_params3); - buf[12] = HCS_U1_LATENCY(temp); - put_unaligned_le16(HCS_U2_LATENCY(temp), &buf[13]); - } - - /* Indicate whether the host has LTM support. */ - temp = readl(&xhci->cap_regs->hcc_params); - if (HCC_LTC(temp)) - buf[8] |= USB_LTM_SUPPORT; - + retval = xhci_create_usb3_bos_desc(xhci, buf, wLength); spin_unlock_irqrestore(&xhci->lock, flags); - return USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE; + return retval; case GetPortStatus: if (!wIndex || wIndex > max_ports) goto error; @@ -786,6 +890,19 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, xhci_dbg(xhci, "Get port status returned 0x%x\n", status); put_unaligned(cpu_to_le32(status), (__le32 *) buf); + /* if USB 3.1 extended port status return additional 4 bytes */ + if (wValue == 0x02) { + u32 port_li; + + if (hcd->speed < HCD_USB31 || wLength != 8) { + xhci_err(xhci, "get ext port status invalid parameter\n"); + retval = -EINVAL; + break; + } + port_li = readl(port_array[wIndex] + PORTLI); + status = xhci_get_ext_port_status(temp, port_li); + put_unaligned_le32(cpu_to_le32(status), &buf[4]); + } break; case SetPortFeature: if (wValue == USB_PORT_FEAT_LINK_STATE) @@ -952,7 +1069,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, temp = readl(port_array[wIndex]); break; case USB_PORT_FEAT_U1_TIMEOUT: - if (hcd->speed != HCD_USB3) + if (hcd->speed < HCD_USB3) goto error; temp = readl(port_array[wIndex] + PORTPMSC); temp &= ~PORT_U1_TIMEOUT_MASK; @@ -960,7 +1077,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, writel(temp, port_array[wIndex] + PORTPMSC); break; case USB_PORT_FEAT_U2_TIMEOUT: - if (hcd->speed != HCD_USB3) + if (hcd->speed < HCD_USB3) goto error; temp = readl(port_array[wIndex] + PORTPMSC); temp &= ~PORT_U2_TIMEOUT_MASK; @@ -1223,14 +1340,14 @@ int xhci_bus_resume(struct usb_hcd *hcd) u32 temp; temp = readl(port_array[port_index]); - if (DEV_SUPERSPEED(temp)) + if (DEV_SUPERSPEED_ANY(temp)) temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS); else temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); if (test_bit(port_index, &bus_state->bus_suspended) && (temp & PORT_PLS_MASK)) { set_bit(port_index, &port_was_suspended); - if (!DEV_SUPERSPEED(temp)) { + if (!DEV_SUPERSPEED_ANY(temp)) { xhci_set_link_state(xhci, port_array, port_index, XDEV_RESUME); need_usb2_u3_exit = true; diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 41f841fa6c4d..c48cbe731356 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1828,24 +1828,20 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) for (i = 1; i < MAX_HC_SLOTS; ++i) xhci_free_virt_device(xhci, i); - if (xhci->segment_pool) - dma_pool_destroy(xhci->segment_pool); + dma_pool_destroy(xhci->segment_pool); xhci->segment_pool = NULL; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool"); - if (xhci->device_pool) - dma_pool_destroy(xhci->device_pool); + dma_pool_destroy(xhci->device_pool); xhci->device_pool = NULL; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool"); - if (xhci->small_streams_pool) - dma_pool_destroy(xhci->small_streams_pool); + dma_pool_destroy(xhci->small_streams_pool); xhci->small_streams_pool = NULL; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed small stream array pool"); - if (xhci->medium_streams_pool) - dma_pool_destroy(xhci->medium_streams_pool); + 
dma_pool_destroy(xhci->medium_streams_pool); xhci->medium_streams_pool = NULL; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed medium stream array pool"); @@ -2072,14 +2068,23 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, { u32 temp, port_offset, port_count; int i; + struct xhci_hub *rhub; - if (major_revision > 0x03) { + temp = readl(addr); + + if (XHCI_EXT_PORT_MAJOR(temp) == 0x03) { + rhub = &xhci->usb3_rhub; + } else if (XHCI_EXT_PORT_MAJOR(temp) <= 0x02) { + rhub = &xhci->usb2_rhub; + } else { xhci_warn(xhci, "Ignoring unknown port speed, " "Ext Cap %p, revision = 0x%x\n", addr, major_revision); /* Ignoring port protocol we can't understand. FIXME */ return; } + rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp); + rhub->min_rev = XHCI_EXT_PORT_MINOR(temp); /* Port offset and count in the third dword, see section 7.2 */ temp = readl(addr + 2); @@ -2094,6 +2099,33 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, /* WTF? "Valid values are ‘1’ to MaxPorts" */ return; + rhub->psi_count = XHCI_EXT_PORT_PSIC(temp); + if (rhub->psi_count) { + rhub->psi = kcalloc(rhub->psi_count, sizeof(*rhub->psi), + GFP_KERNEL); + if (!rhub->psi) + rhub->psi_count = 0; + + rhub->psi_uid_count++; + for (i = 0; i < rhub->psi_count; i++) { + rhub->psi[i] = readl(addr + 4 + i); + + /* count unique ID values, two consecutive entries can + * have the same ID if link is assymetric + */ + if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) != + XHCI_EXT_PORT_PSIV(rhub->psi[i - 1]))) + rhub->psi_uid_count++; + + xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n", + XHCI_EXT_PORT_PSIV(rhub->psi[i]), + XHCI_EXT_PORT_PSIE(rhub->psi[i]), + XHCI_EXT_PORT_PLT(rhub->psi[i]), + XHCI_EXT_PORT_PFD(rhub->psi[i]), + XHCI_EXT_PORT_LP(rhub->psi[i]), + XHCI_EXT_PORT_PSIM(rhub->psi[i])); + } + } /* cache usb2 port capabilities */ if (major_revision < 0x03 && xhci->num_ext_caps < max_caps) xhci->ext_caps[xhci->num_ext_caps++] = temp; diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index c79d33676672..012d7f4c2901 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -200,15 +200,17 @@ static int xhci_pci_setup(struct usb_hcd *hcd) struct pci_dev *pdev = to_pci_dev(hcd->self.controller); int retval; + xhci = hcd_to_xhci(hcd); + if (!xhci->sbrn) + pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn); + retval = xhci_gen_setup(hcd, xhci_pci_quirks); if (retval) return retval; - xhci = hcd_to_xhci(hcd); if (!usb_hcd_is_primary_hcd(hcd)) return 0; - pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn); xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn); /* Find any debug ports */ diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 890ad9d9d329..05647e6753cd 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -19,6 +19,7 @@ #include <linux/usb/phy.h> #include <linux/slab.h> #include <linux/usb/xhci_pdriver.h> +#include <linux/acpi.h> #include "xhci.h" #include "xhci-mvebu.h" @@ -93,14 +94,20 @@ static int xhci_plat_probe(struct platform_device *pdev) if (irq < 0) return -ENODEV; - /* Initialize dma_mask and coherent_dma_mask to 32-bits */ - ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (ret) - return ret; - if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + /* Try to set 64-bit DMA first */ + if (WARN_ON(!pdev->dev.dma_mask)) + /* Platform did not initialize dma_mask */ + ret = dma_coerce_mask_and_coherent(&pdev->dev, 
+ DMA_BIT_MASK(64)); else - dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + + /* If seting 64-bit DMA mask fails, fall back to 32-bit DMA mask */ + if (ret) { + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) + return ret; + } hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) @@ -262,6 +269,13 @@ static const struct of_device_id usb_xhci_of_match[] = { MODULE_DEVICE_TABLE(of, usb_xhci_of_match); #endif +static const struct acpi_device_id usb_xhci_acpi_match[] = { + /* XHCI-compliant USB Controller */ + { "PNP0D10", }, + { } +}; +MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match); + static struct platform_driver usb_xhci_driver = { .probe = xhci_plat_probe, .remove = xhci_plat_remove, @@ -269,6 +283,7 @@ static struct platform_driver usb_xhci_driver = { .name = "xhci-hcd", .pm = DEV_PM_OPS, .of_match_table = of_match_ptr(usb_xhci_of_match), + .acpi_match_table = ACPI_PTR(usb_xhci_acpi_match), }, }; MODULE_ALIAS("platform:xhci-hcd"); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 43291f93afeb..4c54ccc1583a 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1453,7 +1453,7 @@ static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd, * 1.1 ports are under the USB 2.0 hub. If the port speed * matches the device speed, it's a similar speed port. */ - if ((port_speed == 0x03) == (hcd->speed == HCD_USB3)) + if ((port_speed == 0x03) == (hcd->speed >= HCD_USB3)) num_similar_speed_ports++; } return num_similar_speed_ports; @@ -1515,7 +1515,7 @@ static void handle_port_status(struct xhci_hcd *xhci, /* Find the right roothub. */ hcd = xhci_to_hcd(xhci); - if ((major_revision == 0x03) != (hcd->speed == HCD_USB3)) + if ((major_revision == 0x03) != (hcd->speed >= HCD_USB3)) hcd = xhci->shared_hcd; if (major_revision == 0) { @@ -1541,7 +1541,7 @@ static void handle_port_status(struct xhci_hcd *xhci, * correct bus_state structure. */ bus_state = &xhci->bus_state[hcd_index(hcd)]; - if (hcd->speed == HCD_USB3) + if (hcd->speed >= HCD_USB3) port_array = xhci->usb3_ports; else port_array = xhci->usb2_ports; @@ -1555,7 +1555,7 @@ static void handle_port_status(struct xhci_hcd *xhci, usb_hcd_resume_root_hub(hcd); } - if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE) + if (hcd->speed >= HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE) bus_state->port_remote_wakeup &= ~(1 << faked_port_index); if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) { @@ -1567,7 +1567,7 @@ static void handle_port_status(struct xhci_hcd *xhci, goto cleanup; } - if (DEV_SUPERSPEED(temp)) { + if (DEV_SUPERSPEED_ANY(temp)) { xhci_dbg(xhci, "remote wake SS port %d\n", port_id); /* Set a flag to say the port signaled remote wakeup, * so we can tell the difference between the end of @@ -1595,7 +1595,7 @@ static void handle_port_status(struct xhci_hcd *xhci, } if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 && - DEV_SUPERSPEED(temp)) { + DEV_SUPERSPEED_ANY(temp)) { xhci_dbg(xhci, "resume SS port %d finished\n", port_id); /* We've just brought the device into U0 through either the * Resume state after a device remote wakeup, or through the @@ -1625,7 +1625,7 @@ static void handle_port_status(struct xhci_hcd *xhci, * RExit to a disconnect state). If so, let the the driver know it's * out of the RExit state. 
*/ - if (!DEV_SUPERSPEED(temp) && + if (!DEV_SUPERSPEED_ANY(temp) && test_and_clear_bit(faked_port_index, &bus_state->rexit_ports)) { complete(&bus_state->rexit_done[faked_port_index]); @@ -1633,7 +1633,7 @@ static void handle_port_status(struct xhci_hcd *xhci, goto cleanup; } - if (hcd->speed != HCD_USB3) + if (hcd->speed < HCD_USB3) xhci_test_and_clear_bit(xhci, port_array, faked_port_index, PORT_PLC); @@ -3029,21 +3029,6 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, } /* - * The TD size is the number of bytes remaining in the TD (including this TRB), - * right shifted by 10. - * It must fit in bits 21:17, so it can't be bigger than 31. - */ -static u32 xhci_td_remainder(unsigned int remainder) -{ - u32 max = (1 << (21 - 17 + 1)) - 1; - - if ((remainder >> 10) >= max) - return max << 17; - else - return (remainder >> 10) << 17; -} - -/* * For xHCI 1.0 host controllers, TD size is the number of max packet sized * packets remaining in the TD (*not* including this TRB). * @@ -3055,30 +3040,36 @@ static u32 xhci_td_remainder(unsigned int remainder) * * TD size = total_packet_count - packets_transferred * - * It must fit in bits 21:17, so it can't be bigger than 31. + * For xHCI 0.96 and older, TD size field should be the remaining bytes + * including this TRB, right shifted by 10 + * + * For all hosts it must fit in bits 21:17, so it can't be bigger than 31. + * This is taken care of in the TRB_TD_SIZE() macro + * * The last TRB in a TD must have the TD size set to zero. */ -static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len, - unsigned int total_packet_count, struct urb *urb, - unsigned int num_trbs_left) +static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, + int trb_buff_len, unsigned int td_total_len, + struct urb *urb, unsigned int num_trbs_left) { - int packets_transferred; + u32 maxp, total_packet_count; + + if (xhci->hci_version < 0x100) + return ((td_total_len - transferred) >> 10); + + maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc)); + total_packet_count = DIV_ROUND_UP(td_total_len, maxp); /* One TRB with a zero-length data packet. */ - if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0)) + if (num_trbs_left == 0 || (transferred == 0 && trb_buff_len == 0) || + trb_buff_len == td_total_len) return 0; - /* All the TRB queueing functions don't count the current TRB in - * running_total. - */ - packets_transferred = (running_total + trb_buff_len) / - GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc)); - - if ((total_packet_count - packets_transferred) > 31) - return 31 << 17; - return (total_packet_count - packets_transferred) << 17; + /* Queueing functions don't count the current TRB into transferred */ + return (total_packet_count - ((transferred + trb_buff_len) / maxp)); } + static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index) { @@ -3200,17 +3191,12 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, } /* Set the TRB length, TD size, and interrupter fields. 
*/ - if (xhci->hci_version < 0x100) { - remainder = xhci_td_remainder( - urb->transfer_buffer_length - - running_total); - } else { - remainder = xhci_v1_0_td_remainder(running_total, - trb_buff_len, total_packet_count, urb, - num_trbs - 1); - } + remainder = xhci_td_remainder(xhci, running_total, trb_buff_len, + urb->transfer_buffer_length, + urb, num_trbs - 1); + length_field = TRB_LEN(trb_buff_len) | - remainder | + TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0); if (num_trbs > 1) @@ -3373,17 +3359,12 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, field |= TRB_ISP; /* Set the TRB length, TD size, and interrupter fields. */ - if (xhci->hci_version < 0x100) { - remainder = xhci_td_remainder( - urb->transfer_buffer_length - - running_total); - } else { - remainder = xhci_v1_0_td_remainder(running_total, - trb_buff_len, total_packet_count, urb, - num_trbs - 1); - } + remainder = xhci_td_remainder(xhci, running_total, trb_buff_len, + urb->transfer_buffer_length, + urb, num_trbs - 1); + length_field = TRB_LEN(trb_buff_len) | - remainder | + TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0); if (num_trbs > 1) @@ -3421,7 +3402,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct usb_ctrlrequest *setup; struct xhci_generic_trb *start_trb; int start_cycle; - u32 field, length_field; + u32 field, length_field, remainder; struct urb_priv *urb_priv; struct xhci_td *td; @@ -3494,9 +3475,15 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, else field = TRB_TYPE(TRB_DATA); + remainder = xhci_td_remainder(xhci, 0, + urb->transfer_buffer_length, + urb->transfer_buffer_length, + urb, 1); + length_field = TRB_LEN(urb->transfer_buffer_length) | - xhci_td_remainder(urb->transfer_buffer_length) | + TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0); + if (urb->transfer_buffer_length > 0) { if (setup->bRequestType & USB_DIR_IN) field |= TRB_DIR_IN; @@ -3825,17 +3812,12 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, trb_buff_len = td_remain_len; /* Set the TRB length, TD size, & interrupter fields. 
*/ - if (xhci->hci_version < 0x100) { - remainder = xhci_td_remainder( - td_len - running_total); - } else { - remainder = xhci_v1_0_td_remainder( - running_total, trb_buff_len, - total_packet_count, urb, - (trbs_per_td - j - 1)); - } + remainder = xhci_td_remainder(xhci, running_total, + trb_buff_len, td_len, + urb, trbs_per_td - j - 1); + length_field = TRB_LEN(trb_buff_len) | - remainder | + TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0); queue_trb(xhci, ep_ring, more_trbs_coming, diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 9957bd96d4bc..6e7dc6f93978 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3973,7 +3973,7 @@ int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1) __le32 __iomem *addr; int raw_port; - if (hcd->speed != HCD_USB3) + if (hcd->speed < HCD_USB3) addr = xhci->usb2_ports[port1 - 1]; else addr = xhci->usb3_ports[port1 - 1]; @@ -4124,7 +4124,7 @@ int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, int hird, exit_latency; int ret; - if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support || + if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || !udev->lpm_capable) return -EPERM; @@ -4241,7 +4241,7 @@ int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) struct xhci_hcd *xhci = hcd_to_xhci(hcd); int portnum = udev->portnum - 1; - if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support || + if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support || !udev->lpm_capable) return 0; @@ -4841,8 +4841,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) /* XHCI controllers don't stop the ep queue on short packets :| */ hcd->self.no_stop_on_short = 1; + xhci = hcd_to_xhci(hcd); + if (usb_hcd_is_primary_hcd(hcd)) { - xhci = hcd_to_xhci(hcd); xhci->main_hcd = hcd; /* Mark the first roothub as being USB 2.0. * The xHCI driver will register the USB 3.0 roothub. @@ -4856,6 +4857,10 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) */ hcd->has_tt = 1; } else { + if (xhci->sbrn == 0x31) { + xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n"); + hcd->speed = HCD_USB31; + } /* xHCI private pointer was set in xhci_pci_probe for the second * registered roothub. */ @@ -4875,6 +4880,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase); xhci->hci_version = HC_VERSION(xhci->hcc_params); xhci->hcc_params = readl(&xhci->cap_regs->hcc_params); + if (xhci->hci_version > 0x100) + xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); xhci_print_registers(xhci); xhci->quirks = quirks; @@ -4906,6 +4913,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) !dma_set_mask(dev, DMA_BIT_MASK(64))) { xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); + } else { + /* + * This is to avoid error in cases where a 32-bit USB + * controller is used on a 64-bit capable system. 
+ */ + retval = dma_set_mask(dev, DMA_BIT_MASK(32)); + if (retval) + return retval; + xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n"); + dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); } xhci_dbg(xhci, "Calling HCD init\n"); @@ -5020,7 +5037,7 @@ static int __init xhci_hcd_init(void) BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); - BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8); + BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8); BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index dbda41e91c84..be9048e2d4d4 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -29,6 +29,8 @@ #include <linux/kernel.h> #include <linux/usb/hcd.h> +#include <asm-generic/io-64-nonatomic-lo-hi.h> + /* Code sharing between pci-quirks and xhci hcd */ #include "xhci-ext-caps.h" #include "pci-quirks.h" @@ -56,6 +58,7 @@ * @hcc_params: HCCPARAMS - Capability Parameters * @db_off: DBOFF - Doorbell array offset * @run_regs_off: RTSOFF - Runtime register space offset + * @hcc_params2: HCCPARAMS2 Capability Parameters 2, xhci 1.1 only */ struct xhci_cap_regs { __le32 hc_capbase; @@ -65,6 +68,7 @@ struct xhci_cap_regs { __le32 hcc_params; __le32 db_off; __le32 run_regs_off; + __le32 hcc_params2; /* xhci 1.1 */ /* Reserved up to (CAPLENGTH - 0x1C) */ }; @@ -134,6 +138,21 @@ struct xhci_cap_regs { /* run_regs_off bitmask - bits 0:4 reserved */ #define RTSOFF_MASK (~0x1f) +/* HCCPARAMS2 - hcc_params2 - bitmasks */ +/* true: HC supports U3 entry Capability */ +#define HCC2_U3C(p) ((p) & (1 << 0)) +/* true: HC supports Configure endpoint command Max exit latency too large */ +#define HCC2_CMC(p) ((p) & (1 << 1)) +/* true: HC supports Force Save context Capability */ +#define HCC2_FSC(p) ((p) & (1 << 2)) +/* true: HC supports Compliance Transition Capability */ +#define HCC2_CTC(p) ((p) & (1 << 3)) +/* true: HC support Large ESIT payload Capability > 48k */ +#define HCC2_LEC(p) ((p) & (1 << 4)) +/* true: HC support Configuration Information Capability */ +#define HCC2_CIC(p) ((p) & (1 << 5)) +/* true: HC support Extended TBC Capability, Isoc burst count > 65535 */ +#define HCC2_ETC(p) ((p) & (1 << 6)) /* Number of registers per port */ #define NUM_PORT_REGS 4 @@ -269,7 +288,11 @@ struct xhci_op_regs { /* CONFIG - Configure Register - config_reg bitmasks */ /* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */ #define MAX_DEVS(p) ((p) & 0xff) -/* bits 8:31 - reserved and should be preserved */ +/* bit 8: U3 Entry Enabled, assert PLC when root port enters U3, xhci 1.1 */ +#define CONFIG_U3E (1 << 8) +/* bit 9: Configuration Information Enable, xhci 1.1 */ +#define CONFIG_CIE (1 << 9) +/* bits 10:31 - reserved and should be preserved */ /* PORTSC - Port Status and Control Register - port_status_base bitmasks */ /* true: device connected */ @@ -306,11 +329,16 @@ struct xhci_op_regs { #define XDEV_LS (0x2 << 10) #define XDEV_HS (0x3 << 10) #define XDEV_SS (0x4 << 10) +#define XDEV_SSP (0x5 << 10) #define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10)) #define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS) #define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS) #define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS) #define DEV_SUPERSPEED(p) (((p) & 
DEV_SPEED_MASK) == XDEV_SS) +#define DEV_SUPERSPEEDPLUS(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP) +#define DEV_SUPERSPEED_ANY(p) (((p) & DEV_SPEED_MASK) >= XDEV_SS) +#define DEV_PORT_SPEED(p) (((p) >> 10) & 0x0f) + /* Bits 20:23 in the Slot Context are the speed for the device */ #define SLOT_SPEED_FS (XDEV_FS << 10) #define SLOT_SPEED_LS (XDEV_LS << 10) @@ -394,6 +422,9 @@ struct xhci_op_regs { #define PORT_L1DS(p) (((p) & 0xff) << 8) #define PORT_HLE (1 << 16) +/* USB3 Protocol PORTLI Port Link Information */ +#define PORT_RX_LANES(p) (((p) >> 16) & 0xf) +#define PORT_TX_LANES(p) (((p) >> 20) & 0xf) /* USB2 Protocol PORTHLPMC */ #define PORT_HIRDM(p)((p) & 3) @@ -519,9 +550,23 @@ struct xhci_protocol_caps { }; #define XHCI_EXT_PORT_MAJOR(x) (((x) >> 24) & 0xff) +#define XHCI_EXT_PORT_MINOR(x) (((x) >> 16) & 0xff) +#define XHCI_EXT_PORT_PSIC(x) (((x) >> 28) & 0x0f) #define XHCI_EXT_PORT_OFF(x) ((x) & 0xff) #define XHCI_EXT_PORT_COUNT(x) (((x) >> 8) & 0xff) +#define XHCI_EXT_PORT_PSIV(x) (((x) >> 0) & 0x0f) +#define XHCI_EXT_PORT_PSIE(x) (((x) >> 4) & 0x03) +#define XHCI_EXT_PORT_PLT(x) (((x) >> 6) & 0x03) +#define XHCI_EXT_PORT_PFD(x) (((x) >> 8) & 0x01) +#define XHCI_EXT_PORT_LP(x) (((x) >> 14) & 0x03) +#define XHCI_EXT_PORT_PSIM(x) (((x) >> 16) & 0xffff) + +#define PLT_MASK (0x03 << 6) +#define PLT_SYM (0x00 << 6) +#define PLT_ASYM_RX (0x02 << 6) +#define PLT_ASYM_TX (0x03 << 6) + /** * struct xhci_container_ctx * @type: Type of context. Used to calculated offsets to contained contexts. @@ -1136,6 +1181,8 @@ enum xhci_setup_dev { /* Normal TRB fields */ /* transfer_len bitmasks - bits 0:16 */ #define TRB_LEN(p) ((p) & 0x1ffff) +/* TD Size, packets remaining in this TD, bits 21:17 (5 bits, so max 31) */ +#define TRB_TD_SIZE(p) (min((p), (u32)31) << 17) /* Interrupter Target - which MSI-X vector to target the completion event at */ #define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22) #define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff) @@ -1448,6 +1495,14 @@ static inline unsigned int hcd_index(struct usb_hcd *hcd) return 1; } +struct xhci_hub { + u8 maj_rev; + u8 min_rev; + u32 *psi; /* array of protocol speed ID entries */ + u8 psi_count; + u8 psi_uid_count; +}; + /* There is one xhci_hcd structure per controller */ struct xhci_hcd { struct usb_hcd *main_hcd; @@ -1465,6 +1520,7 @@ struct xhci_hcd { __u32 hcs_params2; __u32 hcs_params3; __u32 hcc_params; + __u32 hcc_params2; spinlock_t lock; @@ -1586,6 +1642,8 @@ struct xhci_hcd { unsigned int num_usb3_ports; /* Array of pointers to USB 2.0 PORTSC registers */ __le32 __iomem **usb2_ports; + struct xhci_hub usb2_rhub; + struct xhci_hub usb3_rhub; unsigned int num_usb2_ports; /* support xHCI 0.96 spec USB2 software LPM */ unsigned sw_lpm_support:1; @@ -1651,20 +1709,12 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci) static inline u64 xhci_read_64(const struct xhci_hcd *xhci, __le64 __iomem *regs) { - __u32 __iomem *ptr = (__u32 __iomem *) regs; - u64 val_lo = readl(ptr); - u64 val_hi = readl(ptr + 1); - return val_lo + (val_hi << 32); + return lo_hi_readq(regs); } static inline void xhci_write_64(struct xhci_hcd *xhci, const u64 val, __le64 __iomem *regs) { - __u32 __iomem *ptr = (__u32 __iomem *) regs; - u32 val_lo = lower_32_bits(val); - u32 val_hi = upper_32_bits(val); - - writel(val_lo, ptr); - writel(val_hi, ptr + 1); + lo_hi_writeq(val, regs); } static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci) diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c index 3ad5d19e4d04..23c794813e6a 100644 --- 
a/drivers/usb/misc/chaoskey.c +++ b/drivers/usb/misc/chaoskey.c @@ -472,7 +472,7 @@ static int chaoskey_rng_read(struct hwrng *rng, void *data, if (this_time > max) this_time = max; - memcpy(data, dev->buf, this_time); + memcpy(data, dev->buf + dev->used, this_time); dev->used += this_time; diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 7b98e1d9194c..d82fa36c3465 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -476,6 +476,11 @@ static const struct of_device_id usbhs_of_match[] = { .compatible = "renesas,usbhs-r8a7794", .data = (void *)USBHS_TYPE_RCAR_GEN2, }, + { + /* Gen3 is compatible with Gen2 */ + .compatible = "renesas,usbhs-r8a7795", + .data = (void *)USBHS_TYPE_RCAR_GEN2, + }, { }, }; MODULE_DEVICE_TABLE(of, usbhs_of_match); @@ -493,7 +498,7 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev) return NULL; dparam = &info->driver_param; - dparam->type = of_id ? (u32)of_id->data : 0; + dparam->type = of_id ? (uintptr_t)of_id->data : 0; if (!of_property_read_u32(dev->of_node, "renesas,buswait", &tmp)) dparam->buswait_bwait = tmp; gpio = of_get_named_gpio_flags(dev->of_node, "renesas,enable-gpio", 0, diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c index 1bac215202d2..39afd7045c43 100644 --- a/drivers/usb/storage/isd200.c +++ b/drivers/usb/storage/isd200.c @@ -1456,30 +1456,26 @@ static void isd200_free_info_ptrs(void *info_) */ static int isd200_init_info(struct us_data *us) { - int retStatus = ISD200_GOOD; struct isd200_info *info; info = kzalloc(sizeof(struct isd200_info), GFP_KERNEL); if (!info) - retStatus = ISD200_ERROR; - else { - info->id = kzalloc(ATA_ID_WORDS * 2, GFP_KERNEL); - info->RegsBuf = kmalloc(sizeof(info->ATARegs), GFP_KERNEL); - info->srb.sense_buffer = - kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); - if (!info->id || !info->RegsBuf || !info->srb.sense_buffer) { - isd200_free_info_ptrs(info); - kfree(info); - retStatus = ISD200_ERROR; - } - } + return ISD200_ERROR; - if (retStatus == ISD200_GOOD) { - us->extra = info; - us->extra_destructor = isd200_free_info_ptrs; + info->id = kzalloc(ATA_ID_WORDS * 2, GFP_KERNEL); + info->RegsBuf = kmalloc(sizeof(info->ATARegs), GFP_KERNEL); + info->srb.sense_buffer = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); + + if (!info->id || !info->RegsBuf || !info->srb.sense_buffer) { + isd200_free_info_ptrs(info); + kfree(info); + return ISD200_ERROR; } - return retStatus; + us->extra = info; + us->extra_destructor = isd200_free_info_ptrs; + + return ISD200_GOOD; } /************************************************************************** diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index f68921909552..48ca9c204354 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -257,17 +257,16 @@ static void uas_stat_cmplt(struct urb *urb) struct uas_cmd_info *cmdinfo; unsigned long flags; unsigned int idx; + int status = urb->status; spin_lock_irqsave(&devinfo->lock, flags); if (devinfo->resetting) goto out; - if (urb->status) { - if (urb->status != -ENOENT && urb->status != -ECONNRESET) { - dev_err(&urb->dev->dev, "stat urb: status %d\n", - urb->status); - } + if (status) { + if (status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN) + dev_err(&urb->dev->dev, "stat urb: status %d\n", status); goto out; } @@ -348,6 +347,7 @@ static void uas_data_cmplt(struct urb *urb) struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata; struct scsi_data_buffer 
*sdb = NULL; unsigned long flags; + int status = urb->status; spin_lock_irqsave(&devinfo->lock, flags); @@ -374,9 +374,9 @@ static void uas_data_cmplt(struct urb *urb) goto out; } - if (urb->status) { - if (urb->status != -ENOENT && urb->status != -ECONNRESET) - uas_log_cmd_state(cmnd, "data cmplt err", urb->status); + if (status) { + if (status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN) + uas_log_cmd_state(cmnd, "data cmplt err", status); /* error: no data transfered */ sdb->resid = sdb->length; } else { diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index e9ef1eccdace..7fbe19d5279e 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -218,7 +218,7 @@ static inline void hub_descriptor(struct usb_hub_descriptor *desc) memset(desc, 0, sizeof(*desc)); desc->bDescriptorType = USB_DT_HUB; desc->bDescLength = 9; - desc->wHubCharacteristics = __constant_cpu_to_le16( + desc->wHubCharacteristics = cpu_to_le16( HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM); desc->bNbrPorts = VHCI_NPORTS; desc->u.hs.DeviceRemovable[0] = 0xff; @@ -565,7 +565,9 @@ no_need_xmit: usb_hcd_unlink_urb_from_ep(hcd, urb); no_need_unlink: spin_unlock(&the_controller->lock); - usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status); + if (!ret) + usb_hcd_giveback_urb(vhci_to_hcd(the_controller), + urb, urb->status); return ret; } @@ -629,7 +631,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) /* URB was never linked! or will be soon given back by * vhci_rx. */ spin_unlock(&the_controller->lock); - return 0; + return -EIDRM; } { diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c index 0e5fde1d3ffb..9f9a7bef1ff6 100644 --- a/drivers/video/fbdev/broadsheetfb.c +++ b/drivers/video/fbdev/broadsheetfb.c @@ -752,7 +752,7 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev, if ((fw_entry->size < 8*1024) || (fw_entry->size > 64*1024)) { dev_err(dev, "Invalid waveform\n"); err = -EINVAL; - goto err_failed; + goto err_fw; } mutex_lock(&(par->io_lock)); @@ -762,13 +762,15 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev, mutex_unlock(&(par->io_lock)); if (err < 0) { dev_err(dev, "Failed to store broadsheet waveform\n"); - goto err_failed; + goto err_fw; } dev_info(dev, "Stored broadsheet waveform, size %zd\n", fw_entry->size); - return len; + err = len; +err_fw: + release_firmware(fw_entry); err_failed: return err; } diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c index 7fa2e6f9e322..b335c1ae8625 100644 --- a/drivers/video/fbdev/fsl-diu-fb.c +++ b/drivers/video/fbdev/fsl-diu-fb.c @@ -1628,9 +1628,16 @@ static int fsl_diu_suspend(struct platform_device *ofdev, pm_message_t state) static int fsl_diu_resume(struct platform_device *ofdev) { struct fsl_diu_data *data; + unsigned int i; data = dev_get_drvdata(&ofdev->dev); - enable_lcdc(data->fsl_diu_info); + + fsl_diu_enable_interrupts(data); + update_lcdc(data->fsl_diu_info); + for (i = 0; i < NUM_AOIS; i++) { + if (data->mfb[i].count) + fsl_diu_enable_panel(&data->fsl_diu_info[i]); + } return 0; } diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c index 9b8bebdf8f86..f9ec5c0484fa 100644 --- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c +++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c @@ -831,6 +831,7 @@ static struct of_device_id of_platform_mb862xx_tbl[] = { { .compatible = "fujitsu,coral", }, { /* end */ } }; 
+MODULE_DEVICE_TABLE(of, of_platform_mb862xx_tbl); static struct platform_driver of_platform_mb862xxfb_driver = { .driver = { diff --git a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c index a8ce920fa797..d811e6dcaef7 100644 --- a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c +++ b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c @@ -294,7 +294,7 @@ static int dvic_probe_of(struct platform_device *pdev) adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0); if (adapter_node) { - adapter = of_find_i2c_adapter_by_node(adapter_node); + adapter = of_get_i2c_adapter_by_node(adapter_node); if (adapter == NULL) { dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n"); omap_dss_put_device(ddata->in); diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c index 90cbc4c3406c..c581231c74a5 100644 --- a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c +++ b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c @@ -898,6 +898,7 @@ static const struct of_device_id acx565akm_of_match[] = { { .compatible = "omapdss,sony,acx565akm", }, {}, }; +MODULE_DEVICE_TABLE(of, acx565akm_of_match); static struct spi_driver acx565akm_driver = { .driver = { diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c index 7ed9a227f5ea..01b43e9ce941 100644 --- a/drivers/video/fbdev/tridentfb.c +++ b/drivers/video/fbdev/tridentfb.c @@ -226,7 +226,7 @@ static void blade_image_blit(struct tridentfb_par *par, const char *data, writemmr(par, DST1, point(x, y)); writemmr(par, DST2, point(x + w - 1, y + h - 1)); - memcpy(par->io_virt + 0x10000, data, 4 * size); + iowrite32_rep(par->io_virt + 0x10000, data, size); } static void blade_copy_rect(struct tridentfb_par *par, @@ -673,8 +673,14 @@ static int get_nativex(struct tridentfb_par *par) static inline void set_lwidth(struct tridentfb_par *par, int width) { write3X4(par, VGA_CRTC_OFFSET, width & 0xFF); - write3X4(par, AddColReg, - (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4)); + /* chips older than TGUI9660 have only 1 width bit in AddColReg */ + /* touching the other one breaks I2C/DDC */ + if (par->chip_id == TGUI9440 || par->chip_id == CYBER9320) + write3X4(par, AddColReg, + (read3X4(par, AddColReg) & 0xEF) | ((width & 0x100) >> 4)); + else + write3X4(par, AddColReg, + (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4)); } /* For resolutions smaller than FP resolution stretch */ diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c index 32d8275e4c88..8a1076beecd3 100644 --- a/drivers/video/of_display_timing.c +++ b/drivers/video/of_display_timing.c @@ -210,6 +210,7 @@ struct display_timings *of_get_display_timings(struct device_node *np) */ pr_err("%s: error in timing %d\n", of_node_full_name(np), disp->num_timings + 1); + kfree(dt); goto timingfail; } diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index c68edc16aa54..79e1aa1b0959 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -817,8 +817,9 @@ config ITCO_WDT tristate "Intel TCO Timer/Watchdog" depends on (X86 || IA64) && PCI select WATCHDOG_CORE + depends on I2C || I2C=n select LPC_ICH if !EXPERT - select I2C_I801 if !EXPERT + select I2C_I801 if !EXPERT && I2C ---help--- Hardware driver for the intel TCO timer based watchdog devices. 
These drivers are included in the Intel 82801 I/O Controller diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c index 66c3e656a616..8a5ce5b5a0b6 100644 --- a/drivers/watchdog/bcm2835_wdt.c +++ b/drivers/watchdog/bcm2835_wdt.c @@ -36,6 +36,13 @@ #define PM_RSTC_WRCFG_FULL_RESET 0x00000020 #define PM_RSTC_RESET 0x00000102 +/* + * The Raspberry Pi firmware uses the RSTS register to know which partiton + * to boot from. The partiton value is spread into bits 0, 2, 4, 6, 8, 10. + * Partiton 63 is a special partition used by the firmware to indicate halt. + */ +#define PM_RSTS_RASPBERRYPI_HALT 0x555 + #define SECS_TO_WDOG_TICKS(x) ((x) << 16) #define WDOG_TICKS_TO_SECS(x) ((x) >> 16) @@ -151,8 +158,7 @@ static void bcm2835_power_off(void) * hard reset. */ val = readl_relaxed(wdt->base + PM_RSTS); - val &= PM_RSTC_WRCFG_CLR; - val |= PM_PASSWORD | PM_RSTS_HADWRH_SET; + val |= PM_PASSWORD | PM_RSTS_RASPBERRYPI_HALT; writel_relaxed(val, wdt->base + PM_RSTS); /* Continue with normal reset mechanism */ diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c index cc1bdfc2ff71..006e2348022c 100644 --- a/drivers/watchdog/gef_wdt.c +++ b/drivers/watchdog/gef_wdt.c @@ -303,6 +303,7 @@ static const struct of_device_id gef_wdt_ids[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, gef_wdt_ids); static struct platform_driver gef_wdt_driver = { .driver = { diff --git a/drivers/watchdog/mena21_wdt.c b/drivers/watchdog/mena21_wdt.c index 69013007dc47..098fa9c34d6d 100644 --- a/drivers/watchdog/mena21_wdt.c +++ b/drivers/watchdog/mena21_wdt.c @@ -253,6 +253,7 @@ static const struct of_device_id a21_wdt_ids[] = { { .compatible = "men,a021-wdt" }, { }, }; +MODULE_DEVICE_TABLE(of, a21_wdt_ids); static struct platform_driver a21_wdt_driver = { .probe = a21_wdt_probe, diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c index 2789da2c0515..60b0605bd7e6 100644 --- a/drivers/watchdog/moxart_wdt.c +++ b/drivers/watchdog/moxart_wdt.c @@ -168,6 +168,7 @@ static const struct of_device_id moxart_watchdog_match[] = { { .compatible = "moxa,moxart-watchdog" }, { }, }; +MODULE_DEVICE_TABLE(of, moxart_watchdog_match); static struct platform_driver moxart_wdt_driver = { .probe = moxart_wdt_probe, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 295795aebe0b..1e60d00d4ea7 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2847,6 +2847,8 @@ int open_ctree(struct super_block *sb, !extent_buffer_uptodate(chunk_root->node)) { printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n", sb->s_id); + if (!IS_ERR(chunk_root->node)) + free_extent_buffer(chunk_root->node); chunk_root->node = NULL; goto fail_tree_roots; } @@ -2885,6 +2887,8 @@ retry_root_backup: !extent_buffer_uptodate(tree_root->node)) { printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n", sb->s_id); + if (!IS_ERR(tree_root->node)) + free_extent_buffer(tree_root->node); tree_root->node = NULL; goto recovery_tree_root; } diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 8d052209f473..2513a7f53334 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -112,11 +112,11 @@ static struct dentry *btrfs_fh_to_parent(struct super_block *sb, struct fid *fh, u32 generation; if (fh_type == FILEID_BTRFS_WITH_PARENT) { - if (fh_len != BTRFS_FID_SIZE_CONNECTABLE) + if (fh_len < BTRFS_FID_SIZE_CONNECTABLE) return NULL; root_objectid = fid->root_objectid; } else if (fh_type == FILEID_BTRFS_WITH_PARENT_ROOT) { - if (fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT) + if (fh_len < 
BTRFS_FID_SIZE_CONNECTABLE_ROOT) return NULL; root_objectid = fid->parent_root_objectid; } else @@ -136,11 +136,11 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh, u32 generation; if ((fh_type != FILEID_BTRFS_WITH_PARENT || - fh_len != BTRFS_FID_SIZE_CONNECTABLE) && + fh_len < BTRFS_FID_SIZE_CONNECTABLE) && (fh_type != FILEID_BTRFS_WITH_PARENT_ROOT || - fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT) && + fh_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT) && (fh_type != FILEID_BTRFS_WITHOUT_PARENT || - fh_len != BTRFS_FID_SIZE_NON_CONNECTABLE)) + fh_len < BTRFS_FID_SIZE_NON_CONNECTABLE)) return NULL; objectid = fid->objectid; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9f9604201333..601d7d45d164 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2828,6 +2828,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head; int ret; int run_all = count == (unsigned long)-1; + bool can_flush_pending_bgs = trans->can_flush_pending_bgs; /* We'll clean this up in btrfs_cleanup_transaction */ if (trans->aborted) @@ -2844,6 +2845,7 @@ again: #ifdef SCRAMBLE_DELAYED_REFS delayed_refs->run_delayed_start = find_middle(&delayed_refs->root); #endif + trans->can_flush_pending_bgs = false; ret = __btrfs_run_delayed_refs(trans, root, count); if (ret < 0) { btrfs_abort_transaction(trans, root, ret); @@ -2893,6 +2895,7 @@ again: } out: assert_qgroups_uptodate(trans); + trans->can_flush_pending_bgs = can_flush_pending_bgs; return 0; } @@ -4306,7 +4309,8 @@ out: * the block groups that were made dirty during the lifetime of the * transaction. */ - if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) { + if (trans->can_flush_pending_bgs && + trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) { btrfs_create_pending_block_groups(trans, trans->root); btrfs_trans_release_chunk_metadata(trans); } @@ -9560,7 +9564,9 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans, struct btrfs_block_group_item item; struct btrfs_key key; int ret = 0; + bool can_flush_pending_bgs = trans->can_flush_pending_bgs; + trans->can_flush_pending_bgs = false; list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) { if (ret) goto next; @@ -9581,6 +9587,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans, next: list_del_init(&block_group->bg_list); } + trans->can_flush_pending_bgs = can_flush_pending_bgs; } int btrfs_make_block_group(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index e2357e31609a..3915c9473e94 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3132,12 +3132,12 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree, get_extent_t *get_extent, struct extent_map **em_cached, struct bio **bio, int mirror_num, - unsigned long *bio_flags, int rw) + unsigned long *bio_flags, int rw, + u64 *prev_em_start) { struct inode *inode; struct btrfs_ordered_extent *ordered; int index; - u64 prev_em_start = (u64)-1; inode = pages[0]->mapping->host; while (1) { @@ -3153,7 +3153,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree, for (index = 0; index < nr_pages; index++) { __do_readpage(tree, pages[index], get_extent, em_cached, bio, - mirror_num, bio_flags, rw, &prev_em_start); + mirror_num, bio_flags, rw, prev_em_start); page_cache_release(pages[index]); } } @@ -3163,7 +3163,8 @@ static void __extent_readpages(struct extent_io_tree *tree, int nr_pages, get_extent_t 
*get_extent, struct extent_map **em_cached, struct bio **bio, int mirror_num, - unsigned long *bio_flags, int rw) + unsigned long *bio_flags, int rw, + u64 *prev_em_start) { u64 start = 0; u64 end = 0; @@ -3184,7 +3185,7 @@ static void __extent_readpages(struct extent_io_tree *tree, index - first_index, start, end, get_extent, em_cached, bio, mirror_num, bio_flags, - rw); + rw, prev_em_start); start = page_start; end = start + PAGE_CACHE_SIZE - 1; first_index = index; @@ -3195,7 +3196,8 @@ static void __extent_readpages(struct extent_io_tree *tree, __do_contiguous_readpages(tree, &pages[first_index], index - first_index, start, end, get_extent, em_cached, bio, - mirror_num, bio_flags, rw); + mirror_num, bio_flags, rw, + prev_em_start); } static int __extent_read_full_page(struct extent_io_tree *tree, @@ -4207,6 +4209,7 @@ int extent_readpages(struct extent_io_tree *tree, struct page *page; struct extent_map *em_cached = NULL; int nr = 0; + u64 prev_em_start = (u64)-1; for (page_idx = 0; page_idx < nr_pages; page_idx++) { page = list_entry(pages->prev, struct page, lru); @@ -4223,12 +4226,12 @@ int extent_readpages(struct extent_io_tree *tree, if (nr < ARRAY_SIZE(pagepool)) continue; __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, - &bio, 0, &bio_flags, READ); + &bio, 0, &bio_flags, READ, &prev_em_start); nr = 0; } if (nr) __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, - &bio, 0, &bio_flags, READ); + &bio, 0, &bio_flags, READ, &prev_em_start); if (em_cached) free_extent_map(em_cached); diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index aa72bfd28f7d..a739b825bdd3 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -1920,10 +1920,12 @@ static int did_overwrite_ref(struct send_ctx *sctx, /* * We know that it is or will be overwritten. Check this now. * The current inode being processed might have been the one that caused - * inode 'ino' to be orphanized, therefore ow_inode can actually be the - * same as sctx->send_progress. + * inode 'ino' to be orphanized, therefore check if ow_inode matches + * the current inode being processed. 
*/ - if (ow_inode <= sctx->send_progress) + if ((ow_inode < sctx->send_progress) || + (ino != sctx->cur_ino && ow_inode == sctx->cur_ino && + gen == sctx->cur_inode_gen)) ret = 1; else ret = 0; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 74bc3338418b..a5b06442f0bf 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -557,6 +557,7 @@ again: h->delayed_ref_elem.seq = 0; h->type = type; h->allocating_chunk = false; + h->can_flush_pending_bgs = true; h->reloc_reserved = false; h->sync = false; INIT_LIST_HEAD(&h->qgroup_ref_list); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 87964bf8892d..a994bb097ee5 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -118,6 +118,7 @@ struct btrfs_trans_handle { short aborted; short adding_csums; bool allocating_chunk; + bool can_flush_pending_bgs; bool reloc_reserved; bool sync; unsigned int type; diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 27aea110e923..c3cc1609025f 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -136,5 +136,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* CONFIG_CIFS_NFSD_EXPORT */ -#define CIFS_VERSION "2.07" +#define CIFS_VERSION "2.08" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index f621b44cb800..6b66dd5d1540 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -2034,7 +2034,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, struct tcon_link *tlink = NULL; struct cifs_tcon *tcon = NULL; struct TCP_Server_Info *server; - struct cifs_io_parms io_parms; /* * To avoid spurious oplock breaks from server, in the case of @@ -2056,18 +2055,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, rc = -ENOSYS; cifsFileInfo_put(open_file); cifs_dbg(FYI, "SetFSize for attrs rc = %d\n", rc); - if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { - unsigned int bytes_written; - - io_parms.netfid = open_file->fid.netfid; - io_parms.pid = open_file->pid; - io_parms.tcon = tcon; - io_parms.offset = 0; - io_parms.length = attrs->ia_size; - rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, - NULL, NULL, 1); - cifs_dbg(FYI, "Wrt seteof rc %d\n", rc); - } } else rc = -EINVAL; @@ -2093,28 +2080,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, else rc = -ENOSYS; cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc); - if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { - __u16 netfid; - int oplock = 0; - rc = SMBLegacyOpen(xid, tcon, full_path, FILE_OPEN, - GENERIC_WRITE, CREATE_NOT_DIR, &netfid, - &oplock, NULL, cifs_sb->local_nls, - cifs_remap(cifs_sb)); - if (rc == 0) { - unsigned int bytes_written; - - io_parms.netfid = netfid; - io_parms.pid = current->tgid; - io_parms.tcon = tcon; - io_parms.offset = 0; - io_parms.length = attrs->ia_size; - rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, NULL, - NULL, 1); - cifs_dbg(FYI, "wrt seteof rc %d\n", rc); - CIFSSMBClose(xid, tcon, netfid); - } - } if (tlink) cifs_put_tlink(tlink); diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index ce83e2edbe0a..597a417ba94d 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -922,7 +922,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, if (tcon && tcon->bad_network_name) return -ENOENT; - if ((tcon->seal) && + if ((tcon && tcon->seal) && ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) { cifs_dbg(VFS, "encryption requested but no server support"); return -EOPNOTSUPP; 
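The SMB2_tcon hunk just above only adds a NULL test ahead of the tcon->seal dereference. A minimal sketch of that ordering follows, using hypothetical type and parameter names rather than the real CIFS structures:

	/*
	 * Hedged illustration, not the actual CIFS code: the pointer is
	 * tested before the field it guards, and only then is the server
	 * capability consulted.
	 */
	struct tcon_like {
		int seal;			/* stands in for tcon->seal */
	};

	static int check_seal(const struct tcon_like *tcon,
			      unsigned int server_caps,
			      unsigned int encryption_cap)
	{
		/* pointer first, then the flag, then the capability it requires */
		if (tcon && tcon->seal && !(server_caps & encryption_cap))
			return -1;		/* the driver returns -EOPNOTSUPP here */
		return 0;
	}

Short-circuit evaluation guarantees tcon->seal is never read when tcon is NULL, which is the whole point of the one-line fix.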
@@ -569,8 +569,20 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) goto fallback; + sector = bh.b_blocknr << (blkbits - 9); + if (buffer_unwritten(&bh) || buffer_new(&bh)) { int i; + + length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn, + bh.b_size); + if (length < 0) { + result = VM_FAULT_SIGBUS; + goto out; + } + if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR)) + goto fallback; + for (i = 0; i < PTRS_PER_PMD; i++) clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE); wmb_pmem(); @@ -623,7 +635,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, result = VM_FAULT_NOPAGE; spin_unlock(ptl); } else { - sector = bh.b_blocknr << (blkbits - 9); length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn, bh.b_size); if (length < 0) { diff --git a/fs/namei.c b/fs/namei.c index 726d211db484..33e9495a3129 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1558,8 +1558,6 @@ static int lookup_fast(struct nameidata *nd, negative = d_is_negative(dentry); if (read_seqcount_retry(&dentry->d_seq, seq)) return -ECHILD; - if (negative) - return -ENOENT; /* * This sequence count validates that the parent had no @@ -1580,6 +1578,12 @@ static int lookup_fast(struct nameidata *nd, goto unlazy; } } + /* + * Note: do negative dentry check after revalidation in + * case that drops it. + */ + if (negative) + return -ENOENT; path->mnt = mnt; path->dentry = dentry; if (likely(__follow_mount_rcu(nd, path, inode, seqp))) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index f93b9cdb4934..5133bb18830e 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1458,12 +1458,18 @@ nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state) if (delegation) delegation_flags = delegation->flags; rcu_read_unlock(); - if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) { + switch (data->o_arg.claim) { + default: + break; + case NFS4_OPEN_CLAIM_DELEGATE_CUR: + case NFS4_OPEN_CLAIM_DELEG_CUR_FH: pr_err_ratelimited("NFS: Broken NFSv4 server %s is " "returning a delegation for " "OPEN(CLAIM_DELEGATE_CUR)\n", clp->cl_hostname); - } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0) + return; + } + if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0) nfs_inode_set_delegation(state->inode, data->owner->so_cred, &data->o_res); @@ -1771,6 +1777,9 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, if (IS_ERR(opendata)) return PTR_ERR(opendata); nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); + write_seqlock(&state->seqlock); + nfs4_stateid_copy(&state->stateid, &state->open_stateid); + write_sequnlock(&state->seqlock); clear_bit(NFS_DELEGATED_STATE, &state->flags); switch (type & (FMODE_READ|FMODE_WRITE)) { case FMODE_READ|FMODE_WRITE: @@ -1863,6 +1872,8 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) data->rpc_done = 0; data->rpc_status = 0; data->timestamp = jiffies; + if (data->is_recover) + nfs4_set_sequence_privileged(&data->c_arg.seq_args); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 5db324635e92..d854693a15b0 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1725,7 +1725,8 @@ restart: if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags)) continue; - atomic_inc(&sp->so_count); + if (!atomic_inc_not_zero(&sp->so_count)) + continue; spin_unlock(&clp->cl_lock); rcu_read_unlock(); diff --git a/fs/nfs/nfs4trace.h 
b/fs/nfs/nfs4trace.h index 28df12e525ba..671cf68fe56b 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h @@ -409,7 +409,7 @@ DECLARE_EVENT_CLASS(nfs4_open_event, __entry->flags = flags; __entry->fmode = (__force unsigned int)ctx->mode; __entry->dev = ctx->dentry->d_sb->s_dev; - if (!IS_ERR(state)) + if (!IS_ERR_OR_NULL(state)) inode = state->inode; if (inode != NULL) { __entry->fileid = NFS_FILEID(inode); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 72624dc4a623..75ab7622e0cc 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -569,19 +569,17 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, if (!nfs_pageio_add_request(pgio, req)) { nfs_redirty_request(req); ret = pgio->pg_error; - } + } else + nfs_add_stats(page_file_mapping(page)->host, + NFSIOS_WRITEPAGES, 1); out: return ret; } static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio) { - struct inode *inode = page_file_mapping(page)->host; int ret; - nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); - nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); - nfs_pageio_cond_complete(pgio, page_file_index(page)); ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE); if (ret == -EAGAIN) { @@ -597,9 +595,11 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc) { struct nfs_pageio_descriptor pgio; + struct inode *inode = page_file_mapping(page)->host; int err; - nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc), + nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); + nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false, &nfs_async_write_completion_ops); err = nfs_do_writepage(page, wbc, &pgio); nfs_pageio_complete(&pgio); @@ -1223,7 +1223,7 @@ static int nfs_can_extend_write(struct file *file, struct page *page, struct ino return 1; if (!flctx || (list_empty_careful(&flctx->flc_flock) && list_empty_careful(&flctx->flc_posix))) - return 0; + return 1; /* Check to see if there are whole file write locks */ ret = 0; diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index 96f3448b6eb4..fd65b3f1923c 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c @@ -652,11 +652,8 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode, { int err; - mutex_lock(&inode->i_mutex); err = security_inode_init_security(inode, dentry, qstr, &init_xattrs, 0); - mutex_unlock(&inode->i_mutex); - if (err) { struct ubifs_info *c = dentry->i_sb->s_fs_info; ubifs_err(c, "cannot initialize security for inode %lu, error %d", diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h index 94f9ea8abcae..011dde083f23 100644 --- a/include/asm-generic/word-at-a-time.h +++ b/include/asm-generic/word-at-a-time.h @@ -1,15 +1,10 @@ #ifndef _ASM_WORD_AT_A_TIME_H #define _ASM_WORD_AT_A_TIME_H -/* - * This says "generic", but it's actually big-endian only. - * Little-endian can use more efficient versions of these - * interfaces, see for example - * arch/x86/include/asm/word-at-a-time.h - * for those. - */ - #include <linux/kernel.h> +#include <asm/byteorder.h> + +#ifdef __BIG_ENDIAN struct word_at_a_time { const unsigned long high_bits, low_bits; @@ -53,4 +48,73 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct #define zero_bytemask(mask) (~1ul << __fls(mask)) #endif +#else + +/* + * The optimal byte mask counting is probably going to be something + * that is architecture-specific. 
If you have a reliably fast + * bit count instruction, that might be better than the multiply + * and shift, for example. + */ +struct word_at_a_time { + const unsigned long one_bits, high_bits; +}; + +#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } + +#ifdef CONFIG_64BIT + +/* + * Jan Achrenius on G+: microoptimized version of + * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56" + * that works for the bytemasks without having to + * mask them first. + */ +static inline long count_masked_bytes(unsigned long mask) +{ + return mask*0x0001020304050608ul >> 56; +} + +#else /* 32-bit case */ + +/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ +static inline long count_masked_bytes(long mask) +{ + /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ + long a = (0x0ff0001+mask) >> 23; + /* Fix the 1 for 00 case */ + return a & mask; +} + +#endif + +/* Return nonzero if it has a zero */ +static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) +{ + unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; + *bits = mask; + return mask; +} + +static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) +{ + return bits; +} + +static inline unsigned long create_zero_mask(unsigned long bits) +{ + bits = (bits - 1) & ~bits; + return bits >> 7; +} + +/* The mask we created is directly usable as a bytemask */ +#define zero_bytemask(mask) (mask) + +static inline unsigned long find_zero(unsigned long mask) +{ + return count_masked_bytes(mask); +} + +#endif /* __BIG_ENDIAN */ + #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 2a747a91fded..3febb4b9fce9 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -240,5 +240,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev); extern void drm_kms_helper_poll_disable(struct drm_device *dev); extern void drm_kms_helper_poll_enable(struct drm_device *dev); +extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev); #endif diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 499e9f625aef..0212d139a480 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -568,6 +568,10 @@ #define MODE_I2C_READ 4 #define MODE_I2C_STOP 8 +/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */ +#define DP_MST_PHYSICAL_PORT_0 0 +#define DP_MST_LOGICAL_PORT_0 8 + #define DP_LINK_STATUS_SIZE 6 bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], int lane_count); diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 86d0b25ed054..0f408b002d98 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -374,6 +374,7 @@ struct drm_dp_mst_topology_mgr; struct drm_dp_mst_topology_cbs { /* create a connector for a port */ struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); + void (*register_connector)(struct drm_connector *connector); void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_connector *connector); void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr); diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 7235c4851460..43856d19cf4d 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -217,6 +217,7 @@ struct pci_dev; int acpi_pci_irq_enable (struct pci_dev *dev); void acpi_penalize_isa_irq(int irq, int 
active); +bool acpi_isa_irq_available(int irq); void acpi_penalize_sci_irq(int irq, int trigger, int polarity); void acpi_pci_irq_disable (struct pci_dev *dev); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 37d1602c4f7a..5e7d43ab61c0 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -145,7 +145,6 @@ enum { BLK_MQ_F_SHOULD_MERGE = 1 << 0, BLK_MQ_F_TAG_SHARED = 1 << 1, BLK_MQ_F_SG_MERGE = 1 << 2, - BLK_MQ_F_SYSFS_UP = 1 << 3, BLK_MQ_F_DEFER_ISSUE = 1 << 4, BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, BLK_MQ_F_ALLOC_POLICY_BITS = 1, @@ -215,7 +214,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); void blk_mq_cancel_requeue_work(struct request_queue *q); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_abort_requeue_list(struct request_queue *q); -void blk_mq_complete_request(struct request *rq); +void blk_mq_complete_request(struct request *rq, int error); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); @@ -224,8 +223,6 @@ void blk_mq_start_hw_queues(struct request_queue *q); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); void blk_mq_run_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); -void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, - void *priv); void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, void *priv); void blk_mq_freeze_queue(struct request_queue *q); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 99da9ebc7377..19c2e947d4d1 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -456,6 +456,8 @@ struct request_queue { struct blk_mq_tag_set *tag_set; struct list_head tag_set_list; struct bio_set *bio_split; + + bool mq_sysfs_init_done; }; #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ diff --git a/include/linux/iova.h b/include/linux/iova.h index 3920a19d8194..92f7177db2ce 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -68,8 +68,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) return iova >> iova_shift(iovad); } -int iommu_iova_cache_init(void); -void iommu_iova_cache_destroy(void); +int iova_cache_get(void); +void iova_cache_put(void); struct iova *alloc_iova_mem(void); void free_iova_mem(struct iova *iova); diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index d3ca79236fb0..f644fdb06dd6 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -161,6 +161,11 @@ enum { IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), }; +static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) +{ + return d->of_node; +} + #ifdef CONFIG_IRQ_DOMAIN struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, irq_hw_number_t hwirq_max, int direct_max, diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ad800e62cb7a..6452ff4c463f 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -242,7 +242,6 @@ struct mem_cgroup { * percpu counter. 
*/ struct mem_cgroup_stat_cpu __percpu *stat; - spinlock_t pcp_counter_lock; #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) struct cg_proto tcp_mem; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 8eb3b19af2a4..250b1ff8b48d 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -402,17 +402,6 @@ struct mlx5_cmd_teardown_hca_mbox_out { u8 rsvd[8]; }; -struct mlx5_cmd_query_special_contexts_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_cmd_query_special_contexts_mbox_out { - struct mlx5_outbox_hdr hdr; - __be32 dump_fill_mkey; - __be32 resd_lkey; -}; - struct mlx5_cmd_layout { u8 type; u8 rsvd0[3]; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 27b53f9a24ad..8b6d6f2154a4 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -845,7 +845,6 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); int mlx5_register_interface(struct mlx5_interface *intf); void mlx5_unregister_interface(struct mlx5_interface *intf); int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); -int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey); struct mlx5_profile { u64 mask; diff --git a/include/linux/mm.h b/include/linux/mm.h index 91c08f6f0dc9..80001de019ba 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -905,6 +905,27 @@ static inline void set_page_links(struct page *page, enum zone_type zone, #endif } +#ifdef CONFIG_MEMCG +static inline struct mem_cgroup *page_memcg(struct page *page) +{ + return page->mem_cgroup; +} + +static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) +{ + page->mem_cgroup = memcg; +} +#else +static inline struct mem_cgroup *page_memcg(struct page *page) +{ + return NULL; +} + +static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) +{ +} +#endif + /* * Some inline functions in vmstat.h depend on page_zone() */ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index ff476515f716..581abf848566 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -230,12 +230,11 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, struct rcu_synchronize *rs_array); #define _wait_rcu_gp(checktiny, ...) \ -do { \ - call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ - const int __n = ARRAY_SIZE(__crcu_array); \ - struct rcu_synchronize __rs_array[__n]; \ - \ - __wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \ +do { \ + call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ + struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ + __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ + __crcu_array, __rs_array); \ } while (0) #define wait_rcu_gp(...) 
_wait_rcu_gp(false, __VA_ARGS__) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2b0a30a6e31c..4398411236f1 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2708,7 +2708,7 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb, if (skb->ip_summed == CHECKSUM_COMPLETE) skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); else if (skb->ip_summed == CHECKSUM_PARTIAL && - skb_checksum_start_offset(skb) <= len) + skb_checksum_start_offset(skb) < 0) skb->ip_summed = CHECKSUM_NONE; } diff --git a/include/linux/string.h b/include/linux/string.h index a8d90db9c4b0..9ef7795e65e4 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -25,6 +25,9 @@ extern char * strncpy(char *,const char *, __kernel_size_t); #ifndef __HAVE_ARCH_STRLCPY size_t strlcpy(char *, const char *, size_t); #endif +#ifndef __HAVE_ARCH_STRSCPY +ssize_t __must_check strscpy(char *, const char *, size_t); +#endif #ifndef __HAVE_ARCH_STRCAT extern char * strcat(char *, const char *); #endif diff --git a/include/linux/usb.h b/include/linux/usb.h index 447fe29b55b4..b9a28074210f 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -122,6 +122,8 @@ enum usb_interface_condition { * has been deferred. * @needs_binding: flag set when the driver should be re-probed or unbound * following a reset or suspend operation it doesn't support. + * @authorized: This allows to (de)authorize individual interfaces instead + * a whole device in contrast to the device authorization. * @dev: driver model's view of this device * @usb_dev: if an interface is bound to the USB major, this will point * to the sysfs representation for that device. @@ -178,6 +180,7 @@ struct usb_interface { unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */ unsigned needs_binding:1; /* needs delayed unbind/rebind */ unsigned resetting_device:1; /* true: bandwidth alloc after reset */ + unsigned authorized:1; /* used for interface authorization */ struct device dev; /* interface specific device info */ struct device *usb_dev; @@ -325,6 +328,7 @@ struct usb_host_bos { /* wireless cap descriptor is handled by wusb */ struct usb_ext_cap_descriptor *ext_cap; struct usb_ss_cap_descriptor *ss_cap; + struct usb_ssp_cap_descriptor *ssp_cap; struct usb_ss_container_id_descriptor *ss_id; }; diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index d2784c10bfe2..f89c24bd53a4 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -58,12 +58,6 @@ * * Since "struct usb_bus" is so thin, you can't share much code in it. * This framework is a layer over that, and should be more sharable. - * - * @authorized_default: Specifies if new devices are authorized to - * connect by default or they require explicit - * user space authorization; this bit is settable - * through /sys/class/usb_host/X/authorized_default. - * For the rest is RO, so we don't lock to r/w it. */ /*-------------------------------------------------------------------------*/ @@ -120,6 +114,8 @@ struct usb_hcd { #define HCD_FLAG_WAKEUP_PENDING 4 /* root hub is resuming? */ #define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */ #define HCD_FLAG_DEAD 6 /* controller has died? */ +#define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */ +#define HCD_FLAG_DEV_AUTHORIZED 8 /* authorize devices? */ /* The flags can be tested using these macros; they are likely to * be slightly faster than test_bit(). 
@@ -131,6 +127,22 @@ struct usb_hcd { #define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING)) #define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD)) + /* + * Specifies if interfaces are authorized by default + * or they require explicit user space authorization; this bit is + * settable through /sys/class/usb_host/X/interface_authorized_default + */ +#define HCD_INTF_AUTHORIZED(hcd) \ + ((hcd)->flags & (1U << HCD_FLAG_INTF_AUTHORIZED)) + + /* + * Specifies if devices are authorized by default + * or they require explicit user space authorization; this bit is + * settable through /sys/class/usb_host/X/authorized_default + */ +#define HCD_DEV_AUTHORIZED(hcd) \ + ((hcd)->flags & (1U << HCD_FLAG_DEV_AUTHORIZED)) + /* Flags that get set only during HCD registration or removal. */ unsigned rh_registered:1;/* is root hub registered? */ unsigned rh_pollable:1; /* may we poll the root hub? */ @@ -141,7 +153,6 @@ struct usb_hcd { * support the new root-hub polling mechanism. */ unsigned uses_new_polling:1; unsigned wireless:1; /* Wireless USB HCD */ - unsigned authorized_default:1; unsigned has_tt:1; /* Integrated TT in root hub */ unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */ unsigned can_do_streams:1; /* HC supports streams */ @@ -239,6 +250,7 @@ struct hc_driver { #define HCD_USB2 0x0020 /* USB 2.0 */ #define HCD_USB25 0x0030 /* Wireless USB 1.0 (USB 2.5)*/ #define HCD_USB3 0x0040 /* USB 3.0 */ +#define HCD_USB31 0x0050 /* USB 3.1 */ #define HCD_MASK 0x0070 #define HCD_BH 0x0100 /* URB complete in BH context */ diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index 3dd5a781da99..bfb74723f151 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h @@ -157,7 +157,7 @@ struct renesas_usbhs_driver_param { */ int pio_dma_border; /* default is 64byte */ - u32 type; + uintptr_t type; u32 enable_gpio; /* diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 4a167b30a12f..cb1b9bbda332 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -63,7 +63,11 @@ struct unix_sock { #define UNIX_GC_MAYBE_CYCLE 1 struct socket_wq peer_wq; }; -#define unix_sk(__sk) ((struct unix_sock *)__sk) + +static inline struct unix_sock *unix_sk(struct sock *sk) +{ + return (struct unix_sock *)sk; +} #define peer_wait peer_wq.wait diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index f7adc6e01f9e..4338eb7b09b3 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -866,6 +866,35 @@ struct usb_ss_container_id_descriptor { } __attribute__((packed)); #define USB_DT_USB_SS_CONTN_ID_SIZE 20 + +/* + * SuperSpeed Plus USB Capability descriptor: Defines the set of + * SuperSpeed Plus USB specific device level capabilities + */ +#define USB_SSP_CAP_TYPE 0xa +struct usb_ssp_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bReserved; + __le32 bmAttributes; +#define USB_SSP_SUBLINK_SPEED_ATTRIBS (0x1f << 0) /* sublink speed entries */ +#define USB_SSP_SUBLINK_SPEED_IDS (0xf << 5) /* speed ID entries */ + __u16 wFunctionalitySupport; +#define USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID (0xf) +#define USB_SSP_MIN_RX_LANE_COUNT (0xf << 8) +#define USB_SSP_MIN_TX_LANE_COUNT (0xf << 12) + __le16 wReserved; + __le32 bmSublinkSpeedAttr[1]; /* list of sublink speed attrib entries */ +#define USB_SSP_SUBLINK_SPEED_SSID (0xf) /* sublink speed ID */ +#define USB_SSP_SUBLINK_SPEED_LSE (0x3 << 4) /* Lanespeed exponent */ +#define 
USB_SSP_SUBLINK_SPEED_ST (0x3 << 6) /* Sublink type */ +#define USB_SSP_SUBLINK_SPEED_RSVD (0x3f << 8) /* Reserved */ +#define USB_SSP_SUBLINK_SPEED_LP (0x3 << 14) /* Link protocol */ +#define USB_SSP_SUBLINK_SPEED_LSM (0xff << 16) /* Lanespeed mantissa */ +} __attribute__((packed)); + + /*-------------------------------------------------------------------------*/ /* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index df0e09bb7dd5..9057d7af3ae1 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -11,8 +11,6 @@ #include <linux/types.h> -#include <linux/compiler.h> - #define UFFD_API ((__u64)0xAA) /* * After implementing the respective features it will become: diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h index 9ce083960a25..f18490985fc8 100644 --- a/include/xen/interface/sched.h +++ b/include/xen/interface/sched.h @@ -107,5 +107,13 @@ struct sched_watchdog { #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ #define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */ +/* + * Domain asked to perform 'soft reset' for it. The expected behavior is to + * reset internal Xen state for the domain returning it to the point where it + * was created but leaving the domain's memory contents and vCPU contexts + * intact. This will allow the domain to start over and set up all Xen specific + * interfaces again. + */ +#define SHUTDOWN_soft_reset 5 #endif /* __XEN_PUBLIC_SCHED_H__ */ diff --git a/ipc/msg.c b/ipc/msg.c index 66c4f567eb73..1471db9a7e61 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -137,13 +137,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) return retval; } - /* ipc_addid() locks msq upon success. */ - id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); - if (id < 0) { - ipc_rcu_putref(msq, msg_rcu_free); - return id; - } - msq->q_stime = msq->q_rtime = 0; msq->q_ctime = get_seconds(); msq->q_cbytes = msq->q_qnum = 0; @@ -153,6 +146,13 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) INIT_LIST_HEAD(&msq->q_receivers); INIT_LIST_HEAD(&msq->q_senders); + /* ipc_addid() locks msq upon success. 
*/ + id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); + if (id < 0) { + ipc_rcu_putref(msq, msg_rcu_free); + return id; + } + ipc_unlock_object(&msq->q_perm); rcu_read_unlock(); diff --git a/ipc/shm.c b/ipc/shm.c index 222131e8e38f..41787276e141 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -551,12 +551,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) if (IS_ERR(file)) goto no_file; - id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); - if (id < 0) { - error = id; - goto no_id; - } - shp->shm_cprid = task_tgid_vnr(current); shp->shm_lprid = 0; shp->shm_atim = shp->shm_dtim = 0; @@ -565,6 +559,13 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) shp->shm_nattch = 0; shp->shm_file = file; shp->shm_creator = current; + + id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); + if (id < 0) { + error = id; + goto no_id; + } + list_add(&shp->shm_clist, &current->sysvshm.shm_clist); /* diff --git a/ipc/util.c b/ipc/util.c index be4230020a1f..0f401d94b7c6 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -237,6 +237,10 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size) rcu_read_lock(); spin_lock(&new->lock); + current_euid_egid(&euid, &egid); + new->cuid = new->uid = euid; + new->gid = new->cgid = egid; + id = idr_alloc(&ids->ipcs_idr, new, (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0, GFP_NOWAIT); @@ -249,10 +253,6 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size) ids->in_use++; - current_euid_egid(&euid, &egid); - new->cuid = new->uid = euid; - new->gid = new->cgid = egid; - if (next_id < 0) { new->seq = ids->seq++; if (ids->seq > IPCID_SEQ_MAX) diff --git a/kernel/events/core.c b/kernel/events/core.c index f548f69c4299..b11756f9b6dc 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1243,11 +1243,7 @@ static inline void perf_event__state_init(struct perf_event *event) PERF_EVENT_STATE_INACTIVE; } -/* - * Called at perf_event creation and when events are attached/detached from a - * group. - */ -static void perf_event__read_size(struct perf_event *event) +static void __perf_event_read_size(struct perf_event *event, int nr_siblings) { int entry = sizeof(u64); /* value */ int size = 0; @@ -1263,7 +1259,7 @@ static void perf_event__read_size(struct perf_event *event) entry += sizeof(u64); if (event->attr.read_format & PERF_FORMAT_GROUP) { - nr += event->group_leader->nr_siblings; + nr += nr_siblings; size += sizeof(u64); } @@ -1271,14 +1267,11 @@ static void perf_event__read_size(struct perf_event *event) event->read_size = size; } -static void perf_event__header_size(struct perf_event *event) +static void __perf_event_header_size(struct perf_event *event, u64 sample_type) { struct perf_sample_data *data; - u64 sample_type = event->attr.sample_type; u16 size = 0; - perf_event__read_size(event); - if (sample_type & PERF_SAMPLE_IP) size += sizeof(data->ip); @@ -1303,6 +1296,17 @@ static void perf_event__header_size(struct perf_event *event) event->header_size = size; } +/* + * Called at perf_event creation and when events are attached/detached from a + * group. 
+ */ +static void perf_event__header_size(struct perf_event *event) +{ + __perf_event_read_size(event, + event->group_leader->nr_siblings); + __perf_event_header_size(event, event->attr.sample_type); +} + static void perf_event__id_header_size(struct perf_event *event) { struct perf_sample_data *data; @@ -1330,6 +1334,27 @@ static void perf_event__id_header_size(struct perf_event *event) event->id_header_size = size; } +static bool perf_event_validate_size(struct perf_event *event) +{ + /* + * The values computed here will be over-written when we actually + * attach the event. + */ + __perf_event_read_size(event, event->group_leader->nr_siblings + 1); + __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); + perf_event__id_header_size(event); + + /* + * Sum the lot; should not exceed the 64k limit we have on records. + * Conservative limit to allow for callchains and other variable fields. + */ + if (event->read_size + event->header_size + + event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) + return false; + + return true; +} + static void perf_group_attach(struct perf_event *event) { struct perf_event *group_leader = event->group_leader, *pos; @@ -8297,13 +8322,35 @@ SYSCALL_DEFINE5(perf_event_open, if (move_group) { gctx = group_leader->ctx; + mutex_lock_double(&gctx->mutex, &ctx->mutex); + } else { + mutex_lock(&ctx->mutex); + } + if (!perf_event_validate_size(event)) { + err = -E2BIG; + goto err_locked; + } + + /* + * Must be under the same ctx::mutex as perf_install_in_context(), + * because we need to serialize with concurrent event creation. + */ + if (!exclusive_event_installable(event, ctx)) { + /* exclusive and group stuff are assumed mutually exclusive */ + WARN_ON_ONCE(move_group); + + err = -EBUSY; + goto err_locked; + } + + WARN_ON_ONCE(ctx->parent_ctx); + + if (move_group) { /* * See perf_event_ctx_lock() for comments on the details * of swizzling perf_event::ctx. */ - mutex_lock_double(&gctx->mutex, &ctx->mutex); - perf_remove_from_context(group_leader, false); list_for_each_entry(sibling, &group_leader->sibling_list, @@ -8311,13 +8358,7 @@ SYSCALL_DEFINE5(perf_event_open, perf_remove_from_context(sibling, false); put_ctx(gctx); } - } else { - mutex_lock(&ctx->mutex); - } - WARN_ON_ONCE(ctx->parent_ctx); - - if (move_group) { /* * Wait for everybody to stop referencing the events through * the old lists, before installing it on new lists. @@ -8349,22 +8390,29 @@ SYSCALL_DEFINE5(perf_event_open, perf_event__state_init(group_leader); perf_install_in_context(ctx, group_leader, group_leader->cpu); get_ctx(ctx); - } - if (!exclusive_event_installable(event, ctx)) { - err = -EBUSY; - mutex_unlock(&ctx->mutex); - fput(event_file); - goto err_context; + /* + * Now that all events are installed in @ctx, nothing + * references @gctx anymore, so drop the last reference we have + * on it. + */ + put_ctx(gctx); } + /* + * Precalculate sample_data sizes; do while holding ctx::mutex such + * that we're serialized against further additions and before + * perf_install_in_context() which is the point the event is active and + * can use these values. 
+ */ + perf_event__header_size(event); + perf_event__id_header_size(event); + perf_install_in_context(ctx, event, event->cpu); perf_unpin_context(ctx); - if (move_group) { + if (move_group) mutex_unlock(&gctx->mutex); - put_ctx(gctx); - } mutex_unlock(&ctx->mutex); put_online_cpus(); @@ -8376,12 +8424,6 @@ SYSCALL_DEFINE5(perf_event_open, mutex_unlock(&current->perf_event_mutex); /* - * Precalculate sample_data sizes - */ - perf_event__header_size(event); - perf_event__id_header_size(event); - - /* * Drop the reference on the group_event after placing the * new event on the sibling_list. This ensures destruction * of the group leader will find the pointer to itself in @@ -8391,6 +8433,12 @@ SYSCALL_DEFINE5(perf_event_open, fd_install(event_fd, event_file); return event_fd; +err_locked: + if (move_group) + mutex_unlock(&gctx->mutex); + mutex_unlock(&ctx->mutex); +/* err_file: */ + fput(event_file); err_context: perf_unpin_context(ctx); put_ctx(ctx); diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index de41a68fc038..e25a83b67cce 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -22,7 +22,6 @@ /** * handle_bad_irq - handle spurious and unhandled irqs - * @irq: the interrupt number * @desc: description of the interrupt * * Handles spurious and unhandled IRQ's. It also prints a debug message. @@ -35,6 +34,7 @@ void handle_bad_irq(struct irq_desc *desc) kstat_incr_irqs_this_cpu(desc); ack_bad_irq(irq); } +EXPORT_SYMBOL_GPL(handle_bad_irq); /* * Special, empty irq handler: diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index e3a8c9577ba6..a50ddc9417ff 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -12,6 +12,7 @@ #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> +#include <linux/mutex.h> #include "internals.h" @@ -323,18 +324,29 @@ void register_handler_proc(unsigned int irq, struct irqaction *action) void register_irq_proc(unsigned int irq, struct irq_desc *desc) { + static DEFINE_MUTEX(register_lock); char name [MAX_NAMELEN]; - if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) + if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip)) return; + /* + * irq directories are registered only when a handler is + * added, not when the descriptor is created, so multiple + * tasks might try to register at the same time. 
+ */ + mutex_lock(&register_lock); + + if (desc->dir) + goto out_unlock; + memset(name, 0, MAX_NAMELEN); sprintf(name, "%d", irq); /* create /proc/irq/1234 */ desc->dir = proc_mkdir(name, root_irq_dir); if (!desc->dir) - return; + goto out_unlock; #ifdef CONFIG_SMP /* create /proc/irq/<irq>/smp_affinity */ @@ -355,6 +367,9 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) proc_create_data("spurious", 0444, desc->dir, &irq_spurious_proc_fops, (void *)(long)irq); + +out_unlock: + mutex_unlock(&register_lock); } void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 8acfbf773e06..4e49cc4c9952 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -3068,7 +3068,7 @@ static int __lock_is_held(struct lockdep_map *lock); static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, int trylock, int read, int check, int hardirqs_off, struct lockdep_map *nest_lock, unsigned long ip, - int references) + int references, int pin_count) { struct task_struct *curr = current; struct lock_class *class = NULL; @@ -3157,7 +3157,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, hlock->waittime_stamp = 0; hlock->holdtime_stamp = lockstat_clock(); #endif - hlock->pin_count = 0; + hlock->pin_count = pin_count; if (check && !mark_irqflags(curr, hlock)) return 0; @@ -3343,7 +3343,7 @@ found_it: hlock_class(hlock)->subclass, hlock->trylock, hlock->read, hlock->check, hlock->hardirqs_off, hlock->nest_lock, hlock->acquire_ip, - hlock->references)) + hlock->references, hlock->pin_count)) return 0; } @@ -3433,7 +3433,7 @@ found_it: hlock_class(hlock)->subclass, hlock->trylock, hlock->read, hlock->check, hlock->hardirqs_off, hlock->nest_lock, hlock->acquire_ip, - hlock->references)) + hlock->references, hlock->pin_count)) return 0; } @@ -3583,7 +3583,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, current->lockdep_recursion = 1; trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); __lock_acquire(lock, subclass, trylock, read, check, - irqs_disabled_flags(flags), nest_lock, ip, 0); + irqs_disabled_flags(flags), nest_lock, ip, 0, 0); current->lockdep_recursion = 0; raw_local_irq_restore(flags); } diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 9f75f25cc5d9..775d36cc0050 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3868,6 +3868,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf) static void __init rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) { + static struct lock_class_key rcu_exp_sched_rdp_class; unsigned long flags; struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); struct rcu_node *rnp = rcu_get_root(rsp); @@ -3883,6 +3884,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) mutex_init(&rdp->exp_funnel_mutex); rcu_boot_init_nocb_percpu_data(rdp); raw_spin_unlock_irqrestore(&rnp->lock, flags); + if (rsp == &rcu_sched_state) + lockdep_set_class_and_name(&rdp->exp_funnel_mutex, + &rcu_exp_sched_rdp_class, + "rcu_data_exp_sched"); } /* diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 2f9c92884817..10a8faa1b0d4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2517,11 +2517,11 @@ static struct rq *finish_task_switch(struct task_struct *prev) * If a task dies, then it sets TASK_DEAD in tsk->state and calls * schedule one last time. The schedule call will never return, and * the scheduled task must drop that reference. 
- * The test for TASK_DEAD must occur while the runqueue locks are - * still held, otherwise prev could be scheduled on another cpu, die - * there before we look at prev->state, and then the reference would - * be dropped twice. - * Manfred Spraul <manfred@colorfullife.com> + * + * We must observe prev->state before clearing prev->on_cpu (in + * finish_lock_switch), otherwise a concurrent wakeup can get prev + * running on another CPU and we could race with its RUNNING -> DEAD + * transition, resulting in a double drop. */ prev_state = prev->state; vtime_task_switch(prev); @@ -4934,7 +4934,15 @@ void init_idle(struct task_struct *idle, int cpu) idle->state = TASK_RUNNING; idle->se.exec_start = sched_clock(); - do_set_cpus_allowed(idle, cpumask_of(cpu)); +#ifdef CONFIG_SMP + /* + * It's possible that init_idle() gets called multiple times on a task, + * in that case do_set_cpus_allowed() will not do the right thing. + * + * And since this is boot we can forgo the serialization. + */ + set_cpus_allowed_common(idle, cpumask_of(cpu)); +#endif /* * We're having a chicken and egg problem, even though we are * holding rq->lock, the cpu isn't yet set to this cpu so the @@ -4951,7 +4959,7 @@ void init_idle(struct task_struct *idle, int cpu) rq->curr = rq->idle = idle; idle->on_rq = TASK_ON_RQ_QUEUED; -#if defined(CONFIG_SMP) +#ifdef CONFIG_SMP idle->on_cpu = 1; #endif raw_spin_unlock(&rq->lock); @@ -4966,7 +4974,7 @@ void init_idle(struct task_struct *idle, int cpu) idle->sched_class = &idle_sched_class; ftrace_graph_init_idle_task(idle, cpu); vtime_init_idle(idle, cpu); -#if defined(CONFIG_SMP) +#ifdef CONFIG_SMP sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); #endif } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 68cda117574c..6d2a119c7ad9 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1078,9 +1078,10 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) * After ->on_cpu is cleared, the task can be moved to a different CPU. * We must ensure this doesn't happen until the switch is completely * finished. + * + * Pairs with the control dependency and rmb in try_to_wake_up(). */ - smp_wmb(); - prev->on_cpu = 0; + smp_store_release(&prev->on_cpu, 0); #endif #ifdef CONFIG_DEBUG_SPINLOCK /* this is a valid case when another task releases the spinlock */ diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 841b72f720e8..3a38775b50c2 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -217,7 +217,7 @@ static void clocksource_watchdog(unsigned long data) continue; /* Check the deviation from the watchdog clocksource. 
*/ - if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) { + if (abs64(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) { pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n", cs->name); pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n", diff --git a/lib/string.c b/lib/string.c index 13d1e84ddb80..84775ba873b9 100644 --- a/lib/string.c +++ b/lib/string.c @@ -27,6 +27,10 @@ #include <linux/bug.h> #include <linux/errno.h> +#include <asm/byteorder.h> +#include <asm/word-at-a-time.h> +#include <asm/page.h> + #ifndef __HAVE_ARCH_STRNCASECMP /** * strncasecmp - Case insensitive, length-limited string comparison @@ -146,6 +150,91 @@ size_t strlcpy(char *dest, const char *src, size_t size) EXPORT_SYMBOL(strlcpy); #endif +#ifndef __HAVE_ARCH_STRSCPY +/** + * strscpy - Copy a C-string into a sized buffer + * @dest: Where to copy the string to + * @src: Where to copy the string from + * @count: Size of destination buffer + * + * Copy the string, or as much of it as fits, into the dest buffer. + * The routine returns the number of characters copied (not including + * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough. + * The behavior is undefined if the string buffers overlap. + * The destination buffer is always NUL terminated, unless it's zero-sized. + * + * Preferred to strlcpy() since the API doesn't require reading memory + * from the src string beyond the specified "count" bytes, and since + * the return value is easier to error-check than strlcpy()'s. + * In addition, the implementation is robust to the string changing out + * from underneath it, unlike the current strlcpy() implementation. + * + * Preferred to strncpy() since it always returns a valid string, and + * doesn't unnecessarily force the tail of the destination buffer to be + * zeroed. If the zeroing is desired, it's likely cleaner to use strscpy() + * with an overflow test, then just memset() the tail of the dest buffer. + */ +ssize_t strscpy(char *dest, const char *src, size_t count) +{ + const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; + size_t max = count; + long res = 0; + + if (count == 0) + return -E2BIG; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + /* + * If src is unaligned, don't cross a page boundary, + * since we don't know if the next page is mapped. + */ + if ((long)src & (sizeof(long) - 1)) { + size_t limit = PAGE_SIZE - ((long)src & (PAGE_SIZE - 1)); + if (limit < max) + max = limit; + } +#else + /* If src or dest is unaligned, don't do word-at-a-time. */ + if (((long) dest | (long) src) & (sizeof(long) - 1)) + max = 0; +#endif + + while (max >= sizeof(unsigned long)) { + unsigned long c, data; + + c = *(unsigned long *)(src+res); + if (has_zero(c, &data, &constants)) { + data = prep_zero_mask(c, data, &constants); + data = create_zero_mask(data); + *(unsigned long *)(dest+res) = c & zero_bytemask(data); + return res + find_zero(data); + } + *(unsigned long *)(dest+res) = c; + res += sizeof(unsigned long); + count -= sizeof(unsigned long); + max -= sizeof(unsigned long); + } + + while (count) { + char c; + + c = src[res]; + dest[res] = c; + if (!c) + return res; + res++; + count--; + } + + /* Hit buffer length without finding a NUL; force NUL-termination. 
*/ + if (res) + dest[res-1] = '\0'; + + return -E2BIG; +} +EXPORT_SYMBOL(strscpy); +#endif + #ifndef __HAVE_ARCH_STRCAT /** * strcat - Append one %NUL-terminated string to another diff --git a/mm/dmapool.c b/mm/dmapool.c index 71a8998cd03a..312a716fa14c 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -394,7 +394,7 @@ static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) list_for_each_entry(page, &pool->page_list, page_list) { if (dma < page->dma) continue; - if (dma < (page->dma + pool->allocation)) + if ((dma - page->dma) < pool->allocation) return page; } return NULL; diff --git a/mm/filemap.c b/mm/filemap.c index 72940fb38666..1cc5467cf36c 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2473,6 +2473,21 @@ ssize_t generic_perform_write(struct file *file, iov_iter_count(i)); again: + /* + * Bring in the user page that we will copy from _first_. + * Otherwise there's a nasty deadlock on copying from the + * same page as we're writing to, without it being marked + * up-to-date. + * + * Not only is this an optimisation, but it is also required + * to check that the address is actually valid, when atomic + * usercopies are used, below. + */ + if (unlikely(iov_iter_fault_in_readable(i, bytes))) { + status = -EFAULT; + break; + } + status = a_ops->write_begin(file, mapping, pos, bytes, flags, &page, &fsdata); if (unlikely(status < 0)) @@ -2480,17 +2495,8 @@ again: if (mapping_writably_mapped(mapping)) flush_dcache_page(page); - /* - * 'page' is now locked. If we are trying to copy from a - * mapping of 'page' in userspace, the copy might fault and - * would need PageUptodate() to complete. But, page can not be - * made Uptodate without acquiring the page lock, which we hold. - * Deadlock. Avoid with pagefault_disable(). Fix up below with - * iov_iter_fault_in_readable(). - */ - pagefault_disable(); + copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); - pagefault_enable(); flush_dcache_page(page); status = a_ops->write_end(file, mapping, pos, bytes, copied, @@ -2513,14 +2519,6 @@ again: */ bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, iov_iter_single_seg_count(i)); - /* - * This is the fallback to recover if the copy from - * userspace above faults. - */ - if (unlikely(iov_iter_fault_in_readable(i, bytes))) { - status = -EFAULT; - break; - } goto again; } pos += copied; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 999fb0aef8f1..9cc773483624 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3202,6 +3202,14 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, continue; /* + * Shared VMAs have their own reserves and do not affect + * MAP_PRIVATE accounting but it is possible that a shared + * VMA is using the same page so check and skip such VMAs. + */ + if (iter_vma->vm_flags & VM_MAYSHARE) + continue; + + /* * Unmap the page from other VMAs without their own reserves. * They get marked to be SIGKILLed if they fault in these * areas. This is because a future no-page fault on this VMA diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 6ddaeba34e09..1fedbde68f59 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -644,12 +644,14 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) } /* + * Return page count for single (non recursive) @memcg. + * * Implementation Note: reading percpu statistics for memcg. * * Both of vmstat[] and percpu_counter has threshold and do periodic * synchronization to implement "quick" read. There are trade-off between * reading cost and precision of value. 
Then, we may have a chance to implement - * a periodic synchronizion of counter in memcg's counter. + * a periodic synchronization of counter in memcg's counter. * * But this _read() function is used for user interface now. The user accounts * memory usage by memory cgroup and he _always_ requires exact value because @@ -659,17 +661,24 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) * * If there are kernel internal actions which can make use of some not-exact * value, and reading all cpu value can be performance bottleneck in some - * common workload, threashold and synchonization as vmstat[] should be + * common workload, threshold and synchronization as vmstat[] should be * implemented. */ -static long mem_cgroup_read_stat(struct mem_cgroup *memcg, - enum mem_cgroup_stat_index idx) +static unsigned long +mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) { long val = 0; int cpu; + /* Per-cpu values can be negative, use a signed accumulator */ for_each_possible_cpu(cpu) val += per_cpu(memcg->stat->count[idx], cpu); + /* + * Summing races with updates, so val may be negative. Avoid exposing + * transient negative values. + */ + if (val < 0) + val = 0; return val; } @@ -1254,7 +1263,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) continue; - pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i], + pr_cont(" %s:%luKB", mem_cgroup_stat_names[i], K(mem_cgroup_read_stat(iter, i))); } @@ -2819,14 +2828,11 @@ static unsigned long tree_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) { struct mem_cgroup *iter; - long val = 0; + unsigned long val = 0; - /* Per-cpu values can be negative, use a signed accumulator */ for_each_mem_cgroup_tree(iter, memcg) val += mem_cgroup_read_stat(iter, idx); - if (val < 0) /* race ? */ - val = 0; return val; } @@ -3169,7 +3175,7 @@ static int memcg_stat_show(struct seq_file *m, void *v) for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) continue; - seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i], + seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i], mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); } @@ -3194,13 +3200,13 @@ static int memcg_stat_show(struct seq_file *m, void *v) (u64)memsw * PAGE_SIZE); for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { - long long val = 0; + unsigned long long val = 0; if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) continue; for_each_mem_cgroup_tree(mi, memcg) val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE; - seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val); + seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val); } for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { @@ -4179,7 +4185,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void) if (memcg_wb_domain_init(memcg, GFP_KERNEL)) goto out_free_stat; - spin_lock_init(&memcg->pcp_counter_lock); return memcg; out_free_stat: diff --git a/mm/migrate.c b/mm/migrate.c index 7452a00bbb50..842ecd7aaf7f 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -740,6 +740,15 @@ static int move_to_new_page(struct page *newpage, struct page *page, if (PageSwapBacked(page)) SetPageSwapBacked(newpage); + /* + * Indirectly called below, migrate_page_copy() copies PG_dirty and thus + * needs newpage's memcg set to transfer memcg dirty page accounting. + * So perform memcg migration in two steps: + * 1. set newpage->mem_cgroup (here) + * 2. 
clear page->mem_cgroup (below) + */ + set_page_memcg(newpage, page_memcg(page)); + mapping = page_mapping(page); if (!mapping) rc = migrate_page(mapping, newpage, page, mode); @@ -756,9 +765,10 @@ static int move_to_new_page(struct page *newpage, struct page *page, rc = fallback_migrate_page(mapping, newpage, page, mode); if (rc != MIGRATEPAGE_SUCCESS) { + set_page_memcg(newpage, NULL); newpage->mapping = NULL; } else { - mem_cgroup_migrate(page, newpage, false); + set_page_memcg(page, NULL); if (page_was_mapped) remove_migration_ptes(page, newpage); page->mapping = NULL; diff --git a/mm/slab.c b/mm/slab.c index c77ebe6cc87c..4fcc5dd8d5a6 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2190,9 +2190,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) size += BYTES_PER_WORD; } #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) - if (size >= kmalloc_size(INDEX_NODE + 1) - && cachep->object_size > cache_line_size() - && ALIGN(size, cachep->align) < PAGE_SIZE) { + /* + * To activate debug pagealloc, off-slab management is necessary + * requirement. In early phase of initialization, small sized slab + * doesn't get initialized so it would not be possible. So, we need + * to check size >= 256. It guarantees that all necessary small + * sized slab is initialized in current slab initialization sequence. + */ + if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) && + size >= 256 && cachep->object_size > cache_line_size() && + ALIGN(size, cachep->align) < PAGE_SIZE) { cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align); size = PAGE_SIZE; } diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 805a95a48107..830f8a7c1cb1 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -31,7 +31,6 @@ static const char fmt_hex[] = "%#x\n"; static const char fmt_long_hex[] = "%#lx\n"; static const char fmt_dec[] = "%d\n"; -static const char fmt_udec[] = "%u\n"; static const char fmt_ulong[] = "%lu\n"; static const char fmt_u64[] = "%llu\n"; @@ -202,7 +201,7 @@ static ssize_t speed_show(struct device *dev, if (netif_running(netdev)) { struct ethtool_cmd cmd; if (!__ethtool_get_settings(netdev, &cmd)) - ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd)); + ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd)); } rtnl_unlock(); return ret; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index dad4dd37e2aa..fab4599ba8b2 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2958,11 +2958,12 @@ EXPORT_SYMBOL_GPL(skb_append_pagefrags); */ unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) { + unsigned char *data = skb->data; + BUG_ON(len > skb->len); - skb->len -= len; - BUG_ON(skb->len < skb->data_len); - skb_postpull_rcsum(skb, skb->data, len); - return skb->data += len; + __skb_pull(skb, len); + skb_postpull_rcsum(skb, data, len); + return skb->data; } EXPORT_SYMBOL_GPL(skb_pull_rcsum); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index cce97385f743..7d91f4612ac0 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -458,12 +458,17 @@ static int dsa_slave_stp_update(struct net_device *dev, u8 state) static int dsa_slave_port_attr_set(struct net_device *dev, struct switchdev_attr *attr) { - int ret = 0; + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + int ret; switch (attr->id) { case SWITCHDEV_ATTR_PORT_STP_STATE: - if (attr->trans == SWITCHDEV_TRANS_COMMIT) - ret = dsa_slave_stp_update(dev, attr->u.stp_state); + if (attr->trans == SWITCHDEV_TRANS_PREPARE) + ret = ds->drv->port_stp_update ? 
0 : -EOPNOTSUPP; + else + ret = ds->drv->port_stp_update(ds, p->port, + attr->u.stp_state); break; default: ret = -EOPNOTSUPP; diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 6fcbd215cdbc..690bcbc59f26 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -340,6 +340,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, fl4.flowi4_tos = tos; fl4.flowi4_scope = RT_SCOPE_UNIVERSE; fl4.flowi4_tun_key.tun_id = 0; + fl4.flowi4_flags = 0; no_addr = idev->ifa_list == NULL; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index c6ad99ad0ffb..c81deb85acb4 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1737,6 +1737,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, fl4.flowi4_mark = skb->mark; fl4.flowi4_tos = tos; fl4.flowi4_scope = RT_SCOPE_UNIVERSE; + fl4.flowi4_flags = 0; fl4.daddr = daddr; fl4.saddr = saddr; err = fib_lookup(net, &fl4, &res, 0); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index f204089e854c..cb32ce250db0 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1193,7 +1193,8 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk, fl6->flowi6_iif = LOOPBACK_IFINDEX; - if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr)) + if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) || + fl6->flowi6_oif) flags |= RT6_LOOKUP_F_IFACE; if (!ipv6_addr_any(&fl6->saddr)) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index f6b090df3930..afca2eb4dfa7 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1319,7 +1319,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work) tunnel = container_of(work, struct l2tp_tunnel, del_work); sk = l2tp_tunnel_sock_lookup(tunnel); if (!sk) - return; + goto out; sock = sk->sk_socket; @@ -1341,6 +1341,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work) } l2tp_tunnel_sock_put(sk); +out: + l2tp_tunnel_dec_refcount(tunnel); } /* Create a socket for the tunnel, if one isn't set up by @@ -1636,8 +1638,13 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create); */ int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) { + l2tp_tunnel_inc_refcount(tunnel); l2tp_tunnel_closeall(tunnel); - return (false == queue_work(l2tp_wq, &tunnel->del_work)); + if (false == queue_work(l2tp_wq, &tunnel->del_work)) { + l2tp_tunnel_dec_refcount(tunnel); + return 1; + } + return 0; } EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 197c3f59ecbf..b00f1f9611d6 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1208,20 +1208,22 @@ void sctp_assoc_update(struct sctp_association *asoc, * within this document. * * Our basic strategy is to round-robin transports in priorities - * according to sctp_state_prio_map[] e.g., if no such + * according to sctp_trans_score() e.g., if no such * transport with state SCTP_ACTIVE exists, round-robin through * SCTP_UNKNOWN, etc. You get the picture. 
*/ -static const u8 sctp_trans_state_to_prio_map[] = { - [SCTP_ACTIVE] = 3, /* best case */ - [SCTP_UNKNOWN] = 2, - [SCTP_PF] = 1, - [SCTP_INACTIVE] = 0, /* worst case */ -}; - static u8 sctp_trans_score(const struct sctp_transport *trans) { - return sctp_trans_state_to_prio_map[trans->state]; + switch (trans->state) { + case SCTP_ACTIVE: + return 3; /* best case */ + case SCTP_UNKNOWN: + return 2; + case SCTP_PF: + return 1; + default: /* case SCTP_INACTIVE */ + return 0; /* worst case */ + } } static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1, diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 35df1266bf07..6098d4c42fa9 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -244,12 +244,13 @@ void sctp_generate_t3_rtx_event(unsigned long peer) int error; struct sctp_transport *transport = (struct sctp_transport *) peer; struct sctp_association *asoc = transport->asoc; - struct net *net = sock_net(asoc->base.sk); + struct sock *sk = asoc->base.sk; + struct net *net = sock_net(sk); /* Check whether a task is in the sock. */ - bh_lock_sock(asoc->base.sk); - if (sock_owned_by_user(asoc->base.sk)) { + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { pr_debug("%s: sock is busy\n", __func__); /* Try again later. */ @@ -272,10 +273,10 @@ void sctp_generate_t3_rtx_event(unsigned long peer) transport, GFP_ATOMIC); if (error) - asoc->base.sk->sk_err = -error; + sk->sk_err = -error; out_unlock: - bh_unlock_sock(asoc->base.sk); + bh_unlock_sock(sk); sctp_transport_put(transport); } @@ -285,11 +286,12 @@ out_unlock: static void sctp_generate_timeout_event(struct sctp_association *asoc, sctp_event_timeout_t timeout_type) { - struct net *net = sock_net(asoc->base.sk); + struct sock *sk = asoc->base.sk; + struct net *net = sock_net(sk); int error = 0; - bh_lock_sock(asoc->base.sk); - if (sock_owned_by_user(asoc->base.sk)) { + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { pr_debug("%s: sock is busy: timer %d\n", __func__, timeout_type); @@ -312,10 +314,10 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc, (void *)timeout_type, GFP_ATOMIC); if (error) - asoc->base.sk->sk_err = -error; + sk->sk_err = -error; out_unlock: - bh_unlock_sock(asoc->base.sk); + bh_unlock_sock(sk); sctp_association_put(asoc); } @@ -365,10 +367,11 @@ void sctp_generate_heartbeat_event(unsigned long data) int error = 0; struct sctp_transport *transport = (struct sctp_transport *) data; struct sctp_association *asoc = transport->asoc; - struct net *net = sock_net(asoc->base.sk); + struct sock *sk = asoc->base.sk; + struct net *net = sock_net(sk); - bh_lock_sock(asoc->base.sk); - if (sock_owned_by_user(asoc->base.sk)) { + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { pr_debug("%s: sock is busy\n", __func__); /* Try again later. 
*/ @@ -388,11 +391,11 @@ void sctp_generate_heartbeat_event(unsigned long data) asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); - if (error) - asoc->base.sk->sk_err = -error; + if (error) + sk->sk_err = -error; out_unlock: - bh_unlock_sock(asoc->base.sk); + bh_unlock_sock(sk); sctp_transport_put(transport); } @@ -403,10 +406,11 @@ void sctp_generate_proto_unreach_event(unsigned long data) { struct sctp_transport *transport = (struct sctp_transport *) data; struct sctp_association *asoc = transport->asoc; - struct net *net = sock_net(asoc->base.sk); + struct sock *sk = asoc->base.sk; + struct net *net = sock_net(sk); - bh_lock_sock(asoc->base.sk); - if (sock_owned_by_user(asoc->base.sk)) { + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { pr_debug("%s: sock is busy\n", __func__); /* Try again later. */ @@ -427,7 +431,7 @@ void sctp_generate_proto_unreach_event(unsigned long data) asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); out_unlock: - bh_unlock_sock(asoc->base.sk); + bh_unlock_sock(sk); sctp_association_put(asoc); } diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c index cb25c89da623..f1e8dafbd507 100644 --- a/net/sunrpc/xprtrdma/fmr_ops.c +++ b/net/sunrpc/xprtrdma/fmr_ops.c @@ -39,25 +39,6 @@ static int fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, struct rpcrdma_create_data_internal *cdata) { - struct ib_device_attr *devattr = &ia->ri_devattr; - struct ib_mr *mr; - - /* Obtain an lkey to use for the regbufs, which are - * protected from remote access. - */ - if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) { - ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; - } else { - mr = ib_get_dma_mr(ia->ri_pd, IB_ACCESS_LOCAL_WRITE); - if (IS_ERR(mr)) { - pr_err("%s: ib_get_dma_mr for failed with %lX\n", - __func__, PTR_ERR(mr)); - return -ENOMEM; - } - ia->ri_dma_lkey = ia->ri_dma_mr->lkey; - ia->ri_dma_mr = mr; - } - return 0; } diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index d6653f5d0830..5318951b3b53 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -189,11 +189,6 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, struct ib_device_attr *devattr = &ia->ri_devattr; int depth, delta; - /* Obtain an lkey to use for the regbufs, which are - * protected from remote access. - */ - ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; - ia->ri_max_frmr_depth = min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, devattr->max_fast_reg_page_list_len); diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c index 72cf8b15bbb4..617b76f22154 100644 --- a/net/sunrpc/xprtrdma/physical_ops.c +++ b/net/sunrpc/xprtrdma/physical_ops.c @@ -23,7 +23,6 @@ static int physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, struct rpcrdma_create_data_internal *cdata) { - struct ib_device_attr *devattr = &ia->ri_devattr; struct ib_mr *mr; /* Obtain an rkey to use for RPC data payloads. @@ -37,15 +36,8 @@ physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, __func__, PTR_ERR(mr)); return -ENOMEM; } - ia->ri_dma_mr = mr; - - /* Obtain an lkey to use for regbufs. 
- */ - if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) - ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; - else - ia->ri_dma_lkey = ia->ri_dma_mr->lkey; + ia->ri_dma_mr = mr; return 0; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index cb5174284074..5f6ca47092b0 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -136,7 +136,8 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt, ctxt->direction = DMA_FROM_DEVICE; ctxt->read_hdr = head; pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd); - read = min_t(int, pages_needed << PAGE_SHIFT, rs_length); + read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset, + rs_length); for (pno = 0; pno < pages_needed; pno++) { int len = min_t(int, rs_length, PAGE_SIZE - pg_off); @@ -235,7 +236,8 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt, ctxt->direction = DMA_FROM_DEVICE; ctxt->frmr = frmr; pages_needed = min_t(int, pages_needed, xprt->sc_frmr_pg_list_len); - read = min_t(int, pages_needed << PAGE_SHIFT, rs_length); + read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset, + rs_length); frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]); frmr->direction = DMA_FROM_DEVICE; diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 64443eb754ad..41e452bc580c 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -270,8 +270,8 @@ xprt_rdma_destroy(struct rpc_xprt *xprt) xprt_clear_connected(xprt); - rpcrdma_buffer_destroy(&r_xprt->rx_buf); rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia); + rpcrdma_buffer_destroy(&r_xprt->rx_buf); rpcrdma_ia_close(&r_xprt->rx_ia); xprt_rdma_free_addresses(xprt); diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 682996779970..8a477e27bad7 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -755,19 +755,22 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia) cancel_delayed_work_sync(&ep->rep_connect_worker); - if (ia->ri_id->qp) { + if (ia->ri_id->qp) rpcrdma_ep_disconnect(ep, ia); + + rpcrdma_clean_cq(ep->rep_attr.recv_cq); + rpcrdma_clean_cq(ep->rep_attr.send_cq); + + if (ia->ri_id->qp) { rdma_destroy_qp(ia->ri_id); ia->ri_id->qp = NULL; } - rpcrdma_clean_cq(ep->rep_attr.recv_cq); rc = ib_destroy_cq(ep->rep_attr.recv_cq); if (rc) dprintk("RPC: %s: ib_destroy_cq returned %i\n", __func__, rc); - rpcrdma_clean_cq(ep->rep_attr.send_cq); rc = ib_destroy_cq(ep->rep_attr.send_cq); if (rc) dprintk("RPC: %s: ib_destroy_cq returned %i\n", @@ -1252,7 +1255,7 @@ rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags) goto out_free; iov->length = size; - iov->lkey = ia->ri_dma_lkey; + iov->lkey = ia->ri_pd->local_dma_lkey; rb->rg_size = size; rb->rg_owner = NULL; return rb; diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 02512221b8bc..c09414e6f91b 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -65,7 +65,6 @@ struct rpcrdma_ia { struct rdma_cm_id *ri_id; struct ib_pd *ri_pd; struct ib_mr *ri_dma_mr; - u32 ri_dma_lkey; struct completion ri_done; int ri_async_rc; unsigned int ri_max_frmr_depth; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 03ee4d359f6a..ef31b40ad550 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2179,8 +2179,21 @@ unlock: if (UNIXCB(skb).fp) scm.fp = scm_fp_dup(UNIXCB(skb).fp); - sk_peek_offset_fwd(sk, chunk); + if (skip) { + 
sk_peek_offset_fwd(sk, chunk); + skip -= chunk; + } + if (UNIXCB(skb).fp) + break; + + last = skb; + last_len = skb->len; + unix_state_lock(sk); + skb = skb_peek_next(skb, &sk->sk_receive_queue); + if (skb) + goto again; + unix_state_unlock(sk); break; } } while (size); diff --git a/samples/kprobes/jprobe_example.c b/samples/kprobes/jprobe_example.c index 9119ac6a8270..c285a3b8a9f1 100644 --- a/samples/kprobes/jprobe_example.c +++ b/samples/kprobes/jprobe_example.c @@ -1,13 +1,13 @@ /* * Here's a sample kernel module showing the use of jprobes to dump - * the arguments of do_fork(). + * the arguments of _do_fork(). * * For more information on theory of operation of jprobes, see * Documentation/kprobes.txt * * Build and insert the kernel module as done in the kprobe example. * You will see the trace data in /var/log/messages and on the - * console whenever do_fork() is invoked to create a new process. + * console whenever _do_fork() is invoked to create a new process. * (Some messages may be suppressed if syslogd is configured to * eliminate duplicate messages.) */ @@ -17,13 +17,13 @@ #include <linux/kprobes.h> /* - * Jumper probe for do_fork. + * Jumper probe for _do_fork. * Mirror principle enables access to arguments of the probed routine * from the probe handler. */ -/* Proxy routine having the same arguments as actual do_fork() routine */ -static long jdo_fork(unsigned long clone_flags, unsigned long stack_start, +/* Proxy routine having the same arguments as actual _do_fork() routine */ +static long j_do_fork(unsigned long clone_flags, unsigned long stack_start, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { @@ -36,9 +36,9 @@ static long jdo_fork(unsigned long clone_flags, unsigned long stack_start, } static struct jprobe my_jprobe = { - .entry = jdo_fork, + .entry = j_do_fork, .kp = { - .symbol_name = "do_fork", + .symbol_name = "_do_fork", }, }; diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c index 366db1a9fb65..727eb21c9c56 100644 --- a/samples/kprobes/kprobe_example.c +++ b/samples/kprobes/kprobe_example.c @@ -1,13 +1,13 @@ /* * NOTE: This example is works on x86 and powerpc. * Here's a sample kernel module showing the use of kprobes to dump a - * stack trace and selected registers when do_fork() is called. + * stack trace and selected registers when _do_fork() is called. * * For more information on theory of operation of kprobes, see * Documentation/kprobes.txt * * You will see the trace data in /var/log/messages and on the console - * whenever do_fork() is invoked to create a new process. + * whenever _do_fork() is invoked to create a new process. 
*/ #include <linux/kernel.h> @@ -16,7 +16,7 @@ /* For each probe you need to allocate a kprobe structure */ static struct kprobe kp = { - .symbol_name = "do_fork", + .symbol_name = "_do_fork", }; /* kprobe pre_handler: called just before the probed instruction is executed */ diff --git a/samples/kprobes/kretprobe_example.c b/samples/kprobes/kretprobe_example.c index 1041b6731598..ebb1d1aed547 100644 --- a/samples/kprobes/kretprobe_example.c +++ b/samples/kprobes/kretprobe_example.c @@ -7,7 +7,7 @@ * * usage: insmod kretprobe_example.ko func=<func_name> * - * If no func_name is specified, do_fork is instrumented + * If no func_name is specified, _do_fork is instrumented * * For more information on theory of operation of kretprobes, see * Documentation/kprobes.txt @@ -25,7 +25,7 @@ #include <linux/limits.h> #include <linux/sched.h> -static char func_name[NAME_MAX] = "do_fork"; +static char func_name[NAME_MAX] = "_do_fork"; module_param_string(func, func_name, NAME_MAX, S_IRUGO); MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the" " function's execution time"); diff --git a/scripts/extract-cert.c b/scripts/extract-cert.c index 6ce5945a0b89..b071bf476fea 100644 --- a/scripts/extract-cert.c +++ b/scripts/extract-cert.c @@ -17,13 +17,9 @@ #include <stdint.h> #include <stdbool.h> #include <string.h> -#include <getopt.h> #include <err.h> -#include <arpa/inet.h> #include <openssl/bio.h> -#include <openssl/evp.h> #include <openssl/pem.h> -#include <openssl/pkcs7.h> #include <openssl/err.h> #include <openssl/engine.h> diff --git a/scripts/sign-file.c b/scripts/sign-file.c index c3899ca4811c..250a7a645033 100755 --- a/scripts/sign-file.c +++ b/scripts/sign-file.c @@ -20,13 +20,34 @@ #include <getopt.h> #include <err.h> #include <arpa/inet.h> +#include <openssl/opensslv.h> #include <openssl/bio.h> #include <openssl/evp.h> #include <openssl/pem.h> -#include <openssl/cms.h> #include <openssl/err.h> #include <openssl/engine.h> +/* + * Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to + * assume that it's not available and its header file is missing and that we + * should use PKCS#7 instead. Switching to the older PKCS#7 format restricts + * the options we have on specifying the X.509 certificate we want. + * + * Further, older versions of OpenSSL don't support manually adding signers to + * the PKCS#7 message so have to accept that we get a certificate included in + * the signature message. Nor do such older versions of OpenSSL support + * signing with anything other than SHA1 - so we're stuck with that if such is + * the case. 
+ */ +#if OPENSSL_VERSION_NUMBER < 0x10000000L +#define USE_PKCS7 +#endif +#ifndef USE_PKCS7 +#include <openssl/cms.h> +#else +#include <openssl/pkcs7.h> +#endif + struct module_signature { uint8_t algo; /* Public-key crypto algorithm [0] */ uint8_t hash; /* Digest algorithm [0] */ @@ -110,30 +131,42 @@ int main(int argc, char **argv) struct module_signature sig_info = { .id_type = PKEY_ID_PKCS7 }; char *hash_algo = NULL; char *private_key_name, *x509_name, *module_name, *dest_name; - bool save_cms = false, replace_orig; + bool save_sig = false, replace_orig; bool sign_only = false; unsigned char buf[4096]; - unsigned long module_size, cms_size; - unsigned int use_keyid = 0, use_signed_attrs = CMS_NOATTR; + unsigned long module_size, sig_size; + unsigned int use_signed_attrs; const EVP_MD *digest_algo; EVP_PKEY *private_key; +#ifndef USE_PKCS7 CMS_ContentInfo *cms; + unsigned int use_keyid = 0; +#else + PKCS7 *pkcs7; +#endif X509 *x509; BIO *b, *bd = NULL, *bm; int opt, n; - OpenSSL_add_all_algorithms(); ERR_load_crypto_strings(); ERR_clear_error(); key_pass = getenv("KBUILD_SIGN_PIN"); +#ifndef USE_PKCS7 + use_signed_attrs = CMS_NOATTR; +#else + use_signed_attrs = PKCS7_NOATTR; +#endif + do { opt = getopt(argc, argv, "dpk"); switch (opt) { - case 'p': save_cms = true; break; - case 'd': sign_only = true; save_cms = true; break; + case 'p': save_sig = true; break; + case 'd': sign_only = true; save_sig = true; break; +#ifndef USE_PKCS7 case 'k': use_keyid = CMS_USE_KEYID; break; +#endif case -1: break; default: format(); } @@ -157,6 +190,14 @@ int main(int argc, char **argv) replace_orig = true; } +#ifdef USE_PKCS7 + if (strcmp(hash_algo, "sha1") != 0) { + fprintf(stderr, "sign-file: %s only supports SHA1 signing\n", + OPENSSL_VERSION_TEXT); + exit(3); + } +#endif + /* Read the private key and the X.509 cert the PKCS#7 message * will point to. */ @@ -213,7 +254,8 @@ int main(int argc, char **argv) bm = BIO_new_file(module_name, "rb"); ERR(!bm, "%s", module_name); - /* Load the CMS message from the digest buffer. */ +#ifndef USE_PKCS7 + /* Load the signature message from the digest buffer. 
*/ cms = CMS_sign(NULL, NULL, NULL, NULL, CMS_NOCERTS | CMS_PARTIAL | CMS_BINARY | CMS_DETACHED | CMS_STREAM); ERR(!cms, "CMS_sign"); @@ -221,17 +263,31 @@ int main(int argc, char **argv) ERR(!CMS_add1_signer(cms, x509, private_key, digest_algo, CMS_NOCERTS | CMS_BINARY | CMS_NOSMIMECAP | use_keyid | use_signed_attrs), - "CMS_sign_add_signer"); + "CMS_add1_signer"); ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0, "CMS_final"); - if (save_cms) { - char *cms_name; +#else + pkcs7 = PKCS7_sign(x509, private_key, NULL, bm, + PKCS7_NOCERTS | PKCS7_BINARY | + PKCS7_DETACHED | use_signed_attrs); + ERR(!pkcs7, "PKCS7_sign"); +#endif - ERR(asprintf(&cms_name, "%s.p7s", module_name) < 0, "asprintf"); - b = BIO_new_file(cms_name, "wb"); - ERR(!b, "%s", cms_name); - ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0, "%s", cms_name); + if (save_sig) { + char *sig_file_name; + + ERR(asprintf(&sig_file_name, "%s.p7s", module_name) < 0, + "asprintf"); + b = BIO_new_file(sig_file_name, "wb"); + ERR(!b, "%s", sig_file_name); +#ifndef USE_PKCS7 + ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0, + "%s", sig_file_name); +#else + ERR(i2d_PKCS7_bio(b, pkcs7) < 0, + "%s", sig_file_name); +#endif BIO_free(b); } @@ -247,9 +303,13 @@ int main(int argc, char **argv) ERR(n < 0, "%s", module_name); module_size = BIO_number_written(bd); +#ifndef USE_PKCS7 ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name); - cms_size = BIO_number_written(bd) - module_size; - sig_info.sig_len = htonl(cms_size); +#else + ERR(i2d_PKCS7_bio(bd, pkcs7) < 0, "%s", dest_name); +#endif + sig_size = BIO_number_written(bd) - module_size; + sig_info.sig_len = htonl(sig_size); ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name); ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, "%s", dest_name); diff --git a/security/keys/gc.c b/security/keys/gc.c index c7952375ac53..39eac1fd5706 100644 --- a/security/keys/gc.c +++ b/security/keys/gc.c @@ -134,6 +134,10 @@ static noinline void key_gc_unused_keys(struct list_head *keys) kdebug("- %u", key->serial); key_check(key); + /* Throw away the key data */ + if (key->type->destroy) + key->type->destroy(key); + security_key_free(key); /* deal with the user's key tracking and quota */ @@ -148,10 +152,6 @@ static noinline void key_gc_unused_keys(struct list_head *keys) if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) atomic_dec(&key->user->nikeys); - /* now throw away the key memory */ - if (key->type->destroy) - key->type->destroy(key); - key_user_put(key->user); kfree(key->description); diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 584a0343ab0c..85813de26da8 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -633,6 +633,7 @@ static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = { SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11), SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6), SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6), + SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11), {} /* terminator */ }; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index afec6dc9f91f..16b8dcba5c12 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5306,6 +5306,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", 
ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 9d947aef2c8b..def5cc8dff02 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -4520,7 +4520,11 @@ static int patch_stac92hd73xx(struct hda_codec *codec) return err; spec = codec->spec; - codec->power_save_node = 1; + /* enable power_save_node only for new 92HD89xx chips, as it causes + * click noises on old 92HD73xx chips. + */ + if ((codec->core.vendor_id & 0xfffffff0) != 0x111d7670) + codec->power_save_node = 1; spec->linear_tone_beep = 0; spec->gen.mixer_nid = 0x1d; spec->have_spdif_mux = 1; diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c index 58c3164802b8..8c907ebea189 100644 --- a/sound/soc/au1x/db1200.c +++ b/sound/soc/au1x/db1200.c @@ -129,6 +129,8 @@ static struct snd_soc_dai_link db1300_i2s_dai = { .cpu_dai_name = "au1xpsc_i2s.2", .platform_name = "au1xpsc-pcm.2", .codec_name = "wm8731.0-001b", + .dai_fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF | + SND_SOC_DAIFMT_CBM_CFM, .ops = &db1200_i2s_wm8731_ops, }; @@ -146,6 +148,8 @@ static struct snd_soc_dai_link db1550_i2s_dai = { .cpu_dai_name = "au1xpsc_i2s.3", .platform_name = "au1xpsc-pcm.3", .codec_name = "wm8731.0-001b", + .dai_fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF | + SND_SOC_DAIFMT_CBM_CFM, .ops = &db1200_i2s_wm8731_ops, }; diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c index 268a28bd1df4..5c101af0ac63 100644 --- a/sound/soc/codecs/rt5645.c +++ b/sound/soc/codecs/rt5645.c @@ -519,11 +519,11 @@ static const struct snd_kcontrol_new rt5645_snd_controls[] = { RT5645_L_VOL_SFT + 1, RT5645_R_VOL_SFT + 1, 63, 0, adc_vol_tlv), /* ADC Boost Volume Control */ - SOC_DOUBLE_TLV("STO1 ADC Boost Gain", RT5645_ADC_BST_VOL1, + SOC_DOUBLE_TLV("ADC Boost Capture Volume", RT5645_ADC_BST_VOL1, RT5645_STO1_ADC_L_BST_SFT, RT5645_STO1_ADC_R_BST_SFT, 3, 0, adc_bst_tlv), - SOC_DOUBLE_TLV("STO2 ADC Boost Gain", RT5645_ADC_BST_VOL1, - RT5645_STO2_ADC_L_BST_SFT, RT5645_STO2_ADC_R_BST_SFT, 3, 0, + SOC_DOUBLE_TLV("Mono ADC Boost Capture Volume", RT5645_ADC_BST_VOL2, + RT5645_MONO_ADC_L_BST_SFT, RT5645_MONO_ADC_R_BST_SFT, 3, 0, adc_bst_tlv), /* I2S2 function select */ diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h index 0e4cfc6ac649..8c964cfb120d 100644 --- a/sound/soc/codecs/rt5645.h +++ b/sound/soc/codecs/rt5645.h @@ -39,8 +39,8 @@ #define RT5645_STO1_ADC_DIG_VOL 0x1c #define RT5645_MONO_ADC_DIG_VOL 0x1d #define RT5645_ADC_BST_VOL1 0x1e -/* Mixer - D-D */ #define RT5645_ADC_BST_VOL2 0x20 +/* Mixer - D-D */ #define RT5645_STO1_ADC_MIXER 0x27 #define RT5645_MONO_ADC_MIXER 0x28 #define RT5645_AD_DA_MIXER 0x29 @@ -315,12 +315,14 @@ #define RT5645_STO1_ADC_R_BST_SFT 12 #define RT5645_STO1_ADC_COMP_MASK (0x3 << 10) #define RT5645_STO1_ADC_COMP_SFT 10 -#define RT5645_STO2_ADC_L_BST_MASK (0x3 << 8) -#define RT5645_STO2_ADC_L_BST_SFT 8 -#define RT5645_STO2_ADC_R_BST_MASK (0x3 << 6) -#define RT5645_STO2_ADC_R_BST_SFT 6 -#define RT5645_STO2_ADC_COMP_MASK (0x3 << 4) -#define RT5645_STO2_ADC_COMP_SFT 4 + +/* ADC Boost Volume Control (0x20) */ +#define RT5645_MONO_ADC_L_BST_MASK (0x3 << 14) +#define RT5645_MONO_ADC_L_BST_SFT 14 +#define RT5645_MONO_ADC_R_BST_MASK 
(0x3 << 12) +#define RT5645_MONO_ADC_R_BST_SFT 12 +#define RT5645_MONO_ADC_COMP_MASK (0x3 << 10) +#define RT5645_MONO_ADC_COMP_SFT 10 /* Stereo2 ADC Mixer Control (0x26) */ #define RT5645_STO2_ADC_SRC_MASK (0x1 << 15) diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index bfda25ef0dd4..f540f82b1f27 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -1376,8 +1376,8 @@ static int sgtl5000_probe(struct snd_soc_codec *codec) sgtl5000->micbias_resistor << SGTL5000_BIAS_R_SHIFT); snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL, - SGTL5000_BIAS_R_MASK, - sgtl5000->micbias_voltage << SGTL5000_BIAS_R_SHIFT); + SGTL5000_BIAS_VOLT_MASK, + sgtl5000->micbias_voltage << SGTL5000_BIAS_VOLT_SHIFT); /* * disable DAP * TODO: @@ -1549,7 +1549,7 @@ static int sgtl5000_i2c_probe(struct i2c_client *client, else { sgtl5000->micbias_voltage = 0; dev_err(&client->dev, - "Unsuitable MicBias resistor\n"); + "Unsuitable MicBias voltage\n"); } } else { sgtl5000->micbias_voltage = 0; diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c index e3a0bca28bcf..cc1d3981fa4b 100644 --- a/sound/soc/codecs/tas2552.c +++ b/sound/soc/codecs/tas2552.c @@ -549,7 +549,7 @@ static struct snd_soc_dai_driver tas2552_dai[] = { /* * DAC digital volumes. From -7 to 24 dB in 1 dB steps */ -static DECLARE_TLV_DB_SCALE(dac_tlv, -7, 100, 0); +static DECLARE_TLV_DB_SCALE(dac_tlv, -700, 100, 0); static const char * const tas2552_din_source_select[] = { "Muted", diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c index 1a82b19b2644..8739126a1f6f 100644 --- a/sound/soc/codecs/tlv320aic3x.c +++ b/sound/soc/codecs/tlv320aic3x.c @@ -1509,14 +1509,17 @@ static int aic3x_init(struct snd_soc_codec *codec) snd_soc_write(codec, PGAL_2_LLOPM_VOL, DEFAULT_VOL); snd_soc_write(codec, PGAR_2_RLOPM_VOL, DEFAULT_VOL); - /* Line2 to HP Bypass default volume, disconnect from Output Mixer */ - snd_soc_write(codec, LINE2L_2_HPLOUT_VOL, DEFAULT_VOL); - snd_soc_write(codec, LINE2R_2_HPROUT_VOL, DEFAULT_VOL); - snd_soc_write(codec, LINE2L_2_HPLCOM_VOL, DEFAULT_VOL); - snd_soc_write(codec, LINE2R_2_HPRCOM_VOL, DEFAULT_VOL); - /* Line2 Line Out default volume, disconnect from Output Mixer */ - snd_soc_write(codec, LINE2L_2_LLOPM_VOL, DEFAULT_VOL); - snd_soc_write(codec, LINE2R_2_RLOPM_VOL, DEFAULT_VOL); + /* On tlv320aic3104, these registers are reserved and must not be written */ + if (aic3x->model != AIC3X_MODEL_3104) { + /* Line2 to HP Bypass default volume, disconnect from Output Mixer */ + snd_soc_write(codec, LINE2L_2_HPLOUT_VOL, DEFAULT_VOL); + snd_soc_write(codec, LINE2R_2_HPROUT_VOL, DEFAULT_VOL); + snd_soc_write(codec, LINE2L_2_HPLCOM_VOL, DEFAULT_VOL); + snd_soc_write(codec, LINE2R_2_HPRCOM_VOL, DEFAULT_VOL); + /* Line2 Line Out default volume, disconnect from Output Mixer */ + snd_soc_write(codec, LINE2L_2_LLOPM_VOL, DEFAULT_VOL); + snd_soc_write(codec, LINE2R_2_RLOPM_VOL, DEFAULT_VOL); + } switch (aic3x->model) { case AIC3X_MODEL_3X: diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index 293e47a6ff59..2fbc6ef8cbdb 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c @@ -3760,7 +3760,7 @@ static int wm8962_i2c_probe(struct i2c_client *i2c, ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm8962, &wm8962_dai, 1); if (ret < 0) - goto err_enable; + goto err_pm_runtime; regcache_cache_only(wm8962->regmap, true); @@ -3769,6 +3769,8 @@ static int wm8962_i2c_probe(struct i2c_client *i2c, return 0; +err_pm_runtime: + 
pm_runtime_disable(&i2c->dev); err_enable: regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies); err: @@ -3778,6 +3780,7 @@ err: static int wm8962_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); + pm_runtime_disable(&client->dev); return 0; } diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c index a3e97b46b64e..ba34252b7bba 100644 --- a/sound/soc/dwc/designware_i2s.c +++ b/sound/soc/dwc/designware_i2s.c @@ -131,23 +131,32 @@ static inline void i2s_clear_irqs(struct dw_i2s_dev *dev, u32 stream) if (stream == SNDRV_PCM_STREAM_PLAYBACK) { for (i = 0; i < 4; i++) - i2s_write_reg(dev->i2s_base, TOR(i), 0); + i2s_read_reg(dev->i2s_base, TOR(i)); } else { for (i = 0; i < 4; i++) - i2s_write_reg(dev->i2s_base, ROR(i), 0); + i2s_read_reg(dev->i2s_base, ROR(i)); } } static void i2s_start(struct dw_i2s_dev *dev, struct snd_pcm_substream *substream) { - + u32 i, irq; i2s_write_reg(dev->i2s_base, IER, 1); - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + for (i = 0; i < 4; i++) { + irq = i2s_read_reg(dev->i2s_base, IMR(i)); + i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x30); + } i2s_write_reg(dev->i2s_base, ITER, 1); - else + } else { + for (i = 0; i < 4; i++) { + irq = i2s_read_reg(dev->i2s_base, IMR(i)); + i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x03); + } i2s_write_reg(dev->i2s_base, IRER, 1); + } i2s_write_reg(dev->i2s_base, CER, 1); } diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c index 48b2d24dd1f0..b95132e2f9dc 100644 --- a/sound/soc/fsl/imx-ssi.c +++ b/sound/soc/fsl/imx-ssi.c @@ -95,7 +95,8 @@ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: /* data on rising edge of bclk, frame low 1clk before data */ - strcr |= SSI_STCR_TFSI | SSI_STCR_TEFS | SSI_STCR_TXBIT0; + strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSI | + SSI_STCR_TEFS; scr |= SSI_SCR_NET; if (ssi->flags & IMX_SSI_USE_I2S_SLAVE) { scr &= ~SSI_I2S_MODE_MASK; @@ -104,33 +105,31 @@ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) break; case SND_SOC_DAIFMT_LEFT_J: /* data on rising edge of bclk, frame high with data */ - strcr |= SSI_STCR_TXBIT0; + strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP; break; case SND_SOC_DAIFMT_DSP_B: /* data on rising edge of bclk, frame high with data */ - strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0; + strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSL; break; case SND_SOC_DAIFMT_DSP_A: /* data on rising edge of bclk, frame high 1clk before data */ - strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0 | SSI_STCR_TEFS; + strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSL | + SSI_STCR_TEFS; break; } /* DAI clock inversion */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_IB_IF: - strcr |= SSI_STCR_TFSI; - strcr &= ~SSI_STCR_TSCKP; + strcr ^= SSI_STCR_TSCKP | SSI_STCR_TFSI; break; case SND_SOC_DAIFMT_IB_NF: - strcr &= ~(SSI_STCR_TSCKP | SSI_STCR_TFSI); + strcr ^= SSI_STCR_TSCKP; break; case SND_SOC_DAIFMT_NB_IF: - strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP; + strcr ^= SSI_STCR_TFSI; break; case SND_SOC_DAIFMT_NB_NF: - strcr &= ~SSI_STCR_TFSI; - strcr |= SSI_STCR_TSCKP; break; } diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c index 82e350e9501c..ac75816ada7c 100644 --- a/sound/synth/emux/emux_oss.c +++ b/sound/synth/emux/emux_oss.c @@ -69,7 +69,8 @@ snd_emux_init_seq_oss(struct snd_emux *emu) 
struct snd_seq_oss_reg *arg; struct snd_seq_device *dev; - if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS, + /* using device#1 here for avoiding conflicts with OPL3 */ + if (snd_seq_device_new(emu->card, 1, SNDRV_SEQ_DEV_ID_OSS, sizeof(struct snd_seq_oss_reg), &dev) < 0) return; diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 2975632d51e2..c8fe6d177119 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -41,6 +41,7 @@ FEATURE_TESTS ?= \ libelf-getphdrnum \ libelf-mmap \ libnuma \ + numa_num_possible_cpus \ libperl \ libpython \ libpython-version \ @@ -51,7 +52,8 @@ FEATURE_TESTS ?= \ timerfd \ libdw-dwarf-unwind \ zlib \ - lzma + lzma \ + get_cpuid FEATURE_DISPLAY ?= \ dwarf \ @@ -61,13 +63,15 @@ FEATURE_DISPLAY ?= \ libbfd \ libelf \ libnuma \ + numa_num_possible_cpus \ libperl \ libpython \ libslang \ libunwind \ libdw-dwarf-unwind \ zlib \ - lzma + lzma \ + get_cpuid # Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features. # If in the future we need per-feature checks/flags for features not diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index 74ca42093d70..e43a2971bf56 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -19,6 +19,7 @@ FILES= \ test-libelf-getphdrnum.bin \ test-libelf-mmap.bin \ test-libnuma.bin \ + test-numa_num_possible_cpus.bin \ test-libperl.bin \ test-libpython.bin \ test-libpython-version.bin \ @@ -34,7 +35,8 @@ FILES= \ test-compile-x32.bin \ test-zlib.bin \ test-lzma.bin \ - test-bpf.bin + test-bpf.bin \ + test-get_cpuid.bin CC := $(CROSS_COMPILE)gcc -MD PKG_CONFIG := $(CROSS_COMPILE)pkg-config @@ -87,6 +89,9 @@ test-libelf-getphdrnum.bin: test-libnuma.bin: $(BUILD) -lnuma +test-numa_num_possible_cpus.bin: + $(BUILD) -lnuma + test-libunwind.bin: $(BUILD) -lelf @@ -162,6 +167,9 @@ test-zlib.bin: test-lzma.bin: $(BUILD) -llzma +test-get_cpuid.bin: + $(BUILD) + test-bpf.bin: $(BUILD) diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c index 84689a67814a..33cf6f20bd4e 100644 --- a/tools/build/feature/test-all.c +++ b/tools/build/feature/test-all.c @@ -77,6 +77,10 @@ # include "test-libnuma.c" #undef main +#define main main_test_numa_num_possible_cpus +# include "test-numa_num_possible_cpus.c" +#undef main + #define main main_test_timerfd # include "test-timerfd.c" #undef main @@ -117,6 +121,10 @@ # include "test-lzma.c" #undef main +#define main main_test_get_cpuid +# include "test-get_cpuid.c" +#undef main + int main(int argc, char *argv[]) { main_test_libpython(); @@ -136,6 +144,7 @@ int main(int argc, char *argv[]) main_test_libbfd(); main_test_backtrace(); main_test_libnuma(); + main_test_numa_num_possible_cpus(); main_test_timerfd(); main_test_stackprotector_all(); main_test_libdw_dwarf_unwind(); @@ -143,6 +152,7 @@ int main(int argc, char *argv[]) main_test_zlib(); main_test_pthread_attr_setaffinity_np(); main_test_lzma(); + main_test_get_cpuid(); return 0; } diff --git a/tools/build/feature/test-get_cpuid.c b/tools/build/feature/test-get_cpuid.c new file mode 100644 index 000000000000..d7a2c407130d --- /dev/null +++ b/tools/build/feature/test-get_cpuid.c @@ -0,0 +1,7 @@ +#include <cpuid.h> + +int main(void) +{ + unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0; + return __get_cpuid(0x15, &eax, &ebx, &ecx, &edx); +} diff --git a/tools/build/feature/test-numa_num_possible_cpus.c b/tools/build/feature/test-numa_num_possible_cpus.c new file mode 100644 index 000000000000..2606e94b0659 --- /dev/null +++ 
b/tools/build/feature/test-numa_num_possible_cpus.c @@ -0,0 +1,6 @@ +#include <numa.h> + +int main(void) +{ + return numa_num_possible_cpus(); +} diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index 4d885934b919..cf42b090477b 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -3795,7 +3795,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, struct format_field *field; struct printk_map *printk; long long val, fval; - unsigned long addr; + unsigned long long addr; char *str; unsigned char *hex; int print; @@ -3828,13 +3828,30 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, */ if (!(field->flags & FIELD_IS_ARRAY) && field->size == pevent->long_size) { - addr = *(unsigned long *)(data + field->offset); + + /* Handle heterogeneous recording and processing + * architectures + * + * CASE I: + * Traces recorded on 32-bit devices (32-bit + * addressing) and processed on 64-bit devices: + * In this case, only 32 bits should be read. + * + * CASE II: + * Traces recorded on 64 bit devices and processed + * on 32-bit devices: + * In this case, 64 bits must be read. + */ + addr = (pevent->long_size == 8) ? + *(unsigned long long *)(data + field->offset) : + (unsigned long long)*(unsigned int *)(data + field->offset); + /* Check if it matches a print format */ printk = find_printk(pevent, addr); if (printk) trace_seq_puts(s, printk->printk); else - trace_seq_printf(s, "%lx", addr); + trace_seq_printf(s, "%llx", addr); break; } str = malloc(len + 1); diff --git a/tools/perf/Documentation/intel-pt.txt b/tools/perf/Documentation/intel-pt.txt index 4a0501d7a3b4..c94c9de3173e 100644 --- a/tools/perf/Documentation/intel-pt.txt +++ b/tools/perf/Documentation/intel-pt.txt @@ -364,21 +364,6 @@ cyc_thresh Specifies how frequently CYC packets are produced - see cyc CYC packets are not requested by default. -no_force_psb This is a driver option and is not in the IA32_RTIT_CTL MSR. - - It stops the driver resetting the byte count to zero whenever - enabling the trace (for example on context switches) which in - turn results in no PSB being forced. However some processors - will produce a PSB anyway. - - In any case, there is still a PSB when the trace is enabled for - the first time. - - no_force_psb can be used to slightly decrease the trace size but - may make it harder for the decoder to recover from errors. - - no_force_psb is not selected by default. 
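The event-parse.c change above reads pointer-sized trace fields according to the long size recorded in the trace file rather than the host's, so 32-bit recordings decode correctly on 64-bit machines and vice versa. A minimal standalone sketch of the same width dispatch, assuming a little-endian host and using a hypothetical helper name (read_recorded_addr) rather than libtraceevent's real structures:

	#include <stdint.h>
	#include <string.h>

	/*
	 * Sketch only: choose the read width from the *recording* machine's
	 * long size, not the decoder's, mirroring the CASE I / CASE II
	 * comment in the event-parse.c hunk above.
	 */
	static unsigned long long read_recorded_addr(const void *data, int offset,
						     int recorded_long_size)
	{
		unsigned long long addr;

		if (recorded_long_size == 8) {
			memcpy(&addr, (const char *)data + offset, sizeof(uint64_t));
		} else {
			uint32_t addr32;

			memcpy(&addr32, (const char *)data + offset, sizeof(uint32_t));
			addr = addr32;
		}
		return addr;
	}

	int main(void)
	{
		/* A 32-bit recording, stored little-endian. */
		unsigned char rec32[4] = { 0x78, 0x56, 0x34, 0x12 };

		/* Decoding it on any host should yield 0x12345678. */
		return read_recorded_addr(rec32, 0, 4) == 0x12345678 ? 0 : 1;
	}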
- new snapshot option ------------------- diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile index 827557fc7511..38a08539f4bf 100644 --- a/tools/perf/config/Makefile +++ b/tools/perf/config/Makefile @@ -573,9 +573,14 @@ ifndef NO_LIBNUMA msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev); NO_LIBNUMA := 1 else - CFLAGS += -DHAVE_LIBNUMA_SUPPORT - EXTLIBS += -lnuma - $(call detected,CONFIG_NUMA) + ifeq ($(feature-numa_num_possible_cpus), 0) + msg := $(warning Old numa library found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev >= 2.0.8); + NO_LIBNUMA := 1 + else + CFLAGS += -DHAVE_LIBNUMA_SUPPORT + EXTLIBS += -lnuma + $(call detected,CONFIG_NUMA) + endif endif endif @@ -621,8 +626,13 @@ ifdef LIBBABELTRACE endif ifndef NO_AUXTRACE - $(call detected,CONFIG_AUXTRACE) - CFLAGS += -DHAVE_AUXTRACE_SUPPORT + ifeq ($(feature-get_cpuid), 0) + msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc); + NO_AUXTRACE := 1 + else + $(call detected,CONFIG_AUXTRACE) + CFLAGS += -DHAVE_AUXTRACE_SUPPORT + endif endif # Among the variables below, these: diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 349bc96ca1fe..e5f18a288b74 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -17,6 +17,7 @@ libperf-y += levenshtein.o libperf-y += llvm-utils.o libperf-y += parse-options.o libperf-y += parse-events.o +libperf-y += perf_regs.o libperf-y += path.o libperf-y += rbtree.o libperf-y += bitmap.o @@ -103,7 +104,6 @@ libperf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o libperf-y += scripting-engines/ -libperf-$(CONFIG_PERF_REGS) += perf_regs.o libperf-$(CONFIG_ZLIB) += zlib.o libperf-$(CONFIG_LZMA) += lzma.o diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c index 885e8ac83997..6b8eb13e14e4 100644 --- a/tools/perf/util/perf_regs.c +++ b/tools/perf/util/perf_regs.c @@ -6,6 +6,7 @@ const struct sample_reg __weak sample_reg_masks[] = { SMPL_REG_END }; +#ifdef HAVE_PERF_REGS_SUPPORT int perf_reg_value(u64 *valp, struct regs_dump *regs, int id) { int i, idx = 0; @@ -29,3 +30,4 @@ out: *valp = regs->cache_regs[id]; return 0; } +#endif diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h index 2984dcc54d67..679d6e493962 100644 --- a/tools/perf/util/perf_regs.h +++ b/tools/perf/util/perf_regs.h @@ -2,6 +2,7 @@ #define __PERF_REGS_H #include <linux/types.h> +#include <linux/compiler.h> struct regs_dump; diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index eb5f18b75402..c6f9af78f6f5 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -270,12 +270,13 @@ static int kernel_get_module_dso(const char *module, struct dso **pdso) int ret = 0; if (module) { - list_for_each_entry(dso, &host_machine->dsos.head, node) { - if (!dso->kernel) - continue; - if (strncmp(dso->short_name + 1, module, - dso->short_name_len - 2) == 0) - goto found; + char module_name[128]; + + snprintf(module_name, sizeof(module_name), "[%s]", module); + map = map_groups__find_by_name(&host_machine->kmaps, MAP__FUNCTION, module_name); + if (map) { + dso = map->dso; + goto found; } pr_debug("Failed to find module %s.\n", module); return -ENOENT; diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 8a4537ee9bc3..fc3f7c922f99 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1580,7 
+1580,10 @@ static int __perf_session__process_events(struct perf_session *session, file_offset = page_offset; head = data_offset - page_offset; - if (data_size && (data_offset + data_size < file_size)) + if (data_size == 0) + goto out; + + if (data_offset + data_size < file_size) file_size = data_offset + data_size; ui_progress__init(&prog, file_size, "Processing events..."); diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 415c359de465..2d065d065b67 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -196,7 +196,8 @@ static void zero_per_pkg(struct perf_evsel *counter) memset(counter->per_pkg_mask, 0, MAX_NR_CPUS); } -static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip) +static int check_per_pkg(struct perf_evsel *counter, + struct perf_counts_values *vals, int cpu, bool *skip) { unsigned long *mask = counter->per_pkg_mask; struct cpu_map *cpus = perf_evsel__cpus(counter); @@ -218,6 +219,17 @@ static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip) counter->per_pkg_mask = mask; } + /* + * we do not consider an event that has not run as a good + * instance to mark a package as used (skip=1). Otherwise + * we may run into a situation where the first CPU in a package + * is not running anything, yet the second is, and this function + * would mark the package as used after the first CPU and would + * not read the values from the second CPU. + */ + if (!(vals->run && vals->ena)) + return 0; + s = cpu_map__get_socket(cpus, cpu); if (s < 0) return -1; @@ -235,7 +247,7 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel static struct perf_counts_values zero; bool skip = false; - if (check_per_pkg(evsel, cpu, &skip)) { + if (check_per_pkg(evsel, count, cpu, &skip)) { pr_err("failed to read per-pkg counter\n"); return -1; } diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 53bb5f59ec58..475d88d0a1c9 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -38,7 +38,7 @@ static inline char *bfd_demangle(void __maybe_unused *v, #endif #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT -int elf_getphdrnum(Elf *elf, size_t *dst) +static int elf_getphdrnum(Elf *elf, size_t *dst) { GElf_Ehdr gehdr; GElf_Ehdr *ehdr; @@ -1271,8 +1271,6 @@ out_close: static int kcore__init(struct kcore *kcore, char *filename, int elfclass, bool temp) { - GElf_Ehdr *ehdr; - kcore->elfclass = elfclass; if (temp) @@ -1289,9 +1287,7 @@ static int kcore__init(struct kcore *kcore, char *filename, int elfclass, if (!gelf_newehdr(kcore->elf, elfclass)) goto out_end; - ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr); - if (!ehdr) - goto out_end; + memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr)); return 0; @@ -1348,23 +1344,18 @@ static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count) static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset, u64 addr, u64 len) { - GElf_Phdr gphdr; - GElf_Phdr *phdr; - - phdr = gelf_getphdr(kcore->elf, idx, &gphdr); - if (!phdr) - return -1; - - phdr->p_type = PT_LOAD; - phdr->p_flags = PF_R | PF_W | PF_X; - phdr->p_offset = offset; - phdr->p_vaddr = addr; - phdr->p_paddr = 0; - phdr->p_filesz = len; - phdr->p_memsz = len; - phdr->p_align = page_size; - - if (!gelf_update_phdr(kcore->elf, idx, phdr)) + GElf_Phdr phdr = { + .p_type = PT_LOAD, + .p_flags = PF_R | PF_W | PF_X, + .p_offset = offset, + .p_vaddr = addr, + .p_paddr = 0, + .p_filesz = len, + .p_memsz = len, + .p_align = page_size, + }; + + if (!gelf_update_phdr(kcore->elf, 
idx, &phdr)) return -1; return 0; diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index 7acafb3c5592..c2cd9bf2348b 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c @@ -709,7 +709,7 @@ bool find_process(const char *name) dir = opendir(procfs__mountpoint()); if (!dir) - return -1; + return false; /* Walk through the directory. */ while (ret && (d = readdir(dir)) != NULL) { diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 9655cb49c7cb..bde0ef1a63df 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -71,8 +71,11 @@ unsigned int extra_msr_offset32; unsigned int extra_msr_offset64; unsigned int extra_delta_offset32; unsigned int extra_delta_offset64; +unsigned int aperf_mperf_multiplier = 1; int do_smi; double bclk; +double base_hz; +double tsc_tweak = 1.0; unsigned int show_pkg; unsigned int show_core; unsigned int show_cpu; @@ -502,7 +505,7 @@ int format_counters(struct thread_data *t, struct core_data *c, /* %Busy */ if (has_aperf) { if (!skip_c0) - outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc); + outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc/tsc_tweak); else outp += sprintf(outp, "********"); } @@ -510,7 +513,7 @@ int format_counters(struct thread_data *t, struct core_data *c, /* Bzy_MHz */ if (has_aperf) outp += sprintf(outp, "%8.0f", - 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float); + 1.0 * t->tsc * tsc_tweak / units * t->aperf / t->mperf / interval_float); /* TSC_MHz */ outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float); @@ -984,6 +987,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) return -3; if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf)) return -4; + t->aperf = t->aperf * aperf_mperf_multiplier; + t->mperf = t->mperf * aperf_mperf_multiplier; } if (do_smi) { @@ -1149,6 +1154,19 @@ int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; + +static void +calculate_tsc_tweak() +{ + unsigned long long msr; + unsigned int base_ratio; + + get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr); + base_ratio = (msr >> 8) & 0xFF; + base_hz = base_ratio * bclk * 1000000; + tsc_tweak = base_hz / tsc_hz; +} + static void dump_nhm_platform_info(void) { @@ -1926,8 +1944,6 @@ int has_config_tdp(unsigned int family, unsigned int model) switch (model) { case 0x3A: /* IVB */ - case 0x3E: /* IVB Xeon */ - case 0x3C: /* HSW */ case 0x3F: /* HSX */ case 0x45: /* HSW */ @@ -2543,6 +2559,13 @@ int is_knl(unsigned int family, unsigned int model) return 0; } +unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model) +{ + if (is_knl(family, model)) + return 1024; + return 1; +} + #define SLM_BCLK_FREQS 5 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0}; @@ -2744,6 +2767,9 @@ void process_cpuid() } } + if (has_aperf) + aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model); + do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model); do_snb_cstates = has_snb_msrs(family, model); do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2); @@ -2762,6 +2788,9 @@ void process_cpuid() if (debug) 
dump_cstate_pstate_config_info(); + if (has_skl_msrs(family, model)) + calculate_tsc_tweak(); + return; } @@ -3090,7 +3119,7 @@ int get_and_dump_counters(void) } void print_version() { - fprintf(stderr, "turbostat version 4.7 17-June, 2015" + fprintf(stderr, "turbostat version 4.8 26-Sep, 2015" " - Len Brown <lenb@kernel.org>\n"); } diff --git a/tools/usb/usbip/src/usbip_detach.c b/tools/usb/usbip/src/usbip_detach.c index 05c6d15856eb..9db9d21bb2ec 100644 --- a/tools/usb/usbip/src/usbip_detach.c +++ b/tools/usb/usbip/src/usbip_detach.c @@ -47,7 +47,9 @@ static int detach_port(char *port) uint8_t portnum; char path[PATH_MAX+1]; - for (unsigned int i = 0; i < strlen(port); i++) + unsigned int port_len = strlen(port); + + for (unsigned int i = 0; i < port_len; i++) if (!isdigit(port[i])) { err("invalid port %s", port); return -1; |
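For reference, the turbostat %Busy scaling introduced above is simply tsc_tweak = base_hz / tsc_hz, with base_hz derived from MSR_NHM_PLATFORM_INFO[15:8] and bclk. A standalone sketch of that arithmetic with made-up numbers (base ratio 24, 100 MHz bclk, 2.496 GHz measured TSC - none of these values come from the patch):

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical inputs, only to exercise the formulas used by
		 * calculate_tsc_tweak() and format_counters() above. */
		unsigned int base_ratio = 24;		/* MSR_NHM_PLATFORM_INFO[15:8] */
		double bclk = 100.0;			/* MHz */
		double tsc_hz = 2496000000.0;		/* measured TSC rate */
		unsigned long long mperf = 1200000000ULL, tsc = 2496000000ULL;

		double base_hz = base_ratio * bclk * 1000000;	/* 2.4 GHz */
		double tsc_tweak = base_hz / tsc_hz;		/* ~0.9615 */

		/* Without the tweak this sample would report ~48%; with it, ~50%. */
		printf("%%Busy = %.2f\n", 100.0 * mperf / tsc / tsc_tweak);
		return 0;
	}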