313 files changed, 2258 insertions, 1316 deletions
diff --git a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt index ce6a1a072028..8a3c40829899 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt @@ -30,10 +30,6 @@ should only be used when a device has multiple interrupt parents. Example: interrupts-extended = <&intc1 5 1>, <&intc2 1 0>; -A device node may contain either "interrupts" or "interrupts-extended", but not -both. If both properties are present, then the operating system should log an -error and use only the data in "interrupts". - 2) Interrupt controller nodes ----------------------------- diff --git a/Documentation/devicetree/bindings/pci/pci.txt b/Documentation/devicetree/bindings/pci/pci.txt index 41aeed38926d..f8fbe9af7b2f 100644 --- a/Documentation/devicetree/bindings/pci/pci.txt +++ b/Documentation/devicetree/bindings/pci/pci.txt @@ -7,3 +7,14 @@ And for the interrupt mapping part: Open Firmware Recommended Practice: Interrupt Mapping http://www.openfirmware.org/1275/practice/imap/imap0_9d.pdf + +Additionally to the properties specified in the above standards a host bridge +driver implementation may support the following properties: + +- linux,pci-domain: + If present this property assigns a fixed PCI domain number to a host bridge, + otherwise an unstable (across boots) unique number will be assigned. + It is required to either not set this property at all or set it for all + host bridges in the system, otherwise potentially conflicting domain numbers + may be assigned to root buses behind different host bridges. The domain + number for each host bridge in the system must be unique. diff --git a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt index a186181c402b..51b943cc9770 100644 --- a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt @@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices, including the meaning of the phrase "pin configuration node". -TZ1090-PDC's pin configuration nodes act as a container for an abitrary number +TZ1090-PDC's pin configuration nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents some desired configuration for a pin, a group, or a list of pins or groups. This configuration can include the mux function to select on those pin(s)/group(s), and various pin configuration diff --git a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt index 4b27c99f7f9d..49d0e6050940 100644 --- a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt @@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices, including the meaning of the phrase "pin configuration node". -TZ1090's pin configuration nodes act as a container for an abitrary number of +TZ1090's pin configuration nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents some desired configuration for a pin, a group, or a list of pins or groups. 
This configuration can include the mux function to select on those pin(s)/group(s), and various pin configuration diff --git a/Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt b/Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt index daa768956069..ac4da9fe07bd 100644 --- a/Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt +++ b/Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt @@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices, including the meaning of the phrase "pin configuration node". -Lantiq's pin configuration nodes act as a container for an abitrary number of +Lantiq's pin configuration nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents some desired configuration for a pin, a group, or a list of pins or groups. This configuration can include the mux function to select on those group(s), and two pin configuration parameters: diff --git a/Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt b/Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt index b5469db1d7ad..e89b4677567d 100644 --- a/Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt +++ b/Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt @@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices, including the meaning of the phrase "pin configuration node". -Lantiq's pin configuration nodes act as a container for an abitrary number of +Lantiq's pin configuration nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents some desired configuration for a pin, a group, or a list of pins or groups. This configuration can include the mux function to select on those group(s), and two pin configuration parameters: diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt index 61e73cde9ae9..3c8ce28baad6 100644 --- a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt +++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt @@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices, including the meaning of the phrase "pin configuration node". -Tegra's pin configuration nodes act as a container for an abitrary number of +Tegra's pin configuration nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents some desired configuration for a pin, a group, or a list of pins or groups. This configuration can include the mux function to select on those pin(s)/group(s), and various pin configuration diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt index c596a6ad3285..5f55be59d914 100644 --- a/Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt +++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt @@ -13,7 +13,7 @@ Optional properties: Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices. -SiRFprimaII's pinmux nodes act as a container for an abitrary number of subnodes. +SiRFprimaII's pinmux nodes act as a container for an arbitrary number of subnodes. 
Each of these subnodes represents some desired configuration for a group of pins. Required subnode-properties: diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt index b4480d5c3aca..458615596946 100644 --- a/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt +++ b/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt @@ -32,7 +32,7 @@ Required properties: Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices. -SPEAr's pinmux nodes act as a container for an abitrary number of subnodes. Each +SPEAr's pinmux nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents muxing for a pin, a group, or a list of pins or groups. diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt index 2fb90b37aa09..a7bde64798c7 100644 --- a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt @@ -18,7 +18,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices, including the meaning of the phrase "pin configuration node". -Qualcomm's pin configuration nodes act as a container for an abitrary number of +Qualcomm's pin configuration nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents some desired configuration for a pin, a group, or a list of pins or groups. This configuration can include the mux function to select on those pin(s)/group(s), and various pin configuration diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt index ffafa1990a30..c4ea61ac56f2 100644 --- a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt @@ -47,7 +47,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices, including the meaning of the phrase "pin configuration node". -The pin configuration nodes act as a container for an abitrary number of +The pin configuration nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents some desired configuration for a pin, a group, or a list of pins or groups. This configuration can include the mux function to select on those pin(s)/group(s), and various pin configuration diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt index e33e4dcdce79..6e88e91feb11 100644 --- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt @@ -18,7 +18,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices, including the meaning of the phrase "pin configuration node". -Qualcomm's pin configuration nodes act as a container for an abitrary number of +Qualcomm's pin configuration nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents some desired configuration for a pin, a group, or a list of pins or groups. 
This configuration can include the mux function to select on those pin(s)/group(s), and various pin configuration diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt index 93b7de91b9f6..eb8d8aa41f20 100644 --- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt @@ -47,7 +47,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices, including the meaning of the phrase "pin configuration node". -The pin configuration nodes act as a container for an abitrary number of +The pin configuration nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents some desired configuration for a pin, a group, or a list of pins or groups. This configuration can include the mux function to select on those pin(s)/group(s), and various pin configuration diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt index d2ea80dc43eb..e4d6a9d20f7d 100644 --- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt @@ -18,7 +18,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices, including the meaning of the phrase "pin configuration node". -Qualcomm's pin configuration nodes act as a container for an abitrary number of +Qualcomm's pin configuration nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents some desired configuration for a pin, a group, or a list of pins or groups. This configuration can include the mux function to select on those pin(s)/group(s), and various pin configuration diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 723999d73744..a344ec2713a5 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -34,6 +34,7 @@ chipidea Chipidea, Inc chrp Common Hardware Reference Platform chunghwa Chunghwa Picture Tubes Ltd. cirrus Cirrus Logic, Inc. +cnm Chips&Media, Inc. cortina Cortina Systems, Inc. crystalfontz Crystalfontz America, Inc. dallas Maxim Integrated Products (formerly Dallas Semiconductor) @@ -92,6 +93,7 @@ maxim Maxim Integrated Products mediatek MediaTek Inc. micrel Micrel Inc. microchip Microchip Technology Inc. +micron Micron Technology Inc. mitsubishi Mitsubishi Electric Corporation mosaixtech Mosaix Technologies, Inc. moxa Moxa @@ -127,6 +129,7 @@ renesas Renesas Electronics Corporation ricoh Ricoh Co. Ltd. rockchip Fuzhou Rockchip Electronics Co., Ltd samsung Samsung Semiconductor +sandisk Sandisk Corporation sbs Smart Battery System schindler Schindler seagate Seagate Technology PLC @@ -138,7 +141,7 @@ silergy Silergy Corp. sirf SiRF Technology, Inc. sitronix Sitronix Technology Corporation smsc Standard Microsystems Corporation -snps Synopsys, Inc. +snps Synopsys, Inc. solidrun SolidRun sony Sony Corporation spansion Spansion Inc. 
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt index 530850a72735..a27c950ece61 100644 --- a/Documentation/filesystems/overlayfs.txt +++ b/Documentation/filesystems/overlayfs.txt @@ -64,7 +64,7 @@ is formed. At mount time, the two directories given as mount options "lowerdir" and "upperdir" are combined into a merged directory: - mount -t overlayfs overlayfs -olowerdir=/lower,upperdir=/upper,\ + mount -t overlay overlay -olowerdir=/lower,upperdir=/upper,\ workdir=/work /merged The "workdir" needs to be an empty directory on the same filesystem diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt index 412f45ca2d73..1d6d02d6ba52 100644 --- a/Documentation/networking/timestamping.txt +++ b/Documentation/networking/timestamping.txt @@ -136,7 +136,7 @@ SOF_TIMESTAMPING_OPT_ID: This option is implemented only for transmit timestamps. There, the timestamp is always looped along with a struct sock_extended_err. - The option modifies field ee_info to pass an id that is unique + The option modifies field ee_data to pass an id that is unique among all possibly concurrently outstanding timestamp requests for that socket. In practice, it is a monotonically increasing u32 (that wraps). diff --git a/MAINTAINERS b/MAINTAINERS index c444907ccd69..0ff630de8a6d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6888,11 +6888,12 @@ F: drivers/scsi/osd/ F: include/scsi/osd_* F: fs/exofs/ -OVERLAYFS FILESYSTEM +OVERLAY FILESYSTEM M: Miklos Szeredi <miklos@szeredi.hu> -L: linux-fsdevel@vger.kernel.org +L: linux-unionfs@vger.kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/vfs.git S: Supported -F: fs/overlayfs/* +F: fs/overlayfs/ F: Documentation/filesystems/overlayfs.txt P54 WIRELESS DRIVER @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 18 SUBLEVEL = 0 -EXTRAVERSION = -rc5 +EXTRAVERSION = -rc7 NAME = Diseased Newt # *DOCUMENTATION* diff --git a/arch/arm/boot/dts/exynos5250-snow.dts b/arch/arm/boot/dts/exynos5250-snow.dts index e51fcef884a4..60429ad1c5d8 100644 --- a/arch/arm/boot/dts/exynos5250-snow.dts +++ b/arch/arm/boot/dts/exynos5250-snow.dts @@ -624,4 +624,8 @@ num-cs = <1>; }; +&usbdrd_dwc3 { + dr_mode = "host"; +}; + #include "cros-ec-keyboard.dtsi" diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index f21b9aa00fbb..d55c1a2eb798 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi @@ -555,7 +555,7 @@ #size-cells = <1>; ranges; - dwc3 { + usbdrd_dwc3: dwc3 { compatible = "synopsys,dwc3"; reg = <0x12000000 0x10000>; interrupts = <0 72 0>; diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi index d46c213a17ad..eed697a6bd6b 100644 --- a/arch/arm/boot/dts/r8a7740.dtsi +++ b/arch/arm/boot/dts/r8a7740.dtsi @@ -433,7 +433,7 @@ clocks = <&cpg_clocks R8A7740_CLK_S>, <&cpg_clocks R8A7740_CLK_S>, <&sub_clk>, <&cpg_clocks R8A7740_CLK_B>, - <&sub_clk>, <&sub_clk>, + <&cpg_clocks R8A7740_CLK_HPP>, <&sub_clk>, <&cpg_clocks R8A7740_CLK_B>; #clock-cells = <1>; renesas,clock-indices = < diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi index d0e17733dc1a..e20affe156c1 100644 --- a/arch/arm/boot/dts/r8a7790.dtsi +++ b/arch/arm/boot/dts/r8a7790.dtsi @@ -666,9 +666,9 @@ #clock-cells = <0>; clock-output-names = "sd2"; }; - sd3_clk: sd3_clk@e615007c { + sd3_clk: sd3_clk@e615026c { compatible = "renesas,r8a7790-div6-clock", "renesas,cpg-div6-clock"; - reg = <0 0xe615007c 0 4>; + reg = <0 0xe615026c 0 
4>; clocks = <&pll1_div2_clk>; #clock-cells = <0>; clock-output-names = "sd3"; diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index 543f895d18d3..2e652e2339e9 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi @@ -361,6 +361,10 @@ clocks = <&ahb1_gates 6>; resets = <&ahb1_rst 6>; #dma-cells = <1>; + + /* DMA controller requires AHB1 clocked from PLL6 */ + assigned-clocks = <&ahb1_mux>; + assigned-clock-parents = <&pll6>; }; mmc0: mmc@01c0f000 { diff --git a/arch/arm/boot/dts/tegra114-dalmore.dts b/arch/arm/boot/dts/tegra114-dalmore.dts index 5c21d216515a..8b7aa0dcdc6e 100644 --- a/arch/arm/boot/dts/tegra114-dalmore.dts +++ b/arch/arm/boot/dts/tegra114-dalmore.dts @@ -15,6 +15,7 @@ aliases { rtc0 = "/i2c@7000d000/tps65913@58"; rtc1 = "/rtc@7000e000"; + serial0 = &uartd; }; memory { diff --git a/arch/arm/boot/dts/tegra114-roth.dts b/arch/arm/boot/dts/tegra114-roth.dts index c7c6825f11fb..38acf78d7815 100644 --- a/arch/arm/boot/dts/tegra114-roth.dts +++ b/arch/arm/boot/dts/tegra114-roth.dts @@ -15,6 +15,10 @@ linux,initrd-end = <0x82800000>; }; + aliases { + serial0 = &uartd; + }; + firmware { trusted-foundations { compatible = "tlm,trusted-foundations"; @@ -916,8 +920,6 @@ regulator-name = "vddio-sdmmc3"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <3300000>; - regulator-always-on; - regulator-boot-on; }; ldousb { @@ -962,7 +964,7 @@ sdhci@78000400 { status = "okay"; bus-width = <4>; - vmmc-supply = <&vddio_sdmmc3>; + vqmmc-supply = <&vddio_sdmmc3>; cd-gpios = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_LOW>; power-gpios = <&gpio TEGRA_GPIO(H, 0) GPIO_ACTIVE_HIGH>; }; @@ -971,7 +973,6 @@ sdhci@78000600 { status = "okay"; bus-width = <8>; - vmmc-supply = <&vdd_1v8>; non-removable; }; diff --git a/arch/arm/boot/dts/tegra114-tn7.dts b/arch/arm/boot/dts/tegra114-tn7.dts index 963662145635..f91c2c9b2f94 100644 --- a/arch/arm/boot/dts/tegra114-tn7.dts +++ b/arch/arm/boot/dts/tegra114-tn7.dts @@ -15,6 +15,10 @@ linux,initrd-end = <0x82800000>; }; + aliases { + serial0 = &uartd; + }; + firmware { trusted-foundations { compatible = "tlm,trusted-foundations"; @@ -240,7 +244,6 @@ sdhci@78000600 { status = "okay"; bus-width = <8>; - vmmc-supply = <&vdd_1v8>; non-removable; }; diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi index 2ca9c1807f72..222f3b3f4dd5 100644 --- a/arch/arm/boot/dts/tegra114.dtsi +++ b/arch/arm/boot/dts/tegra114.dtsi @@ -9,13 +9,6 @@ compatible = "nvidia,tegra114"; interrupt-parent = <&gic>; - aliases { - serial0 = &uarta; - serial1 = &uartb; - serial2 = &uartc; - serial3 = &uartd; - }; - host1x@50000000 { compatible = "nvidia,tegra114-host1x", "simple-bus"; reg = <0x50000000 0x00028000>; diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts index 029c9a021541..51b373ff1065 100644 --- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts +++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts @@ -10,6 +10,7 @@ aliases { rtc0 = "/i2c@0,7000d000/pmic@40"; rtc1 = "/rtc@0,7000e000"; + serial0 = &uartd; }; memory { diff --git a/arch/arm/boot/dts/tegra124-nyan-big.dts b/arch/arm/boot/dts/tegra124-nyan-big.dts index 7d0784ce4c74..53181d310247 100644 --- a/arch/arm/boot/dts/tegra124-nyan-big.dts +++ b/arch/arm/boot/dts/tegra124-nyan-big.dts @@ -10,6 +10,7 @@ aliases { rtc0 = "/i2c@0,7000d000/pmic@40"; rtc1 = "/rtc@0,7000e000"; + serial0 = &uarta; }; memory { diff --git a/arch/arm/boot/dts/tegra124-venice2.dts b/arch/arm/boot/dts/tegra124-venice2.dts index 
13008858e967..5c3f7813360d 100644 --- a/arch/arm/boot/dts/tegra124-venice2.dts +++ b/arch/arm/boot/dts/tegra124-venice2.dts @@ -10,6 +10,7 @@ aliases { rtc0 = "/i2c@0,7000d000/pmic@40"; rtc1 = "/rtc@0,7000e000"; + serial0 = &uarta; }; memory { diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi index 478c555ebd96..df2b06b29985 100644 --- a/arch/arm/boot/dts/tegra124.dtsi +++ b/arch/arm/boot/dts/tegra124.dtsi @@ -286,7 +286,7 @@ * the APB DMA based serial driver, the comptible is * "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart". */ - serial@0,70006000 { + uarta: serial@0,70006000 { compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart"; reg = <0x0 0x70006000 0x0 0x40>; reg-shift = <2>; @@ -299,7 +299,7 @@ status = "disabled"; }; - serial@0,70006040 { + uartb: serial@0,70006040 { compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart"; reg = <0x0 0x70006040 0x0 0x40>; reg-shift = <2>; @@ -312,7 +312,7 @@ status = "disabled"; }; - serial@0,70006200 { + uartc: serial@0,70006200 { compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart"; reg = <0x0 0x70006200 0x0 0x40>; reg-shift = <2>; @@ -325,7 +325,7 @@ status = "disabled"; }; - serial@0,70006300 { + uartd: serial@0,70006300 { compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart"; reg = <0x0 0x70006300 0x0 0x40>; reg-shift = <2>; diff --git a/arch/arm/boot/dts/tegra20-harmony.dts b/arch/arm/boot/dts/tegra20-harmony.dts index a37279af687c..b926a07b9443 100644 --- a/arch/arm/boot/dts/tegra20-harmony.dts +++ b/arch/arm/boot/dts/tegra20-harmony.dts @@ -10,6 +10,7 @@ aliases { rtc0 = "/i2c@7000d000/tps6586x@34"; rtc1 = "/rtc@7000e000"; + serial0 = &uartd; }; memory { diff --git a/arch/arm/boot/dts/tegra20-iris-512.dts b/arch/arm/boot/dts/tegra20-iris-512.dts index 8cfb83f42e1f..1dd7d7bfdfcc 100644 --- a/arch/arm/boot/dts/tegra20-iris-512.dts +++ b/arch/arm/boot/dts/tegra20-iris-512.dts @@ -6,6 +6,11 @@ model = "Toradex Colibri T20 512MB on Iris"; compatible = "toradex,iris", "toradex,colibri_t20-512", "nvidia,tegra20"; + aliases { + serial0 = &uarta; + serial1 = &uartd; + }; + host1x@50000000 { hdmi@54280000 { status = "okay"; diff --git a/arch/arm/boot/dts/tegra20-medcom-wide.dts b/arch/arm/boot/dts/tegra20-medcom-wide.dts index 1b7c56b33aca..9b87526ab0b7 100644 --- a/arch/arm/boot/dts/tegra20-medcom-wide.dts +++ b/arch/arm/boot/dts/tegra20-medcom-wide.dts @@ -6,6 +6,10 @@ model = "Avionic Design Medcom-Wide board"; compatible = "ad,medcom-wide", "ad,tamonten", "nvidia,tegra20"; + aliases { + serial0 = &uartd; + }; + pwm@7000a000 { status = "okay"; }; diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts index d4438e30de45..ed7e1009326c 100644 --- a/arch/arm/boot/dts/tegra20-paz00.dts +++ b/arch/arm/boot/dts/tegra20-paz00.dts @@ -10,6 +10,8 @@ aliases { rtc0 = "/i2c@7000d000/tps6586x@34"; rtc1 = "/rtc@7000e000"; + serial0 = &uarta; + serial1 = &uartc; }; memory { diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts index a1d4bf9895d7..ea282c7c0ca5 100644 --- a/arch/arm/boot/dts/tegra20-seaboard.dts +++ b/arch/arm/boot/dts/tegra20-seaboard.dts @@ -10,6 +10,7 @@ aliases { rtc0 = "/i2c@7000d000/tps6586x@34"; rtc1 = "/rtc@7000e000"; + serial0 = &uartd; }; memory { diff --git a/arch/arm/boot/dts/tegra20-tamonten.dtsi b/arch/arm/boot/dts/tegra20-tamonten.dtsi index 80e7d386ce34..13d4e6185275 100644 --- a/arch/arm/boot/dts/tegra20-tamonten.dtsi +++ b/arch/arm/boot/dts/tegra20-tamonten.dtsi @@ -7,6 +7,7 @@ aliases { rtc0 = 
"/i2c@7000d000/tps6586x@34"; rtc1 = "/rtc@7000e000"; + serial0 = &uartd; }; memory { diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts index 5ad87979ab13..d99af4ef9c64 100644 --- a/arch/arm/boot/dts/tegra20-trimslice.dts +++ b/arch/arm/boot/dts/tegra20-trimslice.dts @@ -10,6 +10,7 @@ aliases { rtc0 = "/i2c@7000c500/rtc@56"; rtc1 = "/rtc@7000e000"; + serial0 = &uarta; }; memory { diff --git a/arch/arm/boot/dts/tegra20-ventana.dts b/arch/arm/boot/dts/tegra20-ventana.dts index ca8484cccddc..04c58e9ca490 100644 --- a/arch/arm/boot/dts/tegra20-ventana.dts +++ b/arch/arm/boot/dts/tegra20-ventana.dts @@ -10,6 +10,7 @@ aliases { rtc0 = "/i2c@7000d000/tps6586x@34"; rtc1 = "/rtc@7000e000"; + serial0 = &uartd; }; memory { diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts index 1843725785c9..340d81108df1 100644 --- a/arch/arm/boot/dts/tegra20-whistler.dts +++ b/arch/arm/boot/dts/tegra20-whistler.dts @@ -10,6 +10,7 @@ aliases { rtc0 = "/i2c@7000d000/max8907@3c"; rtc1 = "/rtc@7000e000"; + serial0 = &uarta; }; memory { diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi index 3b374c49d04d..8acf5d85c99d 100644 --- a/arch/arm/boot/dts/tegra20.dtsi +++ b/arch/arm/boot/dts/tegra20.dtsi @@ -9,14 +9,6 @@ compatible = "nvidia,tegra20"; interrupt-parent = <&intc>; - aliases { - serial0 = &uarta; - serial1 = &uartb; - serial2 = &uartc; - serial3 = &uartd; - serial4 = &uarte; - }; - host1x@50000000 { compatible = "nvidia,tegra20-host1x", "simple-bus"; reg = <0x50000000 0x00024000>; diff --git a/arch/arm/boot/dts/tegra30-apalis-eval.dts b/arch/arm/boot/dts/tegra30-apalis-eval.dts index 45d40f024585..6236bdecb48b 100644 --- a/arch/arm/boot/dts/tegra30-apalis-eval.dts +++ b/arch/arm/boot/dts/tegra30-apalis-eval.dts @@ -11,6 +11,10 @@ rtc0 = "/i2c@7000c000/rtc@68"; rtc1 = "/i2c@7000d000/tps65911@2d"; rtc2 = "/rtc@7000e000"; + serial0 = &uarta; + serial1 = &uartb; + serial2 = &uartc; + serial3 = &uartd; }; pcie-controller@00003000 { diff --git a/arch/arm/boot/dts/tegra30-beaver.dts b/arch/arm/boot/dts/tegra30-beaver.dts index cee8f2246fdb..6b157eeabcc5 100644 --- a/arch/arm/boot/dts/tegra30-beaver.dts +++ b/arch/arm/boot/dts/tegra30-beaver.dts @@ -9,6 +9,7 @@ aliases { rtc0 = "/i2c@7000d000/tps65911@2d"; rtc1 = "/rtc@7000e000"; + serial0 = &uarta; }; memory { diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi index 206379546244..a1b682ea01bd 100644 --- a/arch/arm/boot/dts/tegra30-cardhu.dtsi +++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi @@ -30,6 +30,8 @@ aliases { rtc0 = "/i2c@7000d000/tps65911@2d"; rtc1 = "/rtc@7000e000"; + serial0 = &uarta; + serial1 = &uartc; }; memory { diff --git a/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts b/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts index 7793abd5bef1..4d3ddc585641 100644 --- a/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts +++ b/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts @@ -10,6 +10,9 @@ rtc0 = "/i2c@7000c000/rtc@68"; rtc1 = "/i2c@7000d000/tps65911@2d"; rtc2 = "/rtc@7000e000"; + serial0 = &uarta; + serial1 = &uartb; + serial2 = &uartd; }; host1x@50000000 { diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi index aa6ccea13d30..b270b9e3d455 100644 --- a/arch/arm/boot/dts/tegra30.dtsi +++ b/arch/arm/boot/dts/tegra30.dtsi @@ -9,14 +9,6 @@ compatible = "nvidia,tegra30"; interrupt-parent = <&intc>; - aliases { - serial0 = &uarta; - serial1 = &uartb; - serial2 = &uartc; - serial3 = &uartd; - serial4 = 
&uarte; - }; - pcie-controller@00003000 { compatible = "nvidia,tegra30-pcie"; device_type = "pci"; diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig index 72058b8a6f4d..e21ef830a483 100644 --- a/arch/arm/configs/exynos_defconfig +++ b/arch/arm/configs/exynos_defconfig @@ -142,11 +142,13 @@ CONFIG_MMC_DW_IDMAC=y CONFIG_MMC_DW_EXYNOS=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_MAX77686=y +CONFIG_RTC_DRV_MAX77802=y CONFIG_RTC_DRV_S5M=y CONFIG_RTC_DRV_S3C=y CONFIG_DMADEVICES=y CONFIG_PL330_DMA=y CONFIG_COMMON_CLK_MAX77686=y +CONFIG_COMMON_CLK_MAX77802=y CONFIG_COMMON_CLK_S2MPS11=y CONFIG_EXYNOS_IOMMU=y CONFIG_IIO=y diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 3487046d8a78..9d7a32f93fcf 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -217,6 +217,7 @@ CONFIG_I2C_CADENCE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_I2C_EXYNOS5=y CONFIG_I2C_MV64XXX=y +CONFIG_I2C_S3C2410=y CONFIG_I2C_SIRF=y CONFIG_I2C_TEGRA=y CONFIG_I2C_ST=y diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index fc44d3761f9e..ce73ab635414 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -44,16 +44,6 @@ struct cpu_context_save { __u32 extra[2]; /* Xscale 'acc' register, etc */ }; -struct arm_restart_block { - union { - /* For user cache flushing */ - struct { - unsigned long start; - unsigned long end; - } cache; - }; -}; - /* * low level task data that entry.S needs immediate access to. * __switch_to() assumes cpu_context follows immediately after cpu_domain. @@ -79,7 +69,6 @@ struct thread_info { unsigned long thumbee_state; /* ThumbEE Handler Base register */ #endif struct restart_block restart_block; - struct arm_restart_block arm_restart_block; }; #define INIT_THREAD_INFO(tsk) \ diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 0c8b10801d36..9f5d81881eb6 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -533,8 +533,6 @@ static int bad_syscall(int n, struct pt_regs *regs) return regs->ARM_r0; } -static long do_cache_op_restart(struct restart_block *); - static inline int __do_cache_op(unsigned long start, unsigned long end) { @@ -543,24 +541,8 @@ __do_cache_op(unsigned long start, unsigned long end) do { unsigned long chunk = min(PAGE_SIZE, end - start); - if (signal_pending(current)) { - struct thread_info *ti = current_thread_info(); - - ti->restart_block = (struct restart_block) { - .fn = do_cache_op_restart, - }; - - ti->arm_restart_block = (struct arm_restart_block) { - { - .cache = { - .start = start, - .end = end, - }, - }, - }; - - return -ERESTART_RESTARTBLOCK; - } + if (fatal_signal_pending(current)) + return 0; ret = flush_cache_user_range(start, start + chunk); if (ret) @@ -573,15 +555,6 @@ __do_cache_op(unsigned long start, unsigned long end) return 0; } -static long do_cache_op_restart(struct restart_block *unused) -{ - struct arm_restart_block *restart_block; - - restart_block = ¤t_thread_info()->arm_restart_block; - return __do_cache_op(restart_block->cache.start, - restart_block->cache.end); -} - static inline int do_cache_op(unsigned long start, unsigned long end, int flags) { diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 57a403a5c22b..8664ff17cbbe 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -197,7 +197,8 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, pgd = pgdp + pgd_index(addr); do { next = kvm_pgd_addr_end(addr, end); - unmap_puds(kvm, pgd, 
addr, next); + if (!pgd_none(*pgd)) + unmap_puds(kvm, pgd, addr, next); } while (pgd++, addr = next, addr != end); } @@ -834,6 +835,11 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu) return kvm_vcpu_dabt_iswrite(vcpu); } +static bool kvm_is_device_pfn(unsigned long pfn) +{ + return !pfn_valid(pfn); +} + static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct kvm_memory_slot *memslot, unsigned long hva, unsigned long fault_status) @@ -904,7 +910,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (is_error_pfn(pfn)) return -EFAULT; - if (kvm_is_mmio_pfn(pfn)) + if (kvm_is_device_pfn(pfn)) mem_type = PAGE_S2_DEVICE; spin_lock(&kvm->mmu_lock); diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c index 2bdc3233abe2..044b51185fcc 100644 --- a/arch/arm/mach-mvebu/coherency.c +++ b/arch/arm/mach-mvebu/coherency.c @@ -400,6 +400,8 @@ int __init coherency_init(void) type == COHERENCY_FABRIC_TYPE_ARMADA_380) armada_375_380_coherency_init(np); + of_node_put(np); + return 0; } diff --git a/arch/arm/mach-shmobile/clock-r8a7740.c b/arch/arm/mach-shmobile/clock-r8a7740.c index 0794f0426e70..19df9cb30495 100644 --- a/arch/arm/mach-shmobile/clock-r8a7740.c +++ b/arch/arm/mach-shmobile/clock-r8a7740.c @@ -455,7 +455,7 @@ enum { MSTP128, MSTP127, MSTP125, MSTP116, MSTP111, MSTP100, MSTP117, - MSTP230, + MSTP230, MSTP229, MSTP222, MSTP218, MSTP217, MSTP216, MSTP214, MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, @@ -474,11 +474,12 @@ static struct clk mstp_clks[MSTP_NR] = { [MSTP127] = SH_CLK_MSTP32(&div4_clks[DIV4_S], SMSTPCR1, 27, 0), /* CEU20 */ [MSTP125] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */ [MSTP117] = SH_CLK_MSTP32(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */ - [MSTP116] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */ + [MSTP116] = SH_CLK_MSTP32(&div4_clks[DIV4_HPP], SMSTPCR1, 16, 0), /* IIC0 */ [MSTP111] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 11, 0), /* TMU1 */ [MSTP100] = SH_CLK_MSTP32(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */ [MSTP230] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 30, 0), /* SCIFA6 */ + [MSTP229] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 29, 0), /* INTCA */ [MSTP222] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 22, 0), /* SCIFA7 */ [MSTP218] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */ [MSTP217] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */ @@ -575,6 +576,10 @@ static struct clk_lookup lookups[] = { CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP222]), CLKDEV_DEV_ID("e6cd0000.serial", &mstp_clks[MSTP222]), + CLKDEV_DEV_ID("renesas_intc_irqpin.0", &mstp_clks[MSTP229]), + CLKDEV_DEV_ID("renesas_intc_irqpin.1", &mstp_clks[MSTP229]), + CLKDEV_DEV_ID("renesas_intc_irqpin.2", &mstp_clks[MSTP229]), + CLKDEV_DEV_ID("renesas_intc_irqpin.3", &mstp_clks[MSTP229]), CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP230]), CLKDEV_DEV_ID("e6cc0000.serial", &mstp_clks[MSTP230]), diff --git a/arch/arm/mach-shmobile/clock-r8a7790.c b/arch/arm/mach-shmobile/clock-r8a7790.c index 126ddafad526..f62265200592 100644 --- a/arch/arm/mach-shmobile/clock-r8a7790.c +++ b/arch/arm/mach-shmobile/clock-r8a7790.c @@ -68,7 +68,7 @@ #define SDCKCR 0xE6150074 #define SD2CKCR 0xE6150078 -#define SD3CKCR 0xE615007C +#define SD3CKCR 0xE615026C #define MMC0CKCR 0xE6150240 #define MMC1CKCR 0xE6150244 #define SSPCKCR 0xE6150248 diff --git 
a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c index b7bd8e509668..328657d011d5 100644 --- a/arch/arm/mach-shmobile/setup-sh73a0.c +++ b/arch/arm/mach-shmobile/setup-sh73a0.c @@ -26,6 +26,7 @@ #include <linux/of_platform.h> #include <linux/delay.h> #include <linux/input.h> +#include <linux/i2c/i2c-sh_mobile.h> #include <linux/io.h> #include <linux/serial_sci.h> #include <linux/sh_dma.h> @@ -192,11 +193,18 @@ static struct resource i2c4_resources[] = { }, }; +static struct i2c_sh_mobile_platform_data i2c_platform_data = { + .clks_per_count = 2, +}; + static struct platform_device i2c0_device = { .name = "i2c-sh_mobile", .id = 0, .resource = i2c0_resources, .num_resources = ARRAY_SIZE(i2c0_resources), + .dev = { + .platform_data = &i2c_platform_data, + }, }; static struct platform_device i2c1_device = { @@ -204,6 +212,9 @@ static struct platform_device i2c1_device = { .id = 1, .resource = i2c1_resources, .num_resources = ARRAY_SIZE(i2c1_resources), + .dev = { + .platform_data = &i2c_platform_data, + }, }; static struct platform_device i2c2_device = { @@ -211,6 +222,9 @@ static struct platform_device i2c2_device = { .id = 2, .resource = i2c2_resources, .num_resources = ARRAY_SIZE(i2c2_resources), + .dev = { + .platform_data = &i2c_platform_data, + }, }; static struct platform_device i2c3_device = { @@ -218,6 +232,9 @@ static struct platform_device i2c3_device = { .id = 3, .resource = i2c3_resources, .num_resources = ARRAY_SIZE(i2c3_resources), + .dev = { + .platform_data = &i2c_platform_data, + }, }; static struct platform_device i2c4_device = { @@ -225,6 +242,9 @@ static struct platform_device i2c4_device = { .id = 4, .resource = i2c4_resources, .num_resources = ARRAY_SIZE(i2c4_resources), + .dev = { + .platform_data = &i2c_platform_data, + }, }; static const struct sh_dmae_slave_config sh73a0_dmae_slaves[] = { diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c index da7be13aecce..ab95f5391a2b 100644 --- a/arch/arm/mach-tegra/irq.c +++ b/arch/arm/mach-tegra/irq.c @@ -99,42 +99,42 @@ static inline void tegra_irq_write_mask(unsigned int irq, unsigned long reg) static void tegra_mask(struct irq_data *d) { - if (d->irq < FIRST_LEGACY_IRQ) + if (d->hwirq < FIRST_LEGACY_IRQ) return; - tegra_irq_write_mask(d->irq, ICTLR_CPU_IER_CLR); + tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IER_CLR); } static void tegra_unmask(struct irq_data *d) { - if (d->irq < FIRST_LEGACY_IRQ) + if (d->hwirq < FIRST_LEGACY_IRQ) return; - tegra_irq_write_mask(d->irq, ICTLR_CPU_IER_SET); + tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IER_SET); } static void tegra_ack(struct irq_data *d) { - if (d->irq < FIRST_LEGACY_IRQ) + if (d->hwirq < FIRST_LEGACY_IRQ) return; - tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_CLR); + tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_CLR); } static void tegra_eoi(struct irq_data *d) { - if (d->irq < FIRST_LEGACY_IRQ) + if (d->hwirq < FIRST_LEGACY_IRQ) return; - tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_CLR); + tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_CLR); } static int tegra_retrigger(struct irq_data *d) { - if (d->irq < FIRST_LEGACY_IRQ) + if (d->hwirq < FIRST_LEGACY_IRQ) return 0; - tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_SET); + tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_SET); return 1; } @@ -142,7 +142,7 @@ static int tegra_retrigger(struct irq_data *d) #ifdef CONFIG_PM_SLEEP static int tegra_set_wake(struct irq_data *d, unsigned int enable) { - u32 irq = d->irq; + u32 irq = d->hwirq; u32 index, mask; if (irq < 
FIRST_LEGACY_IRQ || diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index b3a947863ac7..22ac2a6fbfe3 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -270,7 +270,6 @@ __v7_pj4b_setup: /* Auxiliary Debug Modes Control 1 Register */ #define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */ #define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */ -#define PJ4B_BCK_OFF_STREX (1 << 5) /* Enable the back off of STREX instr */ #define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */ /* Auxiliary Debug Modes Control 2 Register */ @@ -293,7 +292,6 @@ __v7_pj4b_setup: /* Auxiliary Debug Modes Control 1 Register */ mrc p15, 1, r0, c15, c1, 1 orr r0, r0, #PJ4B_CLEAN_LINE - orr r0, r0, #PJ4B_BCK_OFF_STREX orr r0, r0, #PJ4B_INTER_PARITY bic r0, r0, #PJ4B_STATIC_BP mcr p15, 1, r0, c15, c1, 1 diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 23259f104c66..afa2b3c4df4a 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -535,7 +535,7 @@ ENTRY(cpu_xscale_do_suspend) mrc p15, 0, r5, c15, c1, 0 @ CP access reg mrc p15, 0, r6, c13, c0, 0 @ PID mrc p15, 0, r7, c3, c0, 0 @ domain ID - mrc p15, 0, r8, c1, c1, 0 @ auxiliary control reg + mrc p15, 0, r8, c1, c0, 1 @ auxiliary control reg mrc p15, 0, r9, c1, c0, 0 @ control reg bic r4, r4, #2 @ clear frequency change bit stmia r0, {r4 - r9} @ store cp regs @@ -552,7 +552,7 @@ ENTRY(cpu_xscale_do_resume) mcr p15, 0, r6, c13, c0, 0 @ PID mcr p15, 0, r7, c3, c0, 0 @ domain ID mcr p15, 0, r1, c2, c0, 0 @ translation table base addr - mcr p15, 0, r8, c1, c1, 0 @ auxiliary control reg + mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg mov r0, r9 @ control register b cpu_resume_mmu ENDPROC(cpu_xscale_do_resume) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 4cc3b719208e..3d7c2df89946 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -424,6 +424,11 @@ static const struct sys_reg_desc sys_reg_descs[] = { /* VBAR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000), NULL, reset_val, VBAR_EL1, 0 }, + + /* ICC_SRE_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101), + trap_raz_wi }, + /* CONTEXTIDR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 }, @@ -690,6 +695,10 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR }, { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 }, { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 }, + + /* ICC_SRE */ + { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi }, + { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, }; diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index ec6b9acb6bea..dbe46f43884d 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1563,7 +1563,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, for (i = 0; i < npages; i++) { pfn = gfn_to_pfn(kvm, base_gfn + i); - if (!kvm_is_mmio_pfn(pfn)) { + if (!kvm_is_reserved_pfn(pfn)) { kvm_set_pmt_entry(kvm, base_gfn + i, pfn << PAGE_SHIFT, _PAGE_AR_RWX | _PAGE_MA_WB); diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index f43aa536c517..9536ef912f59 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2101,9 +2101,17 @@ config 64BIT_PHYS_ADDR config ARCH_PHYS_ADDR_T_64BIT def_bool 64BIT_PHYS_ADDR +choice + prompt "SmartMIPS or microMIPS ASE support" + +config 
CPU_NEEDS_NO_SMARTMIPS_OR_MICROMIPS + bool "None" + help + Select this if you want neither microMIPS nor SmartMIPS support + config CPU_HAS_SMARTMIPS depends on SYS_SUPPORTS_SMARTMIPS - bool "Support for the SmartMIPS ASE" + bool "SmartMIPS" help SmartMIPS is a extension of the MIPS32 architecture aimed at increased security at both hardware and software level for @@ -2115,11 +2123,13 @@ config CPU_HAS_SMARTMIPS config CPU_MICROMIPS depends on SYS_SUPPORTS_MICROMIPS - bool "Build kernel using microMIPS ISA" + bool "microMIPS" help When this option is enabled the kernel will be built using the microMIPS ISA +endchoice + config CPU_HAS_MSA bool "Support for the MIPS SIMD Architecture (EXPERIMENTAL)" depends on CPU_SUPPORTS_MSA diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h index e194f957ca8c..fdbff44e5482 100644 --- a/arch/mips/include/asm/jump_label.h +++ b/arch/mips/include/asm/jump_label.h @@ -20,9 +20,15 @@ #define WORD_INSN ".word" #endif +#ifdef CONFIG_CPU_MICROMIPS +#define NOP_INSN "nop32" +#else +#define NOP_INSN "nop" +#endif + static __always_inline bool arch_static_branch(struct static_key *key) { - asm_volatile_goto("1:\tnop\n\t" + asm_volatile_goto("1:\t" NOP_INSN "\n\t" "nop\n\t" ".pushsection __jump_table, \"aw\"\n\t" WORD_INSN " 1b, %l[l_yes], %0\n\t" diff --git a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h index 7d28f95b0512..6d69332f21ec 100644 --- a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h @@ -41,10 +41,8 @@ #define cpu_has_mcheck 0 #define cpu_has_mdmx 0 #define cpu_has_mips16 0 -#define cpu_has_mips32r1 0 #define cpu_has_mips32r2 0 #define cpu_has_mips3d 0 -#define cpu_has_mips64r1 0 #define cpu_has_mips64r2 0 #define cpu_has_mipsmt 0 #define cpu_has_prefetch 0 diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index b46cd220a018..22a135ac91de 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -661,6 +661,8 @@ #define MIPS_CONF6_SYND (_ULCAST_(1) << 13) /* proAptiv FTLB on/off bit */ #define MIPS_CONF6_FTLBEN (_ULCAST_(1) << 15) +/* FTLB probability bits */ +#define MIPS_CONF6_FTLBP_SHIFT (16) #define MIPS_CONF7_WII (_ULCAST_(1) << 31) diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index 4520adc8699b..cd6e0afc6833 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -257,7 +257,11 @@ static inline void protected_flush_icache_line(unsigned long addr) */ static inline void protected_writeback_dcache_line(unsigned long addr) { +#ifdef CONFIG_EVA + protected_cachee_op(Hit_Writeback_Inv_D, addr); +#else protected_cache_op(Hit_Writeback_Inv_D, addr); +#endif } static inline void protected_writeback_scache_line(unsigned long addr) diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index a10951090234..22a5624e2fd2 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -301,7 +301,8 @@ do { \ __get_kernel_common((x), size, __gu_ptr); \ else \ __get_user_common((x), size, __gu_ptr); \ - } \ + } else \ + (x) = 0; \ \ __gu_err; \ }) @@ -316,6 +317,7 @@ do { \ " .insn \n" \ " .section .fixup,\"ax\" \n" \ "3: li %0, %4 \n" \ + " move %1, $0 \n" \ " j 2b \n" \ " .previous \n" \ " .section __ex_table,\"a\" \n" \ @@ -630,6 +632,7 @@ do { \ " .insn \n" \ " .section .fixup,\"ax\" \n" \ "3: li %0, %4 
\n" \ + " move %1, $0 \n" \ " j 2b \n" \ " .previous \n" \ " .section __ex_table,\"a\" \n" \ @@ -773,10 +776,11 @@ extern void __put_user_unaligned_unknown(void); "jal\t" #destination "\n\t" #endif -#ifndef CONFIG_CPU_DADDI_WORKAROUNDS -#define DADDI_SCRATCH "$0" -#else +#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) && \ + defined(CONFIG_CPU_HAS_PREFETCH)) #define DADDI_SCRATCH "$3" +#else +#define DADDI_SCRATCH "$0" #endif extern size_t __copy_user(void *__to, const void *__from, size_t __n); @@ -1418,7 +1422,7 @@ static inline long __strnlen_user(const char __user *s, long n) } /* - * strlen_user: - Get the size of a string in user space. + * strnlen_user: - Get the size of a string in user space. * @str: The string to measure. * * Context: User context only. This function may sleep. @@ -1427,9 +1431,7 @@ static inline long __strnlen_user(const char __user *s, long n) * * Returns the size of the string INCLUDING the terminating NUL. * On exception, returns 0. - * - * If there is a limit on the length of a valid string, you may wish to - * consider using strnlen_user() instead. + * If the string is too long, returns a value greater than @n. */ static inline long strnlen_user(const char __user *s, long n) { diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index 9dc58568f230..d001bb1ad177 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -1045,7 +1045,7 @@ #define __NR_seccomp (__NR_Linux + 316) #define __NR_getrandom (__NR_Linux + 317) #define __NR_memfd_create (__NR_Linux + 318) -#define __NR_memfd_create (__NR_Linux + 319) +#define __NR_bpf (__NR_Linux + 319) /* * Offset of the last N32 flavoured syscall diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S index 290c23b51678..86495072a922 100644 --- a/arch/mips/kernel/bmips_vec.S +++ b/arch/mips/kernel/bmips_vec.S @@ -208,7 +208,6 @@ bmips_reset_nmi_vec_end: END(bmips_reset_nmi_vec) .set pop - .previous /*********************************************************************** * CPU1 warm restart vector (used for second and subsequent boots). @@ -281,5 +280,3 @@ LEAF(bmips_enable_xks01) jr ra END(bmips_enable_xks01) - - .previous diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index e6e97d2a5c9e..0384b05ab5a0 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -229,6 +229,7 @@ LEAF(mips_cps_core_init) nop .set push + .set mips32r2 .set mt /* Only allow 1 TC per VPE to execute... */ @@ -345,6 +346,7 @@ LEAF(mips_cps_boot_vpes) nop .set push + .set mips32r2 .set mt 1: /* Enter VPE configuration state */ diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 94c4a0c0a577..dc49cf30c2db 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -193,6 +193,32 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa) static char unknown_isa[] = KERN_ERR \ "Unsupported ISA type, c0.config0: %d."; +static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c) +{ + + unsigned int probability = c->tlbsize / c->tlbsizevtlb; + + /* + * 0 = All TLBWR instructions go to FTLB + * 1 = 15:1: For every 16 TBLWR instructions, 15 go to the + * FTLB and 1 goes to the VTLB. + * 2 = 7:1: As above with 7:1 ratio. + * 3 = 3:1: As above with 3:1 ratio. + * + * Use the linear midpoint as the probability threshold. 
+ */ + if (probability >= 12) + return 1; + else if (probability >= 6) + return 2; + else + /* + * So FTLB is less than 4 times bigger than VTLB. + * A 3:1 ratio can still be useful though. + */ + return 3; +} + static void set_ftlb_enable(struct cpuinfo_mips *c, int enable) { unsigned int config6; @@ -203,9 +229,14 @@ static void set_ftlb_enable(struct cpuinfo_mips *c, int enable) case CPU_P5600: /* proAptiv & related cores use Config6 to enable the FTLB */ config6 = read_c0_config6(); + /* Clear the old probability value */ + config6 &= ~(3 << MIPS_CONF6_FTLBP_SHIFT); if (enable) /* Enable FTLB */ - write_c0_config6(config6 | MIPS_CONF6_FTLBEN); + write_c0_config6(config6 | + (calculate_ftlb_probability(c) + << MIPS_CONF6_FTLBP_SHIFT) + | MIPS_CONF6_FTLBEN); else /* Disable FTLB */ write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN); @@ -757,31 +788,34 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_LOONGSON2; __cpu_name[cpu] = "ICT Loongson-2"; set_elf_platform(cpu, "loongson2e"); + set_isa(c, MIPS_CPU_ISA_III); break; case PRID_REV_LOONGSON2F: c->cputype = CPU_LOONGSON2; __cpu_name[cpu] = "ICT Loongson-2"; set_elf_platform(cpu, "loongson2f"); + set_isa(c, MIPS_CPU_ISA_III); break; case PRID_REV_LOONGSON3A: c->cputype = CPU_LOONGSON3; - c->writecombine = _CACHE_UNCACHED_ACCELERATED; __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3a"); + set_isa(c, MIPS_CPU_ISA_M64R1); break; case PRID_REV_LOONGSON3B_R1: case PRID_REV_LOONGSON3B_R2: c->cputype = CPU_LOONGSON3; __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3b"); + set_isa(c, MIPS_CPU_ISA_M64R1); break; } - set_isa(c, MIPS_CPU_ISA_III); c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC | MIPS_CPU_32FPR; c->tlbsize = 64; + c->writecombine = _CACHE_UNCACHED_ACCELERATED; break; case PRID_IMP_LOONGSON_32: /* Loongson-1 */ decode_configs(c); diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c index 6001610cfe55..dda800e9e731 100644 --- a/arch/mips/kernel/jump_label.c +++ b/arch/mips/kernel/jump_label.c @@ -18,31 +18,53 @@ #ifdef HAVE_JUMP_LABEL -#define J_RANGE_MASK ((1ul << 28) - 1) +/* + * Define parameters for the standard MIPS and the microMIPS jump + * instruction encoding respectively: + * + * - the ISA bit of the target, either 0 or 1 respectively, + * + * - the amount the jump target address is shifted right to fit in the + * immediate field of the machine instruction, either 2 or 1, + * + * - the mask determining the size of the jump region relative to the + * delay-slot instruction, either 256MB or 128MB, + * + * - the jump target alignment, either 4 or 2 bytes. + */ +#define J_ISA_BIT IS_ENABLED(CONFIG_CPU_MICROMIPS) +#define J_RANGE_SHIFT (2 - J_ISA_BIT) +#define J_RANGE_MASK ((1ul << (26 + J_RANGE_SHIFT)) - 1) +#define J_ALIGN_MASK ((1ul << J_RANGE_SHIFT) - 1) void arch_jump_label_transform(struct jump_entry *e, enum jump_label_type type) { + union mips_instruction *insn_p; union mips_instruction insn; - union mips_instruction *insn_p = - (union mips_instruction *)(unsigned long)e->code; - /* Jump only works within a 256MB aligned region. */ - BUG_ON((e->target & ~J_RANGE_MASK) != (e->code & ~J_RANGE_MASK)); + insn_p = (union mips_instruction *)msk_isa16_mode(e->code); + + /* Jump only works within an aligned region its delay slot is in. */ + BUG_ON((e->target & ~J_RANGE_MASK) != ((e->code + 4) & ~J_RANGE_MASK)); - /* Target must have 4 byte alignment. 
*/ - BUG_ON((e->target & 3) != 0); + /* Target must have the right alignment and ISA must be preserved. */ + BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT); if (type == JUMP_LABEL_ENABLE) { - insn.j_format.opcode = j_op; - insn.j_format.target = (e->target & J_RANGE_MASK) >> 2; + insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op; + insn.j_format.target = e->target >> J_RANGE_SHIFT; } else { insn.word = 0; /* nop */ } get_online_cpus(); mutex_lock(&text_mutex); - *insn_p = insn; + if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) { + insn_p->halfword[0] = insn.word >> 16; + insn_p->halfword[1] = insn.word; + } else + *insn_p = insn; flush_icache_range((unsigned long)insn_p, (unsigned long)insn_p + sizeof(*insn_p)); diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index 31b1b763cb29..c5c4fd54d797 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c @@ -94,12 +94,12 @@ int rtlx_open(int index, int can_sleep) int ret = 0; if (index >= RTLX_CHANNELS) { - pr_debug(KERN_DEBUG "rtlx_open index out of range\n"); + pr_debug("rtlx_open index out of range\n"); return -ENOSYS; } if (atomic_inc_return(&channel_wqs[index].in_open) > 1) { - pr_debug(KERN_DEBUG "rtlx_open channel %d already opened\n", index); + pr_debug("rtlx_open channel %d already opened\n", index); ret = -EBUSY; goto out_fail; } diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index d21ec57b6e95..f3b635f86c39 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -485,7 +485,7 @@ static void __init bootmem_init(void) * NOTE: historically plat_mem_setup did the entire platform initialization. * This was rather impractical because it meant plat_mem_setup had to * get away without any kind of memory allocator. To keep old code from - * breaking plat_setup was just renamed to plat_setup and a second platform + * breaking plat_setup was just renamed to plat_mem_setup and a second platform * initialization hook for anything else was introduced. 
*/ @@ -493,7 +493,7 @@ static int usermem __initdata; static int __init early_parse_mem(char *p) { - unsigned long start, size; + phys_t start, size; /* * If a user specifies memory size, we diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 1d57605e4615..16f1e4f2bf3c 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -658,13 +658,13 @@ static int signal_setup(void) save_fp_context = _save_fp_context; restore_fp_context = _restore_fp_context; } else { - save_fp_context = copy_fp_from_sigcontext; - restore_fp_context = copy_fp_to_sigcontext; + save_fp_context = copy_fp_to_sigcontext; + restore_fp_context = copy_fp_from_sigcontext; } #endif /* CONFIG_SMP */ #else - save_fp_context = copy_fp_from_sigcontext;; - restore_fp_context = copy_fp_to_sigcontext; + save_fp_context = copy_fp_to_sigcontext; + restore_fp_context = copy_fp_from_sigcontext; #endif return 0; diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index c17ef80cf65a..5d3238af9b5c 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S @@ -503,6 +503,7 @@ STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@) .Ldone\@: jr ra + nop .if __memcpy == 1 END(memcpy) .set __memcpy, 0 diff --git a/arch/mips/loongson/common/Makefile b/arch/mips/loongson/common/Makefile index 0bb9cc9dc621..d87e03330b29 100644 --- a/arch/mips/loongson/common/Makefile +++ b/arch/mips/loongson/common/Makefile @@ -11,7 +11,8 @@ obj-$(CONFIG_PCI) += pci.o # Serial port support # obj-$(CONFIG_EARLY_PRINTK) += early_printk.o -obj-$(CONFIG_SERIAL_8250) += serial.o +loongson-serial-$(CONFIG_SERIAL_8250) := serial.o +obj-y += $(loongson-serial-m) $(loongson-serial-y) obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o obj-$(CONFIG_LOONGSON_MC146818) += rtc.o diff --git a/arch/mips/loongson/loongson-3/numa.c b/arch/mips/loongson/loongson-3/numa.c index 37ed184398c6..42323bcc5d28 100644 --- a/arch/mips/loongson/loongson-3/numa.c +++ b/arch/mips/loongson/loongson-3/numa.c @@ -33,6 +33,7 @@ static struct node_data prealloc__node_data[MAX_NUMNODES]; unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES]; +EXPORT_SYMBOL(__node_distances); struct node_data *__node_data[MAX_NUMNODES]; EXPORT_SYMBOL(__node_data); diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index fa6ebd4bc9e9..c3917e251f59 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -299,6 +299,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) local_irq_save(flags); + htw_stop(); pid = read_c0_entryhi() & ASID_MASK; address &= (PAGE_MASK << 1); write_c0_entryhi(address | pid); @@ -346,6 +347,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) tlb_write_indexed(); } tlbw_use_hazard(); + htw_start(); flush_itlb_vm(vma); local_irq_restore(flags); } @@ -422,6 +424,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, local_irq_save(flags); /* Save old context and create impossible VPN2 value */ + htw_stop(); old_ctx = read_c0_entryhi(); old_pagemask = read_c0_pagemask(); wired = read_c0_wired(); @@ -443,6 +446,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, write_c0_entryhi(old_ctx); write_c0_pagemask(old_pagemask); + htw_start(); out: local_irq_restore(flags); return ret; diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index b5f228e7eae6..e3328a96e809 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -1872,8 +1872,16 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, 
uasm_l_smp_pgtable_change(l, *p); #endif iPTE_LW(p, wr.r1, wr.r2); /* get even pte */ - if (!m4kc_tlbp_war()) + if (!m4kc_tlbp_war()) { build_tlb_probe_entry(p); + if (cpu_has_htw) { + /* race condition happens, leaving */ + uasm_i_ehb(p); + uasm_i_mfc0(p, wr.r3, C0_INDEX); + uasm_il_bltz(p, r, wr.r3, label_leave); + uasm_i_nop(p); + } + } return wr; } diff --git a/arch/mips/mti-sead3/sead3-leds.c b/arch/mips/mti-sead3/sead3-leds.c index 20102a6d4141..c427c5778186 100644 --- a/arch/mips/mti-sead3/sead3-leds.c +++ b/arch/mips/mti-sead3/sead3-leds.c @@ -5,7 +5,7 @@ * * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. */ -#include <linux/module.h> +#include <linux/init.h> #include <linux/leds.h> #include <linux/platform_device.h> @@ -76,8 +76,4 @@ static int __init led_init(void) return platform_device_register(&fled_device); } -module_init(led_init); - -MODULE_AUTHOR("Chris Dearman <chris@mips.com>"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("LED probe driver for SEAD-3"); +device_initcall(led_init); diff --git a/arch/mips/netlogic/xlp/Makefile b/arch/mips/netlogic/xlp/Makefile index be358a8050c5..6b43af0a34d9 100644 --- a/arch/mips/netlogic/xlp/Makefile +++ b/arch/mips/netlogic/xlp/Makefile @@ -1,6 +1,10 @@ obj-y += setup.o nlm_hal.o cop2-ex.o dt.o obj-$(CONFIG_SMP) += wakeup.o -obj-$(CONFIG_USB) += usb-init.o -obj-$(CONFIG_USB) += usb-init-xlp2.o -obj-$(CONFIG_SATA_AHCI) += ahci-init.o -obj-$(CONFIG_SATA_AHCI) += ahci-init-xlp2.o +ifdef CONFIG_USB +obj-y += usb-init.o +obj-y += usb-init-xlp2.o +endif +ifdef CONFIG_SATA_AHCI +obj-y += ahci-init.o +obj-y += ahci-init-xlp2.o +endif diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c index 6854ed5097d2..83a1dfd8f0e3 100644 --- a/arch/mips/oprofile/backtrace.c +++ b/arch/mips/oprofile/backtrace.c @@ -92,7 +92,7 @@ static inline int unwind_user_frame(struct stackframe *old_frame, /* This marks the end of the previous function, which means we overran. 
*/ break; - stack_size = (unsigned) stack_adjustment; + stack_size = (unsigned long) stack_adjustment; } else if (is_ra_save_ins(&ip)) { int ra_slot = ip.i_format.simmediate; if (ra_slot < 0) diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index a95c00f5fb96..a304bcc37e4f 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -107,6 +107,7 @@ static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth) } unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES]; +EXPORT_SYMBOL(__node_distances); static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b) { diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 4ca90a39d6d0..725247beebec 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -159,8 +159,6 @@ struct pci_dn { int pci_ext_config_space; /* for pci devices */ - bool force_32bit_msi; - struct pci_dev *pcidev; /* back-pointer to the pci device */ #ifdef CONFIG_EEH struct eeh_dev *edev; /* eeh device */ diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c index f19b1e5cb060..1ceecdda810b 100644 --- a/arch/powerpc/kernel/eeh_sysfs.c +++ b/arch/powerpc/kernel/eeh_sysfs.c @@ -65,7 +65,7 @@ static ssize_t eeh_pe_state_show(struct device *dev, return -ENODEV; state = eeh_ops->get_state(edev->pe, NULL); - return sprintf(buf, "%0x08x %0x08x\n", + return sprintf(buf, "0x%08x 0x%08x\n", state, edev->pe->state); } diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 155013da27e0..b15194e2c5fc 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -266,13 +266,3 @@ int pcibus_to_node(struct pci_bus *bus) } EXPORT_SYMBOL(pcibus_to_node); #endif - -static void quirk_radeon_32bit_msi(struct pci_dev *dev) -{ - struct pci_dn *pdn = pci_get_pdn(dev); - - if (pdn) - pdn->force_32bit_msi = true; -} -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi); diff --git a/arch/powerpc/kernel/vdso32/getcpu.S b/arch/powerpc/kernel/vdso32/getcpu.S index 23eb9a9441bd..c62be60c7274 100644 --- a/arch/powerpc/kernel/vdso32/getcpu.S +++ b/arch/powerpc/kernel/vdso32/getcpu.S @@ -30,8 +30,8 @@ V_FUNCTION_BEGIN(__kernel_getcpu) .cfi_startproc mfspr r5,SPRN_SPRG_VDSO_READ - cmpdi cr0,r3,0 - cmpdi cr1,r4,0 + cmpwi cr0,r3,0 + cmpwi cr1,r4,0 clrlwi r6,r5,16 rlwinm r7,r5,16,31-15,31-0 beq cr0,1f diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c index 5e1ed1575aab..b322bfb51343 100644 --- a/arch/powerpc/platforms/powernv/opal-hmi.c +++ b/arch/powerpc/platforms/powernv/opal-hmi.c @@ -57,7 +57,7 @@ static void print_hmi_event_info(struct OpalHMIEvent *hmi_evt) }; /* Print things out */ - if (hmi_evt->version != OpalHMIEvt_V1) { + if (hmi_evt->version < OpalHMIEvt_V1) { pr_err("HMI Interrupt, Unknown event version %d !\n", hmi_evt->version); return; diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 468a0f23c7f2..3ba435ec3dcd 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1509,7 +1509,6 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, unsigned int is_64, struct msi_msg *msg) { struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); - struct pci_dn *pdn = pci_get_pdn(dev); unsigned int 
xive_num = hwirq - phb->msi_base; __be32 data; int rc; @@ -1523,7 +1522,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, return -ENXIO; /* Force 32-bit MSI on some broken devices */ - if (pdn && pdn->force_32bit_msi) + if (dev->no_64bit_msi) is_64 = 0; /* Assign XIVE to PE */ @@ -1997,7 +1996,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, if (is_kdump_kernel()) { pr_info(" Issue PHB reset ...\n"); ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL); - ioda_eeh_phb_reset(hose, OPAL_DEASSERT_RESET); + ioda_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE); } /* Configure M64 window */ diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index b2187d0068b8..4b20f2c6b3b2 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -50,7 +50,6 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) { struct pci_controller *hose = pci_bus_to_host(pdev->bus); struct pnv_phb *phb = hose->private_data; - struct pci_dn *pdn = pci_get_pdn(pdev); struct msi_desc *entry; struct msi_msg msg; int hwirq; @@ -60,7 +59,7 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) if (WARN_ON(!phb) || !phb->msi_bmp.bitmap) return -ENODEV; - if (pdn && pdn->force_32bit_msi && !phb->msi32_support) + if (pdev->no_64bit_msi && !phb->msi32_support) return -ENODEV; list_for_each_entry(entry, &pdev->msi_list, list) { diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 8ab5add4ac82..8b909e94fd9a 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -420,7 +420,7 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type) */ again: if (type == PCI_CAP_ID_MSI) { - if (pdn->force_32bit_msi) { + if (pdev->no_64bit_msi) { rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec); if (rc < 0) { /* diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index de40b48b460e..da08ed088157 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -361,7 +361,7 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev, cascade_data->virq = virt_msir; msi->cascade_array[irq_index] = cascade_data; - ret = request_irq(virt_msir, fsl_msi_cascade, 0, + ret = request_irq(virt_msir, fsl_msi_cascade, IRQF_NO_THREAD, "fsl-msi-cascade", cascade_data); if (ret) { dev_err(&dev->dev, "failed to request_irq(%d), ret = %d\n", diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index b988b5addf86..c8efbb37d6e0 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -293,10 +293,10 @@ static inline void disable_surveillance(void) args.token = rtas_token("set-indicator"); if (args.token == RTAS_UNKNOWN_SERVICE) return; - args.nargs = 3; - args.nret = 1; + args.nargs = cpu_to_be32(3); + args.nret = cpu_to_be32(1); args.rets = &args.args[3]; - args.args[0] = SURVEILLANCE_TOKEN; + args.args[0] = cpu_to_be32(SURVEILLANCE_TOKEN); args.args[1] = 0; args.args[2] = 0; enter_rtas(__pa(&args)); diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 5b1b52a04ad6..7e064c68c5ec 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -12,6 +12,14 @@ int dma_supported(struct device *dev, u64 mask); #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 
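
The xmon.c hunk above is an endianness fix: RTAS consumes 32-bit big-endian cells, so on a little-endian kernel the nargs/nret counters and the argument cells handed directly to enter_rtas() must each be converted with cpu_to_be32(). A hedged sketch of disable_surveillance() as it reads after the patch; only the fields the hunk touches are shown swapped, and the zero arguments are endian-neutral:

static void example_disable_surveillance(void)
{
	struct rtas_args args;			/* the same on-stack block xmon uses */

	args.token = rtas_token("set-indicator");
	if (args.token == RTAS_UNKNOWN_SERVICE)
		return;

	args.nargs = cpu_to_be32(3);		/* argument count, big-endian */
	args.nret  = cpu_to_be32(1);		/* return-value count, big-endian */
	args.rets  = &args.args[3];
	args.args[0] = cpu_to_be32(SURVEILLANCE_TOKEN);
	args.args[1] = 0;
	args.args[2] = 0;

	enter_rtas(__pa(&args));
}
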
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction dir) +{ + /* Since dma_{alloc,free}_noncoherent() allocated coherent memory, this + * routine can be a nop. + */ +} + extern struct dma_map_ops *dma_ops; extern struct dma_map_ops *leon_dma_ops; extern struct dma_map_ops pci32_dma_ops; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index ded8a6774ac9..41a503c15862 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -144,7 +144,7 @@ config INSTRUCTION_DECODER config PERF_EVENTS_INTEL_UNCORE def_bool y - depends on PERF_EVENTS && SUP_SUP_INTEL && PCI + depends on PERF_EVENTS && CPU_SUP_INTEL && PCI config OUTPUT_FORMAT string diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h index f48b17df4224..3a52ee0e726d 100644 --- a/arch/x86/include/asm/page_32_types.h +++ b/arch/x86/include/asm/page_32_types.h @@ -20,7 +20,6 @@ #define THREAD_SIZE_ORDER 1 #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) -#define STACKFAULT_STACK 0 #define DOUBLEFAULT_STACK 1 #define NMI_STACK 0 #define DEBUG_STACK 0 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 678205195ae1..75450b2c7be4 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -14,12 +14,11 @@ #define IRQ_STACK_ORDER 2 #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER) -#define STACKFAULT_STACK 1 -#define DOUBLEFAULT_STACK 2 -#define NMI_STACK 3 -#define DEBUG_STACK 4 -#define MCE_STACK 5 -#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ +#define DOUBLEFAULT_STACK 1 +#define NMI_STACK 2 +#define DEBUG_STACK 3 +#define MCE_STACK 4 +#define N_EXCEPTION_STACKS 4 /* hw limit: 7 */ #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 854053889d4d..547e344a6dc6 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -141,7 +141,7 @@ struct thread_info { /* Only used for 64 bit */ #define _TIF_DO_NOTIFY_MASK \ (_TIF_SIGPENDING | _TIF_MCE_NOTIFY | _TIF_NOTIFY_RESUME | \ - _TIF_USER_RETURN_NOTIFY) + _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE) /* flags to check in __switch_to() */ #define _TIF_WORK_CTXSW \ diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index bc8352e7010a..707adc6549d8 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -39,6 +39,7 @@ asmlinkage void simd_coprocessor_error(void); #ifdef CONFIG_TRACING asmlinkage void trace_page_fault(void); +#define trace_stack_segment stack_segment #define trace_divide_error divide_error #define trace_bounds bounds #define trace_invalid_op invalid_op diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 4b4f78c9ba19..cfa9b5b2c27a 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -146,6 +146,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); static int __init x86_xsave_setup(char *s) { + if (strlen(s)) + return 0; setup_clear_cpu_cap(X86_FEATURE_XSAVE); setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); setup_clear_cpu_cap(X86_FEATURE_XSAVES); diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index dd9d6190b08d..2ce9051174e6 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -465,6 +465,14 @@ static void mc_bp_resume(void) if (uci->valid && uci->mc) microcode_ops->apply_microcode(cpu); + else 
if (!uci->mc) + /* + * We might resume and not have applied late microcode but still + * have a newer patch stashed from the early loader. We don't + * have it in uci->mc so we have to load it the same way we're + * applying patches early on the APs. + */ + load_ucode_ap(); } static struct syscore_ops mc_syscore_ops = { diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c index adf138eac85c..f9ed429d6e4f 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c @@ -486,14 +486,17 @@ static struct attribute_group snbep_uncore_qpi_format_group = { .attrs = snbep_uncore_qpi_formats_attr, }; -#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \ - .init_box = snbep_uncore_msr_init_box, \ +#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \ .disable_box = snbep_uncore_msr_disable_box, \ .enable_box = snbep_uncore_msr_enable_box, \ .disable_event = snbep_uncore_msr_disable_event, \ .enable_event = snbep_uncore_msr_enable_event, \ .read_counter = uncore_msr_read_counter +#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \ + __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), \ + .init_box = snbep_uncore_msr_init_box \ + static struct intel_uncore_ops snbep_uncore_msr_ops = { SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), }; @@ -1919,6 +1922,30 @@ static struct intel_uncore_type hswep_uncore_cbox = { .format_group = &hswep_uncore_cbox_format_group, }; +/* + * Write SBOX Initialization register bit by bit to avoid spurious #GPs + */ +static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box) +{ + unsigned msr = uncore_msr_box_ctl(box); + + if (msr) { + u64 init = SNBEP_PMON_BOX_CTL_INT; + u64 flags = 0; + int i; + + for_each_set_bit(i, (unsigned long *)&init, 64) { + flags |= (1ULL << i); + wrmsrl(msr, flags); + } + } +} + +static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = { + __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), + .init_box = hswep_uncore_sbox_msr_init_box +}; + static struct attribute *hswep_uncore_sbox_formats_attr[] = { &format_attr_event.attr, &format_attr_umask.attr, @@ -1944,7 +1971,7 @@ static struct intel_uncore_type hswep_uncore_sbox = { .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL, .msr_offset = HSWEP_SBOX_MSR_OFFSET, - .ops = &snbep_uncore_msr_ops, + .ops = &hswep_uncore_sbox_msr_ops, .format_group = &hswep_uncore_sbox_format_group, }; @@ -2025,13 +2052,27 @@ static struct intel_uncore_type hswep_uncore_imc = { SNBEP_UNCORE_PCI_COMMON_INIT(), }; +static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8}; + +static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + u64 count = 0; + + pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count); + pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1); + + return count; +} + static struct intel_uncore_ops hswep_uncore_irp_ops = { .init_box = snbep_uncore_pci_init_box, .disable_box = snbep_uncore_pci_disable_box, .enable_box = snbep_uncore_pci_enable_box, .disable_event = ivbep_uncore_irp_disable_event, .enable_event = ivbep_uncore_irp_enable_event, - .read_counter = ivbep_uncore_irp_read_counter, + .read_counter = hswep_uncore_irp_read_counter, }; static struct intel_uncore_type hswep_uncore_irp = { diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 1abcb50b48ae..ff86f19b5758 100644 --- 
a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -24,7 +24,6 @@ static char x86_stack_ids[][8] = { [ DEBUG_STACK-1 ] = "#DB", [ NMI_STACK-1 ] = "NMI", [ DOUBLEFAULT_STACK-1 ] = "#DF", - [ STACKFAULT_STACK-1 ] = "#SS", [ MCE_STACK-1 ] = "#MC", #if DEBUG_STKSZ > EXCEPTION_STKSZ [ N_EXCEPTION_STACKS ... diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index df088bb03fb3..c0226ab54106 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -828,9 +828,15 @@ ENTRY(native_iret) jnz native_irq_return_ldt #endif +.global native_irq_return_iret native_irq_return_iret: + /* + * This may fault. Non-paranoid faults on return to userspace are + * handled by fixup_bad_iret. These include #SS, #GP, and #NP. + * Double-faults due to espfix64 are handled in do_double_fault. + * Other faults here are fatal. + */ iretq - _ASM_EXTABLE(native_irq_return_iret, bad_iret) #ifdef CONFIG_X86_ESPFIX64 native_irq_return_ldt: @@ -858,25 +864,6 @@ native_irq_return_ldt: jmp native_irq_return_iret #endif - .section .fixup,"ax" -bad_iret: - /* - * The iret traps when the %cs or %ss being restored is bogus. - * We've lost the original trap vector and error code. - * #GPF is the most likely one to get for an invalid selector. - * So pretend we completed the iret and took the #GPF in user mode. - * - * We are now running with the kernel GS after exception recovery. - * But error_entry expects us to have user GS to match the user %cs, - * so swap back. - */ - pushq $0 - - SWAPGS - jmp general_protection - - .previous - /* edi: workmask, edx: work */ retint_careful: CFI_RESTORE_STATE @@ -922,37 +909,6 @@ ENTRY(retint_kernel) CFI_ENDPROC END(common_interrupt) - /* - * If IRET takes a fault on the espfix stack, then we - * end up promoting it to a doublefault. In that case, - * modify the stack to make it look like we just entered - * the #GP handler from user space, similar to bad_iret. - */ -#ifdef CONFIG_X86_ESPFIX64 - ALIGN -__do_double_fault: - XCPT_FRAME 1 RDI+8 - movq RSP(%rdi),%rax /* Trap on the espfix stack? */ - sarq $PGDIR_SHIFT,%rax - cmpl $ESPFIX_PGD_ENTRY,%eax - jne do_double_fault /* No, just deliver the fault */ - cmpl $__KERNEL_CS,CS(%rdi) - jne do_double_fault - movq RIP(%rdi),%rax - cmpq $native_irq_return_iret,%rax - jne do_double_fault /* This shouldn't happen... */ - movq PER_CPU_VAR(kernel_stack),%rax - subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */ - movq %rax,RSP(%rdi) - movq $0,(%rax) /* Missing (lost) #GP error code */ - movq $general_protection,RIP(%rdi) - retq - CFI_ENDPROC -END(__do_double_fault) -#else -# define __do_double_fault do_double_fault -#endif - /* * APIC interrupts. 
*/ @@ -1124,7 +1080,7 @@ idtentry overflow do_overflow has_error_code=0 idtentry bounds do_bounds has_error_code=0 idtentry invalid_op do_invalid_op has_error_code=0 idtentry device_not_available do_device_not_available has_error_code=0 -idtentry double_fault __do_double_fault has_error_code=1 paranoid=1 +idtentry double_fault do_double_fault has_error_code=1 paranoid=1 idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0 idtentry invalid_TSS do_invalid_TSS has_error_code=1 idtentry segment_not_present do_segment_not_present has_error_code=1 @@ -1289,7 +1245,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK -idtentry stack_segment do_stack_segment has_error_code=1 paranoid=1 +idtentry stack_segment do_stack_segment has_error_code=1 #ifdef CONFIG_XEN idtentry xen_debug do_debug has_error_code=0 idtentry xen_int3 do_int3 has_error_code=0 @@ -1399,17 +1355,16 @@ error_sti: /* * There are two places in the kernel that can potentially fault with - * usergs. Handle them here. The exception handlers after iret run with - * kernel gs again, so don't set the user space flag. B stepping K8s - * sometimes report an truncated RIP for IRET exceptions returning to - * compat mode. Check for these here too. + * usergs. Handle them here. B stepping K8s sometimes report a + * truncated RIP for IRET exceptions returning to compat mode. Check + * for these here too. */ error_kernelspace: CFI_REL_OFFSET rcx, RCX+8 incl %ebx leaq native_irq_return_iret(%rip),%rcx cmpq %rcx,RIP+8(%rsp) - je error_swapgs + je error_bad_iret movl %ecx,%eax /* zero extend */ cmpq %rax,RIP+8(%rsp) je bstep_iret @@ -1420,7 +1375,15 @@ error_kernelspace: bstep_iret: /* Fix truncated RIP */ movq %rcx,RIP+8(%rsp) - jmp error_swapgs + /* fall through */ + +error_bad_iret: + SWAPGS + mov %rsp,%rdi + call fixup_bad_iret + mov %rax,%rsp + decl %ebx /* Return to usergs */ + jmp error_sti CFI_ENDPROC END(error_entry) diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 749b0e423419..e510618b2e91 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -1484,7 +1484,7 @@ unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch) */ if (work & _TIF_NOHZ) { user_exit(); - work &= ~TIF_NOHZ; + work &= ~_TIF_NOHZ; } #ifdef CONFIG_SECCOMP diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 0d0e922fafc1..de801f22128a 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -233,32 +233,40 @@ DO_ERROR(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op) DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",coprocessor_segment_overrun) DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) -#ifdef CONFIG_X86_32 DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) -#endif DO_ERROR(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check) #ifdef CONFIG_X86_64 /* Runs on IST stack */ -dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) -{ - enum ctx_state prev_state; - - prev_state = exception_enter(); - if (notify_die(DIE_TRAP, "stack segment", regs, error_code, - X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) { - preempt_conditional_sti(regs); - do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); - preempt_conditional_cli(regs); - } - exception_exit(prev_state); -} - 
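
The ptrace.c one-liner above fixes a classic index-versus-mask slip: TIF_* constants are bit numbers while _TIF_* are the corresponding masks, so "work &= ~TIF_NOHZ" clears unrelated low bits and leaves the NOHZ flag set. A standalone illustration (the numeric bit value is made up for the demo; only the index-versus-mask relationship matters):

#include <stdio.h>

#define TIF_NOHZ	19			/* bit index (illustrative value) */
#define _TIF_NOHZ	(1UL << TIF_NOHZ)	/* the actual mask */

int main(void)
{
	unsigned long work = _TIF_NOHZ | (1UL << 2);

	unsigned long wrong = work & ~(unsigned long)TIF_NOHZ;	/* clears bits 0, 1 and 4, not bit 19 */
	unsigned long right = work & ~_TIF_NOHZ;		/* clears only bit 19 */

	printf("wrong: %#lx, NOHZ still set: %d\n", wrong, !!(wrong & _TIF_NOHZ));
	printf("right: %#lx, NOHZ still set: %d\n", right, !!(right & _TIF_NOHZ));
	return 0;
}
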
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) { static const char str[] = "double fault"; struct task_struct *tsk = current; +#ifdef CONFIG_X86_ESPFIX64 + extern unsigned char native_irq_return_iret[]; + + /* + * If IRET takes a non-IST fault on the espfix64 stack, then we + * end up promoting it to a doublefault. In that case, modify + * the stack to make it look like we just entered the #GP + * handler from user space, similar to bad_iret. + */ + if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY && + regs->cs == __KERNEL_CS && + regs->ip == (unsigned long)native_irq_return_iret) + { + struct pt_regs *normal_regs = task_pt_regs(current); + + /* Fake a #GP(0) from userspace. */ + memmove(&normal_regs->ip, (void *)regs->sp, 5*8); + normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */ + regs->ip = (unsigned long)general_protection; + regs->sp = (unsigned long)&normal_regs->orig_ax; + return; + } +#endif + exception_enter(); /* Return not checked because double check cannot be ignored */ notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); @@ -399,6 +407,35 @@ asmlinkage __visible struct pt_regs *sync_regs(struct pt_regs *eregs) return regs; } NOKPROBE_SYMBOL(sync_regs); + +struct bad_iret_stack { + void *error_entry_ret; + struct pt_regs regs; +}; + +asmlinkage __visible +struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s) +{ + /* + * This is called from entry_64.S early in handling a fault + * caused by a bad iret to user mode. To handle the fault + * correctly, we want move our stack frame to task_pt_regs + * and we want to pretend that the exception came from the + * iret target. + */ + struct bad_iret_stack *new_stack = + container_of(task_pt_regs(current), + struct bad_iret_stack, regs); + + /* Copy the IRET target to the new stack. */ + memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8); + + /* Copy the remainder of the stack from the current stack. */ + memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip)); + + BUG_ON(!user_mode_vm(&new_stack->regs)); + return new_stack; +} #endif /* @@ -778,7 +815,7 @@ void __init trap_init(void) set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun); set_intr_gate(X86_TRAP_TS, invalid_TSS); set_intr_gate(X86_TRAP_NP, segment_not_present); - set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK); + set_intr_gate(X86_TRAP_SS, stack_segment); set_intr_gate(X86_TRAP_GP, general_protection); set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug); set_intr_gate(X86_TRAP_MF, coprocessor_error); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index ac1c4de3a484..978f402006ee 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -630,7 +630,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep) * kvm mmu, before reclaiming the page, we should * unmap it from mmu first. */ - WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn))); + WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn))); if (!shadow_accessed_mask || old_spte & shadow_accessed_mask) kvm_set_pfn_accessed(pfn); @@ -2461,7 +2461,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, spte |= PT_PAGE_SIZE_MASK; if (tdp_enabled) spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, - kvm_is_mmio_pfn(pfn)); + kvm_is_reserved_pfn(pfn)); if (host_writable) spte |= SPTE_HOST_WRITEABLE; @@ -2737,7 +2737,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, * PT_PAGE_TABLE_LEVEL and there would be no adjustment done * here. 
*/ - if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) && + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL && PageTransCompound(pfn_to_page(pfn)) && !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) { diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 4cb8763868fc..4e5dfec750fc 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1123,7 +1123,7 @@ void mark_rodata_ro(void) unsigned long end = (unsigned long) &__end_rodata_hpage_align; unsigned long text_end = PFN_ALIGN(&__stop___ex_table); unsigned long rodata_end = PFN_ALIGN(&__end_rodata); - unsigned long all_end = PFN_ALIGN(&_end); + unsigned long all_end; printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", (end - start) >> 10); @@ -1134,7 +1134,16 @@ void mark_rodata_ro(void) /* * The rodata/data/bss/brk section (but not the kernel text!) * should also be not-executable. + * + * We align all_end to PMD_SIZE because the existing mapping + * is a full PMD. If we would align _brk_end to PAGE_SIZE we + * split the PMD and the reminder between _brk_end and the end + * of the PMD will remain mapped executable. + * + * Any PMD which was setup after the one which covers _brk_end + * has been zapped already via cleanup_highmem(). */ + all_end = roundup((unsigned long)_brk_end, PMD_SIZE); set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT); rodata_test(); diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl index 0b0b124d3ece..23210baade2d 100644 --- a/arch/x86/tools/calc_run_size.pl +++ b/arch/x86/tools/calc_run_size.pl @@ -19,7 +19,16 @@ while (<>) { if ($file_offset == 0) { $file_offset = $offset; } elsif ($file_offset != $offset) { - die ".bss and .brk lack common file offset\n"; + # BFD linker shows the same file offset in ELF. + # Gold linker shows them as consecutive. 
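
The mark_rodata_ro() hunk above rounds _brk_end up to the 2 MiB PMD boundary rather than a page boundary before applying NX, so the existing whole-PMD mapping is not split and no tail of that last PMD is left executable. A small standalone illustration of the arithmetic (the address and the ROUNDUP helper are ours, standing in for the kernel's roundup() and PMD_SIZE):

#include <stdio.h>

#define PMD_SIZE	(2UL << 20)				/* 2 MiB, the usual x86-64 value */
#define ROUNDUP(x, a)	((((x) + (a) - 1) / (a)) * (a))		/* same idea as the kernel's roundup() */

int main(void)
{
	unsigned long brk_end = 0x01e3f000UL;	/* made-up, page- but not PMD-aligned */
	unsigned long all_end = ROUNDUP(brk_end, PMD_SIZE);

	printf("_brk_end = %#lx, PMD-rounded all_end = %#lx\n", brk_end, all_end);
	printf("bytes of the last PMD left executable without the roundup: %#lx\n",
	       all_end - brk_end);
	return 0;
}
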
+ next if ($file_offset + $mem_size == $offset + $size); + + printf STDERR "file_offset: 0x%lx\n", $file_offset; + printf STDERR "mem_size: 0x%lx\n", $mem_size; + printf STDERR "offset: 0x%lx\n", $offset; + printf STDERR "size: 0x%lx\n", $size; + + die ".bss and .brk are non-contiguous\n"; } } } diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 143ec6ea1468..7db193160766 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -878,7 +878,7 @@ int acpi_dev_suspend_late(struct device *dev) return 0; target_state = acpi_target_system_state(); - wakeup = device_may_wakeup(dev); + wakeup = device_may_wakeup(dev) && acpi_device_can_wakeup(adev); error = acpi_device_wakeup(adev, target_state, wakeup); if (wakeup && error) return error; diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 7652e8dc188f..21b0bc6a9c96 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -1225,11 +1225,13 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id) card->config_regs = pci_iomap(dev, 0, CONFIG_RAM_SIZE); if (!card->config_regs) { dev_warn(&dev->dev, "Failed to ioremap config registers\n"); + err = -ENOMEM; goto out_release_regions; } card->buffers = pci_iomap(dev, 1, DATA_RAM_SIZE); if (!card->buffers) { dev_warn(&dev->dev, "Failed to ioremap data buffers\n"); + err = -ENOMEM; goto out_unmap_config; } diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c index 24b5b020753a..a23ac0c724f0 100644 --- a/drivers/clk/at91/clk-usb.c +++ b/drivers/clk/at91/clk-usb.c @@ -52,29 +52,26 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw, tmp = pmc_read(pmc, AT91_PMC_USB); usbdiv = (tmp & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT; - return parent_rate / (usbdiv + 1); + + return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1)); } static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { unsigned long div; - unsigned long bestrate; - unsigned long tmp; + + if (!rate) + return -EINVAL; if (rate >= *parent_rate) return *parent_rate; - div = *parent_rate / rate; - if (div >= SAM9X5_USB_MAX_DIV) - return *parent_rate / (SAM9X5_USB_MAX_DIV + 1); - - bestrate = *parent_rate / div; - tmp = *parent_rate / (div + 1); - if (bestrate - rate > rate - tmp) - bestrate = tmp; + div = DIV_ROUND_CLOSEST(*parent_rate, rate); + if (div > SAM9X5_USB_MAX_DIV + 1) + div = SAM9X5_USB_MAX_DIV + 1; - return bestrate; + return DIV_ROUND_CLOSEST(*parent_rate, div); } static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index) @@ -106,9 +103,13 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate, u32 tmp; struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); struct at91_pmc *pmc = usb->pmc; - unsigned long div = parent_rate / rate; + unsigned long div; + + if (!rate) + return -EINVAL; - if (parent_rate % rate || div < 1 || div >= SAM9X5_USB_MAX_DIV) + div = DIV_ROUND_CLOSEST(parent_rate, rate); + if (div > SAM9X5_USB_MAX_DIV + 1 || !div) return -EINVAL; tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_OHCIUSBDIV; @@ -253,7 +254,7 @@ static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate, tmp_parent_rate = rate * usb->divisors[i]; tmp_parent_rate = __clk_round_rate(parent, tmp_parent_rate); - tmprate = tmp_parent_rate / usb->divisors[i]; + tmprate = DIV_ROUND_CLOSEST(tmp_parent_rate, usb->divisors[i]); if (tmprate < rate) tmpdiff = rate - tmprate; else @@ -281,10 +282,10 @@ static int 
at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate, struct at91_pmc *pmc = usb->pmc; unsigned long div; - if (!rate || parent_rate % rate) + if (!rate) return -EINVAL; - div = parent_rate / rate; + div = DIV_ROUND_CLOSEST(parent_rate, rate); for (i = 0; i < RM9200_USB_DIV_TAB_SIZE; i++) { if (usb->divisors[i] == div) { diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index 18a9de29df0e..c0a842b335c5 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -263,6 +263,14 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, if (!rate) rate = 1; + /* if read only, just return current value */ + if (divider->flags & CLK_DIVIDER_READ_ONLY) { + bestdiv = readl(divider->reg) >> divider->shift; + bestdiv &= div_mask(divider); + bestdiv = _get_div(divider, bestdiv); + return bestdiv; + } + maxdiv = _get_maxdiv(divider); if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) { @@ -361,11 +369,6 @@ const struct clk_ops clk_divider_ops = { }; EXPORT_SYMBOL_GPL(clk_divider_ops); -const struct clk_ops clk_divider_ro_ops = { - .recalc_rate = clk_divider_recalc_rate, -}; -EXPORT_SYMBOL_GPL(clk_divider_ro_ops); - static struct clk *_register_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, @@ -391,10 +394,7 @@ static struct clk *_register_divider(struct device *dev, const char *name, } init.name = name; - if (clk_divider_flags & CLK_DIVIDER_READ_ONLY) - init.ops = &clk_divider_ro_ops; - else - init.ops = &clk_divider_ops; + init.ops = &clk_divider_ops; init.flags = flags | CLK_IS_BASIC; init.parent_names = (parent_name ? &parent_name: NULL); init.num_parents = (parent_name ? 1 : 0); diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c index b345cc791e5d..88b9fe13fa44 100644 --- a/drivers/clk/pxa/clk-pxa27x.c +++ b/drivers/clk/pxa/clk-pxa27x.c @@ -322,7 +322,7 @@ static unsigned long clk_pxa27x_memory_get_rate(struct clk_hw *hw, unsigned long ccsr = CCSR; osc_forced = ccsr & (1 << CCCR_CPDIS_BIT); - a = cccr & CCCR_A_BIT; + a = cccr & (1 << CCCR_A_BIT); l = ccsr & CCSR_L_MASK; if (osc_forced || a) @@ -341,7 +341,7 @@ static u8 clk_pxa27x_memory_get_parent(struct clk_hw *hw) unsigned long ccsr = CCSR; osc_forced = ccsr & (1 << CCCR_CPDIS_BIT); - a = cccr & CCCR_A_BIT; + a = cccr & (1 << CCCR_A_BIT); if (osc_forced) return PXA_MEM_13Mhz; if (a) diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c index dab988ab8cf1..157139a5c1ca 100644 --- a/drivers/clk/qcom/mmcc-apq8084.c +++ b/drivers/clk/qcom/mmcc-apq8084.c @@ -3122,7 +3122,7 @@ static struct clk_regmap *mmcc_apq8084_clocks[] = { [ESC1_CLK_SRC] = &esc1_clk_src.clkr, [HDMI_CLK_SRC] = &hdmi_clk_src.clkr, [VSYNC_CLK_SRC] = &vsync_clk_src.clkr, - [RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr, + [MMSS_RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr, [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr, [MAPLE_CLK_SRC] = &maple_clk_src.clkr, [VDP_CLK_SRC] = &vdp_clk_src.clkr, diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c index 1e68bff481b8..880a266f0143 100644 --- a/drivers/clk/rockchip/clk.c +++ b/drivers/clk/rockchip/clk.c @@ -90,9 +90,7 @@ static struct clk *rockchip_clk_register_branch(const char *name, div->width = div_width; div->lock = lock; div->table = div_table; - div_ops = (div_flags & CLK_DIVIDER_READ_ONLY) - ? 
&clk_divider_ro_ops - : &clk_divider_ops; + div_ops = &clk_divider_ops; } clk = clk_register_composite(NULL, name, parent_names, num_parents, diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c index efb17c3ee120..f4a9c0058b4d 100644 --- a/drivers/clocksource/sun4i_timer.c +++ b/drivers/clocksource/sun4i_timer.c @@ -182,6 +182,12 @@ static void __init sun4i_timer_init(struct device_node *node) /* Make sure timer is stopped before playing with interrupts */ sun4i_clkevt_time_stop(0); + sun4i_clockevent.cpumask = cpu_possible_mask; + sun4i_clockevent.irq = irq; + + clockevents_config_and_register(&sun4i_clockevent, rate, + TIMER_SYNC_TICKS, 0xffffffff); + ret = setup_irq(irq, &sun4i_timer_irq); if (ret) pr_warn("failed to setup irq %d\n", irq); @@ -189,12 +195,6 @@ static void __init sun4i_timer_init(struct device_node *node) /* Enable timer0 interrupt */ val = readl(timer_base + TIMER_IRQ_EN_REG); writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); - - sun4i_clockevent.cpumask = cpu_possible_mask; - sun4i_clockevent.irq = irq; - - clockevents_config_and_register(&sun4i_clockevent, rate, - TIMER_SYNC_TICKS, 0xffffffff); } CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer", sun4i_timer_init); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 4839bfa74a10..19a99743cf52 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -271,7 +271,7 @@ struct pl330_config { #define DMAC_MODE_NS (1 << 0) unsigned int mode; unsigned int data_bus_width:10; /* In number of bits */ - unsigned int data_buf_dep:10; + unsigned int data_buf_dep:11; unsigned int num_chan:4; unsigned int num_peri:6; u32 peri_ns; @@ -2336,7 +2336,7 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) int burst_len; burst_len = pl330->pcfg.data_bus_width / 8; - burst_len *= pl330->pcfg.data_buf_dep; + burst_len *= pl330->pcfg.data_buf_dep / pl330->pcfg.num_chan; burst_len >>= desc->rqcfg.brst_size; /* src/dst_burst_len can't be more than 16 */ @@ -2459,16 +2459,25 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, /* Select max possible burst size */ burst = pl330->pcfg.data_bus_width / 8; - while (burst > 1) { - if (!(len % burst)) - break; + /* + * Make sure we use a burst size that aligns with all the memcpy + * parameters because our DMA programming algorithm doesn't cope with + * transfers which straddle an entry in the DMA device's MFIFO. + */ + while ((src | dst | len) & (burst - 1)) burst /= 2; - } desc->rqcfg.brst_size = 0; while (burst != (1 << desc->rqcfg.brst_size)) desc->rqcfg.brst_size++; + /* + * If burst size is smaller than bus width then make sure we only + * transfer one at a time to avoid a burst stradling an MFIFO entry. 
+ */ + if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width) + desc->rqcfg.brst_len = 1; + desc->rqcfg.brst_len = get_burst_len(desc, len); desc->txd.flags = flags; @@ -2732,7 +2741,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) dev_info(&adev->dev, - "Loaded driver for PL330 DMAC-%d\n", adev->periphid); + "Loaded driver for PL330 DMAC-%x\n", adev->periphid); dev_info(&adev->dev, "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n", pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan, diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 3aa10b328254..91292f5513ff 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -230,30 +230,25 @@ static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev, readl(pchan->base + DMA_CHAN_CUR_PARA)); } -static inline int convert_burst(u32 maxburst, u8 *burst) +static inline s8 convert_burst(u32 maxburst) { switch (maxburst) { case 1: - *burst = 0; - break; + return 0; case 8: - *burst = 2; - break; + return 2; default: return -EINVAL; } - - return 0; } -static inline int convert_buswidth(enum dma_slave_buswidth addr_width, u8 *width) +static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width) { if ((addr_width < DMA_SLAVE_BUSWIDTH_1_BYTE) || (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)) return -EINVAL; - *width = addr_width >> 1; - return 0; + return addr_width >> 1; } static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev, @@ -284,26 +279,25 @@ static inline int sun6i_dma_cfg_lli(struct sun6i_dma_lli *lli, struct dma_slave_config *config) { u8 src_width, dst_width, src_burst, dst_burst; - int ret; if (!config) return -EINVAL; - ret = convert_burst(config->src_maxburst, &src_burst); - if (ret) - return ret; + src_burst = convert_burst(config->src_maxburst); + if (src_burst) + return src_burst; - ret = convert_burst(config->dst_maxburst, &dst_burst); - if (ret) - return ret; + dst_burst = convert_burst(config->dst_maxburst); + if (dst_burst) + return dst_burst; - ret = convert_buswidth(config->src_addr_width, &src_width); - if (ret) - return ret; + src_width = convert_buswidth(config->src_addr_width); + if (src_width) + return src_width; - ret = convert_buswidth(config->dst_addr_width, &dst_width); - if (ret) - return ret; + dst_width = convert_buswidth(config->dst_addr_width); + if (dst_width) + return dst_width; lli->cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) | DMA_CHAN_CFG_SRC_WIDTH(src_width) | @@ -542,11 +536,10 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( { struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); struct sun6i_vchan *vchan = to_sun6i_vchan(chan); - struct dma_slave_config *sconfig = &vchan->cfg; struct sun6i_dma_lli *v_lli; struct sun6i_desc *txd; dma_addr_t p_lli; - int ret; + s8 burst, width; dev_dbg(chan2dev(chan), "%s; chan: %d, dest: %pad, src: %pad, len: %zu. 
flags: 0x%08lx\n", @@ -565,14 +558,21 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( goto err_txd_free; } - ret = sun6i_dma_cfg_lli(v_lli, src, dest, len, sconfig); - if (ret) - goto err_dma_free; + v_lli->src = src; + v_lli->dst = dest; + v_lli->len = len; + v_lli->para = NORMAL_WAIT; + burst = convert_burst(8); + width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | DMA_CHAN_CFG_DST_LINEAR_MODE | - DMA_CHAN_CFG_SRC_LINEAR_MODE; + DMA_CHAN_CFG_SRC_LINEAR_MODE | + DMA_CHAN_CFG_SRC_BURST(burst) | + DMA_CHAN_CFG_SRC_WIDTH(width) | + DMA_CHAN_CFG_DST_BURST(burst) | + DMA_CHAN_CFG_DST_WIDTH(width); sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); @@ -580,8 +580,6 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( return vchan_tx_prep(&vchan->vc, &txd->vd, flags); -err_dma_free: - dma_pool_free(sdev->pool, v_lli, p_lli); err_txd_free: kfree(txd); return NULL; @@ -915,6 +913,7 @@ static int sun6i_dma_probe(struct platform_device *pdev) sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy; sdc->slave.device_control = sun6i_dma_control; sdc->slave.chancnt = NR_MAX_VCHANS; + sdc->slave.copy_align = 4; sdc->slave.dev = &pdev->dev; diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 1403b01e8216..318ade9bb5af 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1670,15 +1670,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto out_regs; if (drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = i915_kick_out_vgacon(dev_priv); + /* WARNING: Apparently we must kick fbdev drivers before vgacon, + * otherwise the vga fbdev driver falls over. */ + ret = i915_kick_out_firmware_fb(dev_priv); if (ret) { - DRM_ERROR("failed to remove conflicting VGA console\n"); + DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); goto out_gtt; } - ret = i915_kick_out_firmware_fb(dev_priv); + ret = i915_kick_out_vgacon(dev_priv); if (ret) { - DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); + DRM_ERROR("failed to remove conflicting VGA console\n"); goto out_gtt; } } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f0a1a56406eb..8bcdb981d540 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9408,6 +9408,10 @@ static bool page_flip_finished(struct intel_crtc *crtc) struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + if (i915_reset_in_progress(&dev_priv->gpu_error) || + crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) + return true; + /* * The relevant registers doen't exist on pre-ctg. * As the flip done interrupt doesn't trigger for mmio diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 5ad45bfff3fe..4bcd91757321 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -4450,6 +4450,7 @@ static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) * vdd might still be enabled do to the delayed vdd off. * Make sure vdd is actually turned off here. 
*/ + cancel_delayed_work_sync(&intel_dp->panel_vdd_work); pps_lock(intel_dp); edp_panel_vdd_off_sync(intel_dp); pps_unlock(intel_dp); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index c27b6140bfd1..ad2fd605f76b 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5469,11 +5469,6 @@ static void gen6_init_clock_gating(struct drm_device *dev) I915_WRITE(_3D_CHICKEN, _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); - /* WaSetupGtModeTdRowDispatch:snb */ - if (IS_SNB_GT1(dev)) - I915_WRITE(GEN6_GT_MODE, - _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE)); - /* WaDisable_RenderCache_OperationalFlush:snb */ I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index f6309bd23e01..b5c73df8e202 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -1256,7 +1256,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) (mode_info->atom_context->bios + data_offset + le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = - ppt->usMaximumPowerDeliveryLimit; + le16_to_cpu(ppt->usMaximumPowerDeliveryLimit); pt = &ppt->power_tune_table; } else { ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 300c4b3d4669..26baa9c05f6c 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -322,6 +322,12 @@ static void radeon_connector_get_edid(struct drm_connector *connector) } if (!radeon_connector->edid) { + /* don't fetch the edid from the vbios if ddc fails and runpm is + * enabled so we report disconnected. + */ + if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0)) + return; + if (rdev->is_atom_bios) { /* some laptops provide a hardcoded edid in rom for LCDs */ if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) || @@ -826,6 +832,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector, static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector, bool force) { + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder = radeon_best_single_encoder(connector); enum drm_connector_status ret = connector_status_disconnected; @@ -842,7 +850,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) /* check if panel is valid */ if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) ret = connector_status_connected; - + /* don't fetch the edid from the vbios if ddc fails and runpm is + * enabled so we report disconnected. + */ + if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0)) + ret = connector_status_disconnected; } /* check for edid as well */ @@ -1589,6 +1601,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force) /* check if panel is valid */ if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) ret = connector_status_connected; + /* don't fetch the edid from the vbios if ddc fails and runpm is + * enabled so we report disconnected. 
+ */ + if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0)) + ret = connector_status_disconnected; } /* eDP is always DP */ radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 9a19e52cc655..6b670b0bc47b 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -179,6 +179,9 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder, (rdev->pdev->subsystem_vendor == 0x1734) && (rdev->pdev->subsystem_device == 0x1107)) use_bl = false; + /* disable native backlight control on older asics */ + else if (rdev->family < CHIP_R600) + use_bl = false; else use_bl = true; } diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 7784911d78ef..00fc59762e0d 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -185,6 +185,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev) if (rdev->flags & RADEON_IS_AGP) return false; + /* + * Older chips have a HW limitation, they can only generate 40 bits + * of address for "64-bit" MSIs which breaks on some platforms, notably + * IBM POWER servers, so we limit them + */ + if (rdev->family < CHIP_BONAIRE) { + dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n"); + rdev->pdev->no_64bit_msi = 1; + } + /* force MSI on */ if (radeon_msi == 1) return true; diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c index 6aac695b1688..9b55e673b67c 100644 --- a/drivers/hwmon/g762.c +++ b/drivers/hwmon/g762.c @@ -1084,10 +1084,8 @@ static int g762_probe(struct i2c_client *client, const struct i2c_device_id *id) if (ret) goto clock_dis; - data->hwmon_dev = devm_hwmon_device_register_with_groups(dev, - client->name, - data, - g762_groups); + data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name, + data, g762_groups); if (IS_ERR(data->hwmon_dev)) { ret = PTR_ERR(data->hwmon_dev); goto clock_dis; diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c index 22c096ce39ad..513bd6d14293 100644 --- a/drivers/iio/accel/bmc150-accel.c +++ b/drivers/iio/accel/bmc150-accel.c @@ -44,6 +44,9 @@ #define BMC150_ACCEL_REG_INT_STATUS_2 0x0B #define BMC150_ACCEL_ANY_MOTION_MASK 0x07 +#define BMC150_ACCEL_ANY_MOTION_BIT_X BIT(0) +#define BMC150_ACCEL_ANY_MOTION_BIT_Y BIT(1) +#define BMC150_ACCEL_ANY_MOTION_BIT_Z BIT(2) #define BMC150_ACCEL_ANY_MOTION_BIT_SIGN BIT(3) #define BMC150_ACCEL_REG_PMU_LPW 0x11 @@ -92,9 +95,9 @@ #define BMC150_ACCEL_SLOPE_THRES_MASK 0xFF /* Slope duration in terms of number of samples */ -#define BMC150_ACCEL_DEF_SLOPE_DURATION 2 +#define BMC150_ACCEL_DEF_SLOPE_DURATION 1 /* in terms of multiples of g's/LSB, based on range */ -#define BMC150_ACCEL_DEF_SLOPE_THRESHOLD 5 +#define BMC150_ACCEL_DEF_SLOPE_THRESHOLD 1 #define BMC150_ACCEL_REG_XOUT_L 0x02 @@ -536,6 +539,9 @@ static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on) if (ret < 0) { dev_err(&data->client->dev, "Failed: bmc150_accel_set_power_state for %d\n", on); + if (on) + pm_runtime_put_noidle(&data->client->dev); + return ret; } @@ -811,6 +817,7 @@ static int bmc150_accel_write_event_config(struct iio_dev *indio_dev, ret = bmc150_accel_setup_any_motion_interrupt(data, state); if (ret < 0) { + bmc150_accel_set_power_state(data, false); mutex_unlock(&data->mutex); return ret; } @@ -846,7 +853,7 @@ static const struct attribute_group 
bmc150_accel_attrs_group = { static const struct iio_event_spec bmc150_accel_event = { .type = IIO_EV_TYPE_ROC, - .dir = IIO_EV_DIR_RISING | IIO_EV_DIR_FALLING, + .dir = IIO_EV_DIR_EITHER, .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE) | BIT(IIO_EV_INFO_PERIOD) @@ -1054,6 +1061,7 @@ static int bmc150_accel_data_rdy_trigger_set_state(struct iio_trigger *trig, else ret = bmc150_accel_setup_new_data_interrupt(data, state); if (ret < 0) { + bmc150_accel_set_power_state(data, false); mutex_unlock(&data->mutex); return ret; } @@ -1092,12 +1100,26 @@ static irqreturn_t bmc150_accel_event_handler(int irq, void *private) else dir = IIO_EV_DIR_RISING; - if (ret & BMC150_ACCEL_ANY_MOTION_MASK) + if (ret & BMC150_ACCEL_ANY_MOTION_BIT_X) + iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, + 0, + IIO_MOD_X, + IIO_EV_TYPE_ROC, + dir), + data->timestamp); + if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Y) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, - IIO_MOD_X_OR_Y_OR_Z, + IIO_MOD_Y, IIO_EV_TYPE_ROC, - IIO_EV_DIR_EITHER), + dir), + data->timestamp); + if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Z) + iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, + 0, + IIO_MOD_Z, + IIO_EV_TYPE_ROC, + dir), data->timestamp); ack_intr_status: if (!data->dready_trigger_on) @@ -1354,10 +1376,14 @@ static int bmc150_accel_runtime_suspend(struct device *dev) { struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); struct bmc150_accel_data *data = iio_priv(indio_dev); + int ret; dev_dbg(&data->client->dev, __func__); + ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0); + if (ret < 0) + return -EAGAIN; - return bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0); + return 0; } static int bmc150_accel_runtime_resume(struct device *dev) diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index a23e58c4ed99..320aa72c0349 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -269,6 +269,8 @@ static int kxcjk1013_set_range(struct kxcjk1013_data *data, int range_index) return ret; } + ret &= ~(KXCJK1013_REG_CTRL1_BIT_GSEL0 | + KXCJK1013_REG_CTRL1_BIT_GSEL1); ret |= (KXCJK1013_scale_table[range_index].gsel_0 << 3); ret |= (KXCJK1013_scale_table[range_index].gsel_1 << 4); diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c index b58d6302521f..d095efe1ba14 100644 --- a/drivers/iio/adc/men_z188_adc.c +++ b/drivers/iio/adc/men_z188_adc.c @@ -152,6 +152,7 @@ static void men_z188_remove(struct mcb_device *dev) static const struct mcb_device_id men_z188_ids[] = { { .device = 0xbc }, + { } }; MODULE_DEVICE_TABLE(mcb, men_z188_ids); diff --git a/drivers/iio/gyro/bmg160.c b/drivers/iio/gyro/bmg160.c index 1f967e0d688e..d2fa526740ca 100644 --- a/drivers/iio/gyro/bmg160.c +++ b/drivers/iio/gyro/bmg160.c @@ -67,6 +67,9 @@ #define BMG160_REG_INT_EN_0 0x15 #define BMG160_DATA_ENABLE_INT BIT(7) +#define BMG160_REG_INT_EN_1 0x16 +#define BMG160_INT1_BIT_OD BIT(1) + #define BMG160_REG_XOUT_L 0x02 #define BMG160_AXIS_TO_REG(axis) (BMG160_REG_XOUT_L + (axis * 2)) @@ -82,6 +85,9 @@ #define BMG160_REG_INT_STATUS_2 0x0B #define BMG160_ANY_MOTION_MASK 0x07 +#define BMG160_ANY_MOTION_BIT_X BIT(0) +#define BMG160_ANY_MOTION_BIT_Y BIT(1) +#define BMG160_ANY_MOTION_BIT_Z BIT(2) #define BMG160_REG_TEMP 0x08 #define BMG160_TEMP_CENTER_VAL 23 @@ -222,6 +228,19 @@ static int bmg160_chip_init(struct bmg160_data *data) data->slope_thres = ret; /* Set default interrupt mode */ + ret = 
i2c_smbus_read_byte_data(data->client, BMG160_REG_INT_EN_1); + if (ret < 0) { + dev_err(&data->client->dev, "Error reading reg_int_en_1\n"); + return ret; + } + ret &= ~BMG160_INT1_BIT_OD; + ret = i2c_smbus_write_byte_data(data->client, + BMG160_REG_INT_EN_1, ret); + if (ret < 0) { + dev_err(&data->client->dev, "Error writing reg_int_en_1\n"); + return ret; + } + ret = i2c_smbus_write_byte_data(data->client, BMG160_REG_INT_RST_LATCH, BMG160_INT_MODE_LATCH_INT | @@ -250,6 +269,9 @@ static int bmg160_set_power_state(struct bmg160_data *data, bool on) if (ret < 0) { dev_err(&data->client->dev, "Failed: bmg160_set_power_state for %d\n", on); + if (on) + pm_runtime_put_noidle(&data->client->dev); + return ret; } #endif @@ -705,6 +727,7 @@ static int bmg160_write_event_config(struct iio_dev *indio_dev, ret = bmg160_setup_any_motion_interrupt(data, state); if (ret < 0) { + bmg160_set_power_state(data, false); mutex_unlock(&data->mutex); return ret; } @@ -743,7 +766,7 @@ static const struct attribute_group bmg160_attrs_group = { static const struct iio_event_spec bmg160_event = { .type = IIO_EV_TYPE_ROC, - .dir = IIO_EV_DIR_RISING | IIO_EV_DIR_FALLING, + .dir = IIO_EV_DIR_EITHER, .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE) }; @@ -871,6 +894,7 @@ static int bmg160_data_rdy_trigger_set_state(struct iio_trigger *trig, else ret = bmg160_setup_new_data_interrupt(data, state); if (ret < 0) { + bmg160_set_power_state(data, false); mutex_unlock(&data->mutex); return ret; } @@ -908,10 +932,24 @@ static irqreturn_t bmg160_event_handler(int irq, void *private) else dir = IIO_EV_DIR_FALLING; - if (ret & BMG160_ANY_MOTION_MASK) + if (ret & BMG160_ANY_MOTION_BIT_X) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL, 0, - IIO_MOD_X_OR_Y_OR_Z, + IIO_MOD_X, + IIO_EV_TYPE_ROC, + dir), + data->timestamp); + if (ret & BMG160_ANY_MOTION_BIT_Y) + iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL, + 0, + IIO_MOD_Y, + IIO_EV_TYPE_ROC, + dir), + data->timestamp); + if (ret & BMG160_ANY_MOTION_BIT_Z) + iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL, + 0, + IIO_MOD_Z, IIO_EV_TYPE_ROC, dir), data->timestamp); @@ -1169,8 +1207,15 @@ static int bmg160_runtime_suspend(struct device *dev) { struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); struct bmg160_data *data = iio_priv(indio_dev); + int ret; + + ret = bmg160_set_mode(data, BMG160_MODE_SUSPEND); + if (ret < 0) { + dev_err(&data->client->dev, "set mode failed\n"); + return -EAGAIN; + } - return bmg160_set_mode(data, BMG160_MODE_SUSPEND); + return 0; } static int bmg160_runtime_resume(struct device *dev) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 3effa931fce2..10641b7816f4 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -115,9 +115,12 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id, attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS; /* * FIXME: Use devattr.max_sge - 2 for max_send_sge as - * work-around for RDMA_READ.. + * work-around for RDMA_READs with ConnectX-2. + * + * Also, still make sure to have at least two SGEs for + * outgoing control PDU responses. 
*/ - attr.cap.max_send_sge = device->dev_attr.max_sge - 2; + attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2); isert_conn->max_sge = attr.cap.max_send_sge; attr.cap.max_recv_sge = 1; @@ -225,12 +228,16 @@ isert_create_device_ib_res(struct isert_device *device) struct isert_cq_desc *cq_desc; struct ib_device_attr *dev_attr; int ret = 0, i, j; + int max_rx_cqe, max_tx_cqe; dev_attr = &device->dev_attr; ret = isert_query_device(ib_dev, dev_attr); if (ret) return ret; + max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe); + max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe); + /* asign function handlers */ if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS && dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) { @@ -272,7 +279,7 @@ isert_create_device_ib_res(struct isert_device *device) isert_cq_rx_callback, isert_cq_event_callback, (void *)&cq_desc[i], - ISER_MAX_RX_CQ_LEN, i); + max_rx_cqe, i); if (IS_ERR(device->dev_rx_cq[i])) { ret = PTR_ERR(device->dev_rx_cq[i]); device->dev_rx_cq[i] = NULL; @@ -284,7 +291,7 @@ isert_create_device_ib_res(struct isert_device *device) isert_cq_tx_callback, isert_cq_event_callback, (void *)&cq_desc[i], - ISER_MAX_TX_CQ_LEN, i); + max_tx_cqe, i); if (IS_ERR(device->dev_tx_cq[i])) { ret = PTR_ERR(device->dev_tx_cq[i]); device->dev_tx_cq[i] = NULL; @@ -803,14 +810,25 @@ wake_up: complete(&isert_conn->conn_wait); } -static void +static int isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect) { - struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context; + struct isert_conn *isert_conn; + + if (!cma_id->qp) { + struct isert_np *isert_np = cma_id->context; + + isert_np->np_cm_id = NULL; + return -1; + } + + isert_conn = (struct isert_conn *)cma_id->context; isert_conn->disconnect = disconnect; INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work); schedule_work(&isert_conn->conn_logout_work); + + return 0; } static int @@ -825,6 +843,9 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) switch (event->event) { case RDMA_CM_EVENT_CONNECT_REQUEST: ret = isert_connect_request(cma_id, event); + if (ret) + pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n", + event->event, ret); break; case RDMA_CM_EVENT_ESTABLISHED: isert_connected_handler(cma_id); @@ -834,7 +855,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */ disconnect = true; case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ - isert_disconnected_handler(cma_id, disconnect); + ret = isert_disconnected_handler(cma_id, disconnect); break; case RDMA_CM_EVENT_CONNECT_ERROR: default: @@ -842,12 +863,6 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) break; } - if (ret != 0) { - pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n", - event->event, ret); - dump_stack(); - } - return ret; } @@ -3190,7 +3205,8 @@ isert_free_np(struct iscsi_np *np) { struct isert_np *isert_np = (struct isert_np *)np->np_context; - rdma_destroy_id(isert_np->np_cm_id); + if (isert_np->np_cm_id) + rdma_destroy_id(isert_np->np_cm_id); np->np_context = NULL; kfree(isert_np); diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 7206547c13ce..dc829682701a 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -2092,6 +2092,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) if (!qp_init) goto out; +retry: ch->cq = 
ib_create_cq(sdev->device, srpt_completion, NULL, ch, ch->rq_size + srp_sq_size, 0); if (IS_ERR(ch->cq)) { @@ -2115,6 +2116,13 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) ch->qp = ib_create_qp(sdev->pd, qp_init); if (IS_ERR(ch->qp)) { ret = PTR_ERR(ch->qp); + if (ret == -ENOMEM) { + srp_sq_size /= 2; + if (srp_sq_size >= MIN_SRPT_SQ_SIZE) { + ib_destroy_cq(ch->cq); + goto retry; + } + } printk(KERN_ERR "failed to create_qp ret= %d\n", ret); goto err_destroy_cq; } diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 2ed7905a068f..fc55f0d15b70 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -1179,9 +1179,19 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id } ep_irq_in = &intf->cur_altsetting->endpoint[1].desc; - usb_fill_bulk_urb(xpad->bulk_out, udev, - usb_sndbulkpipe(udev, ep_irq_in->bEndpointAddress), - xpad->bdata, XPAD_PKT_LEN, xpad_bulk_out, xpad); + if (usb_endpoint_is_bulk_out(ep_irq_in)) { + usb_fill_bulk_urb(xpad->bulk_out, udev, + usb_sndbulkpipe(udev, + ep_irq_in->bEndpointAddress), + xpad->bdata, XPAD_PKT_LEN, + xpad_bulk_out, xpad); + } else { + usb_fill_int_urb(xpad->bulk_out, udev, + usb_sndintpipe(udev, + ep_irq_in->bEndpointAddress), + xpad->bdata, XPAD_PKT_LEN, + xpad_bulk_out, xpad, 0); + } /* * Submit the int URB immediately rather than waiting for open diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 3fcb6b3cb0bd..f2b978026407 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -428,14 +428,6 @@ static void elantech_report_trackpoint(struct psmouse *psmouse, int x, y; u32 t; - if (dev_WARN_ONCE(&psmouse->ps2dev.serio->dev, - !tp_dev, - psmouse_fmt("Unexpected trackpoint message\n"))) { - if (etd->debug == 1) - elantech_packet_dump(psmouse); - return; - } - t = get_unaligned_le32(&packet[0]); switch (t & ~7U) { @@ -793,7 +785,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse) unsigned char packet_type = packet[3] & 0x03; bool sanity_check; - if ((packet[3] & 0x0f) == 0x06) + if (etd->tp_dev && (packet[3] & 0x0f) == 0x06) return PACKET_TRACKPOINT; /* diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 2a7a9174c702..f9472920d986 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -143,6 +143,10 @@ static const struct min_max_quirk min_max_pnpid_table[] = { (const char * const []){"LEN2001", NULL}, 1024, 5022, 2508, 4832 }, + { + (const char * const []){"LEN2006", NULL}, + 1264, 5675, 1171, 4688 + }, { } }; diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c index 6ae3cdee0681..cc4f9d80122e 100644 --- a/drivers/irqchip/irq-atmel-aic-common.c +++ b/drivers/irqchip/irq-atmel-aic-common.c @@ -217,8 +217,9 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node, } ret = irq_alloc_domain_generic_chips(domain, 32, 1, name, - handle_level_irq, 0, 0, - IRQCHIP_SKIP_SET_WAKE); + handle_fasteoi_irq, + IRQ_NOREQUEST | IRQ_NOPROBE | + IRQ_NOAUTOEN, 0, 0); if (ret) goto err_domain_remove; @@ -230,7 +231,6 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node, gc->unused = 0; gc->wake_enabled = ~0; gc->chip_types[0].type = IRQ_TYPE_SENSE_MASK; - gc->chip_types[0].handler = handle_fasteoi_irq; gc->chip_types[0].chip.irq_eoi = irq_gc_eoi; gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake; gc->chip_types[0].chip.irq_shutdown = aic_common_shutdown; diff --git 
a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c index b9f4fb808e49..5fb38a2ac226 100644 --- a/drivers/irqchip/irq-bcm7120-l2.c +++ b/drivers/irqchip/irq-bcm7120-l2.c @@ -101,9 +101,9 @@ static int bcm7120_l2_intc_init_one(struct device_node *dn, int parent_irq; parent_irq = irq_of_parse_and_map(dn, irq); - if (parent_irq < 0) { + if (!parent_irq) { pr_err("failed to map interrupt %d\n", irq); - return parent_irq; + return -EINVAL; } data->irq_map_mask |= be32_to_cpup(map_mask + irq); diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index c15c840987d2..14691a4cb84c 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c @@ -135,9 +135,9 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np, __raw_writel(0xffffffff, data->base + CPU_CLEAR); data->parent_irq = irq_of_parse_and_map(np, 0); - if (data->parent_irq < 0) { + if (!data->parent_irq) { pr_err("failed to find parent interrupt\n"); - ret = data->parent_irq; + ret = -EINVAL; goto out_unmap; } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index c9ac06cfe6b7..a5115fb7cf33 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2471,7 +2471,8 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) bond_slave_state_change(bond); if (BOND_MODE(bond) == BOND_MODE_XOR) bond_update_slave_arr(bond, NULL); - } else if (do_failover) { + } + if (do_failover) { block_netpoll_tx(); bond_select_active_slave(bond); unblock_netpoll_tx(); diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 02492d241e4c..2cfe5012e4e5 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -110,7 +110,7 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, long rate; u64 v64; - /* Use CIA recommended sample points */ + /* Use CiA recommended sample points */ if (bt->sample_point) { sampl_pt = bt->sample_point; } else { @@ -382,7 +382,7 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx) BUG_ON(idx >= priv->echo_skb_max); if (priv->echo_skb[idx]) { - kfree_skb(priv->echo_skb[idx]); + dev_kfree_skb_any(priv->echo_skb[idx]); priv->echo_skb[idx] = NULL; } } diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig index fca5482c09ac..04f20dd39007 100644 --- a/drivers/net/can/m_can/Kconfig +++ b/drivers/net/can/m_can/Kconfig @@ -1,4 +1,5 @@ config CAN_M_CAN + depends on HAS_IOMEM tristate "Bosch M_CAN devices" ---help--- Say Y here if you want to support for Bosch M_CAN controller. 
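The two Broadcom irqchip hunks above (bcm7120-l2 and brcmstb-l2) correct the same mistake: irq_of_parse_and_map() returns an unsigned virq where 0 means "no mapping", so a "< 0" test can never fire. A minimal standalone sketch of the corrected convention, with a stub standing in for the OF helper:

#include <stdio.h>

/* Stand-in for irq_of_parse_and_map(): returns a virq, with 0 meaning
 * "no mapping found".  Purely illustrative. */
static unsigned int fake_parse_and_map(int present)
{
        return present ? 42u : 0u;
}

int main(void)
{
        unsigned int virq = fake_parse_and_map(0);

        /* The old code tested (virq < 0), which is always false for an
         * unsigned value, so the error path could never run.  Test for 0. */
        if (!virq) {
                fprintf(stderr, "failed to map interrupt\n");
                return 1;   /* a driver would return -EINVAL here */
        }

        printf("mapped virq %u\n", virq);
        return 0;
}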
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 10d571eaed85..d7bc462aafdc 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -105,14 +105,36 @@ enum m_can_mram_cfg { MRAM_CFG_NUM, }; +/* Fast Bit Timing & Prescaler Register (FBTP) */ +#define FBTR_FBRP_MASK 0x1f +#define FBTR_FBRP_SHIFT 16 +#define FBTR_FTSEG1_SHIFT 8 +#define FBTR_FTSEG1_MASK (0xf << FBTR_FTSEG1_SHIFT) +#define FBTR_FTSEG2_SHIFT 4 +#define FBTR_FTSEG2_MASK (0x7 << FBTR_FTSEG2_SHIFT) +#define FBTR_FSJW_SHIFT 0 +#define FBTR_FSJW_MASK 0x3 + /* Test Register (TEST) */ #define TEST_LBCK BIT(4) /* CC Control Register(CCCR) */ -#define CCCR_TEST BIT(7) -#define CCCR_MON BIT(5) -#define CCCR_CCE BIT(1) -#define CCCR_INIT BIT(0) +#define CCCR_TEST BIT(7) +#define CCCR_CMR_MASK 0x3 +#define CCCR_CMR_SHIFT 10 +#define CCCR_CMR_CANFD 0x1 +#define CCCR_CMR_CANFD_BRS 0x2 +#define CCCR_CMR_CAN 0x3 +#define CCCR_CME_MASK 0x3 +#define CCCR_CME_SHIFT 8 +#define CCCR_CME_CAN 0 +#define CCCR_CME_CANFD 0x1 +#define CCCR_CME_CANFD_BRS 0x2 +#define CCCR_TEST BIT(7) +#define CCCR_MON BIT(5) +#define CCCR_CCE BIT(1) +#define CCCR_INIT BIT(0) +#define CCCR_CANFD 0x10 /* Bit Timing & Prescaler Register (BTP) */ #define BTR_BRP_MASK 0x3ff @@ -204,6 +226,7 @@ enum m_can_mram_cfg { /* Rx Buffer / FIFO Element Size Configuration (RXESC) */ #define M_CAN_RXESC_8BYTES 0x0 +#define M_CAN_RXESC_64BYTES 0x777 /* Tx Buffer Configuration(TXBC) */ #define TXBC_NDTB_OFF 16 @@ -211,6 +234,7 @@ enum m_can_mram_cfg { /* Tx Buffer Element Size Configuration(TXESC) */ #define TXESC_TBDS_8BYTES 0x0 +#define TXESC_TBDS_64BYTES 0x7 /* Tx Event FIFO Con.guration (TXEFC) */ #define TXEFC_EFS_OFF 16 @@ -219,11 +243,11 @@ enum m_can_mram_cfg { /* Message RAM Configuration (in bytes) */ #define SIDF_ELEMENT_SIZE 4 #define XIDF_ELEMENT_SIZE 8 -#define RXF0_ELEMENT_SIZE 16 -#define RXF1_ELEMENT_SIZE 16 +#define RXF0_ELEMENT_SIZE 72 +#define RXF1_ELEMENT_SIZE 72 #define RXB_ELEMENT_SIZE 16 #define TXE_ELEMENT_SIZE 8 -#define TXB_ELEMENT_SIZE 16 +#define TXB_ELEMENT_SIZE 72 /* Message RAM Elements */ #define M_CAN_FIFO_ID 0x0 @@ -231,11 +255,17 @@ enum m_can_mram_cfg { #define M_CAN_FIFO_DATA(n) (0x8 + ((n) << 2)) /* Rx Buffer Element */ +/* R0 */ #define RX_BUF_ESI BIT(31) #define RX_BUF_XTD BIT(30) #define RX_BUF_RTR BIT(29) +/* R1 */ +#define RX_BUF_ANMF BIT(31) +#define RX_BUF_EDL BIT(21) +#define RX_BUF_BRS BIT(20) /* Tx Buffer Element */ +/* R0 */ #define TX_BUF_XTD BIT(30) #define TX_BUF_RTR BIT(29) @@ -296,6 +326,7 @@ static inline void m_can_config_endisable(const struct m_can_priv *priv, if (enable) { /* enable m_can configuration */ m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT); + udelay(5); /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */ m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE); } else { @@ -326,41 +357,67 @@ static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv) m_can_write(priv, M_CAN_ILE, 0x0); } -static void m_can_read_fifo(const struct net_device *dev, struct can_frame *cf, - u32 rxfs) +static void m_can_read_fifo(struct net_device *dev, u32 rxfs) { + struct net_device_stats *stats = &dev->stats; struct m_can_priv *priv = netdev_priv(dev); - u32 id, fgi; + struct canfd_frame *cf; + struct sk_buff *skb; + u32 id, fgi, dlc; + int i; /* calculate the fifo get index for where to read data */ fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_OFF; + dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC); + if (dlc & RX_BUF_EDL) + skb = alloc_canfd_skb(dev, &cf); 
+ else + skb = alloc_can_skb(dev, (struct can_frame **)&cf); + if (!skb) { + stats->rx_dropped++; + return; + } + + if (dlc & RX_BUF_EDL) + cf->len = can_dlc2len((dlc >> 16) & 0x0F); + else + cf->len = get_can_dlc((dlc >> 16) & 0x0F); + id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID); if (id & RX_BUF_XTD) cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG; else cf->can_id = (id >> 18) & CAN_SFF_MASK; - if (id & RX_BUF_RTR) { + if (id & RX_BUF_ESI) { + cf->flags |= CANFD_ESI; + netdev_dbg(dev, "ESI Error\n"); + } + + if (!(dlc & RX_BUF_EDL) && (id & RX_BUF_RTR)) { cf->can_id |= CAN_RTR_FLAG; } else { - id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC); - cf->can_dlc = get_can_dlc((id >> 16) & 0x0F); - *(u32 *)(cf->data + 0) = m_can_fifo_read(priv, fgi, - M_CAN_FIFO_DATA(0)); - *(u32 *)(cf->data + 4) = m_can_fifo_read(priv, fgi, - M_CAN_FIFO_DATA(1)); + if (dlc & RX_BUF_BRS) + cf->flags |= CANFD_BRS; + + for (i = 0; i < cf->len; i += 4) + *(u32 *)(cf->data + i) = + m_can_fifo_read(priv, fgi, + M_CAN_FIFO_DATA(i / 4)); } /* acknowledge rx fifo 0 */ m_can_write(priv, M_CAN_RXF0A, fgi); + + stats->rx_packets++; + stats->rx_bytes += cf->len; + + netif_receive_skb(skb); } static int m_can_do_rx_poll(struct net_device *dev, int quota) { struct m_can_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; - struct sk_buff *skb; - struct can_frame *frame; u32 pkts = 0; u32 rxfs; @@ -374,18 +431,7 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota) if (rxfs & RXFS_RFL) netdev_warn(dev, "Rx FIFO 0 Message Lost\n"); - skb = alloc_can_skb(dev, &frame); - if (!skb) { - stats->rx_dropped++; - return pkts; - } - - m_can_read_fifo(dev, frame, rxfs); - - stats->rx_packets++; - stats->rx_bytes += frame->can_dlc; - - netif_receive_skb(skb); + m_can_read_fifo(dev, rxfs); quota--; pkts++; @@ -481,11 +527,23 @@ static int m_can_handle_lec_err(struct net_device *dev, return 1; } +static int __m_can_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) +{ + struct m_can_priv *priv = netdev_priv(dev); + unsigned int ecr; + + ecr = m_can_read(priv, M_CAN_ECR); + bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT; + bec->txerr = ecr & ECR_TEC_MASK; + + return 0; +} + static int m_can_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { struct m_can_priv *priv = netdev_priv(dev); - unsigned int ecr; int err; err = clk_prepare_enable(priv->hclk); @@ -498,9 +556,7 @@ static int m_can_get_berr_counter(const struct net_device *dev, return err; } - ecr = m_can_read(priv, M_CAN_ECR); - bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT; - bec->txerr = ecr & ECR_TEC_MASK; + __m_can_get_berr_counter(dev, bec); clk_disable_unprepare(priv->cclk); clk_disable_unprepare(priv->hclk); @@ -544,7 +600,7 @@ static int m_can_handle_state_change(struct net_device *dev, if (unlikely(!skb)) return 0; - m_can_get_berr_counter(dev, &bec); + __m_can_get_berr_counter(dev, &bec); switch (new_state) { case CAN_STATE_ERROR_ACTIVE: @@ -596,14 +652,14 @@ static int m_can_handle_state_errors(struct net_device *dev, u32 psr) if ((psr & PSR_EP) && (priv->can.state != CAN_STATE_ERROR_PASSIVE)) { - netdev_dbg(dev, "entered error warning state\n"); + netdev_dbg(dev, "entered error passive state\n"); work_done += m_can_handle_state_change(dev, CAN_STATE_ERROR_PASSIVE); } if ((psr & PSR_BO) && (priv->can.state != CAN_STATE_BUS_OFF)) { - netdev_dbg(dev, "entered error warning state\n"); + netdev_dbg(dev, "entered error bus off state\n"); work_done += 
m_can_handle_state_change(dev, CAN_STATE_BUS_OFF); } @@ -615,7 +671,7 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus) { if (irqstatus & IR_WDI) netdev_err(dev, "Message RAM Watchdog event due to missing READY\n"); - if (irqstatus & IR_BEU) + if (irqstatus & IR_ELO) netdev_err(dev, "Error Logging Overflow\n"); if (irqstatus & IR_BEU) netdev_err(dev, "Bit Error Uncorrected\n"); @@ -733,10 +789,23 @@ static const struct can_bittiming_const m_can_bittiming_const = { .brp_inc = 1, }; +static const struct can_bittiming_const m_can_data_bittiming_const = { + .name = KBUILD_MODNAME, + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 16, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1, +}; + static int m_can_set_bittiming(struct net_device *dev) { struct m_can_priv *priv = netdev_priv(dev); const struct can_bittiming *bt = &priv->can.bittiming; + const struct can_bittiming *dbt = &priv->can.data_bittiming; u16 brp, sjw, tseg1, tseg2; u32 reg_btp; @@ -747,7 +816,17 @@ static int m_can_set_bittiming(struct net_device *dev) reg_btp = (brp << BTR_BRP_SHIFT) | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) | (tseg2 << BTR_TSEG2_SHIFT); m_can_write(priv, M_CAN_BTP, reg_btp); - netdev_dbg(dev, "setting BTP 0x%x\n", reg_btp); + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + brp = dbt->brp - 1; + sjw = dbt->sjw - 1; + tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1; + tseg2 = dbt->phase_seg2 - 1; + reg_btp = (brp << FBTR_FBRP_SHIFT) | (sjw << FBTR_FSJW_SHIFT) | + (tseg1 << FBTR_FTSEG1_SHIFT) | + (tseg2 << FBTR_FTSEG2_SHIFT); + m_can_write(priv, M_CAN_FBTP, reg_btp); + } return 0; } @@ -767,8 +846,8 @@ static void m_can_chip_config(struct net_device *dev) m_can_config_endisable(priv, true); - /* RX Buffer/FIFO Element Size 8 bytes data field */ - m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_8BYTES); + /* RX Buffer/FIFO Element Size 64 bytes data field */ + m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_64BYTES); /* Accept Non-matching Frames Into FIFO 0 */ m_can_write(priv, M_CAN_GFC, 0x0); @@ -777,8 +856,8 @@ static void m_can_chip_config(struct net_device *dev) m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_OFF) | priv->mcfg[MRAM_TXB].off); - /* only support 8 bytes firstly */ - m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_8BYTES); + /* support 64 bytes payload */ + m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES); m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_OFF) | priv->mcfg[MRAM_TXE].off); @@ -793,7 +872,8 @@ static void m_can_chip_config(struct net_device *dev) RXFC_FWM_1 | priv->mcfg[MRAM_RXF1].off); cccr = m_can_read(priv, M_CAN_CCCR); - cccr &= ~(CCCR_TEST | CCCR_MON); + cccr &= ~(CCCR_TEST | CCCR_MON | (CCCR_CMR_MASK << CCCR_CMR_SHIFT) | + (CCCR_CME_MASK << CCCR_CME_SHIFT)); test = m_can_read(priv, M_CAN_TEST); test &= ~TEST_LBCK; @@ -805,6 +885,9 @@ static void m_can_chip_config(struct net_device *dev) test |= TEST_LBCK; } + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT; + m_can_write(priv, M_CAN_CCCR, cccr); m_can_write(priv, M_CAN_TEST, test); @@ -869,11 +952,13 @@ static struct net_device *alloc_m_can_dev(void) priv->dev = dev; priv->can.bittiming_const = &m_can_bittiming_const; + priv->can.data_bittiming_const = &m_can_data_bittiming_const; priv->can.do_set_mode = m_can_set_mode; priv->can.do_get_berr_counter = m_can_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | - 
CAN_CTRLMODE_BERR_REPORTING; + CAN_CTRLMODE_BERR_REPORTING | + CAN_CTRLMODE_FD; return dev; } @@ -956,8 +1041,9 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct m_can_priv *priv = netdev_priv(dev); - struct can_frame *cf = (struct can_frame *)skb->data; - u32 id; + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + u32 id, cccr; + int i; if (can_dropped_invalid_skb(dev, skb)) return NETDEV_TX_OK; @@ -976,11 +1062,28 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, /* message ram configuration */ m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id); - m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, cf->can_dlc << 16); - m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(0), *(u32 *)(cf->data + 0)); - m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(1), *(u32 *)(cf->data + 4)); + m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, can_len2dlc(cf->len) << 16); + + for (i = 0; i < cf->len; i += 4) + m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(i / 4), + *(u32 *)(cf->data + i)); + can_put_echo_skb(skb, dev, 0); + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + cccr = m_can_read(priv, M_CAN_CCCR); + cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT); + if (can_is_canfd_skb(skb)) { + if (cf->flags & CANFD_BRS) + cccr |= CCCR_CMR_CANFD_BRS << CCCR_CMR_SHIFT; + else + cccr |= CCCR_CMR_CANFD << CCCR_CMR_SHIFT; + } else { + cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT; + } + m_can_write(priv, M_CAN_CCCR, cccr); + } + /* enable first TX buffer to start transfer */ m_can_write(priv, M_CAN_TXBTIE, 0x1); m_can_write(priv, M_CAN_TXBAR, 0x1); @@ -992,6 +1095,7 @@ static const struct net_device_ops m_can_netdev_ops = { .ndo_open = m_can_open, .ndo_stop = m_can_close, .ndo_start_xmit = m_can_start_xmit, + .ndo_change_mtu = can_change_mtu, }; static int register_m_can_dev(struct net_device *dev) @@ -1009,7 +1113,7 @@ static int m_can_of_parse_mram(struct platform_device *pdev, struct resource *res; void __iomem *addr; u32 out_val[MRAM_CFG_LEN]; - int ret; + int i, start, end, ret; /* message ram could be shared */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram"); @@ -1060,6 +1164,15 @@ static int m_can_of_parse_mram(struct platform_device *pdev, priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num, priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num); + /* initialize the entire Message RAM in use to avoid possible + * ECC/parity checksum errors when reading an uninitialized buffer + */ + start = priv->mcfg[MRAM_SIDF].off; + end = priv->mcfg[MRAM_TXB].off + + priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; + for (i = start; i < end; i += 4) + writel(0x0, priv->mram_base + i); + return 0; } diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c index 1abe133d1594..9718248e55f1 100644 --- a/drivers/net/can/rcar_can.c +++ b/drivers/net/can/rcar_can.c @@ -628,6 +628,7 @@ static const struct net_device_ops rcar_can_netdev_ops = { .ndo_open = rcar_can_open, .ndo_stop = rcar_can_close, .ndo_start_xmit = rcar_can_start_xmit, + .ndo_change_mtu = can_change_mtu, }; static void rcar_can_rx_pkt(struct rcar_can_priv *priv) diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c index 8ff3424d5147..15c00faeec61 100644 --- a/drivers/net/can/sja1000/kvaser_pci.c +++ b/drivers/net/can/sja1000/kvaser_pci.c @@ -214,7 +214,7 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel, struct net_device *dev; struct sja1000_priv *priv; struct kvaser_pci *board; - int err, init_step; + int err; dev = alloc_sja1000dev(sizeof(struct kvaser_pci)); if (dev == 
NULL) @@ -235,7 +235,6 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel, if (channel == 0) { board->xilinx_ver = ioread8(board->res_addr + XILINX_VERINT) >> 4; - init_step = 2; /* Assert PTADR# - we're in passive mode so the other bits are not important */ @@ -264,8 +263,6 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel, priv->irq_flags = IRQF_SHARED; dev->irq = pdev->irq; - init_step = 4; - dev_info(&pdev->dev, "reg_base=%p conf_addr=%p irq=%d\n", priv->reg_base, board->conf_addr, dev->irq); diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 00f2534dde73..29d3f0938eb8 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -434,10 +434,9 @@ static void ems_usb_read_bulk_callback(struct urb *urb) if (urb->actual_length > CPC_HEADER_SIZE) { struct ems_cpc_msg *msg; u8 *ibuf = urb->transfer_buffer; - u8 msg_count, again, start; + u8 msg_count, start; msg_count = ibuf[0] & ~0x80; - again = ibuf[0] & 0x80; start = CPC_HEADER_SIZE; diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index b7c9e8b11460..c063a54ab8dd 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -464,7 +464,6 @@ static void esd_usb2_write_bulk_callback(struct urb *urb) { struct esd_tx_urb_context *context = urb->context; struct esd_usb2_net_priv *priv; - struct esd_usb2 *dev; struct net_device *netdev; size_t size = sizeof(struct esd_usb2_msg); @@ -472,7 +471,6 @@ static void esd_usb2_write_bulk_callback(struct urb *urb) priv = context->priv; netdev = priv->netdev; - dev = priv->usb2; /* free up our allocated buffer */ usb_free_coherent(urb->dev, size, @@ -1143,6 +1141,7 @@ static void esd_usb2_disconnect(struct usb_interface *intf) } } unlink_all_urbs(dev); + kfree(dev); } } diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 04b0f84612f0..009acc8641fc 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -718,6 +718,7 @@ static const struct net_device_ops gs_usb_netdev_ops = { .ndo_open = gs_can_open, .ndo_stop = gs_can_close, .ndo_start_xmit = gs_can_start_xmit, + .ndo_change_mtu = can_change_mtu, }; static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf) diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 5e8b5609c067..8a998e3884ce 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -300,7 +300,8 @@ static int xcan_set_bittiming(struct net_device *ndev) static int xcan_chip_start(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); - u32 err, reg_msr, reg_sr_mask; + u32 reg_msr, reg_sr_mask; + int err; unsigned long timeout; /* Check if it is in reset mode */ @@ -961,6 +962,7 @@ static const struct net_device_ops xcan_netdev_ops = { .ndo_open = xcan_open, .ndo_stop = xcan_close, .ndo_start_xmit = xcan_start_xmit, + .ndo_change_mtu = can_change_mtu, }; /** diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index b9625968daac..4f4c2a7888e5 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -377,6 +377,29 @@ static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id) return IRQ_HANDLED; } +static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv) +{ + unsigned int timeout = 1000; + u32 reg; + + reg = core_readl(priv, CORE_WATCHDOG_CTRL); + reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET; + core_writel(priv, reg, CORE_WATCHDOG_CTRL); + + do { + reg = core_readl(priv, 
CORE_WATCHDOG_CTRL); + if (!(reg & SOFTWARE_RESET)) + break; + + usleep_range(1000, 2000); + } while (timeout-- > 0); + + if (timeout == 0) + return -ETIMEDOUT; + + return 0; +} + static int bcm_sf2_sw_setup(struct dsa_switch *ds) { const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; @@ -404,11 +427,18 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) *base = of_iomap(dn, i); if (*base == NULL) { pr_err("unable to find register: %s\n", reg_names[i]); - return -ENODEV; + ret = -ENOMEM; + goto out_unmap; } base++; } + ret = bcm_sf2_sw_rst(priv); + if (ret) { + pr_err("unable to software reset switch: %d\n", ret); + goto out_unmap; + } + /* Disable all interrupts and request them */ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); @@ -484,7 +514,8 @@ out_free_irq0: out_unmap: base = &priv->core; for (i = 0; i < BCM_SF2_REGS_NUM; i++) { - iounmap(*base); + if (*base) + iounmap(*base); base++; } return ret; @@ -733,29 +764,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds) return 0; } -static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv) -{ - unsigned int timeout = 1000; - u32 reg; - - reg = core_readl(priv, CORE_WATCHDOG_CTRL); - reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET; - core_writel(priv, reg, CORE_WATCHDOG_CTRL); - - do { - reg = core_readl(priv, CORE_WATCHDOG_CTRL); - if (!(reg & SOFTWARE_RESET)) - break; - - usleep_range(1000, 2000); - } while (timeout-- > 0); - - if (timeout == 0) - return -ETIMEDOUT; - - return 0; -} - static int bcm_sf2_sw_resume(struct dsa_switch *ds) { struct bcm_sf2_priv *priv = ds_to_priv(ds); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index dbb41c1923e6..77f8f836cbbe 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -8563,7 +8563,8 @@ static int tg3_init_rings(struct tg3 *tp) if (tnapi->rx_rcb) memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); - if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { + if (tnapi->prodring.rx_std && + tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { tg3_free_rings(tp); return -ENOMEM; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index cca604994003..4fe33606f372 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -1082,7 +1082,7 @@ static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg) pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid); for (i = 0; i < CXGB4_MAX_PRIORITY; i++) - pg->prio_pg[i] = (pgid >> (i * 4)) & 0xF; + pg->prio_pg[7 - i] = (pgid >> (i * 4)) & 0xF; INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id); pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 9a18e7930b31..597c463e384d 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4309,11 +4309,16 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh) return -EOPNOTSUPP; br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { if (nla_type(attr) != IFLA_BRIDGE_MODE) continue; + if (nla_len(attr) < sizeof(mode)) + return -EINVAL; + mode = nla_get_u16(attr); if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) return -EINVAL; @@ -4421,6 +4426,11 @@ static void 
be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, "Disabled VxLAN offloads for UDP port %d\n", be16_to_cpu(port)); } + +static bool be_gso_check(struct sk_buff *skb, struct net_device *dev) +{ + return vxlan_gso_check(skb); +} #endif static const struct net_device_ops be_netdev_ops = { @@ -4450,6 +4460,7 @@ static const struct net_device_ops be_netdev_ops = { #ifdef CONFIG_BE2NET_VXLAN .ndo_add_vxlan_port = be_add_vxlan_port, .ndo_del_vxlan_port = be_del_vxlan_port, + .ndo_gso_check = be_gso_check, #endif }; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index a2d72a87cbde..487cd9c4ac0d 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1012,7 +1012,8 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) /* igb_get_stats64() might access the rings on this vector, * we must wait a grace period before freeing it. */ - kfree_rcu(q_vector, rcu); + if (q_vector) + kfree_rcu(q_vector, rcu); } /** @@ -1792,8 +1793,10 @@ void igb_down(struct igb_adapter *adapter) adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; for (i = 0; i < adapter->num_q_vectors; i++) { - napi_synchronize(&(adapter->q_vector[i]->napi)); - napi_disable(&(adapter->q_vector[i]->napi)); + if (adapter->q_vector[i]) { + napi_synchronize(&adapter->q_vector[i]->napi); + napi_disable(&adapter->q_vector[i]->napi); + } } @@ -3717,7 +3720,8 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter) int i; for (i = 0; i < adapter->num_tx_queues; i++) - igb_free_tx_resources(adapter->tx_ring[i]); + if (adapter->tx_ring[i]) + igb_free_tx_resources(adapter->tx_ring[i]); } void igb_unmap_and_free_tx_resource(struct igb_ring *ring, @@ -3782,7 +3786,8 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter) int i; for (i = 0; i < adapter->num_tx_queues; i++) - igb_clean_tx_ring(adapter->tx_ring[i]); + if (adapter->tx_ring[i]) + igb_clean_tx_ring(adapter->tx_ring[i]); } /** @@ -3819,7 +3824,8 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter) int i; for (i = 0; i < adapter->num_rx_queues; i++) - igb_free_rx_resources(adapter->rx_ring[i]); + if (adapter->rx_ring[i]) + igb_free_rx_resources(adapter->rx_ring[i]); } /** @@ -3874,7 +3880,8 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter) int i; for (i = 0; i < adapter->num_rx_queues; i++) - igb_clean_rx_ring(adapter->rx_ring[i]); + if (adapter->rx_ring[i]) + igb_clean_rx_ring(adapter->rx_ring[i]); } /** @@ -7404,6 +7411,8 @@ static int igb_resume(struct device *dev) pci_restore_state(pdev); pci_save_state(pdev); + if (!pci_device_is_present(pdev)) + return -ENODEV; err = pci_enable_device_mem(pdev); if (err) { dev_err(&pdev->dev, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index d2df4e3d1032..cc51554c9e99 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3936,8 +3936,8 @@ void ixgbe_set_rx_mode(struct net_device *netdev) * if SR-IOV and VMDQ are disabled - otherwise ensure * that hardware VLAN filters remain enabled. 
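The be2net bridge-setlink hunk above, like the identical ixgbe change further down, adds two guards before trusting netlink input: the IFLA_AF_SPEC container must exist, and each nested attribute must be long enough to hold the u16 it is read as. A small freestanding TLV sketch of the length guard; the struct layout and helper below are invented for illustration and are not the netlink API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy TLV: length counts payload bytes only.  Invented layout. */
struct tlv {
        uint16_t len;
        uint16_t type;
        unsigned char data[8];
};

static int tlv_get_u16(const struct tlv *a, uint16_t *out)
{
        /* The guard the patches add: refuse attributes too short for a u16. */
        if (a->len < sizeof(*out))
                return -1;
        memcpy(out, a->data, sizeof(*out));
        return 0;
}

int main(void)
{
        struct tlv truncated = { .len = 1, .type = 1, .data = { 0xab } };
        uint16_t mode;

        if (tlv_get_u16(&truncated, &mode))
                fprintf(stderr, "attribute too short, rejecting request\n");
        return 0;
}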
*/ - if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | - IXGBE_FLAG_SRIOV_ENABLED))) + if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | + IXGBE_FLAG_SRIOV_ENABLED)) vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); } else { if (netdev->flags & IFF_ALLMULTI) { @@ -7669,6 +7669,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, return -EOPNOTSUPP; br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { __u16 mode; @@ -7677,6 +7679,9 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, if (nla_type(attr) != IFLA_BRIDGE_MODE) continue; + if (nla_len(attr) < sizeof(mode)) + return -EINVAL; + mode = nla_get_u16(attr); if (mode == BRIDGE_MODE_VEPA) { reg = 0; @@ -7979,6 +7984,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int i, err, pci_using_dac, expected_gts; unsigned int indices = MAX_TX_QUEUES; u8 part_str[IXGBE_PBANUM_LENGTH]; + bool disable_dev = false; #ifdef IXGBE_FCOE u16 device_caps; #endif @@ -8369,13 +8375,14 @@ err_sw_init: iounmap(adapter->io_addr); kfree(adapter->mac_table); err_ioremap: + disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); err_alloc_etherdev: pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); err_pci_reg: err_dma: - if (!adapter || !test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) + if (!adapter || disable_dev) pci_disable_device(pdev); return err; } @@ -8393,6 +8400,7 @@ static void ixgbe_remove(struct pci_dev *pdev) { struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; + bool disable_dev; ixgbe_dbg_adapter_exit(adapter); @@ -8442,11 +8450,12 @@ static void ixgbe_remove(struct pci_dev *pdev) e_dev_info("complete\n"); kfree(adapter->mac_table); + disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); pci_disable_pcie_error_reporting(pdev); - if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) + if (disable_dev) pci_disable_device(pdev); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 02266e3de514..4d69e382b4e5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1693,7 +1693,7 @@ int mlx4_en_start_port(struct net_device *dev) mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); #ifdef CONFIG_MLX4_EN_VXLAN - if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) + if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) vxlan_get_rx_port(dev); #endif priv->port_up = true; @@ -2355,6 +2355,11 @@ static void mlx4_en_del_vxlan_port(struct net_device *dev, queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); } + +static bool mlx4_en_gso_check(struct sk_buff *skb, struct net_device *dev) +{ + return vxlan_gso_check(skb); +} #endif static const struct net_device_ops mlx4_netdev_ops = { @@ -2386,6 +2391,7 @@ static const struct net_device_ops mlx4_netdev_ops = { #ifdef CONFIG_MLX4_EN_VXLAN .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_gso_check = mlx4_en_gso_check, #endif }; @@ -2416,6 +2422,11 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, +#ifdef CONFIG_MLX4_EN_VXLAN + .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, + 
.ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_gso_check = mlx4_en_gso_check, +#endif }; int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 5d2498dcf536..cd5cf6d957c7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -1546,7 +1546,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, switch (op) { case RES_OP_RESERVE: - count = get_param_l(&in_param); + count = get_param_l(&in_param) & 0xffffff; align = get_param_h(&in_param); err = mlx4_grant_resource(dev, slave, RES_QP, count, 0); if (err) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index f5e29f7bdae3..a913b3ad2f89 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -503,6 +503,11 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev, adapter->flags |= QLCNIC_DEL_VXLAN_PORT; } + +static bool qlcnic_gso_check(struct sk_buff *skb, struct net_device *dev) +{ + return vxlan_gso_check(skb); +} #endif static const struct net_device_ops qlcnic_netdev_ops = { @@ -526,6 +531,7 @@ static const struct net_device_ops qlcnic_netdev_ops = { #ifdef CONFIG_QLCNIC_VXLAN .ndo_add_vxlan_port = qlcnic_add_vxlan_port, .ndo_del_vxlan_port = qlcnic_del_vxlan_port, + .ndo_gso_check = qlcnic_gso_check, #endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = qlcnic_poll_controller, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index db56fa7ce8f9..5b0da3986216 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -177,12 +177,6 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, */ plat->maxmtu = JUMBO_LEN; - /* Set default value for multicast hash bins */ - plat->multicast_filter_bins = HASH_TABLE_SIZE; - - /* Set default value for unicast filter entries */ - plat->unicast_filter_entries = 1; - /* * Currently only the properties needed on SPEAr600 * are provided. 
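The mlx4 resource-tracker hunk above masks the requested QP count to its low 24 bits before granting the reservation, so stray upper bits packed into the mailbox parameter cannot inflate the request. A standalone sketch of that unpacking; the field layout is assumed here purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout, for illustration only: bits 0..23 of the low dword are the
 * QP count, the upper byte of that dword is reserved for other uses, and the
 * high dword carries the alignment. */
static void unpack_reservation(uint64_t in_param, uint32_t *count, uint32_t *align)
{
        *count = (uint32_t)in_param & 0xffffff;  /* drop stray upper bits */
        *align = (uint32_t)(in_param >> 32);
}

int main(void)
{
        uint32_t count, align;

        /* A buggy or hostile requester sets bit 31 alongside a count of 16. */
        unpack_reservation(((uint64_t)8 << 32) | 0x80000010u, &count, &align);
        printf("count=%u align=%u\n", count, align);  /* count=16, not ~2 billion */
        return 0;
}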
All other properties should be added @@ -270,6 +264,13 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) return PTR_ERR(addr); plat_dat = dev_get_platdata(&pdev->dev); + + /* Set default value for multicast hash bins */ + plat_dat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat_dat->unicast_filter_entries = 1; + if (pdev->dev.of_node) { if (!plat_dat) plat_dat = devm_kzalloc(&pdev->dev, diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index d8794488f80a..c560f9aeb55d 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -129,9 +129,9 @@ do { \ #define CPSW_VLAN_AWARE BIT(1) #define CPSW_ALE_VLAN_AWARE 1 -#define CPSW_FIFO_NORMAL_MODE (0 << 15) -#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15) -#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15) +#define CPSW_FIFO_NORMAL_MODE (0 << 16) +#define CPSW_FIFO_DUAL_MAC_MODE (1 << 16) +#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 16) #define CPSW_INTPACEEN (0x3f << 16) #define CPSW_INTPRESCALE_MASK (0x7FF << 0) diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c index 9ce854f43917..6cbc56ad9ff4 100644 --- a/drivers/net/ieee802154/fakehard.c +++ b/drivers/net/ieee802154/fakehard.c @@ -377,17 +377,20 @@ static int ieee802154fake_probe(struct platform_device *pdev) err = wpan_phy_register(phy); if (err) - goto out; + goto err_phy_reg; err = register_netdev(dev); - if (err < 0) - goto out; + if (err) + goto err_netdev_reg; dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n"); return 0; -out: - unregister_netdev(dev); +err_netdev_reg: + wpan_phy_unregister(phy); +err_phy_reg: + free_netdev(dev); + wpan_phy_free(phy); return err; } diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 1aff970be33e..1dc628ffce2b 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -506,7 +506,9 @@ static int pptp_getname(struct socket *sock, struct sockaddr *uaddr, int len = sizeof(struct sockaddr_pppox); struct sockaddr_pppox sp; - sp.sa_family = AF_PPPOX; + memset(&sp.sa_addr, 0, sizeof(sp.sa_addr)); + + sp.sa_family = AF_PPPOX; sp.sa_protocol = PX_PROTO_PPTP; sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 22756db53dca..b8a82b86f909 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -780,6 +780,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ + {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ /* 4. 
Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index ec2a8b41ed41..b0bc8ead47de 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1673,6 +1673,40 @@ static const struct attribute_group virtio_net_mrg_rx_group = { }; #endif +static bool virtnet_fail_on_feature(struct virtio_device *vdev, + unsigned int fbit, + const char *fname, const char *dname) +{ + if (!virtio_has_feature(vdev, fbit)) + return false; + + dev_err(&vdev->dev, "device advertises feature %s but not %s", + fname, dname); + + return true; +} + +#define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ + virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) + +static bool virtnet_validate_features(struct virtio_device *vdev) +{ + if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && + (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, + "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, + "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, + "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, + "VIRTIO_NET_F_CTRL_VQ"))) { + return false; + } + + return true; +} + static int virtnet_probe(struct virtio_device *vdev) { int i, err; @@ -1680,6 +1714,9 @@ static int virtnet_probe(struct virtio_device *vdev) struct virtnet_info *vi; u16 max_queue_pairs; + if (!virtnet_validate_features(vdev)) + return -EINVAL; + /* Find if host supports multiqueue virtio_net device */ err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, struct virtio_net_config, diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index fa9dc45b75a6..be4649a49c5e 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -67,12 +67,6 @@ #define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */ -/* VXLAN protocol header */ -struct vxlanhdr { - __be32 vx_flags; - __be32 vx_vni; -}; - /* UDP port for VXLAN traffic. * The IANA assigned port is 4789, but the Linux default is 8472 * for compatibility with early adopters. 
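The virtio-net hunk above adds a consistency check on the feature bits a device offers: every control-path feature depends on VIRTIO_NET_F_CTRL_VQ, and a device advertising one without the other is refused before the driver touches it. A compact userspace sketch of the same validation; the feature bit values here are invented for the example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative feature bits, not the real virtio-net values. */
#define F_CTRL_VQ   (1u << 0)
#define F_CTRL_RX   (1u << 1)
#define F_CTRL_VLAN (1u << 2)
#define F_MQ        (1u << 3)

static bool fail_on_feature(uint32_t feats, uint32_t fbit, const char *fname)
{
        if (!(feats & fbit))
                return false;
        fprintf(stderr, "device advertises %s but not CTRL_VQ\n", fname);
        return true;
}

#define FAIL_ON(feats, fbit) fail_on_feature(feats, fbit, #fbit)

static bool validate_features(uint32_t feats)
{
        if (!(feats & F_CTRL_VQ) &&
            (FAIL_ON(feats, F_CTRL_RX) ||
             FAIL_ON(feats, F_CTRL_VLAN) ||
             FAIL_ON(feats, F_MQ)))
                return false;
        return true;
}

int main(void)
{
        /* Inconsistent device: multiqueue offered without a control virtqueue. */
        return validate_features(F_MQ) ? 0 : 1;
}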
@@ -2312,9 +2306,9 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6, if (ipv6) { udp_conf.family = AF_INET6; udp_conf.use_udp6_tx_checksums = - !!(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); + !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); udp_conf.use_udp6_rx_checksums = - !!(flags & VXLAN_F_UDP_ZERO_CSUM6_RX); + !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX); } else { udp_conf.family = AF_INET; udp_conf.local_ip.s_addr = INADDR_ANY; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 697c4ae90af0..1e8ea5e4d4ca 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -664,6 +664,19 @@ static void ar9003_hw_override_ini(struct ath_hw *ah) ah->enabled_cals |= TX_CL_CAL; else ah->enabled_cals &= ~TX_CL_CAL; + + if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah)) { + if (ah->is_clk_25mhz) { + REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1); + REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7); + REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae); + } else { + REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1); + REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400); + REG_WRITE(ah, AR_SLP32_INC, 0x0001e800); + } + udelay(100); + } } static void ar9003_hw_prog_ini(struct ath_hw *ah, diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 8be4b1453394..2ad605760e21 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -861,19 +861,6 @@ static void ath9k_hw_init_pll(struct ath_hw *ah, udelay(RTC_PLL_SETTLE_DELAY); REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK); - - if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) { - if (ah->is_clk_25mhz) { - REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1); - REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7); - REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae); - } else { - REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1); - REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400); - REG_WRITE(ah, AR_SLP32_INC, 0x0001e800); - } - udelay(100); - } } static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 30c66dfcd7a0..4f18a6be0c7d 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -974,9 +974,8 @@ void ath9k_calculate_iter_data(struct ath_softc *sc, struct ath_vif *avp; /* - * Pick the MAC address of the first interface as the new hardware - * MAC address. The hardware will use it together with the BSSID mask - * when matching addresses. + * The hardware will use primary station addr together with the + * BSSID mask when matching addresses. 
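The vxlan hunk at the top of this block is a polarity fix: VXLAN_F_UDP_ZERO_CSUM6_TX/RX mean "send/accept zero UDP6 checksums", so "use checksums" has to be the plain negation of the flag, not a double negation that preserves it. A two-variable truth-table sketch:

#include <stdio.h>

#define F_UDP_ZERO_CSUM6_TX 0x1   /* illustrative flag value */

int main(void)
{
        unsigned int flags = F_UDP_ZERO_CSUM6_TX; /* user asked for zero checksums */

        int old_way = !!(flags & F_UDP_ZERO_CSUM6_TX); /* 1: checksums stay on - wrong */
        int new_way = !(flags & F_UDP_ZERO_CSUM6_TX);  /* 0: checksums disabled - right */

        printf("use_udp6_tx_checksums: old=%d fixed=%d\n", old_way, new_way);
        return 0;
}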
*/ memset(iter_data, 0, sizeof(*iter_data)); memset(&iter_data->mask, 0xff, ETH_ALEN); @@ -1205,6 +1204,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, list_add_tail(&avp->list, &avp->chanctx->vifs); } + ath9k_calculate_summary_state(sc, avp->chanctx); + ath9k_assign_hw_queues(hw, vif); an->sc = sc; @@ -1274,6 +1275,8 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, ath_tx_node_cleanup(sc, &avp->mcast_node); + ath9k_calculate_summary_state(sc, avp->chanctx); + mutex_unlock(&sc->mutex); } diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c index 1dfc682a8055..ee27b06074e1 100644 --- a/drivers/net/wireless/b43/phy_common.c +++ b/drivers/net/wireless/b43/phy_common.c @@ -300,9 +300,7 @@ void b43_phy_write(struct b43_wldev *dev, u16 reg, u16 value) void b43_phy_copy(struct b43_wldev *dev, u16 destreg, u16 srcreg) { - assert_mac_suspended(dev); - dev->phy.ops->phy_write(dev, destreg, - dev->phy.ops->phy_read(dev, srcreg)); + b43_phy_write(dev, destreg, b43_phy_read(dev, srcreg)); } void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/of.c b/drivers/net/wireless/brcm80211/brcmfmac/of.c index f05f5270fec1..927bffd5be64 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/of.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/of.c @@ -40,8 +40,8 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev) return; irq = irq_of_parse_and_map(np, 0); - if (irq < 0) { - brcmf_err("interrupt could not be mapped: err=%d\n", irq); + if (!irq) { + brcmf_err("interrupt could not be mapped\n"); devm_kfree(dev, sdiodev->pdata); return; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c index 8c0632ec9f7a..16fef3382019 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c @@ -19,10 +19,10 @@ #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/delay.h> -#include <linux/unaligned/access_ok.h> #include <linux/interrupt.h> #include <linux/bcma/bcma.h> #include <linux/sched.h> +#include <asm/unaligned.h> #include <soc.h> #include <chipcommon.h> diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c index dc135915470d..875d1142c8b0 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c @@ -669,10 +669,12 @@ static int brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd, goto finalize; } - if (!brcmf_usb_ioctl_resp_wait(devinfo)) + if (!brcmf_usb_ioctl_resp_wait(devinfo)) { + usb_kill_urb(devinfo->ctl_urb); ret = -ETIMEDOUT; - else + } else { memcpy(buffer, tmpbuf, buflen); + } finalize: kfree(tmpbuf); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 28fa25b509db..39b45c038a93 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -299,6 +299,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, primary_offset = ch->center_freq1 - ch->chan->center_freq; switch (ch->width) { case NL80211_CHAN_WIDTH_20: + case NL80211_CHAN_WIDTH_20_NOHT: ch_inf.bw = BRCMU_CHAN_BW_20; WARN_ON(primary_offset != 0); break; @@ -323,6 +324,10 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, ch_inf.sb = BRCMU_CHAN_SB_LU; } break; + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + case 
NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: default: WARN_ON_ONCE(1); } @@ -333,6 +338,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, case IEEE80211_BAND_5GHZ: ch_inf.band = BRCMU_CHAN_BAND_5G; break; + case IEEE80211_BAND_60GHZ: default: WARN_ON_ONCE(1); } diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index 4f6e66892acc..b894a84e8393 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -155,6 +155,7 @@ enum iwl_ucode_tlv_api { * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA), * which also implies support for the scheduler configuration command + * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command */ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0), @@ -163,6 +164,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10), IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11), IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12), + IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18), }; /* The default calibrate table size if not specified by firmware file */ diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index b62405865b25..b6d2683da3a9 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -2448,9 +2448,15 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw, switch (vif->type) { case NL80211_IFTYPE_STATION: - /* Use aux roc framework (HS20) */ - ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, - vif, duration); + if (mvm->fw->ucode_capa.capa[0] & + IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) { + /* Use aux roc framework (HS20) */ + ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, + vif, duration); + goto out_unlock; + } + IWL_ERR(mvm, "hotspot not supported\n"); + ret = -EINVAL; goto out_unlock; case NL80211_IFTYPE_P2P_DEVICE: /* handle below */ diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index b280d5d87127..7554f7053830 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -602,16 +602,6 @@ static int iwl_mvm_cancel_regular_scan(struct iwl_mvm *mvm) SCAN_COMPLETE_NOTIFICATION }; int ret; - if (mvm->scan_status == IWL_MVM_SCAN_NONE) - return 0; - - if (iwl_mvm_is_radio_killed(mvm)) { - ieee80211_scan_completed(mvm->hw, true); - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - mvm->scan_status = IWL_MVM_SCAN_NONE; - return 0; - } - iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort, scan_abort_notif, ARRAY_SIZE(scan_abort_notif), @@ -1400,6 +1390,16 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, int iwl_mvm_cancel_scan(struct iwl_mvm *mvm) { + if (mvm->scan_status == IWL_MVM_SCAN_NONE) + return 0; + + if (iwl_mvm_is_radio_killed(mvm)) { + ieee80211_scan_completed(mvm->hw, true); + iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + mvm->scan_status = IWL_MVM_SCAN_NONE; + return 0; + } + if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) return iwl_mvm_scan_offload_stop(mvm, true); return iwl_mvm_cancel_regular_scan(mvm); diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 160c3ebc48d0..dd2f3f8baa9d 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -1894,8 +1894,7 @@ static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans, int reg; 
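The iwlwifi PCIe dump fix immediately below these declarations corrects a length-accounting slip: data is a pointer to a pointer to the dump descriptor, so sizeof(*data) measures a pointer while sizeof(**data) measures the descriptor being written. A minimal demonstration with an invented stand-in struct:

#include <stdio.h>

/* Invented stand-in for the driver's dump descriptor. */
struct dump_entry {
        unsigned int type;
        unsigned int len;
        unsigned char payload[64];
};

int main(void)
{
        struct dump_entry entry = { 0 };
        struct dump_entry *p = &entry;
        struct dump_entry **data = &p;   /* the caller passes a pointer-to-pointer */

        printf("sizeof(*data)=%zu (just a pointer), sizeof(**data)=%zu (the entry)\n",
               sizeof(*data), sizeof(**data));
        printf("first field of the current entry sits at %p\n",
               (void *)&(*data)->type);
        return 0;
}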
__le32 *val; - prph_len += sizeof(*data) + sizeof(*prph) + - num_bytes_in_chunk; + prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); (*data)->len = cpu_to_le32(sizeof(*prph) + diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index 8e68f87ab13c..66ff36447b94 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c @@ -158,55 +158,29 @@ void rt2x00queue_align_frame(struct sk_buff *skb) skb_trim(skb, frame_length); } -void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length) +/* + * H/W needs L2 padding between the header and the paylod if header size + * is not 4 bytes aligned. + */ +void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len) { - unsigned int payload_length = skb->len - header_length; - unsigned int header_align = ALIGN_SIZE(skb, 0); - unsigned int payload_align = ALIGN_SIZE(skb, header_length); - unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0; + unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0; - /* - * Adjust the header alignment if the payload needs to be moved more - * than the header. - */ - if (payload_align > header_align) - header_align += 4; - - /* There is nothing to do if no alignment is needed */ - if (!header_align) + if (!l2pad) return; - /* Reserve the amount of space needed in front of the frame */ - skb_push(skb, header_align); - - /* - * Move the header. - */ - memmove(skb->data, skb->data + header_align, header_length); - - /* Move the payload, if present and if required */ - if (payload_length && payload_align) - memmove(skb->data + header_length + l2pad, - skb->data + header_length + l2pad + payload_align, - payload_length); - - /* Trim the skb to the correct size */ - skb_trim(skb, header_length + l2pad + payload_length); + skb_push(skb, l2pad); + memmove(skb->data, skb->data + l2pad, hdr_len); } -void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length) +void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len) { - /* - * L2 padding is only present if the skb contains more than just the - * IEEE 802.11 header. - */ - unsigned int l2pad = (skb->len > header_length) ? - L2PAD_SIZE(header_length) : 0; + unsigned int l2pad = (skb->len > hdr_len) ? 
L2PAD_SIZE(hdr_len) : 0; if (!l2pad) return; - memmove(skb->data + l2pad, skb->data, header_length); + memmove(skb->data + l2pad, skb->data, hdr_len); skb_pull(skb, l2pad); } diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index 25daa8715219..846a2e6e34d8 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c @@ -842,7 +842,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) break; } /* handle command packet here */ - if (rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) { + if (rtlpriv->cfg->ops->rx_command_packet && + rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) { dev_kfree_skb_any(skb); goto end; } @@ -1127,9 +1128,14 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) __skb_queue_tail(&ring->queue, pskb); - rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN, - &temp_one); - + if (rtlpriv->use_new_trx_flow) { + temp_one = 4; + rtlpriv->cfg->ops->set_desc(hw, (u8 *)pbuffer_desc, true, + HW_DESC_OWN, (u8 *)&temp_one); + } else { + rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN, + &temp_one); + } return; } @@ -1370,9 +1376,9 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw, ring->desc = NULL; if (rtlpriv->use_new_trx_flow) { pci_free_consistent(rtlpci->pdev, - sizeof(*ring->desc) * ring->entries, + sizeof(*ring->buffer_desc) * ring->entries, ring->buffer_desc, ring->buffer_desc_dma); - ring->desc = NULL; + ring->buffer_desc = NULL; } } @@ -1543,7 +1549,6 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw) true, HW_DESC_TXBUFF_ADDR), skb->len, PCI_DMA_TODEVICE); - ring->idx = (ring->idx + 1) % ring->entries; kfree_skb(skb); ring->idx = (ring->idx + 1) % ring->entries; } @@ -2244,6 +2249,16 @@ int rtl_pci_probe(struct pci_dev *pdev, /*like read eeprom and so on */ rtlpriv->cfg->ops->read_eeprom_info(hw); + if (rtlpriv->cfg->ops->init_sw_vars(hw)) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n"); + err = -ENODEV; + goto fail3; + } + rtlpriv->cfg->ops->init_sw_leds(hw); + + /*aspm */ + rtl_pci_init_aspm(hw); + /* Init mac80211 sw */ err = rtl_init_core(hw); if (err) { @@ -2259,16 +2274,6 @@ int rtl_pci_probe(struct pci_dev *pdev, goto fail3; } - if (rtlpriv->cfg->ops->init_sw_vars(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n"); - err = -ENODEV; - goto fail3; - } - rtlpriv->cfg->ops->init_sw_leds(hw); - - /*aspm */ - rtl_pci_init_aspm(hw); - err = ieee80211_register_hw(hw); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c index 00e067044c08..5761d5b49e39 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c @@ -1201,6 +1201,9 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw, } + if (type != NL80211_IFTYPE_AP && + rtlpriv->mac80211.link_state < MAC80211_LINKED) + bt_msr = rtl_read_byte(rtlpriv, MSR) & ~MSR_LINK_MASK; rtl_write_byte(rtlpriv, (MSR), bt_msr); temp = rtl_read_dword(rtlpriv, TCR); @@ -1262,6 +1265,7 @@ void rtl92se_enable_interrupt(struct ieee80211_hw *hw) rtl_write_dword(rtlpriv, INTA_MASK, rtlpci->irq_mask[0]); /* Support Bit 32-37(Assign as Bit 0-5) interrupt setting now */ rtl_write_dword(rtlpriv, INTA_MASK + 4, rtlpci->irq_mask[1] & 0x3F); + rtlpci->irq_enabled = true; } void rtl92se_disable_interrupt(struct ieee80211_hw *hw) @@ -1276,8 +1280,7 @@ void rtl92se_disable_interrupt(struct ieee80211_hw *hw) rtlpci = 
rtl_pcidev(rtl_pcipriv(hw)); rtl_write_dword(rtlpriv, INTA_MASK, 0); rtl_write_dword(rtlpriv, INTA_MASK + 4, 0); - - synchronize_irq(rtlpci->pdev->irq); + rtlpci->irq_enabled = false; } static u8 _rtl92s_set_sysclk(struct ieee80211_hw *hw, u8 data) diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c index 77c5b5f35244..4b4612fe2fdb 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c @@ -399,6 +399,8 @@ static bool _rtl92s_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, case 2: currentcmd = &postcommoncmd[*step]; break; + default: + return true; } if (currentcmd->cmdid == CMDID_END) { diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c index aadba29c167a..fb003868bdef 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c @@ -236,6 +236,19 @@ static void rtl92s_deinit_sw_vars(struct ieee80211_hw *hw) } } +static bool rtl92se_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, + u16 index) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue]; + u8 *entry = (u8 *)(&ring->desc[ring->idx]); + u8 own = (u8)rtl92se_get_desc(entry, true, HW_DESC_OWN); + + if (own) + return false; + return true; +} + static struct rtl_hal_ops rtl8192se_hal_ops = { .init_sw_vars = rtl92s_init_sw_vars, .deinit_sw_vars = rtl92s_deinit_sw_vars, @@ -269,6 +282,7 @@ static struct rtl_hal_ops rtl8192se_hal_ops = { .led_control = rtl92se_led_control, .set_desc = rtl92se_set_desc, .get_desc = rtl92se_get_desc, + .is_tx_desc_closed = rtl92se_is_tx_desc_closed, .tx_polling = rtl92se_tx_polling, .enable_hw_sec = rtl92se_enable_hw_security_config, .set_key = rtl92se_set_key, @@ -306,6 +320,8 @@ static struct rtl_hal_cfg rtl92se_hal_cfg = { .maps[MAC_RCR_ACRC32] = RCR_ACRC32, .maps[MAC_RCR_ACF] = RCR_ACF, .maps[MAC_RCR_AAP] = RCR_AAP, + .maps[MAC_HIMR] = INTA_MASK, + .maps[MAC_HIMRE] = INTA_MASK + 4, .maps[EFUSE_TEST] = REG_EFUSE_TEST, .maps[EFUSE_CTRL] = REG_EFUSE_CTRL, diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c index 310d3163dc5b..8ec8200002c7 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c @@ -3672,8 +3672,9 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw, mac->opmode == NL80211_IFTYPE_ADHOC) macid = sta->aid + 1; if (wirelessmode == WIRELESS_MODE_N_5G || - wirelessmode == WIRELESS_MODE_AC_5G) - ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ]; + wirelessmode == WIRELESS_MODE_AC_5G || + wirelessmode == WIRELESS_MODE_A) + ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ] << 4; else ratr_bitmap = sta->supp_rates[NL80211_BAND_2GHZ]; diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 4e56a27f9689..fab0d4b42f58 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -39,7 +39,7 @@ struct backend_info { static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); static void connect(struct backend_info *be); static int read_xenbus_vif_flags(struct backend_info *be); -static void backend_create_xenvif(struct backend_info *be); +static int backend_create_xenvif(struct backend_info *be); static void unregister_hotplug_status_watch(struct backend_info *be); static void set_backend_state(struct backend_info *be, enum xenbus_state 
state); @@ -352,7 +352,9 @@ static int netback_probe(struct xenbus_device *dev, be->state = XenbusStateInitWait; /* This kicks hotplug scripts, so do it immediately. */ - backend_create_xenvif(be); + err = backend_create_xenvif(be); + if (err) + goto fail; return 0; @@ -397,19 +399,19 @@ static int netback_uevent(struct xenbus_device *xdev, } -static void backend_create_xenvif(struct backend_info *be) +static int backend_create_xenvif(struct backend_info *be) { int err; long handle; struct xenbus_device *dev = be->dev; if (be->vif != NULL) - return; + return 0; err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle); if (err != 1) { xenbus_dev_fatal(dev, err, "reading handle"); - return; + return (err < 0) ? err : -EINVAL; } be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle); @@ -417,10 +419,11 @@ static void backend_create_xenvif(struct backend_info *be) err = PTR_ERR(be->vif); be->vif = NULL; xenbus_dev_fatal(dev, err, "creating interface"); - return; + return err; } kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); + return 0; } static void backend_disconnect(struct backend_info *be) diff --git a/drivers/of/address.c b/drivers/of/address.c index afdb78299f61..06af494184d6 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -450,6 +450,21 @@ static struct of_bus *of_match_bus(struct device_node *np) return NULL; } +static int of_empty_ranges_quirk(void) +{ + if (IS_ENABLED(CONFIG_PPC)) { + /* To save cycles, we cache the result */ + static int quirk_state = -1; + + if (quirk_state < 0) + quirk_state = + of_machine_is_compatible("Power Macintosh") || + of_machine_is_compatible("MacRISC"); + return quirk_state; + } + return false; +} + static int of_translate_one(struct device_node *parent, struct of_bus *bus, struct of_bus *pbus, __be32 *addr, int na, int ns, int pna, const char *rprop) @@ -475,12 +490,10 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus, * This code is only enabled on powerpc. --gcl */ ranges = of_get_property(parent, rprop, &rlen); -#if !defined(CONFIG_PPC) - if (ranges == NULL) { + if (ranges == NULL && !of_empty_ranges_quirk()) { pr_err("OF: no ranges; cannot translate\n"); return 1; } -#endif /* !defined(CONFIG_PPC) */ if (ranges == NULL || rlen == 0) { offset = of_read_number(addr, na); memset(addr, 0, pna * 4); diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index f297891d8529..d4994177dec2 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -247,7 +247,7 @@ void of_node_release(struct kobject *kobj) * @allocflags: Allocation flags (typically pass GFP_KERNEL) * * Copy a property by dynamically allocating the memory of both the - * property stucture and the property name & contents. The property's + * property structure and the property name & contents. The property's * flags have the OF_DYNAMIC bit set so that we can differentiate between * dynamically allocated properties and not. * Returns the newly allocated property or NULL on out of memory error. 
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index d1ffca8b34ea..30e97bcc4f88 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -773,7 +773,7 @@ int __init early_init_dt_scan_chosen_serial(void) if (offset < 0) return -ENODEV; - while (match->compatible) { + while (match->compatible[0]) { unsigned long addr; if (fdt_node_check_compatible(fdt, offset, match->compatible)) { match++; diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c index 11b873c54a77..e2d79afa9dc6 100644 --- a/drivers/of/selftest.c +++ b/drivers/of/selftest.c @@ -896,10 +896,14 @@ static void selftest_data_remove(void) return; } - while (last_node_index >= 0) { + while (last_node_index-- > 0) { if (nodes[last_node_index]) { np = of_find_node_by_path(nodes[last_node_index]->full_name); - if (strcmp(np->full_name, "/aliases") != 0) { + if (np == nodes[last_node_index]) { + if (of_aliases == np) { + of_node_put(of_aliases); + of_aliases = NULL; + } detach_node_and_children(np); } else { for_each_property_of_node(np, prop) { @@ -908,7 +912,6 @@ static void selftest_data_remove(void) } } } - last_node_index--; } } @@ -921,6 +924,8 @@ static int __init of_selftest(void) res = selftest_data_add(); if (res) return res; + if (!of_aliases) + of_aliases = of_find_node_by_path("/aliases"); np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a"); if (!np) { diff --git a/drivers/pci/access.c b/drivers/pci/access.c index d292d7cb3417..49dd766852ba 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -444,7 +444,7 @@ static inline int pcie_cap_version(const struct pci_dev *dev) return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS; } -static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev) +bool pcie_cap_has_lnkctl(const struct pci_dev *dev) { int type = pci_pcie_type(dev); diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index 9ecabfa8c634..2988fe136c1e 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c @@ -631,10 +631,15 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) if (ret) return ret; - bus = pci_scan_root_bus(&pdev->dev, 0, &xgene_pcie_ops, port, &res); + bus = pci_create_root_bus(&pdev->dev, 0, + &xgene_pcie_ops, port, &res); if (!bus) return -ENOMEM; + pci_scan_child_bus(bus); + pci_assign_unassigned_bus_resources(bus); + pci_bus_add_devices(bus); + platform_set_drvdata(pdev, port); return 0; } diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 9fab30af0e75..084587d7cd13 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -590,6 +590,20 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev) return entry; } +static int msi_verify_entries(struct pci_dev *dev) +{ + struct msi_desc *entry; + + list_for_each_entry(entry, &dev->msi_list, list) { + if (!dev->no_64bit_msi || !entry->msg.address_hi) + continue; + dev_err(&dev->dev, "Device has broken 64-bit MSI but arch" + " tried to assign one above 4G\n"); + return -EIO; + } + return 0; +} + /** * msi_capability_init - configure device's MSI capability structure * @dev: pointer to the pci_dev data structure of MSI device function @@ -627,6 +641,13 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) return ret; } + ret = msi_verify_entries(dev); + if (ret) { + msi_mask_irq(entry, mask, ~mask); + free_msi_irqs(dev); + return ret; + } + ret = populate_msi_sysfs(dev); if (ret) { msi_mask_irq(entry, mask, ~mask); @@ -739,6 +760,11 @@ static int msix_capability_init(struct pci_dev *dev, if (ret) goto out_avail; + /* Check if all MSI entries honor 
device restrictions */ + ret = msi_verify_entries(dev); + if (ret) + goto out_free; + /* * Some devices require MSI-X to be enabled before we can touch the * MSI-X registers. We need to mask all the vectors to prevent diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 0601890db22d..4a3902d8e6fe 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -6,6 +6,8 @@ extern const unsigned char pcie_link_speed[]; +bool pcie_cap_has_lnkctl(const struct pci_dev *dev); + /* Functions internal to the PCI core code */ int pci_create_sysfs_dev_files(struct pci_dev *pdev); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 5ed99309c758..c8ca98c2b480 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -407,15 +407,16 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) { struct pci_dev *dev = child->self; u16 mem_base_lo, mem_limit_lo; - unsigned long base, limit; + u64 base64, limit64; + dma_addr_t base, limit; struct pci_bus_region region; struct resource *res; res = child->resource[2]; pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo); pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo); - base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16; - limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16; + base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16; + limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16; if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { u32 mem_base_hi, mem_limit_hi; @@ -429,17 +430,20 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) * this, just assume they are not being used. */ if (mem_base_hi <= mem_limit_hi) { -#if BITS_PER_LONG == 64 - base |= ((unsigned long) mem_base_hi) << 32; - limit |= ((unsigned long) mem_limit_hi) << 32; -#else - if (mem_base_hi || mem_limit_hi) { - dev_err(&dev->dev, "can't handle 64-bit address space for bridge\n"); - return; - } -#endif + base64 |= (u64) mem_base_hi << 32; + limit64 |= (u64) mem_limit_hi << 32; } } + + base = (dma_addr_t) base64; + limit = (dma_addr_t) limit64; + + if (base != base64) { + dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n", + (unsigned long long) base64); + return; + } + if (base <= limit) { res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH; @@ -1323,7 +1327,7 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or); /* Initialize Link Control Register */ - if (dev->subordinate) + if (pcie_cap_has_lnkctl(dev)) pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL, ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or); diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 79e5c94107a9..72533c58c1f3 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -412,6 +412,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, struct fc_frame_header *fh; struct fcoe_rcv_info *fr; struct fcoe_percpu_s *bg; + struct sk_buff *tmp_skb; unsigned short oxid; interface = container_of(ptype, struct bnx2fc_interface, @@ -424,6 +425,12 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, goto err; } + tmp_skb = skb_share_check(skb, GFP_ATOMIC); + if (!tmp_skb) + goto err; + + skb = tmp_skb; + if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n"); goto err; diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 
b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 81bb3bd7909d..15081257cfc8 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -828,6 +828,8 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) if (status == CPL_ERR_RTX_NEG_ADVICE) goto rel_skb; + module_put(THIS_MODULE); + if (status && status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && status != CPL_ERR_ARP_MISS) diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 13d869a92248..7da59c38a69e 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -816,7 +816,7 @@ static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk) read_lock_bh(&csk->callback_lock); if (csk->user_data) iscsi_conn_failure(csk->user_data, - ISCSI_ERR_CONN_FAILED); + ISCSI_ERR_TCP_CONN_CLOSE); read_unlock_bh(&csk->callback_lock); } } diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 49014a143c6a..c1d04d4d3c6c 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -202,6 +202,7 @@ static struct { {"IOMEGA", "Io20S *F", NULL, BLIST_KEY}, {"INSITE", "Floptical F*8I", NULL, BLIST_KEY}, {"INSITE", "I325VM", NULL, BLIST_KEY}, + {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC}, {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index 8adf067ff019..1c3467b82566 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -102,7 +102,6 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba) clkfreq = devm_kzalloc(dev, sz * sizeof(*clkfreq), GFP_KERNEL); if (!clkfreq) { - dev_err(dev, "%s: no memory\n", "freq-table-hz"); ret = -ENOMEM; goto out; } @@ -112,19 +111,19 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba) if (ret && (ret != -EINVAL)) { dev_err(dev, "%s: error reading array %d\n", "freq-table-hz", ret); - goto free_clkfreq; + return ret; } for (i = 0; i < sz; i += 2) { ret = of_property_read_string_index(np, "clock-names", i/2, (const char **)&name); if (ret) - goto free_clkfreq; + goto out; clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL); if (!clki) { ret = -ENOMEM; - goto free_clkfreq; + goto out; } clki->min_freq = clkfreq[i]; @@ -134,8 +133,6 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba) clki->min_freq, clki->max_freq, clki->name); list_add_tail(&clki->list, &hba->clk_list_head); } -free_clkfreq: - kfree(clkfreq); out: return ret; } @@ -162,10 +159,8 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name, } vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL); - if (!vreg) { - dev_err(dev, "No memory for %s regulator\n", name); - goto out; - } + if (!vreg) + return -ENOMEM; vreg->name = kstrdup(name, GFP_KERNEL); diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 497c38a4a866..605ca60e8a10 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -744,6 +744,8 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba) if (!ufshcd_is_clkgating_allowed(hba)) return; device_remove_file(hba->dev, &hba->clk_gating.delay_attr); + cancel_work_sync(&hba->clk_gating.ungate_work); + cancel_delayed_work_sync(&hba->clk_gating.gate_work); } /* Must be called with host lock acquired */ @@ -2246,6 +2248,22 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) 
return ret; } + /** + * ufshcd_init_pwr_info - setting the POR (power on reset) + * values in hba power info + * @hba: per-adapter instance + */ +static void ufshcd_init_pwr_info(struct ufs_hba *hba) +{ + hba->pwr_info.gear_rx = UFS_PWM_G1; + hba->pwr_info.gear_tx = UFS_PWM_G1; + hba->pwr_info.lane_rx = 1; + hba->pwr_info.lane_tx = 1; + hba->pwr_info.pwr_rx = SLOWAUTO_MODE; + hba->pwr_info.pwr_tx = SLOWAUTO_MODE; + hba->pwr_info.hs_rate = 0; +} + /** * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device * @hba: per-adapter instance @@ -2844,8 +2862,13 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev) hba = shost_priv(sdev->host); scsi_deactivate_tcq(sdev, hba->nutrs); /* Drop the reference as it won't be needed anymore */ - if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) + if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) { + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); hba->sdev_ufs_device = NULL; + spin_unlock_irqrestore(hba->host->host_lock, flags); + } } /** @@ -4062,6 +4085,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba) static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) { int ret = 0; + struct scsi_device *sdev_rpmb; + struct scsi_device *sdev_boot; hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0, ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL); @@ -4070,26 +4095,27 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) hba->sdev_ufs_device = NULL; goto out; } + scsi_device_put(hba->sdev_ufs_device); - hba->sdev_boot = __scsi_add_device(hba->host, 0, 0, + sdev_boot = __scsi_add_device(hba->host, 0, 0, ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL); - if (IS_ERR(hba->sdev_boot)) { - ret = PTR_ERR(hba->sdev_boot); - hba->sdev_boot = NULL; + if (IS_ERR(sdev_boot)) { + ret = PTR_ERR(sdev_boot); goto remove_sdev_ufs_device; } + scsi_device_put(sdev_boot); - hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0, + sdev_rpmb = __scsi_add_device(hba->host, 0, 0, ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL); - if (IS_ERR(hba->sdev_rpmb)) { - ret = PTR_ERR(hba->sdev_rpmb); - hba->sdev_rpmb = NULL; + if (IS_ERR(sdev_rpmb)) { + ret = PTR_ERR(sdev_rpmb); goto remove_sdev_boot; } + scsi_device_put(sdev_rpmb); goto out; remove_sdev_boot: - scsi_remove_device(hba->sdev_boot); + scsi_remove_device(sdev_boot); remove_sdev_ufs_device: scsi_remove_device(hba->sdev_ufs_device); out: @@ -4097,30 +4123,6 @@ out: } /** - * ufshcd_scsi_remove_wlus - Removes the W-LUs which were added by - * ufshcd_scsi_add_wlus() - * @hba: per-adapter instance - * - */ -static void ufshcd_scsi_remove_wlus(struct ufs_hba *hba) -{ - if (hba->sdev_ufs_device) { - scsi_remove_device(hba->sdev_ufs_device); - hba->sdev_ufs_device = NULL; - } - - if (hba->sdev_boot) { - scsi_remove_device(hba->sdev_boot); - hba->sdev_boot = NULL; - } - - if (hba->sdev_rpmb) { - scsi_remove_device(hba->sdev_rpmb); - hba->sdev_rpmb = NULL; - } -} - -/** * ufshcd_probe_hba - probe hba to detect device and initialize * @hba: per-adapter instance * @@ -4134,6 +4136,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) if (ret) goto out; + ufshcd_init_pwr_info(hba); + /* UniPro link is active now */ ufshcd_set_link_active(hba); @@ -4264,12 +4268,18 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, struct ufs_vreg *vreg) { + if (!vreg) + return 0; + return ufshcd_config_vreg_load(hba->dev, vreg, 
UFS_VREG_LPM_LOAD_UA); } static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, struct ufs_vreg *vreg) { + if (!vreg) + return 0; + return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); } @@ -4471,7 +4481,7 @@ out: if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) clk_disable_unprepare(clki->clk); } - } else if (!ret && on) { + } else if (on) { spin_lock_irqsave(hba->host->host_lock, flags); hba->clk_gating.state = CLKS_ON; spin_unlock_irqrestore(hba->host->host_lock, flags); @@ -4675,11 +4685,25 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, { unsigned char cmd[6] = { START_STOP }; struct scsi_sense_hdr sshdr; - struct scsi_device *sdp = hba->sdev_ufs_device; + struct scsi_device *sdp; + unsigned long flags; int ret; - if (!sdp || !scsi_device_online(sdp)) - return -ENODEV; + spin_lock_irqsave(hba->host->host_lock, flags); + sdp = hba->sdev_ufs_device; + if (sdp) { + ret = scsi_device_get(sdp); + if (!ret && !scsi_device_online(sdp)) { + ret = -ENODEV; + scsi_device_put(sdp); + } + } else { + ret = -ENODEV; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (ret) + return ret; /* * If scsi commands fail, the scsi mid-layer schedules scsi error- @@ -4718,6 +4742,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, if (!ret) hba->curr_dev_pwr_mode = pwr_mode; out: + scsi_device_put(sdp); hba->host->eh_noresume = 0; return ret; } @@ -5087,7 +5112,7 @@ int ufshcd_system_suspend(struct ufs_hba *hba) int ret = 0; if (!hba || !hba->is_powered) - goto out; + return 0; if (pm_runtime_suspended(hba->dev)) { if (hba->rpm_lvl == hba->spm_lvl) @@ -5231,7 +5256,6 @@ EXPORT_SYMBOL(ufshcd_shutdown); void ufshcd_remove(struct ufs_hba *hba) { scsi_remove_host(hba->host); - ufshcd_scsi_remove_wlus(hba); /* disable interrupts */ ufshcd_disable_intr(hba, hba->intr_mask); ufshcd_hba_stop(hba); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 58ecdff5065c..4a574aa45855 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -392,8 +392,6 @@ struct ufs_hba { * "UFS device" W-LU. 
*/ struct scsi_device *sdev_ufs_device; - struct scsi_device *sdev_rpmb; - struct scsi_device *sdev_boot; enum ufs_dev_pwr_mode curr_dev_pwr_mode; enum uic_link_state uic_link_state; diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 72e12bad14b9..d0d5542efc06 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -376,9 +376,6 @@ static void pump_transfers(unsigned long data) chip = dws->cur_chip; spi = message->spi; - if (unlikely(!chip->clk_div)) - chip->clk_div = dws->max_freq / chip->speed_hz; - if (message->state == ERROR_STATE) { message->status = -EIO; goto early_exit; @@ -419,7 +416,7 @@ static void pump_transfers(unsigned long data) if (transfer->speed_hz) { speed = chip->speed_hz; - if (transfer->speed_hz != speed) { + if ((transfer->speed_hz != speed) || (!chip->clk_div)) { speed = transfer->speed_hz; /* clk_div doesn't support odd number */ @@ -581,7 +578,6 @@ static int dw_spi_setup(struct spi_device *spi) dev_err(&spi->dev, "No max speed HZ parameter\n"); return -EINVAL; } - chip->speed_hz = spi->max_speed_hz; chip->tmode = 0; /* Tx & Rx */ /* Default SPI mode is SCPOL = 0, SCPH = 0 */ diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c index 39e2c0a55a28..f63de781c729 100644 --- a/drivers/spi/spi-sirf.c +++ b/drivers/spi/spi-sirf.c @@ -562,9 +562,9 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t) sspi->word_width = DIV_ROUND_UP(bits_per_word, 8); txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | - sspi->word_width; + (sspi->word_width >> 1); rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | - sspi->word_width; + (sspi->word_width >> 1); if (!(spi->mode & SPI_CS_HIGH)) regval |= SIRFSOC_SPI_CS_IDLE_STAT; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index ebcb33df2eb2..50f20f243981 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -615,13 +615,13 @@ static int spi_map_buf(struct spi_master *master, struct device *dev, sg_free_table(sgt); return -ENOMEM; } - sg_buf = page_address(vm_page) + - ((size_t)buf & ~PAGE_MASK); + sg_set_page(&sgt->sgl[i], vm_page, + min, offset_in_page(buf)); } else { sg_buf = buf; + sg_set_buf(&sgt->sgl[i], sg_buf, min); } - sg_set_buf(&sgt->sgl[i], sg_buf, min); buf += min; len -= min; diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c index 9935e66935af..eddef9cd2e16 100644 --- a/drivers/staging/rtl8188eu/core/rtw_cmd.c +++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c @@ -275,11 +275,11 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, if (check_fwstate(pmlmepriv, _FW_LINKED) == true) rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1); - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (ph2c == NULL) return _FAIL; - psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_KERNEL); + psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC); if (psurveyPara == NULL) { kfree(ph2c); return _FAIL; @@ -405,7 +405,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork) else RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid)); - pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (pcmd == NULL) { res = _FAIL; RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd: memory allocate for cmd_obj fail!!!\n")); @@ -755,13 +755,13 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter) 
u8 res = _SUCCESS; - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (ph2c == NULL) { res = _FAIL; goto exit; } - pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); + pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC); if (pdrvextra_cmd_parm == NULL) { kfree(ph2c); res = _FAIL; @@ -967,13 +967,13 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue) u8 res = _SUCCESS; if (enqueue) { - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (ph2c == NULL) { res = _FAIL; goto exit; } - pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); + pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC); if (pdrvextra_cmd_parm == NULL) { kfree(ph2c); res = _FAIL; @@ -1010,13 +1010,13 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time) u8 res = _SUCCESS; - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (ph2c == NULL) { res = _FAIL; goto exit; } - pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); + pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC); if (pdrvextra_cmd_parm == NULL) { kfree(ph2c); res = _FAIL; @@ -1088,13 +1088,13 @@ u8 rtw_ps_cmd(struct adapter *padapter) u8 res = _SUCCESS; - ppscmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + ppscmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (ppscmd == NULL) { res = _FAIL; goto exit; } - pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); + pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC); if (pdrvextra_cmd_parm == NULL) { kfree(ppscmd); res = _FAIL; diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c index 5ba5099ec20d..70b1bc3e0e63 100644 --- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c @@ -4241,12 +4241,12 @@ void report_survey_event(struct adapter *padapter, pcmdpriv = &padapter->cmdpriv; - pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (pcmd_obj == NULL) return; cmdsz = (sizeof(struct survey_event) + sizeof(struct C2HEvent_Header)); - pevtcmd = kzalloc(cmdsz, GFP_KERNEL); + pevtcmd = kzalloc(cmdsz, GFP_ATOMIC); if (pevtcmd == NULL) { kfree(pcmd_obj); return; @@ -4339,12 +4339,12 @@ void report_join_res(struct adapter *padapter, int res) struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (pcmd_obj == NULL) return; cmdsz = (sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header)); - pevtcmd = kzalloc(cmdsz, GFP_KERNEL); + pevtcmd = kzalloc(cmdsz, GFP_ATOMIC); if (pevtcmd == NULL) { kfree(pcmd_obj); return; @@ -4854,11 +4854,11 @@ void survey_timer_hdl(void *function_context) pmlmeext->scan_abort = false;/* reset */ } - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (ph2c == NULL) goto exit_survey_timer_hdl; - psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_KERNEL); + psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC); if (psurveyPara == NULL) { kfree(ph2c); goto exit_survey_timer_hdl; diff --git 
a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c index 33ccbbbd8ed6..d300369977fa 100644 --- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c +++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c @@ -935,7 +935,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len) return true; } - bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_KERNEL); + bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_ATOMIC); subtype = GetFrameSubType(pframe) >> 4; diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index 407a318b09db..2f87150a21b7 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -47,6 +47,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = { {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */ {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ + {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ {} /* Terminating entry */ }; diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index b19e4329ba00..73e58d22e325 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -3491,7 +3491,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, len = sprintf(buf, "TargetAddress=" "%s:%hu,%hu", inaddr_any ? conn->local_ip : np->np_ip, - inaddr_any ? conn->local_port : np->np_port, + np->np_port, tpg->tpgt); len += 1; diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 8c60a1a1ae8d..9f93b8234095 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -2738,7 +2738,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; struct t10_reservation *pr_tmpl = &dev->t10_pr; u32 pr_res_mapped_lun = 0; - int all_reg = 0, calling_it_nexus = 0, released_regs = 0; + int all_reg = 0, calling_it_nexus = 0; + bool sa_res_key_unmatched = sa_res_key != 0; int prh_type = 0, prh_scope = 0; if (!se_sess) @@ -2813,6 +2814,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, if (!all_reg) { if (pr_reg->pr_res_key != sa_res_key) continue; + sa_res_key_unmatched = false; calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; pr_reg_nacl = pr_reg->pr_reg_nacl; @@ -2820,7 +2822,6 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, __core_scsi3_free_registration(dev, pr_reg, (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL, calling_it_nexus); - released_regs++; } else { /* * Case for any existing all registrants type @@ -2838,6 +2839,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, if ((sa_res_key) && (pr_reg->pr_res_key != sa_res_key)) continue; + sa_res_key_unmatched = false; calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; if (calling_it_nexus) @@ -2848,7 +2850,6 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, __core_scsi3_free_registration(dev, pr_reg, (preempt_type == PREEMPT_AND_ABORT) ? 
&preempt_and_abort_list : NULL, 0); - released_regs++; } if (!calling_it_nexus) core_scsi3_ua_allocate(pr_reg_nacl, @@ -2863,7 +2864,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, * registered reservation key, then the device server shall * complete the command with RESERVATION CONFLICT status. */ - if (!released_regs) { + if (sa_res_key_unmatched) { spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg_n); return TCM_RESERVATION_CONFLICT; diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 9ea0d5f03f7a..be877bf6f730 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2292,7 +2292,7 @@ transport_generic_new_cmd(struct se_cmd *cmd) * and let it call back once the write buffers are ready. */ target_add_to_state_list(cmd); - if (cmd->data_direction != DMA_TO_DEVICE) { + if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { target_execute_cmd(cmd); return 0; } diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 1ab0018271c5..ad09e51ffae4 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -50,15 +50,14 @@ struct cpufreq_cooling_device { unsigned int cpufreq_state; unsigned int cpufreq_val; struct cpumask allowed_cpus; + struct list_head node; }; static DEFINE_IDR(cpufreq_idr); static DEFINE_MUTEX(cooling_cpufreq_lock); static unsigned int cpufreq_dev_count; -/* notify_table passes value to the CPUFREQ_ADJUST callback function. */ -#define NOTIFY_INVALID NULL -static struct cpufreq_cooling_device *notify_device; +static LIST_HEAD(cpufreq_dev_list); /** * get_idr - function to get a unique id. @@ -287,15 +286,12 @@ static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device, cpufreq_device->cpufreq_state = cooling_state; cpufreq_device->cpufreq_val = clip_freq; - notify_device = cpufreq_device; for_each_cpu(cpuid, mask) { if (is_cpufreq_valid(cpuid)) cpufreq_update_policy(cpuid); } - notify_device = NOTIFY_INVALID; - return 0; } @@ -316,21 +312,28 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb, { struct cpufreq_policy *policy = data; unsigned long max_freq = 0; + struct cpufreq_cooling_device *cpufreq_dev; - if (event != CPUFREQ_ADJUST || notify_device == NOTIFY_INVALID) + if (event != CPUFREQ_ADJUST) return 0; - if (cpumask_test_cpu(policy->cpu, ¬ify_device->allowed_cpus)) - max_freq = notify_device->cpufreq_val; - else - return 0; + mutex_lock(&cooling_cpufreq_lock); + list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { + if (!cpumask_test_cpu(policy->cpu, + &cpufreq_dev->allowed_cpus)) + continue; + + if (!cpufreq_dev->cpufreq_val) + cpufreq_dev->cpufreq_val = get_cpu_frequency( + cpumask_any(&cpufreq_dev->allowed_cpus), + cpufreq_dev->cpufreq_state); - /* Never exceed user_policy.max */ - if (max_freq > policy->user_policy.max) - max_freq = policy->user_policy.max; + max_freq = cpufreq_dev->cpufreq_val; - if (policy->max != max_freq) - cpufreq_verify_within_limits(policy, 0, max_freq); + if (policy->max != max_freq) + cpufreq_verify_within_limits(policy, 0, max_freq); + } + mutex_unlock(&cooling_cpufreq_lock); return 0; } @@ -486,6 +489,7 @@ __cpufreq_cooling_register(struct device_node *np, cpufreq_register_notifier(&thermal_cpufreq_notifier_block, CPUFREQ_POLICY_NOTIFIER); cpufreq_dev_count++; + list_add(&cpufreq_dev->node, &cpufreq_dev_list); mutex_unlock(&cooling_cpufreq_lock); @@ -549,6 +553,7 @@ void 
cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) cpufreq_dev = cdev->devdata; mutex_lock(&cooling_cpufreq_lock); + list_del(&cpufreq_dev->node); cpufreq_dev_count--; /* Unregister the notifier for the last cpufreq cooling device */ diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c index 3f5ad25ddca8..b6be572704a4 100644 --- a/drivers/thermal/samsung/exynos_thermal_common.c +++ b/drivers/thermal/samsung/exynos_thermal_common.c @@ -417,13 +417,10 @@ void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf) th_zone = sensor_conf->pzone_data; - if (th_zone->therm_dev) - thermal_zone_device_unregister(th_zone->therm_dev); + thermal_zone_device_unregister(th_zone->therm_dev); - for (i = 0; i < th_zone->cool_dev_size; i++) { - if (th_zone->cool_dev[i]) - cpufreq_cooling_unregister(th_zone->cool_dev[i]); - } + for (i = 0; i < th_zone->cool_dev_size; ++i) + cpufreq_cooling_unregister(th_zone->cool_dev[i]); dev_info(sensor_conf->dev, "Exynos: Kernel Thermal management unregistered\n"); diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c index 90163b384660..d1ec5804c0bb 100644 --- a/drivers/thermal/st/st_thermal.c +++ b/drivers/thermal/st/st_thermal.c @@ -275,6 +275,7 @@ int st_thermal_unregister(struct platform_device *pdev) } EXPORT_SYMBOL_GPL(st_thermal_unregister); +#ifdef CONFIG_PM_SLEEP static int st_thermal_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -305,6 +306,8 @@ static int st_thermal_resume(struct device *dev) return 0; } +#endif + SIMPLE_DEV_PM_OPS(st_thermal_pm_ops, st_thermal_suspend, st_thermal_resume); EXPORT_SYMBOL_GPL(st_thermal_pm_ops); diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c index 56982da4a9e9..bf355050eab6 100644 --- a/drivers/tty/serial/of_serial.c +++ b/drivers/tty/serial/of_serial.c @@ -240,32 +240,6 @@ static int of_platform_serial_remove(struct platform_device *ofdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int of_serial_suspend(struct device *dev) -{ - struct of_serial_info *info = dev_get_drvdata(dev); - - serial8250_suspend_port(info->line); - if (info->clk) - clk_disable_unprepare(info->clk); - - return 0; -} - -static int of_serial_resume(struct device *dev) -{ - struct of_serial_info *info = dev_get_drvdata(dev); - - if (info->clk) - clk_prepare_enable(info->clk); - - serial8250_resume_port(info->line); - - return 0; -} -#endif -static SIMPLE_DEV_PM_OPS(of_serial_pm_ops, of_serial_suspend, of_serial_resume); - /* * A few common types, add more as needed. 
*/ @@ -297,7 +271,6 @@ static struct platform_driver of_platform_serial_driver = { .name = "of_serial", .owner = THIS_MODULE, .of_match_table = of_platform_serial_table, - .pm = &of_serial_pm_ops, }, .probe = of_platform_serial_probe, .remove = of_platform_serial_remove, diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 39b4081b632d..96fafed92b76 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* Creative SB Audigy 2 NX */ { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Microsoft Wireless Laser Mouse 6000 Receiver */ + { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Microsoft LifeCam-VX700 v2.0 */ { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 711b23019d54..df38e7ef4976 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -791,6 +791,10 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc, trb = dwc->ep0_trb; + r = next_request(&ep0->request_list); + if (!r) + return; + status = DWC3_TRB_SIZE_TRBSTS(trb->size); if (status == DWC3_TRBSTS_SETUP_PENDING) { dwc3_trace(trace_dwc3_ep0, "Setup Pending received"); @@ -801,10 +805,6 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc, return; } - r = next_request(&ep0->request_list); - if (!r) - return; - ur = &r->request; length = trb->size & DWC3_TRB_SIZE_MASK; diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 696160d48ae8..388cfd83b6b6 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -22,7 +22,6 @@ #include <linux/slab.h> -#include <linux/device.h> #include <asm/unaligned.h> #include "xhci.h" @@ -1149,9 +1148,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd) * including the USB 3.0 roothub, but only if CONFIG_PM_RUNTIME * is enabled, so also enable remote wake here. */ - if (hcd->self.root_hub->do_remote_wakeup - && device_may_wakeup(hcd->self.controller)) { - + if (hcd->self.root_hub->do_remote_wakeup) { if (t1 & PORT_CONNECT) { t2 |= PORT_WKOC_E | PORT_WKDISC_E; t2 &= ~PORT_WKCONN_E; diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 9a69b1f1b300..142b601f9563 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -281,7 +281,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) if (xhci->quirks & XHCI_COMP_MODE_QUIRK) pdev->no_d3cold = true; - return xhci_suspend(xhci); + return xhci_suspend(xhci, do_wakeup); } static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 3d78b0cd674b..646300cbe5f7 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -204,7 +204,15 @@ static int xhci_plat_suspend(struct device *dev) struct usb_hcd *hcd = dev_get_drvdata(dev); struct xhci_hcd *xhci = hcd_to_xhci(hcd); - return xhci_suspend(xhci); + /* + * xhci_suspend() needs `do_wakeup` to know whether host is allowed + * to do wakeup during suspend. Since xhci_plat_suspend is currently + * only designed for system suspend, device_may_wakeup() is enough + * to determine whether host is allowed to do wakeup. Need to + * reconsider this when xhci_plat_suspend enlarges its scope, e.g., + * also applies to runtime suspend.
+ */ + return xhci_suspend(xhci, device_may_wakeup(dev)); } static int xhci_plat_resume(struct device *dev) diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index bc6fcbc16f61..06433aec81d7 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1067,9 +1067,8 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, false); xhci_ring_cmd_db(xhci); } else { - /* Clear our internal halted state and restart the ring(s) */ + /* Clear our internal halted state */ xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED; - ring_doorbell_for_active_rings(xhci, slot_id, ep_index); } } @@ -1823,22 +1822,13 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, ep->stopped_td = td; return 0; } else { - if (trb_comp_code == COMP_STALL) { - /* The transfer is completed from the driver's - * perspective, but we need to issue a set dequeue - * command for this stalled endpoint to move the dequeue - * pointer past the TD. We can't do that here because - * the halt condition must be cleared first. Let the - * USB class driver clear the stall later. - */ - ep->stopped_td = td; - ep->stopped_stream = ep_ring->stream_id; - } else if (xhci_requires_manual_halt_cleanup(xhci, - ep_ctx, trb_comp_code)) { - /* Other types of errors halt the endpoint, but the - * class driver doesn't call usb_reset_endpoint() unless - * the error is -EPIPE. Clear the halted status in the - * xHCI hardware manually. + if (trb_comp_code == COMP_STALL || + xhci_requires_manual_halt_cleanup(xhci, ep_ctx, + trb_comp_code)) { + /* Issue a reset endpoint command to clear the host side + * halt, followed by a set dequeue command to move the + * dequeue pointer past the TD. + * The class driver clears the device side halt later. */ xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, ep_ring->stream_id, @@ -1958,9 +1948,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, else td->urb->actual_length = 0; - xhci_cleanup_halted_endpoint(xhci, - slot_id, ep_index, 0, td, event_trb); - return finish_td(xhci, td, event_trb, event, ep, status, true); + return finish_td(xhci, td, event_trb, event, ep, status, false); } /* * Did we transfer any data, despite the errors that might have @@ -2519,17 +2507,8 @@ cleanup: if (ret) { urb = td->urb; urb_priv = urb->hcpriv; - /* Leave the TD around for the reset endpoint function - * to use(but only if it's not a control endpoint, - * since we already queued the Set TR dequeue pointer - * command for stalled control endpoints). 
- */ - if (usb_endpoint_xfer_control(&urb->ep->desc) || - (trb_comp_code != COMP_STALL && - trb_comp_code != COMP_BABBLE)) - xhci_urb_free_priv(xhci, urb_priv); - else - kfree(urb_priv); + + xhci_urb_free_priv(xhci, urb_priv); usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); if ((urb->actual_length != urb->transfer_buffer_length && diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 2a5d45b4cb15..033b46c470bd 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -35,6 +35,8 @@ #define DRIVER_AUTHOR "Sarah Sharp" #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" +#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E) + /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */ static int link_quirk; module_param(link_quirk, int, S_IRUGO | S_IWUSR); @@ -851,13 +853,47 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci) xhci_set_cmd_ring_deq(xhci); } +static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) +{ + int port_index; + __le32 __iomem **port_array; + unsigned long flags; + u32 t1, t2; + + spin_lock_irqsave(&xhci->lock, flags); + + /* disable usb3 ports Wake bits */ + port_index = xhci->num_usb3_ports; + port_array = xhci->usb3_ports; + while (port_index--) { + t1 = readl(port_array[port_index]); + t1 = xhci_port_state_to_neutral(t1); + t2 = t1 & ~PORT_WAKE_BITS; + if (t1 != t2) + writel(t2, port_array[port_index]); + } + + /* disable usb2 ports Wake bits */ + port_index = xhci->num_usb2_ports; + port_array = xhci->usb2_ports; + while (port_index--) { + t1 = readl(port_array[port_index]); + t1 = xhci_port_state_to_neutral(t1); + t2 = t1 & ~PORT_WAKE_BITS; + if (t1 != t2) + writel(t2, port_array[port_index]); + } + + spin_unlock_irqrestore(&xhci->lock, flags); +} + /* * Stop HC (not bus-specific) * * This is called when the machine transition into S3/S4 mode. * */ -int xhci_suspend(struct xhci_hcd *xhci) +int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) { int rc = 0; unsigned int delay = XHCI_MAX_HALT_USEC; @@ -868,6 +904,10 @@ int xhci_suspend(struct xhci_hcd *xhci) xhci->shared_hcd->state != HC_STATE_SUSPENDED) return -EINVAL; + /* Clear root port wake on bits if wakeup not allowed. */ + if (!do_wakeup) + xhci_disable_port_wake_on_bits(xhci); + /* Don't poll the roothubs on bus suspend. */ xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); @@ -2912,68 +2952,33 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, } } -/* Deal with stalled endpoints. The core should have sent the control message - * to clear the halt condition. However, we need to make the xHCI hardware - * reset its sequence number, since a device will expect a sequence number of - * zero after the halt condition is cleared. +/* Called when clearing a halted device. The core should have sent the control + * message to clear the device halt condition. The host side of the halt should + * already be cleared with a reset endpoint command issued when the STALL tx + * event was received.
+ * + * Context: in_interrupt */ + void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep) { struct xhci_hcd *xhci; - struct usb_device *udev; - unsigned int ep_index; - unsigned long flags; - int ret; - struct xhci_virt_ep *virt_ep; - struct xhci_command *command; xhci = hcd_to_xhci(hcd); - udev = (struct usb_device *) ep->hcpriv; - /* Called with a root hub endpoint (or an endpoint that wasn't added - * with xhci_add_endpoint() - */ - if (!ep->hcpriv) - return; - ep_index = xhci_get_endpoint_index(&ep->desc); - virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index]; - if (!virt_ep->stopped_td) { - xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, - "Endpoint 0x%x not halted, refusing to reset.", - ep->desc.bEndpointAddress); - return; - } - if (usb_endpoint_xfer_control(&ep->desc)) { - xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, - "Control endpoint stall already handled."); - return; - } - command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); - if (!command) - return; - - xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, - "Queueing reset endpoint command"); - spin_lock_irqsave(&xhci->lock, flags); - ret = xhci_queue_reset_ep(xhci, command, udev->slot_id, ep_index); /* - * Can't change the ring dequeue pointer until it's transitioned to the - * stopped state, which is only upon a successful reset endpoint - * command. Better hope that last command worked! + * We might need to implement the config ep cmd in xhci 4.8.1 note: + * The Reset Endpoint Command may only be issued to endpoints in the + * Halted state. If software wishes to reset the Data Toggle or Sequence + * Number of an endpoint that isn't in the Halted state, then software + * may issue a Configure Endpoint Command with the Drop and Add bits set + * for the target endpoint that is in the Stopped state.
*/ - if (!ret) { - xhci_cleanup_stalled_ring(xhci, udev, ep_index); - kfree(virt_ep->stopped_td); - xhci_ring_cmd_db(xhci); - } - virt_ep->stopped_td = NULL; - virt_ep->stopped_stream = 0; - spin_unlock_irqrestore(&xhci->lock, flags); - if (ret) - xhci_warn(xhci, "FIXME allocate a new ring segment\n"); + /* For now just print debug to follow the situation */ + xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n", + ep->desc.bEndpointAddress); } static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index df76d642e719..d745715a1e2f 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1746,7 +1746,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); void xhci_init_driver(struct hc_driver *drv, int (*setup_fn)(struct usb_hcd *)); #ifdef CONFIG_PM -int xhci_suspend(struct xhci_hcd *xhci); +int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup); int xhci_resume(struct xhci_hcd *xhci, bool hibernated); #else #define xhci_suspend NULL diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index cfd009dc4018..6c4eb3cf5efd 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ + { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 0dad8ce5a609..1ebb351b9e9a 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -470,6 +470,39 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FF_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_4701_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9300_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9301_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9302_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9303_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9304_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9305_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9306_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9307_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9308_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9309_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930A_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930B_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930C_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930D_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930E_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930F_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9310_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9311_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9312_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9313_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9314_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9315_PID) }, + { USB_DEVICE(MTXORB_VID, 
MTXORB_FTDI_RANGE_9316_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9317_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9318_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9319_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931A_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931B_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931C_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931D_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931E_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931F_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 6786b705ccf6..e52409c9be99 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -926,8 +926,8 @@ #define BAYER_CONTOUR_CABLE_PID 0x6001 /* - * The following are the values for the Matrix Orbital FTDI Range - * Anything in this range will use an FT232RL. + * Matrix Orbital Intelligent USB displays. + * http://www.matrixorbital.com */ #define MTXORB_VID 0x1B3D #define MTXORB_FTDI_RANGE_0100_PID 0x0100 @@ -1186,8 +1186,39 @@ #define MTXORB_FTDI_RANGE_01FD_PID 0x01FD #define MTXORB_FTDI_RANGE_01FE_PID 0x01FE #define MTXORB_FTDI_RANGE_01FF_PID 0x01FF - - +#define MTXORB_FTDI_RANGE_4701_PID 0x4701 +#define MTXORB_FTDI_RANGE_9300_PID 0x9300 +#define MTXORB_FTDI_RANGE_9301_PID 0x9301 +#define MTXORB_FTDI_RANGE_9302_PID 0x9302 +#define MTXORB_FTDI_RANGE_9303_PID 0x9303 +#define MTXORB_FTDI_RANGE_9304_PID 0x9304 +#define MTXORB_FTDI_RANGE_9305_PID 0x9305 +#define MTXORB_FTDI_RANGE_9306_PID 0x9306 +#define MTXORB_FTDI_RANGE_9307_PID 0x9307 +#define MTXORB_FTDI_RANGE_9308_PID 0x9308 +#define MTXORB_FTDI_RANGE_9309_PID 0x9309 +#define MTXORB_FTDI_RANGE_930A_PID 0x930A +#define MTXORB_FTDI_RANGE_930B_PID 0x930B +#define MTXORB_FTDI_RANGE_930C_PID 0x930C +#define MTXORB_FTDI_RANGE_930D_PID 0x930D +#define MTXORB_FTDI_RANGE_930E_PID 0x930E +#define MTXORB_FTDI_RANGE_930F_PID 0x930F +#define MTXORB_FTDI_RANGE_9310_PID 0x9310 +#define MTXORB_FTDI_RANGE_9311_PID 0x9311 +#define MTXORB_FTDI_RANGE_9312_PID 0x9312 +#define MTXORB_FTDI_RANGE_9313_PID 0x9313 +#define MTXORB_FTDI_RANGE_9314_PID 0x9314 +#define MTXORB_FTDI_RANGE_9315_PID 0x9315 +#define MTXORB_FTDI_RANGE_9316_PID 0x9316 +#define MTXORB_FTDI_RANGE_9317_PID 0x9317 +#define MTXORB_FTDI_RANGE_9318_PID 0x9318 +#define MTXORB_FTDI_RANGE_9319_PID 0x9319 +#define MTXORB_FTDI_RANGE_931A_PID 0x931A +#define MTXORB_FTDI_RANGE_931B_PID 0x931B +#define MTXORB_FTDI_RANGE_931C_PID 0x931C +#define MTXORB_FTDI_RANGE_931D_PID 0x931D +#define MTXORB_FTDI_RANGE_931E_PID 0x931E +#define MTXORB_FTDI_RANGE_931F_PID 0x931F /* * The Mobility Lab (TML) diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index 93cb7cebda62..077c714f1285 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -311,24 +311,30 @@ static void usa26_indat_callback(struct urb *urb) if ((data[0] & 0x80) == 0) { /* no errors on individual bytes, only possible overrun err */ - if (data[0] & RXERROR_OVERRUN) - err = TTY_OVERRUN; - else - err = 0; + if (data[0] & RXERROR_OVERRUN) { + tty_insert_flip_char(&port->port, 0, + TTY_OVERRUN); + } for (i = 1; i < urb->actual_length ; ++i) - tty_insert_flip_char(&port->port, data[i], err); + tty_insert_flip_char(&port->port, data[i], + TTY_NORMAL); } else { /* some bytes had errors, every byte has status */ 
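/*
 * Illustrative sketch only, not part of this patch: the keyspan hunks above
 * and below all converge on the same per-byte policy, shown here as a
 * hypothetical standalone helper (assuming the driver's RXERROR_* status
 * bits and the TTY_* flags from <linux/tty.h>).  An overrun has no data
 * byte of its own, so it is reported out of band as a NUL with TTY_OVERRUN;
 * every real byte then carries exactly one flag, with parity taking
 * precedence over framing.
 */
static void example_push_rx_byte(struct tty_port *port, int stat, u8 ch)
{
	int flag = TTY_NORMAL;

	if (stat & RXERROR_OVERRUN)
		tty_insert_flip_char(port, 0, TTY_OVERRUN);
	/* XXX a break condition (0x10) would be handled here as well */
	if (stat & RXERROR_PARITY)
		flag = TTY_PARITY;
	else if (stat & RXERROR_FRAMING)
		flag = TTY_FRAME;
	tty_insert_flip_char(port, ch, flag);
}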
dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__); for (i = 0; i + 1 < urb->actual_length; i += 2) { - int stat = data[i], flag = 0; - if (stat & RXERROR_OVERRUN) - flag |= TTY_OVERRUN; - if (stat & RXERROR_FRAMING) - flag |= TTY_FRAME; - if (stat & RXERROR_PARITY) - flag |= TTY_PARITY; + int stat = data[i]; + int flag = TTY_NORMAL; + + if (stat & RXERROR_OVERRUN) { + tty_insert_flip_char(&port->port, 0, + TTY_OVERRUN); + } /* XXX should handle break (0x10) */ + if (stat & RXERROR_PARITY) + flag = TTY_PARITY; + else if (stat & RXERROR_FRAMING) + flag = TTY_FRAME; + tty_insert_flip_char(&port->port, data[i+1], flag); } @@ -649,14 +655,19 @@ static void usa49_indat_callback(struct urb *urb) } else { /* some bytes had errors, every byte has status */ for (i = 0; i + 1 < urb->actual_length; i += 2) { - int stat = data[i], flag = 0; - if (stat & RXERROR_OVERRUN) - flag |= TTY_OVERRUN; - if (stat & RXERROR_FRAMING) - flag |= TTY_FRAME; - if (stat & RXERROR_PARITY) - flag |= TTY_PARITY; + int stat = data[i]; + int flag = TTY_NORMAL; + + if (stat & RXERROR_OVERRUN) { + tty_insert_flip_char(&port->port, 0, + TTY_OVERRUN); + } /* XXX should handle break (0x10) */ + if (stat & RXERROR_PARITY) + flag = TTY_PARITY; + else if (stat & RXERROR_FRAMING) + flag = TTY_FRAME; + tty_insert_flip_char(&port->port, data[i+1], flag); } @@ -713,15 +724,19 @@ static void usa49wg_indat_callback(struct urb *urb) */ for (x = 0; x + 1 < len && i + 1 < urb->actual_length; x += 2) { - int stat = data[i], flag = 0; + int stat = data[i]; + int flag = TTY_NORMAL; - if (stat & RXERROR_OVERRUN) - flag |= TTY_OVERRUN; - if (stat & RXERROR_FRAMING) - flag |= TTY_FRAME; - if (stat & RXERROR_PARITY) - flag |= TTY_PARITY; + if (stat & RXERROR_OVERRUN) { + tty_insert_flip_char(&port->port, 0, + TTY_OVERRUN); + } /* XXX should handle break (0x10) */ + if (stat & RXERROR_PARITY) + flag = TTY_PARITY; + else if (stat & RXERROR_FRAMING) + flag = TTY_FRAME; + tty_insert_flip_char(&port->port, data[i+1], flag); i += 2; @@ -773,25 +788,31 @@ static void usa90_indat_callback(struct urb *urb) if ((data[0] & 0x80) == 0) { /* no errors on individual bytes, only possible overrun err*/ - if (data[0] & RXERROR_OVERRUN) - err = TTY_OVERRUN; - else - err = 0; + if (data[0] & RXERROR_OVERRUN) { + tty_insert_flip_char(&port->port, 0, + TTY_OVERRUN); + } for (i = 1; i < urb->actual_length ; ++i) tty_insert_flip_char(&port->port, - data[i], err); + data[i], TTY_NORMAL); } else { /* some bytes had errors, every byte has status */ dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__); for (i = 0; i + 1 < urb->actual_length; i += 2) { - int stat = data[i], flag = 0; - if (stat & RXERROR_OVERRUN) - flag |= TTY_OVERRUN; - if (stat & RXERROR_FRAMING) - flag |= TTY_FRAME; - if (stat & RXERROR_PARITY) - flag |= TTY_PARITY; + int stat = data[i]; + int flag = TTY_NORMAL; + + if (stat & RXERROR_OVERRUN) { + tty_insert_flip_char( + &port->port, 0, + TTY_OVERRUN); + } /* XXX should handle break (0x10) */ + if (stat & RXERROR_PARITY) + flag = TTY_PARITY; + else if (stat & RXERROR_FRAMING) + flag = TTY_FRAME; + tty_insert_flip_char(&port->port, data[i+1], flag); } diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c index a7fe664b6b7d..70a098de429f 100644 --- a/drivers/usb/serial/ssu100.c +++ b/drivers/usb/serial/ssu100.c @@ -490,10 +490,9 @@ static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr, if (*tty_flag == TTY_NORMAL) *tty_flag = TTY_FRAME; } - if (lsr & UART_LSR_OE){ + if (lsr & UART_LSR_OE) { port->icount.overrun++; - if 
(*tty_flag == TTY_NORMAL) - *tty_flag = TTY_OVERRUN; + tty_insert_flip_char(&port->port, 0, TTY_OVERRUN); } } @@ -511,12 +510,8 @@ static void ssu100_process_read_urb(struct urb *urb) if ((len >= 4) && (packet[0] == 0x1b) && (packet[1] == 0x1b) && ((packet[2] == 0x00) || (packet[2] == 0x01))) { - if (packet[2] == 0x00) { + if (packet[2] == 0x00) ssu100_update_lsr(port, packet[3], &flag); - if (flag == TTY_OVERRUN) - tty_insert_flip_char(&port->port, 0, - TTY_OVERRUN); - } if (packet[2] == 0x01) ssu100_update_msr(port, packet[3]); diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 2fefaf923e4a..18a283d6de1c 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -103,3 +103,10 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, "VL711", USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_ATA_1X), + +/* Reported-by: Hans de Goede <hdegoede@redhat.com> */ +UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999, + "Hitachi", + "External HDD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 69906cacd04f..a17f11850669 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1312,6 +1312,7 @@ static int vhost_scsi_set_endpoint(struct vhost_scsi *vs, struct vhost_scsi_target *t) { + struct se_portal_group *se_tpg; struct tcm_vhost_tport *tv_tport; struct tcm_vhost_tpg *tpg; struct tcm_vhost_tpg **vs_tpg; @@ -1359,6 +1360,21 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, ret = -EEXIST; goto out; } + /* + * In order to ensure individual vhost-scsi configfs + * groups cannot be removed while in use by vhost ioctl, + * go ahead and take an explicit se_tpg->tpg_group.cg_item + * dependency now. + */ + se_tpg = &tpg->se_tpg; + ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, + &se_tpg->tpg_group.cg_item); + if (ret) { + pr_warn("configfs_depend_item() failed: %d\n", ret); + kfree(vs_tpg); + mutex_unlock(&tpg->tv_tpg_mutex); + goto out; + } tpg->tv_tpg_vhost_count++; tpg->vhost_scsi = vs; vs_tpg[tpg->tport_tpgt] = tpg; @@ -1401,6 +1417,7 @@ static int vhost_scsi_clear_endpoint(struct vhost_scsi *vs, struct vhost_scsi_target *t) { + struct se_portal_group *se_tpg; struct tcm_vhost_tport *tv_tport; struct tcm_vhost_tpg *tpg; struct vhost_virtqueue *vq; @@ -1449,6 +1466,13 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, vs->vs_tpg[target] = NULL; match = true; mutex_unlock(&tpg->tv_tpg_mutex); + /* + * Release se_tpg->tpg_group.cg_item configfs dependency now + * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur. + */ + se_tpg = &tpg->se_tpg; + configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, + &se_tpg->tpg_group.cg_item); } if (match) { for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { diff --git a/fs/Makefile b/fs/Makefile index 34a1b9dea6dd..da0bbb456d3f 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -104,7 +104,7 @@ obj-$(CONFIG_QNX6FS_FS) += qnx6/ obj-$(CONFIG_AUTOFS4_FS) += autofs4/ obj-$(CONFIG_ADFS_FS) += adfs/ obj-$(CONFIG_FUSE_FS) += fuse/ -obj-$(CONFIG_OVERLAYFS_FS) += overlayfs/ +obj-$(CONFIG_OVERLAY_FS) += overlayfs/ obj-$(CONFIG_UDF_FS) += udf/ obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/ obj-$(CONFIG_OMFS_FS) += omfs/ @@ -165,6 +165,15 @@ static struct vfsmount *aio_mnt; static const struct file_operations aio_ring_fops; static const struct address_space_operations aio_ctx_aops; +/* Backing dev info for aio fs. 
+ * -no dirty page accounting or writeback happens + */ +static struct backing_dev_info aio_fs_backing_dev_info = { + .name = "aiofs", + .state = 0, + .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY, +}; + static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) { struct qstr this = QSTR_INIT("[aio]", 5); @@ -176,6 +185,7 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) inode->i_mapping->a_ops = &aio_ctx_aops; inode->i_mapping->private_data = ctx; + inode->i_mapping->backing_dev_info = &aio_fs_backing_dev_info; inode->i_size = PAGE_SIZE * nr_pages; path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this); @@ -220,6 +230,9 @@ static int __init aio_setup(void) if (IS_ERR(aio_mnt)) panic("Failed to create aio fs mount."); + if (bdi_init(&aio_fs_backing_dev_info)) + panic("Failed to init aio fs backing dev info."); + kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); @@ -281,11 +294,6 @@ static const struct file_operations aio_ring_fops = { .mmap = aio_ring_mmap, }; -static int aio_set_page_dirty(struct page *page) -{ - return 0; -} - #if IS_ENABLED(CONFIG_MIGRATION) static int aio_migratepage(struct address_space *mapping, struct page *new, struct page *old, enum migrate_mode mode) @@ -357,7 +365,7 @@ out: #endif static const struct address_space_operations aio_ctx_aops = { - .set_page_dirty = aio_set_page_dirty, + .set_page_dirty = __set_page_dirty_no_writeback, #if IS_ENABLED(CONFIG_MIGRATION) .migratepage = aio_migratepage, #endif @@ -412,7 +420,6 @@ static int aio_setup_ring(struct kioctx *ctx) pr_debug("pid(%d) page[%d]->count=%d\n", current->pid, i, page_count(page)); SetPageUptodate(page); - SetPageDirty(page); unlock_page(page); ctx->ring_pages[i] = page; diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index d3220d31d3cb..dcd9be32ac57 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -1011,8 +1011,6 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, bytes = min(bytes, working_bytes); kaddr = kmap_atomic(page_out); memcpy(kaddr + *pg_offset, buf + buf_offset, bytes); - if (*pg_index == (vcnt - 1) && *pg_offset == 0) - memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes); kunmap_atomic(kaddr); flush_dcache_page(page_out); @@ -1054,3 +1052,34 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, return 1; } + +/* + * When uncompressing data, we need to make sure and zero any parts of + * the biovec that were not filled in by the decompression code. pg_index + * and pg_offset indicate the last page and the last offset of that page + * that have been filled in. This will zero everything remaining in the + * biovec. 
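/*
 * Illustrative usage only, not part of this patch (hypothetical caller):
 * when a decompressor stops short, the helper introduced just below zeroes
 * the tail of the last partially filled page and every page after it, so
 * stale data never reaches the page cache.
 */
static void example_finish_decompress(struct bio_vec *bvec, int vcnt,
				      unsigned long last_page,
				      unsigned long last_offset)
{
	/* Zero page last_page from last_offset onward, then all later pages. */
	btrfs_clear_biovec_end(bvec, vcnt, last_page, last_offset);
}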
+ */ +void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt, + unsigned long pg_index, + unsigned long pg_offset) +{ + while (pg_index < vcnt) { + struct page *page = bvec[pg_index].bv_page; + unsigned long off = bvec[pg_index].bv_offset; + unsigned long len = bvec[pg_index].bv_len; + + if (pg_offset < off) + pg_offset = off; + if (pg_offset < off + len) { + unsigned long bytes = off + len - pg_offset; + char *kaddr; + + kaddr = kmap_atomic(page); + memset(kaddr + pg_offset, 0, bytes); + kunmap_atomic(kaddr); + } + pg_index++; + pg_offset = 0; + } +} diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 0c803b4fbf93..d181f70caae0 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -45,7 +45,9 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, unsigned long nr_pages); int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, int mirror_num, unsigned long bio_flags); - +void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt, + unsigned long pg_index, + unsigned long pg_offset); struct btrfs_compress_op { struct list_head *(*alloc_workspace)(void); diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 19bc6162fb8e..150822ee0a0b 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -80,13 +80,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p, { int i; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - /* lockdep really cares that we take all of these spinlocks - * in the right order. If any of the locks in the path are not - * currently blocking, it is going to complain. So, make really - * really sure by forcing the path to blocking before we clear - * the path blocking. - */ if (held) { btrfs_set_lock_blocking_rw(held, held_rw); if (held_rw == BTRFS_WRITE_LOCK) @@ -95,7 +88,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p, held_rw = BTRFS_READ_LOCK_BLOCKING; } btrfs_set_path_blocking(p); -#endif for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) { if (p->nodes[i] && p->locks[i]) { @@ -107,10 +99,8 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p, } } -#ifdef CONFIG_DEBUG_LOCK_ALLOC if (held) btrfs_clear_lock_blocking_rw(held, held_rw); -#endif } /* this also releases the path */ @@ -2893,7 +2883,7 @@ cow_done: } p->locks[level] = BTRFS_WRITE_LOCK; } else { - err = btrfs_try_tree_read_lock(b); + err = btrfs_tree_read_lock_atomic(b); if (!err) { btrfs_set_path_blocking(p); btrfs_tree_read_lock(b); @@ -3025,7 +3015,7 @@ again: } level = btrfs_header_level(b); - err = btrfs_try_tree_read_lock(b); + err = btrfs_tree_read_lock_atomic(b); if (!err) { btrfs_set_path_blocking(p); btrfs_tree_read_lock(b); diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 5665d2149249..f8229ef1b46d 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -128,6 +128,26 @@ again: } /* + * take a spinning read lock. 
+ * returns 1 if we get the read lock and 0 if we don't + * this won't wait for blocking writers + */ +int btrfs_tree_read_lock_atomic(struct extent_buffer *eb) +{ + if (atomic_read(&eb->blocking_writers)) + return 0; + + read_lock(&eb->lock); + if (atomic_read(&eb->blocking_writers)) { + read_unlock(&eb->lock); + return 0; + } + atomic_inc(&eb->read_locks); + atomic_inc(&eb->spinning_readers); + return 1; +} + +/* * returns 1 if we get the read lock and 0 if we don't * this won't wait for blocking writers */ @@ -158,9 +178,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb) atomic_read(&eb->blocking_readers)) return 0; - if (!write_trylock(&eb->lock)) - return 0; - + write_lock(&eb->lock); if (atomic_read(&eb->blocking_writers) || atomic_read(&eb->blocking_readers)) { write_unlock(&eb->lock); diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h index b81e0e9a4894..c44a9d5f5362 100644 --- a/fs/btrfs/locking.h +++ b/fs/btrfs/locking.h @@ -35,6 +35,8 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw); void btrfs_assert_tree_locked(struct extent_buffer *eb); int btrfs_try_tree_read_lock(struct extent_buffer *eb); int btrfs_try_tree_write_lock(struct extent_buffer *eb); +int btrfs_tree_read_lock_atomic(struct extent_buffer *eb); + static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw) { diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index 78285f30909e..617553cdb7d3 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -373,6 +373,8 @@ cont: } done: kunmap(pages_in[page_in_index]); + if (!ret) + btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset); return ret; } @@ -410,10 +412,23 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in, goto out; } + /* + * the caller is already checking against PAGE_SIZE, but lets + * move this check closer to the memcpy/memset + */ + destlen = min_t(unsigned long, destlen, PAGE_SIZE); bytes = min_t(unsigned long, destlen, out_len - start_byte); kaddr = kmap_atomic(dest_page); memcpy(kaddr, workspace->buf + start_byte, bytes); + + /* + * btrfs_getblock is doing a zero on the tail of the page too, + * but this will cover anything missing from the decompressed + * data. 
+ */ + if (bytes < destlen) + memset(kaddr+bytes, 0, destlen-bytes); kunmap_atomic(kaddr); out: return ret; diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 759fa4e2de8f..fb22fd8d8fb8 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -299,6 +299,8 @@ done: zlib_inflateEnd(&workspace->strm); if (data_in) kunmap(pages_in[page_in_index]); + if (!ret) + btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset); return ret; } @@ -310,10 +312,14 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in, struct workspace *workspace = list_entry(ws, struct workspace, list); int ret = 0; int wbits = MAX_WBITS; - unsigned long bytes_left = destlen; + unsigned long bytes_left; unsigned long total_out = 0; + unsigned long pg_offset = 0; char *kaddr; + destlen = min_t(unsigned long, destlen, PAGE_SIZE); + bytes_left = destlen; + workspace->strm.next_in = data_in; workspace->strm.avail_in = srclen; workspace->strm.total_in = 0; @@ -341,7 +347,6 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in, unsigned long buf_start; unsigned long buf_offset; unsigned long bytes; - unsigned long pg_offset = 0; ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH); if (ret != Z_OK && ret != Z_STREAM_END) @@ -384,6 +389,17 @@ next: ret = 0; zlib_inflateEnd(&workspace->strm); + + /* + * this should only happen if zlib returned fewer bytes than we + * expected. btrfs_get_block is responsible for zeroing from the + * end of the inline extent (destlen) to the end of the page + */ + if (pg_offset < destlen) { + kaddr = kmap_atomic(dest_page); + memset(kaddr + pg_offset, 0, destlen - pg_offset); + kunmap_atomic(kaddr); + } return ret; } diff --git a/fs/dcache.c b/fs/dcache.c index 3ffef7f4e5cd..5bc72b07fde2 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -778,6 +778,7 @@ restart: struct dentry *parent = lock_parent(dentry); if (likely(!dentry->d_lockref.count)) { __dentry_kill(dentry); + dput(parent); goto restart; } if (parent) diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index fe839b915116..d67a16f2a45d 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -174,27 +174,6 @@ struct iso9660_options{ * Compute the hash for the isofs name corresponding to the dentry. */ static int -isofs_hash_common(struct qstr *qstr, int ms) -{ - const char *name; - int len; - - len = qstr->len; - name = qstr->name; - if (ms) { - while (len && name[len-1] == '.') - len--; - } - - qstr->hash = full_name_hash(name, len); - - return 0; -} - -/* - * Compute the hash for the isofs name corresponding to the dentry. - */ -static int isofs_hashi_common(struct qstr *qstr, int ms) { const char *name; @@ -263,6 +242,27 @@ isofs_dentry_cmpi(const struct dentry *parent, const struct dentry *dentry, } #ifdef CONFIG_JOLIET +/* + * Compute the hash for the isofs name corresponding to the dentry. 
+ */ +static int +isofs_hash_common(struct qstr *qstr, int ms) +{ + const char *name; + int len; + + len = qstr->len; + name = qstr->name; + if (ms) { + while (len && name[len-1] == '.') + len--; + } + + qstr->hash = full_name_hash(name, len); + + return 0; +} + static int isofs_hash_ms(const struct dentry *dentry, struct qstr *qstr) { diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index ed2b1151b171..7cbdf1b2e4ab 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -774,8 +774,12 @@ static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task) { if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) { rpc_sleep_on(&clp->cl_cb_waitq, task, NULL); - dprintk("%s slot is busy\n", __func__); - return false; + /* Race breaker */ + if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) { + dprintk("%s slot is busy\n", __func__); + return false; + } + rpc_wake_up_queued_task(&clp->cl_cb_waitq, task); } return true; } diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index 747f3b95bd11..33a46a8dfaf7 100644 --- a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -335,12 +335,15 @@ void nfsd_lockd_shutdown(void); (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT) #ifdef CONFIG_NFSD_V4_SECURITY_LABEL -#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \ - (NFSD4_1_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SECURITY_LABEL) +#define NFSD4_2_SECURITY_ATTRS FATTR4_WORD2_SECURITY_LABEL #else -#define NFSD4_2_SUPPORTED_ATTRS_WORD2 0 +#define NFSD4_2_SECURITY_ATTRS 0 #endif +#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \ + (NFSD4_1_SUPPORTED_ATTRS_WORD2 | \ + NFSD4_2_SECURITY_ATTRS) + static inline u32 nfsd_suppattrs0(u32 minorversion) { return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0 diff --git a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig index e60125976873..34355818a2e0 100644 --- a/fs/overlayfs/Kconfig +++ b/fs/overlayfs/Kconfig @@ -1,4 +1,4 @@ -config OVERLAYFS_FS +config OVERLAY_FS tristate "Overlay filesystem support" help An overlay filesystem combines two filesystems - an 'upper' filesystem diff --git a/fs/overlayfs/Makefile b/fs/overlayfs/Makefile index 8f91889480d0..900daed3e91d 100644 --- a/fs/overlayfs/Makefile +++ b/fs/overlayfs/Makefile @@ -2,6 +2,6 @@ # Makefile for the overlay filesystem. # -obj-$(CONFIG_OVERLAYFS_FS) += overlayfs.o +obj-$(CONFIG_OVERLAY_FS) += overlay.o -overlayfs-objs := super.o inode.o dir.o readdir.o copy_up.o +overlay-objs := super.o inode.o dir.o readdir.o copy_up.o diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 15cd91ad9940..8ffc4b980f1b 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -284,8 +284,7 @@ out: return ERR_PTR(err); } -static struct dentry *ovl_check_empty_and_clear(struct dentry *dentry, - enum ovl_path_type type) +static struct dentry *ovl_check_empty_and_clear(struct dentry *dentry) { int err; struct dentry *ret = NULL; @@ -294,8 +293,17 @@ static struct dentry *ovl_check_empty_and_clear(struct dentry *dentry, err = ovl_check_empty_dir(dentry, &list); if (err) ret = ERR_PTR(err); - else if (type == OVL_PATH_MERGE) - ret = ovl_clear_empty(dentry, &list); + else { + /* + * If no upperdentry then skip clearing whiteouts. + * + * Can race with copy-up, since we don't hold the upperdir + * mutex. Doesn't matter, since copy-up can't create a + * non-empty directory from an empty one. 
+ */ + if (ovl_dentry_upper(dentry)) + ret = ovl_clear_empty(dentry, &list); + } ovl_cache_free(&list); @@ -487,8 +495,7 @@ out: return err; } -static int ovl_remove_and_whiteout(struct dentry *dentry, - enum ovl_path_type type, bool is_dir) +static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir) { struct dentry *workdir = ovl_workdir(dentry); struct inode *wdir = workdir->d_inode; @@ -500,7 +507,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, int err; if (is_dir) { - opaquedir = ovl_check_empty_and_clear(dentry, type); + opaquedir = ovl_check_empty_and_clear(dentry); err = PTR_ERR(opaquedir); if (IS_ERR(opaquedir)) goto out; @@ -515,9 +522,10 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, if (IS_ERR(whiteout)) goto out_unlock; - if (type == OVL_PATH_LOWER) { + upper = ovl_dentry_upper(dentry); + if (!upper) { upper = lookup_one_len(dentry->d_name.name, upperdir, - dentry->d_name.len); + dentry->d_name.len); err = PTR_ERR(upper); if (IS_ERR(upper)) goto kill_whiteout; @@ -529,7 +537,6 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, } else { int flags = 0; - upper = ovl_dentry_upper(dentry); if (opaquedir) upper = opaquedir; err = -ESTALE; @@ -648,7 +655,7 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir) cap_raise(override_cred->cap_effective, CAP_CHOWN); old_cred = override_creds(override_cred); - err = ovl_remove_and_whiteout(dentry, type, is_dir); + err = ovl_remove_and_whiteout(dentry, is_dir); revert_creds(old_cred); put_cred(override_cred); @@ -781,7 +788,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old, } if (overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir) { - opaquedir = ovl_check_empty_and_clear(new, new_type); + opaquedir = ovl_check_empty_and_clear(new); err = PTR_ERR(opaquedir); if (IS_ERR(opaquedir)) { opaquedir = NULL; diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index af2d18c9fcee..07d74b24913b 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -235,26 +235,36 @@ out: return err; } +static bool ovl_need_xattr_filter(struct dentry *dentry, + enum ovl_path_type type) +{ + return type == OVL_PATH_UPPER && S_ISDIR(dentry->d_inode->i_mode); +} + ssize_t ovl_getxattr(struct dentry *dentry, const char *name, void *value, size_t size) { - if (ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE && - ovl_is_private_xattr(name)) + struct path realpath; + enum ovl_path_type type = ovl_path_real(dentry, &realpath); + + if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name)) return -ENODATA; - return vfs_getxattr(ovl_dentry_real(dentry), name, value, size); + return vfs_getxattr(realpath.dentry, name, value, size); } ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) { + struct path realpath; + enum ovl_path_type type = ovl_path_real(dentry, &realpath); ssize_t res; int off; - res = vfs_listxattr(ovl_dentry_real(dentry), list, size); + res = vfs_listxattr(realpath.dentry, list, size); if (res <= 0 || size == 0) return res; - if (ovl_path_type(dentry->d_parent) != OVL_PATH_MERGE) + if (!ovl_need_xattr_filter(dentry, type)) return res; /* filter out private xattrs */ @@ -279,17 +289,16 @@ int ovl_removexattr(struct dentry *dentry, const char *name) { int err; struct path realpath; - enum ovl_path_type type; + enum ovl_path_type type = ovl_path_real(dentry, &realpath); err = ovl_want_write(dentry); if (err) goto out; - if (ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE && - ovl_is_private_xattr(name)) + 
err = -ENODATA; + if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name)) goto out_drop_write; - type = ovl_path_real(dentry, &realpath); if (type == OVL_PATH_LOWER) { err = vfs_getxattr(realpath.dentry, name, NULL, 0); if (err < 0) diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 2a7ef4f8e2a6..ab1e3dcbed95 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -274,11 +274,11 @@ static int ovl_dir_mark_whiteouts(struct dentry *dir, return 0; } -static inline int ovl_dir_read_merged(struct path *upperpath, - struct path *lowerpath, - struct list_head *list) +static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list) { int err; + struct path lowerpath; + struct path upperpath; struct ovl_readdir_data rdd = { .ctx.actor = ovl_fill_merge, .list = list, @@ -286,25 +286,28 @@ static inline int ovl_dir_read_merged(struct path *upperpath, .is_merge = false, }; - if (upperpath->dentry) { - err = ovl_dir_read(upperpath, &rdd); + ovl_path_lower(dentry, &lowerpath); + ovl_path_upper(dentry, &upperpath); + + if (upperpath.dentry) { + err = ovl_dir_read(&upperpath, &rdd); if (err) goto out; - if (lowerpath->dentry) { - err = ovl_dir_mark_whiteouts(upperpath->dentry, &rdd); + if (lowerpath.dentry) { + err = ovl_dir_mark_whiteouts(upperpath.dentry, &rdd); if (err) goto out; } } - if (lowerpath->dentry) { + if (lowerpath.dentry) { /* * Insert lowerpath entries before upperpath ones, this allows * offsets to be reasonably constant */ list_add(&rdd.middle, rdd.list); rdd.is_merge = true; - err = ovl_dir_read(lowerpath, &rdd); + err = ovl_dir_read(&lowerpath, &rdd); list_del(&rdd.middle); } out: @@ -329,8 +332,6 @@ static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos) static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry) { int res; - struct path lowerpath; - struct path upperpath; struct ovl_dir_cache *cache; cache = ovl_dir_cache(dentry); @@ -347,10 +348,7 @@ static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry) cache->refcount = 1; INIT_LIST_HEAD(&cache->entries); - ovl_path_lower(dentry, &lowerpath); - ovl_path_upper(dentry, &upperpath); - - res = ovl_dir_read_merged(&upperpath, &lowerpath, &cache->entries); + res = ovl_dir_read_merged(dentry, &cache->entries); if (res) { ovl_cache_free(&cache->entries); kfree(cache); @@ -452,10 +450,10 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end, /* * Need to check if we started out being a lower dir, but got copied up */ - if (!od->is_upper && ovl_path_type(dentry) == OVL_PATH_MERGE) { + if (!od->is_upper && ovl_path_type(dentry) != OVL_PATH_LOWER) { struct inode *inode = file_inode(file); - realfile =lockless_dereference(od->upperfile); + realfile = lockless_dereference(od->upperfile); if (!realfile) { struct path upperpath; @@ -538,14 +536,9 @@ const struct file_operations ovl_dir_operations = { int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list) { int err; - struct path lowerpath; - struct path upperpath; struct ovl_cache_entry *p; - ovl_path_upper(dentry, &upperpath); - ovl_path_lower(dentry, &lowerpath); - - err = ovl_dir_read_merged(&upperpath, &lowerpath, list); + err = ovl_dir_read_merged(dentry, list); if (err) return err; diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 08b704cebfc4..f16d318b71f8 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -24,7 +24,7 @@ MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>"); MODULE_DESCRIPTION("Overlay filesystem"); MODULE_LICENSE("GPL"); -#define 
OVERLAYFS_SUPER_MAGIC 0x794c764f +#define OVERLAYFS_SUPER_MAGIC 0x794c7630 struct ovl_config { char *lowerdir; @@ -84,12 +84,7 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry) static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe) { - struct dentry *upperdentry = ACCESS_ONCE(oe->__upperdentry); - /* - * Make sure to order reads to upperdentry wrt ovl_dentry_update() - */ - smp_read_barrier_depends(); - return upperdentry; + return lockless_dereference(oe->__upperdentry); } void ovl_path_upper(struct dentry *dentry, struct path *path) @@ -462,11 +457,34 @@ static const match_table_t ovl_tokens = { {OPT_ERR, NULL} }; +static char *ovl_next_opt(char **s) +{ + char *sbegin = *s; + char *p; + + if (sbegin == NULL) + return NULL; + + for (p = sbegin; *p; p++) { + if (*p == '\\') { + p++; + if (!*p) + break; + } else if (*p == ',') { + *p = '\0'; + *s = p + 1; + return sbegin; + } + } + *s = NULL; + return sbegin; +} + static int ovl_parse_opt(char *opt, struct ovl_config *config) { char *p; - while ((p = strsep(&opt, ",")) != NULL) { + while ((p = ovl_next_opt(&opt)) != NULL) { int token; substring_t args[MAX_OPT_ARGS]; @@ -554,15 +572,34 @@ out_dput: goto out_unlock; } +static void ovl_unescape(char *s) +{ + char *d = s; + + for (;; s++, d++) { + if (*s == '\\') + s++; + *d = *s; + if (!*s) + break; + } +} + static int ovl_mount_dir(const char *name, struct path *path) { int err; + char *tmp = kstrdup(name, GFP_KERNEL); + + if (!tmp) + return -ENOMEM; - err = kern_path(name, LOOKUP_FOLLOW, path); + ovl_unescape(tmp); + err = kern_path(tmp, LOOKUP_FOLLOW, path); if (err) { - pr_err("overlayfs: failed to resolve '%s': %i\n", name, err); + pr_err("overlayfs: failed to resolve '%s': %i\n", tmp, err); err = -EINVAL; } + kfree(tmp); return err; } @@ -776,11 +813,11 @@ static struct dentry *ovl_mount(struct file_system_type *fs_type, int flags, static struct file_system_type ovl_fs_type = { .owner = THIS_MODULE, - .name = "overlayfs", + .name = "overlay", .mount = ovl_mount, .kill_sb = kill_anon_super, }; -MODULE_ALIAS_FS("overlayfs"); +MODULE_ALIAS_FS("overlay"); static int __init ovl_init(void) { diff --git a/include/dt-bindings/clock/qcom,mmcc-apq8084.h b/include/dt-bindings/clock/qcom,mmcc-apq8084.h index a929f86d0ddd..d72b5b35f15e 100644 --- a/include/dt-bindings/clock/qcom,mmcc-apq8084.h +++ b/include/dt-bindings/clock/qcom,mmcc-apq8084.h @@ -60,7 +60,7 @@ #define ESC1_CLK_SRC 43 #define HDMI_CLK_SRC 44 #define VSYNC_CLK_SRC 45 -#define RBCPR_CLK_SRC 46 +#define MMSS_RBCPR_CLK_SRC 46 #define RBBMTIMER_CLK_SRC 47 #define MAPLE_CLK_SRC 48 #define VDP_CLK_SRC 49 diff --git a/include/linux/bitops.h b/include/linux/bitops.h index be5fd38bd5a0..5d858e02997f 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -18,8 +18,11 @@ * position @h. For example * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. 
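/*
 * Illustrative only, not part of this patch: the rewritten GENMASK()
 * below avoids the undefined full-width shift of the old definition.
 * With the old form, GENMASK(31, 0) expanded to (U32_C(1) << 32) - 1,
 * an undefined shift; the new form yields ~0UL as expected.  A quick
 * compile-time self-check (assumes <linux/bug.h> and <linux/bitops.h>):
 */
static inline void genmask_example(void)
{
	BUILD_BUG_ON(GENMASK(7, 4) != 0xf0UL);
	BUILD_BUG_ON(GENMASK_ULL(39, 21) != 0x000000ffffe00000ULL);
	BUILD_BUG_ON(GENMASK(BITS_PER_LONG - 1, 0) != ~0UL);
}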
*/ -#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l)) -#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l)) +#define GENMASK(h, l) \ + (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) + +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) extern unsigned int __sw_hweight8(unsigned int w); extern unsigned int __sw_hweight16(unsigned int w); diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 6992afc6ba7f..b37ea95bc348 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -99,6 +99,12 @@ inval_skb: return 1; } +static inline bool can_is_canfd_skb(const struct sk_buff *skb) +{ + /* the CAN specific type of skb is identified by its data length */ + return skb->len == CANFD_MTU; +} + /* get data length from can_dlc with sanitized can_dlc */ u8 can_dlc2len(u8 can_dlc); diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index be21af149f11..2839c639f092 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -352,7 +352,6 @@ struct clk_divider { #define CLK_DIVIDER_READ_ONLY BIT(5) extern const struct clk_ops clk_divider_ops; -extern const struct clk_ops clk_divider_ro_ops; struct clk *clk_register_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h index 8bbd7bc1043d..03fa332ad2a8 100644 --- a/include/linux/iio/events.h +++ b/include/linux/iio/events.h @@ -72,7 +72,7 @@ struct iio_event_data { #define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF) -#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF) +#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F) #define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF) diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 0068708161ff..0a21fbefdfbe 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -242,7 +242,7 @@ static inline void in_dev_put(struct in_device *idev) static __inline__ __be32 inet_make_mask(int logmask) { if (logmask) - return htonl(~((1<<(32-logmask))-1)); + return htonl(~((1U<<(32-logmask))-1)); return 0; } diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 8422b4ed6882..b9376cd5a187 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -77,11 +77,6 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu) return kstat_cpu(cpu).irqs_sum; } -/* - * Lock/unlock the current runqueue - to extract task statistics: - */ -extern unsigned long long task_delta_exec(struct task_struct *); - extern void account_user_time(struct task_struct *, cputime_t, cputime_t); extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); extern void account_steal_time(cputime_t); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index ea53b04993f2..a6059bdf7b03 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm); int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); void kvm_vcpu_kick(struct kvm_vcpu *vcpu); -bool kvm_is_mmio_pfn(pfn_t pfn); +bool kvm_is_reserved_pfn(pfn_t pfn); struct kvm_irq_ack_notifier { struct hlist_node link; diff --git a/include/linux/pci.h b/include/linux/pci.h index 5be8db45e368..4c8ac5fcc224 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -331,6 +331,7 @@ 
struct pci_dev { unsigned int is_added:1; unsigned int is_busmaster:1; /* device is busmaster */ unsigned int no_msi:1; /* device may not use msi */ + unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */ unsigned int block_cfg_access:1; /* config space access is blocked */ unsigned int broken_parity_status:1; /* Device generates false positive parity */ unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index d5c89e0dd0e6..51ce60c35f4c 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -133,7 +133,13 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref, /* paired with smp_store_release() in percpu_ref_reinit() */ smp_read_barrier_depends(); - if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC)) + /* + * Theoretically, the following could test just ATOMIC; however, + * then we'd have to mask off DEAD separately as DEAD may be + * visible without ATOMIC if we race with percpu_ref_kill(). DEAD + * implies ATOMIC anyway. Test them together. + */ + if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD)) return false; *percpu_countp = (unsigned long __percpu *)percpu_ptr; diff --git a/include/net/inet_common.h b/include/net/inet_common.h index fe7994c48b75..b2828a06a5a6 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -37,6 +37,8 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int inet_ctl_sock_create(struct sock **sk, unsigned short family, unsigned short type, unsigned char protocol, struct net *net); +int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, + int *addr_len); static inline void inet_ctl_sock_destroy(struct sock *sk) { diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 845c596bf594..3ae969e3acf0 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -396,14 +396,12 @@ struct nft_rule { /** * struct nft_trans - nf_tables object update in transaction * - * @rcu_head: rcu head to defer release of transaction data * @list: used internally * @msg_type: message type * @ctx: transaction context * @data: internal information related to the transaction */ struct nft_trans { - struct rcu_head rcu_head; struct list_head list; int msg_type; struct nft_ctx ctx; diff --git a/include/net/vxlan.h b/include/net/vxlan.h index d5f59f3fc35d..57cccd0052e5 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -8,6 +8,12 @@ #define VNI_HASH_BITS 10 #define VNI_HASH_SIZE (1<<VNI_HASH_BITS) +/* VXLAN protocol header */ +struct vxlanhdr { + __be32 vx_flags; + __be32 vx_vni; +}; + struct vxlan_sock; typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb, __be32 key); @@ -45,6 +51,18 @@ int vxlan_xmit_skb(struct vxlan_sock *vs, __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port, __be32 vni, bool xnet); +static inline bool vxlan_gso_check(struct sk_buff *skb) +{ + if ((skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) && + (skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB) || + (skb_inner_mac_header(skb) - skb_transport_header(skb) != + sizeof(struct udphdr) + sizeof(struct vxlanhdr)))) + return false; + + return true; +} + /* IP header + UDP + VXLAN + Ethernet header */ #define VXLAN_HEADROOM (20 + 8 + 8 + 14) /* IPv6 header + UDP + VXLAN + Ethernet header */ diff --git a/kernel/events/core.c b/kernel/events/core.c index 
2b02c9fda790..1cd5eef1fcdd 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1562,8 +1562,10 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group if (!task) { /* - * Per cpu events are removed via an smp call and - * the removal is always successful. + * Per cpu events are removed via an smp call. The removal can + * fail if the CPU is currently offline, but in that case we + * already called __perf_remove_from_context from + * perf_event_exit_cpu. */ cpu_function_call(event->cpu, __perf_remove_from_context, &re); return; @@ -8117,7 +8119,7 @@ static void perf_pmu_rotate_stop(struct pmu *pmu) static void __perf_event_exit_context(void *__info) { - struct remove_event re = { .detach_group = false }; + struct remove_event re = { .detach_group = true }; struct perf_event_context *ctx = __info; perf_pmu_rotate_stop(ctx->pmu); diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 1d0af8a2c646..ed8f2cde34c5 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1640,7 +1640,6 @@ bool uprobe_deny_signal(void) if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) { utask->state = UTASK_SSTEP_TRAPPED; set_tsk_thread_flag(t, TIF_UPROBE); - set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); } } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 240157c13ddc..24beb9bb4c3e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2475,44 +2475,6 @@ EXPORT_PER_CPU_SYMBOL(kstat); EXPORT_PER_CPU_SYMBOL(kernel_cpustat); /* - * Return any ns on the sched_clock that have not yet been accounted in - * @p in case that task is currently running. - * - * Called with task_rq_lock() held on @rq. - */ -static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) -{ - u64 ns = 0; - - /* - * Must be ->curr _and_ ->on_rq. If dequeued, we would - * project cycles that may never be accounted to this - * thread, breaking clock_gettime(). - */ - if (task_current(rq, p) && task_on_rq_queued(p)) { - update_rq_clock(rq); - ns = rq_clock_task(rq) - p->se.exec_start; - if ((s64)ns < 0) - ns = 0; - } - - return ns; -} - -unsigned long long task_delta_exec(struct task_struct *p) -{ - unsigned long flags; - struct rq *rq; - u64 ns = 0; - - rq = task_rq_lock(p, &flags); - ns = do_task_delta_exec(p, rq); - task_rq_unlock(rq, p, &flags); - - return ns; -} - -/* * Return accounted runtime for the task. * In case the task is currently running, return the runtime plus current's * pending runtime that have not been accounted yet. @@ -2521,7 +2483,7 @@ unsigned long long task_sched_runtime(struct task_struct *p) { unsigned long flags; struct rq *rq; - u64 ns = 0; + u64 ns; #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) /* @@ -2540,7 +2502,16 @@ unsigned long long task_sched_runtime(struct task_struct *p) #endif rq = task_rq_lock(p, &flags); - ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq); + /* + * Must be ->curr _and_ ->on_rq. If dequeued, we would + * project cycles that may never be accounted to this + * thread, breaking clock_gettime(). + */ + if (task_current(rq, p) && task_on_rq_queued(p)) { + update_rq_clock(rq); + p->sched_class->update_curr(rq); + } + ns = p->se.sum_exec_runtime; task_rq_unlock(rq, p, &flags); return ns; @@ -6368,6 +6339,10 @@ static void sched_init_numa(void) if (!sched_debug()) break; } + + if (!level) + return; + /* * 'level' contains the number of unique distances, excluding the * identity distance node_distance(i,i). 
@@ -7444,8 +7419,12 @@ void sched_move_task(struct task_struct *tsk) if (unlikely(running)) put_prev_task(rq, tsk); - tg = container_of(task_css_check(tsk, cpu_cgrp_id, - lockdep_is_held(&tsk->sighand->siglock)), + /* + * All callers are synchronized by task_rq_lock(); we do not use RCU + * which is pointless here. Thus, we pass "true" to task_css_check() + * to prevent lockdep warnings. + */ + tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), struct task_group, css); tg = autogroup_task_group(tsk, tg); tsk->sched_task_group = tg; diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 5285332392d5..28fa9d9e9201 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1701,4 +1701,6 @@ const struct sched_class dl_sched_class = { .prio_changed = prio_changed_dl, .switched_from = switched_from_dl, .switched_to = switched_to_dl, + + .update_curr = update_curr_dl, }; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 34baa60f8a7b..ef2b104b254c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -726,6 +726,11 @@ static void update_curr(struct cfs_rq *cfs_rq) account_cfs_rq_runtime(cfs_rq, delta_exec); } +static void update_curr_fair(struct rq *rq) +{ + update_curr(cfs_rq_of(&rq->curr->se)); +} + static inline void update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) { @@ -1180,6 +1185,13 @@ static void task_numa_compare(struct task_numa_env *env, raw_spin_unlock_irq(&dst_rq->lock); /* + * Because we have preemption enabled we can get migrated around and + * end up trying to select ourselves (current == env->p) as a swap candidate. + */ + if (cur == env->p) + goto unlock; + + /* + * "imp" is the fault differential for the source task between the + * source and destination node. Calculate the total differential for + * the source task and potential destination task.
The more negative @@ -7949,6 +7961,8 @@ const struct sched_class fair_sched_class = { .get_rr_interval = get_rr_interval_fair, + .update_curr = update_curr_fair, + #ifdef CONFIG_FAIR_GROUP_SCHED .task_move_group = task_move_group_fair, #endif diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c index 67ad4e7f506a..c65dac8c97cd 100644 --- a/kernel/sched/idle_task.c +++ b/kernel/sched/idle_task.c @@ -75,6 +75,10 @@ static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task return 0; } +static void update_curr_idle(struct rq *rq) +{ +} + /* * Simple, special scheduling class for the per-CPU idle tasks: */ @@ -101,4 +105,5 @@ const struct sched_class idle_sched_class = { .prio_changed = prio_changed_idle, .switched_to = switched_to_idle, + .update_curr = update_curr_idle, }; diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index d024e6ce30ba..20bca398084a 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2128,6 +2128,8 @@ const struct sched_class rt_sched_class = { .prio_changed = prio_changed_rt, .switched_to = switched_to_rt, + + .update_curr = update_curr_rt, }; #ifdef CONFIG_SCHED_DEBUG diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 24156c8434d1..2df8ef067cc5 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1135,6 +1135,8 @@ struct sched_class { unsigned int (*get_rr_interval) (struct rq *rq, struct task_struct *task); + void (*update_curr) (struct rq *rq); + #ifdef CONFIG_FAIR_GROUP_SCHED void (*task_move_group) (struct task_struct *p, int on_rq); #endif diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index 67426e529f59..79ffec45a6ac 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -102,6 +102,10 @@ get_rr_interval_stop(struct rq *rq, struct task_struct *task) return 0; } +static void update_curr_stop(struct rq *rq) +{ +} + /* * Simple, special scheduling class for the per-CPU stop tasks: */ @@ -128,4 +132,5 @@ const struct sched_class stop_sched_class = { .prio_changed = prio_changed_stop, .switched_to = switched_to_stop, + .update_curr = update_curr_stop, }; diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 492b986195d5..a16b67859e2a 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -553,7 +553,7 @@ static int cpu_timer_sample_group(const clockid_t which_clock, *sample = cputime_to_expires(cputime.utime); break; case CPUCLOCK_SCHED: - *sample = cputime.sum_exec_runtime + task_delta_exec(p); + *sample = cputime.sum_exec_runtime; break; } return 0; diff --git a/lib/Makefile b/lib/Makefile index 7512dc978f18..0211d2bd5e17 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -10,7 +10,7 @@ endif lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o dump_stack.o timerqueue.o\ idr.o int_sqrt.o extable.o \ - sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \ + sha1.o md5.o irq_regs.o argv_split.o \ proportions.o flex_proportions.o ratelimit.o show_mem.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ earlycpio.o @@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ - percpu-refcount.o percpu_ida.o hash.o rhashtable.o + percpu-refcount.o percpu_ida.o hash.o rhashtable.o reciprocal_div.o obj-y += string_helpers.o obj-$(CONFIG_TEST_STRING_HELPERS) 
+= test-string_helpers.o obj-y += kstrtox.o diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 648d79ccf462..c465876c7861 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -813,10 +813,9 @@ static void __br_multicast_send_query(struct net_bridge *br, return; if (port) { - __skb_push(skb, sizeof(struct ethhdr)); skb->dev = port->dev; NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, - dev_queue_xmit); + br_dev_queue_push_xmit); } else { br_multicast_select_own_querier(br, ip, skb); netif_rx(skb); diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 2ff9706647f2..e5ec470b851f 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -280,6 +280,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = { [IFLA_BRPORT_MODE] = { .type = NLA_U8 }, [IFLA_BRPORT_GUARD] = { .type = NLA_U8 }, [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 }, + [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 }, [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, }; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index a6882686ca3a..b9b7dfaf202b 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2685,13 +2685,20 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) int idx = 0; u32 portid = NETLINK_CB(cb->skb).portid; u32 seq = cb->nlh->nlmsg_seq; - struct nlattr *extfilt; u32 filter_mask = 0; - extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg), - IFLA_EXT_MASK); - if (extfilt) - filter_mask = nla_get_u32(extfilt); + if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) { + struct nlattr *extfilt; + + extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg), + IFLA_EXT_MASK); + if (extfilt) { + if (nla_len(extfilt) < sizeof(filter_mask)) + return -EINVAL; + + filter_mask = nla_get_u32(extfilt); + } + } rcu_read_lock(); for_each_netdev_rcu(net, dev) { @@ -2798,6 +2805,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh) if (br_spec) { nla_for_each_nested(attr, br_spec, rem) { if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { + if (nla_len(attr) < sizeof(flags)) + return -EINVAL; + have_flags = true; flags = nla_get_u16(attr); break; @@ -2868,6 +2878,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh) if (br_spec) { nla_for_each_nested(attr, br_spec, rem) { if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { + if (nla_len(attr) < sizeof(flags)) + return -EINVAL; + have_flags = true; flags = nla_get_u16(attr); break; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index c16615bfb61e..32e31c299631 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -552,20 +552,13 @@ static void kfree_skbmem(struct sk_buff *skb) case SKB_FCLONE_CLONE: fclones = container_of(skb, struct sk_buff_fclones, skb2); - /* Warning : We must perform the atomic_dec_and_test() before - * setting skb->fclone back to SKB_FCLONE_FREE, otherwise - * skb_clone() could set clone_ref to 2 before our decrement. - * Anyway, if we are going to free the structure, no need to - * rewrite skb->fclone. + /* The clone portion is available for + * fast-cloning again. */ - if (atomic_dec_and_test(&fclones->fclone_ref)) { + skb->fclone = SKB_FCLONE_FREE; + + if (atomic_dec_and_test(&fclones->fclone_ref)) kmem_cache_free(skbuff_fclone_cache, fclones); - } else { - /* The clone portion is available for - * fast-cloning again. 
- */ - skb->fclone = SKB_FCLONE_FREE; - } break; } } @@ -887,11 +880,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) if (skb->fclone == SKB_FCLONE_ORIG && n->fclone == SKB_FCLONE_FREE) { n->fclone = SKB_FCLONE_CLONE; - /* As our fastclone was free, clone_ref must be 1 at this point. - * We could use atomic_inc() here, but it is faster - * to set the final value. - */ - atomic_set(&fclones->fclone_ref, 2); + atomic_inc(&fclones->fclone_ref); } else { if (skb_pfmemalloc(skb)) gfp_mask |= __GFP_MEMALLOC; diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index ca11d283bbeb..93ea80196f0e 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -1080,13 +1080,13 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (!app) return -EMSGSIZE; - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); list_for_each_entry(itr, &dcb_app_list, list) { if (itr->ifindex == netdev->ifindex) { err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app), &itr->app); if (err) { - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); return -EMSGSIZE; } } @@ -1097,7 +1097,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) else dcbx = -EOPNOTSUPP; - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); nla_nest_end(skb, app); /* get peer info if available */ @@ -1234,7 +1234,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) } /* local app */ - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE); if (!app) goto dcb_unlock; @@ -1271,7 +1271,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) else dcbx = -EOPNOTSUPP; - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); /* features flags */ if (ops->getfeatcfg) { @@ -1326,7 +1326,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) return 0; dcb_unlock: - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); nla_put_failure: return err; } @@ -1762,10 +1762,10 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app) struct dcb_app_type *itr; u8 prio = 0; - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) prio = itr->app.priority; - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); return prio; } @@ -1789,7 +1789,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new) if (dev->dcbnl_ops->getdcbx) event.dcbx = dev->dcbnl_ops->getdcbx(dev); - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); /* Search for existing match and replace */ if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) { if (new->priority) @@ -1804,7 +1804,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new) if (new->priority) err = dcb_app_add(new, dev->ifindex); out: - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); if (!err) call_dcbevent_notifiers(DCB_APP_EVENT, &event); return err; @@ -1823,10 +1823,10 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app) struct dcb_app_type *itr; u8 prio = 0; - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) prio |= 1 << itr->app.priority; - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); return prio; } @@ -1850,7 +1850,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new) if (dev->dcbnl_ops->getdcbx) event.dcbx = dev->dcbnl_ops->getdcbx(dev); - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); /* Search for existing match and abort if found */ if (dcb_app_lookup(new, dev->ifindex, new->priority)) { err = -EEXIST; 
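/*
 * Illustrative only, not part of this patch: the dcb_lock conversions in
 * the surrounding hunks switch to the _bh variants because the lock also
 * appears to be taken from softirq context (e.g. dcb_getapp() lookups on
 * transmit paths); holding it from process context with BHs enabled could
 * deadlock against that on the same CPU.  The general pattern, with
 * hypothetical names:
 */
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_list);

/* Process-context writer: block BHs while holding the lock. */
static void example_add(struct list_head *node)
{
	spin_lock_bh(&example_lock);
	list_add(node, &example_list);
	spin_unlock_bh(&example_lock);
}

/* Softirq-context reader: plain spin_lock() suffices, since softirqs do not
 * preempt one another on the same CPU. */
static bool example_empty_from_bh(void)
{
	bool empty;

	spin_lock(&example_lock);
	empty = list_empty(&example_list);
	spin_unlock(&example_lock);
	return empty;
}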
@@ -1859,7 +1859,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new) err = dcb_app_add(new, dev->ifindex); out: - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); if (!err) call_dcbevent_notifiers(DCB_APP_EVENT, &event); return err; @@ -1882,7 +1882,7 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del) if (dev->dcbnl_ops->getdcbx) event.dcbx = dev->dcbnl_ops->getdcbx(dev); - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); /* Search for existing match and remove it. */ if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) { list_del(&itr->list); @@ -1890,7 +1890,7 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del) err = 0; } - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); if (!err) call_dcbevent_notifiers(DCB_APP_EVENT, &event); return err; @@ -1902,12 +1902,12 @@ static void dcb_flushapp(void) struct dcb_app_type *app; struct dcb_app_type *tmp; - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); list_for_each_entry_safe(app, tmp, &dcb_app_list, list) { list_del(&app->list); kfree(app); } - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); } static int __init dcbnl_init(void) diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 8b7fe5b03906..e67da4e6c324 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1386,6 +1386,17 @@ out: return pp; } +int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) +{ + if (sk->sk_family == AF_INET) + return ip_recv_error(sk, msg, len, addr_len); +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) + return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len); +#endif + return -EINVAL; +} + static int inet_gro_complete(struct sk_buff *skb, int nhoff) { __be16 newlen = htons(skb->len - nhoff); diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index f2e15738534d..8f7bd56955b0 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -62,6 +62,10 @@ int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res) else res->tclassid = 0; #endif + + if (err == -ESRCH) + err = -ENETUNREACH; + return err; } EXPORT_SYMBOL_GPL(__fib_lookup); diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index fb70e3ecc3e4..bb15d0e03d4f 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -318,9 +318,7 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted) return scount; } -#define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb)) - -static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) +static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu) { struct sk_buff *skb; struct rtable *rt; @@ -330,6 +328,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) struct flowi4 fl4; int hlen = LL_RESERVED_SPACE(dev); int tlen = dev->needed_tailroom; + unsigned int size = mtu; while (1) { skb = alloc_skb(size + hlen + tlen, @@ -341,7 +340,6 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) return NULL; } skb->priority = TC_PRIO_CONTROL; - igmp_skb_size(skb) = size; rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0, 0, 0, @@ -354,6 +352,8 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) skb_dst_set(skb, &rt->dst); skb->dev = dev; + skb->reserved_tailroom = skb_end_offset(skb) - + min(mtu, skb_end_offset(skb)); skb_reserve(skb, hlen); skb_reset_network_header(skb); @@ -423,8 +423,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc, return skb; } -#define AVAILABLE(skb) ((skb) ? 
((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \ - skb_tailroom(skb)) : 0) +#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0) static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted) diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 3e861011e4a3..1a7e979e80ba 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -528,6 +528,7 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = { .validate = vti_tunnel_validate, .newlink = vti_newlink, .changelink = vti_changelink, + .dellink = ip_tunnel_dellink, .get_size = vti_get_size, .fill_info = vti_fill_info, }; diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c index c1023c445920..665de06561cd 100644 --- a/net/ipv4/netfilter/nft_masq_ipv4.c +++ b/net/ipv4/netfilter/nft_masq_ipv4.c @@ -24,6 +24,7 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr, struct nf_nat_range range; unsigned int verdict; + memset(&range, 0, sizeof(range)); range.flags = priv->flags; verdict = nf_nat_masquerade_ipv4(pkt->skb, pkt->ops->hooknum, diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 57f7c9804139..5d740cccf69e 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -217,6 +217,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) &ipv6_hdr(skb)->daddr)) continue; #endif + } else { + continue; } if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) @@ -853,16 +855,8 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (flags & MSG_OOB) goto out; - if (flags & MSG_ERRQUEUE) { - if (family == AF_INET) { - return ip_recv_error(sk, msg, len, addr_len); -#if IS_ENABLED(CONFIG_IPV6) - } else if (family == AF_INET6) { - return pingv6_ops.ipv6_recv_error(sk, msg, len, - addr_len); -#endif - } - } + if (flags & MSG_ERRQUEUE) + return inet_recv_error(sk, msg, len, addr_len); skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 39ec0c379545..38c2bcb8dd5d 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1598,7 +1598,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, u32 urg_hole = 0; if (unlikely(flags & MSG_ERRQUEUE)) - return ip_recv_error(sk, msg, len, addr_len); + return inet_recv_error(sk, msg, len, addr_len); if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) && (sk->sk_state == TCP_ESTABLISHED)) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 88fa2d160685..d107ee246a1d 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5231,7 +5231,7 @@ slow_path: if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) goto csum_error; - if (!th->ack && !th->rst) + if (!th->ack && !th->rst && !th->syn) goto discard; /* @@ -5650,7 +5650,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, goto discard; } - if (!th->ack && !th->rst) + if (!th->ack && !th->rst && !th->syn) goto discard; if (!tcp_validate_incoming(sk, skb, th, 0)) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 9c7d7621466b..147be2024290 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -598,7 +598,10 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) if (th->rst) return; - if (skb_rtable(skb)->rt_type != RTN_LOCAL) + /* If sk not NULL, it means we did a successful lookup and incoming + * route had to be correct. prequeue might have dropped our dst. 
+ */ + if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL) return; /* Swap the send and the receive. */ diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 4564e1fca3eb..0e32d2e1bdbf 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -502,11 +502,11 @@ static int ip6gre_rcv(struct sk_buff *skb) skb->protocol = gre_proto; /* WCCP version 1 and 2 protocol decoding. - * - Change protocol to IP + * - Change protocol to IPv6 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header */ if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) { - skb->protocol = htons(ETH_P_IP); + skb->protocol = htons(ETH_P_IPV6); if ((*(h + offset) & 0xF0) != 0x40) offset += 4; } diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index a071563a7e6e..01e12d0d8fcc 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -69,7 +69,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int nhoff; if (unlikely(skb_shinfo(skb)->gso_type & - ~(SKB_GSO_UDP | + ~(SKB_GSO_TCPV4 | + SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_TCP_ECN | SKB_GSO_GRE | diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c index b04ed72c4542..8db6c98fe218 100644 --- a/net/ipv6/ip6_udp_tunnel.c +++ b/net/ipv6/ip6_udp_tunnel.c @@ -79,15 +79,13 @@ int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst, uh->source = src_port; uh->len = htons(skb->len); - uh->check = 0; memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); skb_dst_set(skb, dst); - udp6_set_csum(udp_get_no_check6_tx(sk), skb, &inet6_sk(sk)->saddr, - &sk->sk_v6_daddr, skb->len); + udp6_set_csum(udp_get_no_check6_tx(sk), skb, saddr, daddr, skb->len); __skb_push(skb, sizeof(*ip6h)); skb_reset_network_header(skb); diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 31089d153fd3..bcda14de7f84 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -905,6 +905,15 @@ static int vti6_newlink(struct net *src_net, struct net_device *dev, return vti6_tnl_create2(dev); } +static void vti6_dellink(struct net_device *dev, struct list_head *head) +{ + struct net *net = dev_net(dev); + struct vti6_net *ip6n = net_generic(net, vti6_net_id); + + if (dev != ip6n->fb_tnl_dev) + unregister_netdevice_queue(dev, head); +} + static int vti6_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { @@ -980,6 +989,7 @@ static struct rtnl_link_ops vti6_link_ops __read_mostly = { .setup = vti6_dev_setup, .validate = vti6_validate, .newlink = vti6_newlink, + .dellink = vti6_dellink, .changelink = vti6_changelink, .get_size = vti6_get_size, .fill_info = vti6_fill_info, @@ -1020,6 +1030,7 @@ static int __net_init vti6_init_net(struct net *net) if (!ip6n->fb_tnl_dev) goto err_alloc_dev; dev_net_set(ip6n->fb_tnl_dev, net); + ip6n->fb_tnl_dev->rtnl_link_ops = &vti6_link_ops; err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev); if (err < 0) diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 0171f08325c3..1a01d79b8698 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1439,6 +1439,10 @@ reg_pernet_fail: void ip6_mr_cleanup(void) { + rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE); +#ifdef CONFIG_IPV6_PIMSM_V2 + inet6_del_protocol(&pim6_protocol, IPPROTO_PIM); +#endif unregister_netdevice_notifier(&ip6_mr_notifier); unregister_pernet_subsys(&ip6mr_net_ops); kmem_cache_destroy(mrt_cachep); diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 9648de2b6745..ed2c4e400b46 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ 
-1550,7 +1550,7 @@ static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb, hdr->daddr = *daddr; } -static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size) +static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu) { struct net_device *dev = idev->dev; struct net *net = dev_net(dev); @@ -1561,13 +1561,13 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size) const struct in6_addr *saddr; int hlen = LL_RESERVED_SPACE(dev); int tlen = dev->needed_tailroom; + unsigned int size = mtu + hlen + tlen; int err; u8 ra[8] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT, 2, 0, 0, IPV6_TLV_PADN, 0 }; /* we assume size > sizeof(ra) here */ - size += hlen + tlen; /* limit our allocations to order-0 page */ size = min_t(int, size, SKB_MAX_ORDER(0, 0)); skb = sock_alloc_send_skb(sk, size, 1, &err); @@ -1576,6 +1576,8 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size) return NULL; skb->priority = TC_PRIO_CONTROL; + skb->reserved_tailroom = skb_end_offset(skb) - + min(mtu, skb_end_offset(skb)); skb_reserve(skb, hlen); if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) { @@ -1690,8 +1692,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, return skb; } -#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \ - skb_tailroom(skb)) : 0) +#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0) static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted, int crsend) diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c index 8a7ac685076d..529c119cbb14 100644 --- a/net/ipv6/netfilter/nft_masq_ipv6.c +++ b/net/ipv6/netfilter/nft_masq_ipv6.c @@ -25,6 +25,7 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr, struct nf_nat_range range; unsigned int verdict; + memset(&range, 0, sizeof(range)); range.flags = priv->flags; verdict = nf_nat_masquerade_ipv6(pkt->skb, &range, pkt->out); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index ace29b60813c..dc495ae2ead0 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -903,7 +903,10 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) if (th->rst) return; - if (!ipv6_unicast_destination(skb)) + /* If sk not NULL, it means we did a successful lookup and incoming + * route had to be correct. prequeue might have dropped our dst. 
+ */ + if (!sk && !ipv6_unicast_destination(skb)) return; #ifdef CONFIG_TCP_MD5SIG diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 91729b807c7d..1b095ca37aa4 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c @@ -1764,6 +1764,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, struct ipxhdr *ipx = NULL; struct sk_buff *skb; int copied, rc; + bool locked = true; lock_sock(sk); /* put the autobinding in */ @@ -1790,6 +1791,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, if (sock_flag(sk, SOCK_ZAPPED)) goto out; + release_sock(sk); + locked = false; skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &rc); if (!skb) { @@ -1826,7 +1829,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, out_free: skb_free_datagram(sk, skb); out: - release_sock(sk); + if (locked) + release_sock(sk); return rc; } diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c index ec24378caaaf..09d9caaec591 100644 --- a/net/mac80211/aes_ccm.c +++ b/net/mac80211/aes_ccm.c @@ -53,6 +53,9 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, __aligned(__alignof__(struct aead_request)); struct aead_request *aead_req = (void *) aead_req_data; + if (data_len == 0) + return -EINVAL; + memset(aead_req, 0, sizeof(aead_req_data)); sg_init_one(&pt, data, data_len); diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index df90ce2db00c..408fd8ab4eef 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -252,19 +252,16 @@ minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u8 index, cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp; cur_prob = mi->groups[cur_group].rates[cur_idx].probability; - tmp_group = tp_list[j - 1] / MCS_GROUP_RATES; - tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES; - tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp; - tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability; - - while (j > 0 && (cur_thr > tmp_thr || - (cur_thr == tmp_thr && cur_prob > tmp_prob))) { - j--; + do { tmp_group = tp_list[j - 1] / MCS_GROUP_RATES; tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES; tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp; tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability; - } + if (cur_thr < tmp_thr || + (cur_thr == tmp_thr && cur_prob <= tmp_prob)) + break; + j--; + } while (j > 0); if (j < MAX_THR_RATES - 1) { memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) * diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 86f9d76b1464..d259da3ce67a 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -1863,6 +1863,12 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len) if (*op < IP_SET_OP_VERSION) { /* Check the version at the beginning of operations */ struct ip_set_req_version *req_version = data; + + if (*len < sizeof(struct ip_set_req_version)) { + ret = -EINVAL; + goto done; + } + if (req_version->version != IPSET_PROTOCOL) { ret = -EPROTO; goto done; diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 437a3663ad03..bd90bf8107da 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -846,6 +846,8 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af, new_skb = skb_realloc_headroom(skb, max_headroom); if (!new_skb) goto error; + if (skb->sk) + skb_set_owner_w(new_skb, skb->sk); consume_skb(skb); skb = new_skb; } diff --git 
a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 11ab4b078f3b..66e8425dbfe7 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -3484,13 +3484,8 @@ static void nft_chain_commit_update(struct nft_trans *trans) } } -/* Schedule objects for release via rcu to make sure no packets are accesing - * removed rules. - */ -static void nf_tables_commit_release_rcu(struct rcu_head *rt) +static void nf_tables_commit_release(struct nft_trans *trans) { - struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head); - switch (trans->msg_type) { case NFT_MSG_DELTABLE: nf_tables_table_destroy(&trans->ctx); @@ -3612,10 +3607,11 @@ static int nf_tables_commit(struct sk_buff *skb) } } + synchronize_rcu(); + list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { list_del(&trans->list); - trans->ctx.nla = NULL; - call_rcu(&trans->rcu_head, nf_tables_commit_release_rcu); + nf_tables_commit_release(trans); } nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); @@ -3623,13 +3619,8 @@ static int nf_tables_commit(struct sk_buff *skb) return 0; } -/* Schedule objects for release via rcu to make sure no packets are accesing - * aborted rules. - */ -static void nf_tables_abort_release_rcu(struct rcu_head *rt) +static void nf_tables_abort_release(struct nft_trans *trans) { - struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head); - switch (trans->msg_type) { case NFT_MSG_NEWTABLE: nf_tables_table_destroy(&trans->ctx); @@ -3725,11 +3716,12 @@ static int nf_tables_abort(struct sk_buff *skb) } } + synchronize_rcu(); + list_for_each_entry_safe_reverse(trans, next, &net->nft.commit_list, list) { list_del(&trans->list); - trans->ctx.nla = NULL; - call_rcu(&trans->rcu_head, nf_tables_abort_release_rcu); + nf_tables_abort_release(trans); } return 0; diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 6c5a915cfa75..13c2e17bbe27 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -47,6 +47,8 @@ static const int nfnl_group2type[NFNLGRP_MAX+1] = { [NFNLGRP_CONNTRACK_EXP_NEW] = NFNL_SUBSYS_CTNETLINK_EXP, [NFNLGRP_CONNTRACK_EXP_UPDATE] = NFNL_SUBSYS_CTNETLINK_EXP, [NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP, + [NFNLGRP_NFTABLES] = NFNL_SUBSYS_NFTABLES, + [NFNLGRP_ACCT_QUOTA] = NFNL_SUBSYS_ACCT, }; void nfnl_lock(__u8 subsys_id) @@ -464,7 +466,12 @@ static void nfnetlink_rcv(struct sk_buff *skb) static int nfnetlink_bind(int group) { const struct nfnetlink_subsystem *ss; - int type = nfnl_group2type[group]; + int type; + + if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX) + return -EINVAL; + + type = nfnl_group2type[group]; rcu_read_lock(); ss = nfnetlink_get_subsys(type); @@ -514,6 +521,9 @@ static int __init nfnetlink_init(void) { int i; + for (i = NFNLGRP_NONE + 1; i <= NFNLGRP_MAX; i++) + BUG_ON(nfnl_group2type[i] == NFNL_SUBSYS_NONE); + for (i=0; i<NFNL_SUBSYS_COUNT; i++) mutex_init(&table[i].mutex); diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 9d6d6f60a80f..265e190f2218 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -21,45 +21,17 @@ #include <linux/netfilter_ipv6/ip6_tables.h> #include <net/netfilter/nf_tables.h> -static const struct { - const char *name; - u8 type; -} table_to_chaintype[] = { - { "filter", NFT_CHAIN_T_DEFAULT }, - { "raw", NFT_CHAIN_T_DEFAULT }, - { "security", NFT_CHAIN_T_DEFAULT }, - { "mangle", NFT_CHAIN_T_ROUTE }, - { "nat", NFT_CHAIN_T_NAT }, - { }, -}; - -static int nft_compat_table_to_chaintype(const 
char *table) -{ - int i; - - for (i = 0; table_to_chaintype[i].name != NULL; i++) { - if (strcmp(table_to_chaintype[i].name, table) == 0) - return table_to_chaintype[i].type; - } - - return -1; -} - static int nft_compat_chain_validate_dependency(const char *tablename, const struct nft_chain *chain) { - enum nft_chain_type type; const struct nft_base_chain *basechain; if (!tablename || !(chain->flags & NFT_BASE_CHAIN)) return 0; - type = nft_compat_table_to_chaintype(tablename); - if (type < 0) - return -EINVAL; - basechain = nft_base_chain(chain); - if (basechain->type->type != type) + if (strcmp(tablename, "nat") == 0 && + basechain->type->type != NFT_CHAIN_T_NAT) return -EINVAL; return 0; @@ -117,7 +89,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, struct xt_target *target, void *info, union nft_entry *entry, u8 proto, bool inv) { - par->net = &init_net; + par->net = ctx->net; par->table = ctx->table->name; switch (ctx->afi->family) { case AF_INET: @@ -324,7 +296,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, struct xt_match *match, void *info, union nft_entry *entry, u8 proto, bool inv) { - par->net = &init_net; + par->net = ctx->net; par->table = ctx->table->name; switch (ctx->afi->family) { case AF_INET: @@ -374,7 +346,7 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, union nft_entry e = {}; int ret; - ret = nft_compat_chain_validate_dependency(match->name, ctx->chain); + ret = nft_compat_chain_validate_dependency(match->table, ctx->chain); if (ret < 0) goto err; @@ -448,7 +420,7 @@ static int nft_match_validate(const struct nft_ctx *ctx, if (!(hook_mask & match->hooks)) return -EINVAL; - ret = nft_compat_chain_validate_dependency(match->name, + ret = nft_compat_chain_validate_dependency(match->table, ctx->chain); if (ret < 0) return ret; diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 006886dbee36..8c4229b11c34 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -246,11 +246,11 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto, { int transport_len = skb->len - skb_transport_offset(skb); - if (l4_proto == IPPROTO_TCP) { + if (l4_proto == NEXTHDR_TCP) { if (likely(transport_len >= sizeof(struct tcphdr))) inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb, addr, new_addr, 1); - } else if (l4_proto == IPPROTO_UDP) { + } else if (l4_proto == NEXTHDR_UDP) { if (likely(transport_len >= sizeof(struct udphdr))) { struct udphdr *uh = udp_hdr(skb); @@ -261,6 +261,10 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto, uh->check = CSUM_MANGLED_0; } } + } else if (l4_proto == NEXTHDR_ICMP) { + if (likely(transport_len >= sizeof(struct icmp6hdr))) + inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum, + skb, addr, new_addr, 1); } } @@ -722,8 +726,6 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, case OVS_ACTION_ATTR_SAMPLE: err = sample(dp, skb, key, a); - if (unlikely(err)) /* skb already freed. */ - return err; break; } diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index e6d7255183eb..f9e556b56086 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -1265,7 +1265,7 @@ static size_t ovs_dp_cmd_msg_size(void) return msgsize; } -/* Called with ovs_mutex or RCU read lock. */ +/* Called with ovs_mutex. 
*/ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, u32 portid, u32 seq, u32 flags, u8 cmd) { @@ -1555,7 +1555,7 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info) if (!reply) return -ENOMEM; - rcu_read_lock(); + ovs_lock(); dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); if (IS_ERR(dp)) { err = PTR_ERR(dp); @@ -1564,12 +1564,12 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info) err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, info->snd_seq, 0, OVS_DP_CMD_NEW); BUG_ON(err < 0); - rcu_read_unlock(); + ovs_unlock(); return genlmsg_reply(reply, info); err_unlock_free: - rcu_read_unlock(); + ovs_unlock(); kfree_skb(reply); return err; } @@ -1581,8 +1581,8 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) int skip = cb->args[0]; int i = 0; - rcu_read_lock(); - list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) { + ovs_lock(); + list_for_each_entry(dp, &ovs_net->dps, list_node) { if (i >= skip && ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, @@ -1590,7 +1590,7 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) break; i++; } - rcu_read_unlock(); + ovs_unlock(); cb->args[0] = i; diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 939bcb32100f..089b195c064a 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -145,7 +145,7 @@ static bool match_validate(const struct sw_flow_match *match, if (match->key->eth.type == htons(ETH_P_ARP) || match->key->eth.type == htons(ETH_P_RARP)) { key_expected |= 1 << OVS_KEY_ATTR_ARP; - if (match->mask && (match->mask->key.eth.type == htons(0xffff))) + if (match->mask && (match->mask->key.tp.src == htons(0xff))) mask_allowed |= 1 << OVS_KEY_ATTR_ARP; } @@ -689,6 +689,13 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX); return -EINVAL; } + + if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) { + OVS_NLERR("IPv6 flow label %x is out of range (max=%x).\n", + ntohl(ipv6_key->ipv6_label), (1 << 20) - 1); + return -EINVAL; + } + SW_FLOW_KEY_PUT(match, ipv6.label, ipv6_key->ipv6_label, is_mask); SW_FLOW_KEY_PUT(match, ip.proto, diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 87d20f48ff06..07c04a841ba0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -378,7 +378,7 @@ static void unregister_prot_hook(struct sock *sk, bool sync) __unregister_prot_hook(sk, sync); } -static inline __pure struct page *pgv_to_page(void *addr) +static inline struct page * __pure pgv_to_page(void *addr) { if (is_vmalloc_addr(addr)) return vmalloc_to_page(addr); diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 3f959c681885..f9c052d508f0 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1019,17 +1019,12 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp) xid = *p++; calldir = *p; - if (bc_xprt) - req = xprt_lookup_rqst(bc_xprt, xid); - - if (!req) { - printk(KERN_NOTICE - "%s: Got unrecognized reply: " - "calldir 0x%x xpt_bc_xprt %p xid %08x\n", - __func__, ntohl(calldir), - bc_xprt, ntohl(xid)); + if (!bc_xprt) return -EAGAIN; - } + spin_lock_bh(&bc_xprt->transport_lock); + req = xprt_lookup_rqst(bc_xprt, xid); + if (!req) + goto unlock_notfound; memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf)); /* @@ -1040,11 +1035,21 @@ static int 
receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp) dst = &req->rq_private_buf.head[0]; src = &rqstp->rq_arg.head[0]; if (dst->iov_len < src->iov_len) - return -EAGAIN; /* whatever; just giving up. */ + goto unlock_eagain; /* whatever; just giving up. */ memcpy(dst->iov_base, src->iov_base, src->iov_len); xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len); rqstp->rq_arg.len = 0; + spin_unlock_bh(&bc_xprt->transport_lock); return 0; +unlock_notfound: + printk(KERN_NOTICE + "%s: Got unrecognized reply: " + "calldir 0x%x xpt_bc_xprt %p xid %08x\n", + __func__, ntohl(calldir), + bc_xprt, ntohl(xid)); +unlock_eagain: + spin_unlock_bh(&bc_xprt->transport_lock); + return -EAGAIN; } static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 728663d6746f..fc7aff0eb562 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -302,7 +302,8 @@ enum { /* quirks for ATI/AMD HDMI */ #define AZX_DCAPS_PRESET_ATI_HDMI \ - (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB) + (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB|\ + AZX_DCAPS_NO_MSI64) /* quirks for ATI HDMI with snoop off */ #define AZX_DCAPS_PRESET_ATI_HDMI_NS \ @@ -1493,6 +1494,7 @@ static int azx_first_init(struct azx *chip) struct snd_card *card = chip->card; int err; unsigned short gcap; + unsigned int dma_bits = 64; #if BITS_PER_LONG != 64 /* Fix up base address on ULI M5461 */ @@ -1516,9 +1518,14 @@ static int azx_first_init(struct azx *chip) return -ENXIO; } - if (chip->msi) + if (chip->msi) { + if (chip->driver_caps & AZX_DCAPS_NO_MSI64) { + dev_dbg(card->dev, "Disabling 64bit MSI\n"); + pci->no_64bit_msi = true; + } if (pci_enable_msi(pci) < 0) chip->msi = 0; + } if (azx_acquire_irq(chip, 0) < 0) return -EBUSY; @@ -1529,9 +1536,14 @@ static int azx_first_init(struct azx *chip) gcap = azx_readw(chip, GCAP); dev_dbg(card->dev, "chipset global capabilities = 0x%x\n", gcap); + /* AMD devices support 40 or 48bit DMA, take the safe one */ + if (chip->pci->vendor == PCI_VENDOR_ID_AMD) + dma_bits = 40; + /* disable SB600 64bit support for safety */ if (chip->pci->vendor == PCI_VENDOR_ID_ATI) { struct pci_dev *p_smbus; + dma_bits = 40; p_smbus = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL); @@ -1561,9 +1573,11 @@ static int azx_first_init(struct azx *chip) } /* allow 64bit DMA address if supported by H/W */ - if ((gcap & AZX_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64))) - pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64)); - else { + if (!(gcap & AZX_GCAP_64OK)) + dma_bits = 32; + if (!pci_set_dma_mask(pci, DMA_BIT_MASK(dma_bits))) { + pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(dma_bits)); + } else { pci_set_dma_mask(pci, DMA_BIT_MASK(32)); pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)); } diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h index 602536c2147d..a09703a2b2c1 100644 --- a/sound/pci/hda/hda_priv.h +++ b/sound/pci/hda/hda_priv.h @@ -170,6 +170,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 }; #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ #define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */ #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */ +#define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */ enum { AZX_SNOOP_TYPE_NONE , diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 3aaca49de325..aacdb59f30de 100644 --- 
a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -1933,7 +1933,7 @@ out: int kvm_vgic_create(struct kvm *kvm) { - int i, vcpu_lock_idx = -1, ret = 0; + int i, vcpu_lock_idx = -1, ret; struct kvm_vcpu *vcpu; mutex_lock(&kvm->lock); @@ -1948,6 +1948,7 @@ int kvm_vgic_create(struct kvm *kvm) * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure * that no other VCPUs are run while we create the vgic. */ + ret = -EBUSY; kvm_for_each_vcpu(i, vcpu, kvm) { if (!mutex_trylock(&vcpu->mutex)) goto out_unlock; @@ -1955,11 +1956,10 @@ int kvm_vgic_create(struct kvm *kvm) } kvm_for_each_vcpu(i, vcpu, kvm) { - if (vcpu->arch.has_run_once) { - ret = -EBUSY; + if (vcpu->arch.has_run_once) goto out_unlock; - } } + ret = 0; spin_lock_init(&kvm->arch.vgic.lock); kvm->arch.vgic.in_kernel = true; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 25ffac9e947d..3cee7b167052 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting); static bool largepages_enabled = true; -bool kvm_is_mmio_pfn(pfn_t pfn) +bool kvm_is_reserved_pfn(pfn_t pfn) { if (pfn_valid(pfn)) - return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)); + return PageReserved(pfn_to_page(pfn)); return true; } @@ -1321,7 +1321,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, else if ((vma->vm_flags & VM_PFNMAP)) { pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; - BUG_ON(!kvm_is_mmio_pfn(pfn)); + BUG_ON(!kvm_is_reserved_pfn(pfn)); } else { if (async && vma_is_valid(vma, write_fault)) *async = true; @@ -1427,7 +1427,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn) if (is_error_noslot_pfn(pfn)) return KVM_ERR_PTR_BAD_PAGE; - if (kvm_is_mmio_pfn(pfn)) { + if (kvm_is_reserved_pfn(pfn)) { WARN_ON(1); return KVM_ERR_PTR_BAD_PAGE; } @@ -1456,7 +1456,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean); void kvm_release_pfn_clean(pfn_t pfn) { - if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn)) + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn)) put_page(pfn_to_page(pfn)); } EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); @@ -1477,7 +1477,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn) void kvm_set_pfn_dirty(pfn_t pfn) { - if (!kvm_is_mmio_pfn(pfn)) { + if (!kvm_is_reserved_pfn(pfn)) { struct page *page = pfn_to_page(pfn); if (!PageReserved(page)) SetPageDirty(page); @@ -1487,14 +1487,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); void kvm_set_pfn_accessed(pfn_t pfn) { - if (!kvm_is_mmio_pfn(pfn)) + if (!kvm_is_reserved_pfn(pfn)) mark_page_accessed(pfn_to_page(pfn)); } EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); void kvm_get_pfn(pfn_t pfn) { - if (!kvm_is_mmio_pfn(pfn)) + if (!kvm_is_reserved_pfn(pfn)) get_page(pfn_to_page(pfn)); } EXPORT_SYMBOL_GPL(kvm_get_pfn); |
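The skb_clone() hunk in net/core/skbuff.c above replaces atomic_set(&fclones->fclone_ref, 2) with atomic_inc(): storing an absolute value is only safe while the count is known to be exactly 1, while an increment stays correct however many references are already held. Below is a minimal userspace model of that pattern using C11 atomics; the struct and function names are invented for illustration and are not kernel API.

/*
 * Toy model of taking an extra reference on a shared clone pair.
 * Build: cc -std=c11 -o refdemo refdemo.c
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

struct fastclone_pair {
        atomic_int fclone_ref;          /* one count per live buffer in the pair */
};

static void take_clone_ref(struct fastclone_pair *p)
{
        /*
         * atomic_set(&ref, 2) would only be correct if the count were known
         * to be exactly 1 here; an increment is right regardless of how many
         * references already exist.
         */
        atomic_fetch_add_explicit(&p->fclone_ref, 1, memory_order_relaxed);
}

int main(void)
{
        struct fastclone_pair p;

        atomic_init(&p.fclone_ref, 1);  /* the original buffer holds one reference */
        take_clone_ref(&p);             /* the clone takes a second one */
        assert(atomic_load(&p.fclone_ref) == 2);
        printf("fclone_ref = %d\n", atomic_load(&p.fclone_ref));
        return 0;
}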
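The inet_recv_error() helper added in net/ipv4/af_inet.c above, which the ping.c and tcp.c hunks switch to for MSG_ERRQUEUE, dispatches on the socket family instead of assuming IPv4, so an AF_INET6 socket reaches the IPv6 error-queue path and any other family gets -EINVAL. A small standalone sketch of that dispatch shape; the two handlers below are stand-ins, not the kernel functions.

/* Build: cc -o errq errq.c */
#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>         /* AF_INET, AF_INET6, AF_UNIX */

/* Stand-ins for the per-family error-queue handlers. */
static int ipv4_recv_error(void) { return 4; }
static int ipv6_recv_error(void) { return 6; }

/* Pick the handler from the socket family rather than assuming IPv4. */
static int recv_error_by_family(int family)
{
        if (family == AF_INET)
                return ipv4_recv_error();
        if (family == AF_INET6)
                return ipv6_recv_error();
        return -EINVAL;
}

int main(void)
{
        printf("AF_INET  -> %d\n", recv_error_by_family(AF_INET));
        printf("AF_INET6 -> %d\n", recv_error_by_family(AF_INET6));
        printf("AF_UNIX  -> %d\n", recv_error_by_family(AF_UNIX));
        return 0;
}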
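The igmpv3_newpack() and mld_newpack() hunks above pass the interface MTU down and record it in skb->reserved_tailroom, so the report builders' AVAILABLE() macro can rely on skb_availroom() alone instead of a size stashed in cb[] or the raw device MTU. The arithmetic is easy to lose in the diff, so here it is modelled very roughly in plain C; the toy struct only mimics skb_end_offset()/skb_availroom() and all numbers are made up.

/* Build: cc -o tailroom tailroom.c */
#include <stdio.h>

/* Toy stand-in for the few sk_buff fields the calculation touches. */
struct toybuf {
        unsigned int end;               /* like skb_end_offset(): allocated data size */
        unsigned int len;               /* bytes already used */
        unsigned int reserved_tailroom;
};

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

/* Mirrors: skb->reserved_tailroom = skb_end_offset(skb) - min(mtu, skb_end_offset(skb)) */
static void reserve_beyond_mtu(struct toybuf *b, unsigned int mtu)
{
        b->reserved_tailroom = b->end - min_u(mtu, b->end);
}

/* Rough model of skb_availroom(): free tail space minus the reserved part. */
static unsigned int availroom(const struct toybuf *b)
{
        return b->end - b->len - b->reserved_tailroom;
}

int main(void)
{
        struct toybuf b = { .end = 2048, .len = 300 };  /* 2 KiB allocation, 300 bytes queued */

        reserve_beyond_mtu(&b, 1500);
        /* reserved = 2048 - min(1500, 2048) = 548; available = 2048 - 300 - 548 = 1200 */
        printf("reserved = %u, available = %u\n", b.reserved_tailroom, availroom(&b));
        return 0;
}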
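The nft_masq_ipv4/ipv6 hunks above memset() the on-stack struct nf_nat_range before filling in only range.flags; the masquerade core reads the other members as well, so leaving them as stack garbage is a real bug. The same defensive pattern in standalone C, with an illustrative struct and consumer.

/* Build: cc -o zeroinit zeroinit.c */
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for a struct whose consumer reads every field. */
struct range_cfg {
        unsigned int flags;
        unsigned int min_addr;
        unsigned int max_addr;
        unsigned short min_port;
        unsigned short max_port;
};

static void consume(const struct range_cfg *r)
{
        /* A real consumer would act on all of these, not just flags. */
        printf("flags=%u min_addr=%u max_port=%u\n", r->flags, r->min_addr, r->max_port);
}

int main(void)
{
        struct range_cfg range;

        /* Zero everything first, then set only what this caller cares about. */
        memset(&range, 0, sizeof(range));
        range.flags = 0x1;
        consume(&range);

        /* Equivalent and often tidier in new code: a designated initializer. */
        struct range_cfg range2 = { .flags = 0x1 };
        consume(&range2);
        return 0;
}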
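The two tcp_input.c hunks above add !th->syn to the "no useful flags" early-discard test, so a segment carrying only SYN is no longer silently dropped there and can instead be handled by the later RFC 5961 processing (that downstream path is not part of this diff). A tiny model of the predicate with a few test vectors; the constants are the conventional TCP flag bits.

/* Build: cc -o tcpflags tcpflags.c */
#include <stdbool.h>
#include <stdio.h>

#define FL_SYN 0x02
#define FL_RST 0x04
#define FL_ACK 0x10

/* Mirrors: if (!th->ack && !th->rst && !th->syn) goto discard; */
static bool should_discard(unsigned char flags)
{
        return !(flags & FL_ACK) && !(flags & FL_RST) && !(flags & FL_SYN);
}

int main(void)
{
        const unsigned char tests[] = { 0, FL_SYN, FL_ACK, FL_RST, FL_SYN | FL_ACK };

        for (unsigned int i = 0; i < sizeof(tests); i++)
                printf("flags=0x%02x discard=%d\n", tests[i], should_discard(tests[i]));
        return 0;
}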
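The rc80211_minstrel_ht.c hunk above reworks the sorted insert into a do/while that re-reads the entry it is about to compare against on every step, instead of comparing repeatedly against values fetched once before the loop. The same shape in standalone C, inserting a rate index into a short list kept sorted by (throughput, probability) descending; the data and field names are invented for the example.

/* Build: cc -o tpsort tpsort.c */
#include <stdio.h>
#include <string.h>

#define MAX_THR_RATES 4

struct rate_stat {
        unsigned int cur_tp;            /* throughput estimate */
        unsigned int prob;              /* success probability */
};

/* Insert @new_idx into @tp_list (indices into @stats, best first). */
static void sort_best_tp(const struct rate_stat *stats, unsigned char new_idx,
                         unsigned char *tp_list)
{
        unsigned int cur_thr = stats[new_idx].cur_tp;
        unsigned int cur_prob = stats[new_idx].prob;
        int j = MAX_THR_RATES;

        do {
                /* Fetch the candidate at this position before comparing. */
                unsigned int tmp_thr = stats[tp_list[j - 1]].cur_tp;
                unsigned int tmp_prob = stats[tp_list[j - 1]].prob;

                if (cur_thr < tmp_thr ||
                    (cur_thr == tmp_thr && cur_prob <= tmp_prob))
                        break;
                j--;
        } while (j > 0);

        if (j < MAX_THR_RATES - 1)
                memmove(&tp_list[j + 1], &tp_list[j],
                        sizeof(*tp_list) * (MAX_THR_RATES - j - 1));
        if (j < MAX_THR_RATES)
                tp_list[j] = new_idx;
}

int main(void)
{
        struct rate_stat stats[] = {
                { 255, 90 }, { 300, 80 }, { 200, 95 }, { 250, 60 }, { 260, 70 },
        };
        unsigned char tp_list[MAX_THR_RATES] = { 1, 4, 3, 2 };  /* 300, 260, 250, 200 */

        sort_best_tp(stats, 0, tp_list);        /* 255 should land between 260 and 250 */
        for (int i = 0; i < MAX_THR_RATES; i++)
                printf("%u ", tp_list[i]);      /* expected: 1 4 0 3 */
        printf("\n");
        return 0;
}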
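The ip_set_core.c hunk above checks that the caller-supplied length is at least sizeof(struct ip_set_req_version) before reading the version field out of the buffer. Validating the length before trusting user-provided data is the general pattern; here it is as a standalone sketch with placeholder structs and values.

/* Build: cc -o verchk verchk.c */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PROTO_VERSION 6

struct req_version {
        uint32_t op;
        uint32_t version;
};

/* Parse a caller-supplied buffer only after confirming it is big enough. */
static int check_version(const void *data, size_t len)
{
        struct req_version req;

        if (len < sizeof(req))
                return -EINVAL;

        memcpy(&req, data, sizeof(req));        /* no reads past the checked length */
        if (req.version != PROTO_VERSION)
                return -EPROTO;
        return 0;
}

int main(void)
{
        struct req_version good = { .op = 0x100, .version = PROTO_VERSION };
        uint32_t short_buf = 0x100;     /* too small to contain a version field */

        printf("good  -> %d\n", check_version(&good, sizeof(good)));
        printf("short -> %d\n", check_version(&short_buf, sizeof(short_buf)));
        return 0;
}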
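The nfnetlink.c hunks above do three related things: they add the missing nfnl_group2type[] entries for NFNLGRP_NFTABLES and NFNLGRP_ACCT_QUOTA, make nfnetlink_bind() range-check the group before using it as an index, and assert at init time that no group was left unmapped. A standalone sketch of both guards; the group and subsystem IDs below are invented.

/* Build: cc -o grpchk grpchk.c */
#include <assert.h>
#include <errno.h>
#include <stdio.h>

enum { GRP_NONE, GRP_CONNTRACK, GRP_NFTABLES, GRP_ACCT, GRP_MAX = GRP_ACCT };
enum { SUBSYS_NONE, SUBSYS_CTNETLINK, SUBSYS_NFTABLES, SUBSYS_ACCT };

static const int group2type[GRP_MAX + 1] = {
        [GRP_CONNTRACK] = SUBSYS_CTNETLINK,
        [GRP_NFTABLES]  = SUBSYS_NFTABLES,
        [GRP_ACCT]      = SUBSYS_ACCT,
};

static int bind_group(int group)
{
        /* Range-check before indexing the table. */
        if (group <= GRP_NONE || group > GRP_MAX)
                return -EINVAL;
        return group2type[group];
}

int main(void)
{
        /* Init-time check: every real group must map to a subsystem. */
        for (int i = GRP_NONE + 1; i <= GRP_MAX; i++)
                assert(group2type[i] != SUBSYS_NONE);

        printf("bind(GRP_NFTABLES) -> %d\n", bind_group(GRP_NFTABLES));
        printf("bind(42)           -> %d\n", bind_group(42));
        return 0;
}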
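The flow_netlink.c hunk above rejects an IPv6 flow label with any of its top 12 bits set, since a flow label is only 20 bits wide inside the packed 32-bit field, and it masks with htonl() so the test works directly on the big-endian on-wire value. The check on its own, as a runnable sketch.

/* Build: cc -o flowlabel flowlabel.c */
#include <arpa/inet.h>          /* htonl, ntohl */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A flow label occupies the low 20 bits of the 32-bit big-endian field. */
static bool label_in_range(uint32_t label_be)
{
        return (label_be & htonl(0xFFF00000)) == 0;
}

int main(void)
{
        uint32_t ok = htonl(0x000FFFFF);        /* largest valid 20-bit label */
        uint32_t bad = htonl(0x00100000);       /* needs a 21st bit */

        printf("0x%05x -> %s\n", ntohl(ok), label_in_range(ok) ? "ok" : "out of range");
        printf("0x%06x -> %s\n", ntohl(bad), label_in_range(bad) ? "ok" : "out of range");
        return 0;
}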
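The hda_intel.c hunk above stops hard-wiring a 64-bit DMA mask: it starts from 64 bits, drops to 40 for AMD devices (and in the existing ATI/SB600 branch), clamps to 32 when GCAP lacks the 64OK bit, and still falls back to 32 bits if setting the chosen mask fails. A simplified standalone sketch of just the width selection; the vendor IDs are the usual PCI codes, but the capability flag and the merging of the AMD and ATI paths are illustrative.

/* Build: cc -o dmamask dmamask.c */
#include <stdbool.h>
#include <stdio.h>

#define VENDOR_AMD 0x1022
#define VENDOR_ATI 0x1002

/* Pick the widest DMA mask this device/controller pairing should use. */
static unsigned int pick_dma_bits(unsigned short vendor, bool gcap_64ok)
{
        unsigned int dma_bits = 64;

        /* These devices are only trusted up to 40-bit addresses. */
        if (vendor == VENDOR_AMD || vendor == VENDOR_ATI)
                dma_bits = 40;
        /* Controller does not advertise 64-bit support: fall back to 32. */
        if (!gcap_64ok)
                dma_bits = 32;
        return dma_bits;
}

int main(void)
{
        printf("intel, 64ok  -> %u\n", pick_dma_bits(0x8086, true));            /* 64 */
        printf("amd,   64ok  -> %u\n", pick_dma_bits(VENDOR_AMD, true));        /* 40 */
        printf("ati,  !64ok  -> %u\n", pick_dma_bits(VENDOR_ATI, false));       /* 32 */
        return 0;
}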