1606 files changed, 15470 insertions, 8293 deletions
@@ -59,6 +59,7 @@ James Bottomley <jejb@mulgrave.(none)> James Bottomley <jejb@titanic.il.steeleye.com> James E Wilson <wilson@specifix.com> James Ketrenos <jketreno@io.(none)> +<javier@osg.samsung.com> <javier.martinez@collabora.co.uk> Jean Tourrilhes <jt@hpl.hp.com> Jeff Garzik <jgarzik@pretzel.yyz.us> Jens Axboe <axboe@suse.de> diff --git a/Documentation/Changes b/Documentation/Changes index 6d8863004858..f447f0516f07 100644 --- a/Documentation/Changes +++ b/Documentation/Changes @@ -43,7 +43,7 @@ o udev 081 # udevd --version o grub 0.93 # grub --version || grub-install --version o mcelog 0.6 # mcelog --version o iptables 1.4.2 # iptables -V -o openssl & libcrypto 1.0.1k # openssl version +o openssl & libcrypto 1.0.0 # openssl version Kernel compilation diff --git a/Documentation/arm/OMAP/README b/Documentation/arm/OMAP/README new file mode 100644 index 000000000000..75645c45d14a --- /dev/null +++ b/Documentation/arm/OMAP/README @@ -0,0 +1,7 @@ +This file contains documentation for running mainline +kernel on omaps. + +KERNEL NEW DEPENDENCIES +v4.3+ Update is needed for custom .config files to make sure + CONFIG_REGULATOR_PBIAS is enabled for MMC1 to work + properly. diff --git a/Documentation/device-mapper/snapshot.txt b/Documentation/device-mapper/snapshot.txt index 0d5bc46dc167..ad6949bff2e3 100644 --- a/Documentation/device-mapper/snapshot.txt +++ b/Documentation/device-mapper/snapshot.txt @@ -41,9 +41,13 @@ useless and be disabled, returning errors. So it is important to monitor the amount of free space and expand the <COW device> before it fills up. <persistent?> is P (Persistent) or N (Not persistent - will not survive -after reboot). -The difference is that for transient snapshots less metadata must be -saved on disk - they can be kept in memory by the kernel. +after reboot). O (Overflow) can be added as a persistent store option +to allow userspace to advertise its support for seeing "Overflow" in the +snapshot status. So supported store types are "P", "PO" and "N". + +The difference between persistent and transient is with transient +snapshots less metadata must be saved on disk - they can be kept in +memory by the kernel. * snapshot-merge <origin> <COW device> <persistent> <chunksize> diff --git a/Documentation/devicetree/bindings/arm/gic-v3.txt b/Documentation/devicetree/bindings/arm/gic-v3.txt index ddfade40ac59..7803e77d85cb 100644 --- a/Documentation/devicetree/bindings/arm/gic-v3.txt +++ b/Documentation/devicetree/bindings/arm/gic-v3.txt @@ -57,6 +57,8 @@ used to route Message Signalled Interrupts (MSI) to the CPUs. These nodes must have the following properties: - compatible : Should at least contain "arm,gic-v3-its". - msi-controller : Boolean property. Identifies the node as an MSI controller +- #msi-cells: Must be <1>. The single msi-cell is the DeviceID of the device + which will generate the MSI. - reg: Specifies the base physical address and size of the ITS registers. 
@@ -83,6 +85,7 @@ Examples: gic-its@2c200000 { compatible = "arm,gic-v3-its"; msi-controller; + #msi-cells = <1>; reg = <0x0 0x2c200000 0 0x200000>; }; }; @@ -107,12 +110,14 @@ Examples: gic-its@2c200000 { compatible = "arm,gic-v3-its"; msi-controller; + #msi-cells = <1>; reg = <0x0 0x2c200000 0 0x200000>; }; gic-its@2c400000 { compatible = "arm,gic-v3-its"; msi-controller; + #msi-cells = <1>; reg = <0x0 0x2c400000 0 0x200000>; }; }; diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt index a8274eabae2e..b8e41c148a3c 100644 --- a/Documentation/devicetree/bindings/arm/idle-states.txt +++ b/Documentation/devicetree/bindings/arm/idle-states.txt @@ -497,7 +497,7 @@ cpus { }; idle-states { - entry-method = "arm,psci"; + entry-method = "psci"; CPU_RETENTION_0_0: cpu-retention-0-0 { compatible = "arm,idle-state"; diff --git a/Documentation/devicetree/bindings/gpio/gpio.txt b/Documentation/devicetree/bindings/gpio/gpio.txt index 5788d5cf1252..82d40e2505f6 100644 --- a/Documentation/devicetree/bindings/gpio/gpio.txt +++ b/Documentation/devicetree/bindings/gpio/gpio.txt @@ -16,7 +16,9 @@ properties, each containing a 'gpio-list': GPIO properties should be named "[<name>-]gpios", with <name> being the purpose of this GPIO for the device. While a non-existent <name> is considered valid for compatibility reasons (resolving to the "gpios" property), it is not allowed -for new bindings. +for new bindings. Also, GPIO properties named "[<name>-]gpio" are valid and old +bindings use it, but are only supported for compatibility reasons and should not +be used for newer bindings since it has been deprecated. GPIO properties can contain one or more GPIO phandles, but only in exceptional cases should they contain more than one. If your device uses several GPIOs with diff --git a/Documentation/devicetree/bindings/iio/accel/bma180.txt b/Documentation/devicetree/bindings/iio/accel/bma180.txt index c5933573e0f6..4a3679d54457 100644 --- a/Documentation/devicetree/bindings/iio/accel/bma180.txt +++ b/Documentation/devicetree/bindings/iio/accel/bma180.txt @@ -1,10 +1,11 @@ -* Bosch BMA180 triaxial acceleration sensor +* Bosch BMA180 / BMA250 triaxial acceleration sensor http://omapworld.com/BMA180_111_1002839.pdf +http://ae-bst.resource.bosch.com/media/products/dokumente/bma250/bst-bma250-ds002-05.pdf Required properties: - - compatible : should be "bosch,bma180" + - compatible : should be "bosch,bma180" or "bosch,bma250" - reg : the I2C address of the sensor Optional properties: @@ -13,6 +14,9 @@ Optional properties: - interrupts : interrupt mapping for GPIO IRQ, it should by configured with flags IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING + For the bma250 the first interrupt listed must be the one + connected to the INT1 pin, the second (optional) interrupt + listed must be the one connected to the INT2 pin. 
Example: diff --git a/Documentation/devicetree/bindings/input/cypress,cyapa.txt b/Documentation/devicetree/bindings/input/cypress,cyapa.txt index 635a3b036630..8d91ba9ff2fd 100644 --- a/Documentation/devicetree/bindings/input/cypress,cyapa.txt +++ b/Documentation/devicetree/bindings/input/cypress,cyapa.txt @@ -25,7 +25,7 @@ Example: /* Cypress Gen3 touchpad */ touchpad@67 { compatible = "cypress,cyapa"; - reg = <0x24>; + reg = <0x67>; interrupt-parent = <&gpio>; interrupts = <2 IRQ_TYPE_EDGE_FALLING>; /* GPIO 2 */ wakeup-source; diff --git a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt index 391717a68f3b..ec96b1f01478 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt @@ -4,8 +4,8 @@ The MISC interrupt controller is a secondary controller for lower priority interrupt. Required Properties: -- compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc" - as fallback +- compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc" or + "qca,<soctype>-cpu-intc", "qca,ar7240-misc-intc" - reg: Base address and size of the controllers memory area - interrupt-parent: phandle of the parent interrupt controller. - interrupts: Interrupt specifier for the controllers interrupt. @@ -13,6 +13,9 @@ Required Properties: - #interrupt-cells : Specifies the number of cells needed to encode interrupt source, should be 1 +Compatible fallback depends on the SoC. Use ar7100 for ar71xx and ar913x, +use ar7240 for all other SoCs. + Please refer to interrupts.txt in this directory for details of the common Interrupt Controllers bindings used by client devices. @@ -28,3 +31,16 @@ Example: interrupt-controller; #interrupt-cells = <1>; }; + +Another example: + + interrupt-controller@18060010 { + compatible = "qca,ar9331-misc-intc", qca,ar7240-misc-intc"; + reg = <0x18060010 0x4>; + + interrupt-parent = <&cpuintc>; + interrupts = <6>; + + interrupt-controller; + #interrupt-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt index d8ef5bf50f11..7fab84b33531 100644 --- a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt +++ b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt @@ -7,7 +7,8 @@ OHCI and EHCI controllers. Required properties: - compatible: "renesas,pci-r8a7790" for the R8A7790 SoC; - "renesas,pci-r8a7791" for the R8A7791 SoC. + "renesas,pci-r8a7791" for the R8A7791 SoC; + "renesas,pci-r8a7794" for the R8A7794 SoC. - reg: A list of physical regions to access the device: the first is the operational registers for the OHCI/EHCI controllers and the second is for the bridge configuration and control registers. diff --git a/Documentation/devicetree/bindings/regulator/pbias-regulator.txt b/Documentation/devicetree/bindings/regulator/pbias-regulator.txt index 32aa26f1e434..acbcb452a69a 100644 --- a/Documentation/devicetree/bindings/regulator/pbias-regulator.txt +++ b/Documentation/devicetree/bindings/regulator/pbias-regulator.txt @@ -2,7 +2,12 @@ PBIAS internal regulator for SD card dual voltage i/o pads on OMAP SoCs. Required properties: - compatible: - - "ti,pbias-omap" for OMAP2, OMAP3, OMAP4, OMAP5, DRA7. 
+ - should be "ti,pbias-dra7" for DRA7 + - should be "ti,pbias-omap2" for OMAP2 + - should be "ti,pbias-omap3" for OMAP3 + - should be "ti,pbias-omap4" for OMAP4 + - should be "ti,pbias-omap5" for OMAP5 + - "ti,pbias-omap" is deprecated - reg: pbias register offset from syscon base and size of pbias register. - syscon : phandle of the system control module - regulator-name : should be diff --git a/Documentation/devicetree/bindings/sound/ak4613.txt b/Documentation/devicetree/bindings/sound/ak4613.txt new file mode 100644 index 000000000000..15a919522b42 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/ak4613.txt @@ -0,0 +1,17 @@ +AK4613 I2C transmitter + +This device supports I2C mode only. + +Required properties: + +- compatible : "asahi-kasei,ak4613" +- reg : The chip select number on the I2C bus + +Example: + +&i2c { + ak4613: ak4613@0x10 { + compatible = "asahi-kasei,ak4613"; + reg = <0x10>; + }; +}; diff --git a/Documentation/devicetree/bindings/sound/ak4642.txt b/Documentation/devicetree/bindings/sound/ak4642.txt index 623d4e70ae11..340784db6808 100644 --- a/Documentation/devicetree/bindings/sound/ak4642.txt +++ b/Documentation/devicetree/bindings/sound/ak4642.txt @@ -7,7 +7,14 @@ Required properties: - compatible : "asahi-kasei,ak4642" or "asahi-kasei,ak4643" or "asahi-kasei,ak4648" - reg : The chip select number on the I2C bus -Example: +Optional properties: + + - #clock-cells : common clock binding; shall be set to 0 + - clocks : common clock binding; MCKI clock + - clock-frequency : common clock binding; frequency of MCKO + - clock-output-names : common clock binding; MCKO clock name + +Example 1: &i2c { ak4648: ak4648@0x12 { @@ -15,3 +22,16 @@ Example: reg = <0x12>; }; }; + +Example 2: + +&i2c { + ak4643: codec@12 { + compatible = "asahi-kasei,ak4643"; + reg = <0x12>; + #clock-cells = <0>; + clocks = <&audio_clock>; + clock-frequency = <12288000>; + clock-output-names = "ak4643_mcko"; + }; +}; diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt index 1173395b5e5c..c57cbd65736c 100644 --- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt +++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt @@ -4,10 +4,12 @@ Required properties: - compatible : "renesas,rcar_sound-<soctype>", fallbacks "renesas,rcar_sound-gen1" if generation1, and "renesas,rcar_sound-gen2" if generation2 + "renesas,rcar_sound-gen3" if generation3 Examples with soctypes are: - "renesas,rcar_sound-r8a7778" (R-Car M1A) - "renesas,rcar_sound-r8a7790" (R-Car H2) - "renesas,rcar_sound-r8a7791" (R-Car M2-W) + - "renesas,rcar_sound-r8a7795" (R-Car H3) - reg : Should contain the register physical address. required register is SRU/ADG/SSI if generation1 @@ -30,6 +32,11 @@ Required properties: - rcar_sound,dai : DAI contents. The number of DAI subnode should be same as HW. see below for detail. 
+- #sound-dai-cells : it must be 0 if your system is using single DAI + it must be 1 if your system is using multi DAI +- #clock-cells : it must be 0 if your system has audio_clkout + it must be 1 if your system has audio_clkout0/1/2/3 +- clock-frequency : for all audio_clkout0/1/2/3 SSI subnode properties: - interrupts : Should contain SSI interrupt for PIO transfer diff --git a/Documentation/devicetree/bindings/sound/sun4i-codec.txt b/Documentation/devicetree/bindings/sound/sun4i-codec.txt new file mode 100644 index 000000000000..680144b74ae9 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/sun4i-codec.txt @@ -0,0 +1,33 @@ +* Allwinner A10 Codec + +Required properties: +- compatible: must be either "allwinner,sun4i-a10-codec" or + "allwinner,sun7i-a20-codec" +- reg: must contain the registers location and length +- interrupts: must contain the codec interrupt +- dmas: DMA channels for tx and rx dma. See the DMA client binding, + Documentation/devicetree/bindings/dma/dma.txt +- dma-names: should include "tx" and "rx". +- clocks: a list of phandle + clock-specifer pairs, one for each entry + in clock-names. +- clock-names: should contain followings: + - "apb": the parent APB clock for this controller + - "codec": the parent module clock +- routing : A list of the connections between audio components. Each + entry is a pair of strings, the first being the connection's sink, + the second being the connection's source. + + +Example: +codec: codec@01c22c00 { + #sound-dai-cells = <0>; + compatible = "allwinner,sun7i-a20-codec"; + reg = <0x01c22c00 0x40>; + interrupts = <0 30 4>; + clocks = <&apb0_gates 0>, <&codec_clk>; + clock-names = "apb", "codec"; + dmas = <&dma 0 19>, <&dma 0 19>; + dma-names = "rx", "tx"; + routing = "Headphone Jack", "HP Right", + "Headphone Jack", "HP Left"; +}; diff --git a/Documentation/devicetree/bindings/sound/tdm-slot.txt b/Documentation/devicetree/bindings/sound/tdm-slot.txt index 6a2c84247f91..34cf70e2cbc4 100644 --- a/Documentation/devicetree/bindings/sound/tdm-slot.txt +++ b/Documentation/devicetree/bindings/sound/tdm-slot.txt @@ -4,11 +4,15 @@ This specifies audio DAI's TDM slot. TDM slot properties: dai-tdm-slot-num : Number of slots in use. -dai-tdm-slot-width : Width in bits for each slot. +dai-tdm-slot-width : Width in bits for each slot. +dai-tdm-slot-tx-mask : Transmit direction slot mask, optional +dai-tdm-slot-rx-mask : Receive direction slot mask, optional For instance: dai-tdm-slot-num = <2>; dai-tdm-slot-width = <8>; + dai-tdm-slot-tx-mask = <0 1>; + dai-tdm-slot-rx-mask = <1 0>; And for each spcified driver, there could be one .of_xlate_tdm_slot_mask() to specify a explicit mapping of the channels and the slots. If it's absent @@ -18,3 +22,8 @@ tx and rx masks. For snd_soc_of_xlate_tdm_slot_mask(), the tx and rx masks will use a 1 bit for an active slot as default, and the default active bits are at the LSB of the masks. + +The explicit masks are given as array of integers, where the first +number presents bit-0 (LSB), second presents bit-1, etc. Any non zero +number is considered 1 and 0 is 0. snd_soc_of_xlate_tdm_slot_mask() +does not do anything, if either mask is set non zero value. 
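[Editorial aside to the tdm-slot.txt hunk above: a minimal, self-contained C sketch of how the explicit mask arrays described there map onto slot bitmasks — the first cell is bit 0 (LSB), the next cell bit 1, and any non-zero cell counts as active. The helper name and the standalone main() are illustrative only and are not taken from the kernel sources.]

#include <stdio.h>

/* Build a slot bitmask from a DT-style integer array, per tdm-slot.txt:
 * element i controls bit i, and any non-zero value activates that slot. */
static unsigned int tdm_mask_from_array(const unsigned int *vals, int n)
{
	unsigned int mask = 0;
	int i;

	for (i = 0; i < n; i++)
		if (vals[i])
			mask |= 1U << i;
	return mask;
}

int main(void)
{
	unsigned int tx[] = { 0, 1 };	/* dai-tdm-slot-tx-mask = <0 1>; */
	unsigned int rx[] = { 1, 0 };	/* dai-tdm-slot-rx-mask = <1 0>; */

	/* Prints tx_mask=0x2 rx_mask=0x1 for the example values above. */
	printf("tx_mask=0x%x rx_mask=0x%x\n",
	       tdm_mask_from_array(tx, 2), tdm_mask_from_array(rx, 2));
	return 0;
}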
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt index 8f771441be60..705075da2f10 100644 --- a/Documentation/devicetree/bindings/spi/sh-msiof.txt +++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt @@ -51,7 +51,7 @@ Optional properties, deprecated for soctype-specific bindings: - renesas,tx-fifo-size : Overrides the default tx fifo size given in words (default is 64) - renesas,rx-fifo-size : Overrides the default rx fifo size given in words - (default is 64, or 256 on R-Car Gen2) + (default is 64) Pinctrl properties might be needed, too. See Documentation/devicetree/bindings/pinctrl/renesas,*. diff --git a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt index dcefc438272f..6160ffbcb3d3 100644 --- a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt +++ b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt @@ -15,17 +15,18 @@ Required properties: - interrupts: Should contain spi interrupt - clocks: phandles to input clocks. - The first should be <&topckgen CLK_TOP_SPI_SEL>. - The second should be one of the following. + The first should be one of the following. It's PLL. - <&clk26m>: specify parent clock 26MHZ. - <&topckgen CLK_TOP_SYSPLL3_D2>: specify parent clock 109MHZ. It's the default one. - <&topckgen CLK_TOP_SYSPLL4_D2>: specify parent clock 78MHZ. - <&topckgen CLK_TOP_UNIVPLL2_D4>: specify parent clock 104MHZ. - <&topckgen CLK_TOP_UNIVPLL1_D8>: specify parent clock 78MHZ. + The second should be <&topckgen CLK_TOP_SPI_SEL>. It's clock mux. + The third is <&pericfg CLK_PERI_SPI0>. It's clock gate. -- clock-names: shall be "spi-clk" for the controller clock, and - "parent-clk" for the parent clock. +- clock-names: shall be "parent-clk" for the parent clock, "sel-clk" for the + muxes clock, and "spi-clk" for the clock gate. Optional properties: - mediatek,pad-select: specify which pins group(ck/mi/mo/cs) spi @@ -44,8 +45,11 @@ spi: spi@1100a000 { #size-cells = <0>; reg = <0 0x1100a000 0 0x1000>; interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>; - clocks = <&topckgen CLK_TOP_SPI_SEL>, <&topckgen CLK_TOP_SYSPLL3_D2>; - clock-names = "spi-clk", "parent-clk"; + clocks = <&topckgen CLK_TOP_SYSPLL3_D2>, + <&topckgen CLK_TOP_SPI_SEL>, + <&pericfg CLK_PERI_SPI0>; + clock-names = "parent-clk", "sel-clk", "spi-clk"; + mediatek,pad-select = <0>; status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/thermal/thermal.txt b/Documentation/devicetree/bindings/thermal/thermal.txt index 8a49362dea6e..41b817f7b670 100644 --- a/Documentation/devicetree/bindings/thermal/thermal.txt +++ b/Documentation/devicetree/bindings/thermal/thermal.txt @@ -55,19 +55,11 @@ of heat dissipation). For example a fan's cooling states correspond to the different fan speeds possible. Cooling states are referred to by single unsigned integers, where larger numbers mean greater heat dissipation. The precise set of cooling states associated with a device -(as referred to be the cooling-min-state and cooling-max-state +(as referred to by the cooling-min-level and cooling-max-level properties) should be defined in a particular device's binding. For more examples of cooling devices, refer to the example sections below. Required properties: -- cooling-min-state: An integer indicating the smallest - Type: unsigned cooling state accepted. Typically 0. - Size: one cell - -- cooling-max-state: An integer indicating the largest - Type: unsigned cooling state accepted. 
- Size: one cell - - #cooling-cells: Used to provide cooling device specific information Type: unsigned while referring to it. Must be at least 2, in order Size: one cell to specify minimum and maximum cooling state used @@ -77,6 +69,15 @@ Required properties: See Cooling device maps section below for more details on how consumers refer to cooling devices. +Optional properties: +- cooling-min-level: An integer indicating the smallest + Type: unsigned cooling state accepted. Typically 0. + Size: one cell + +- cooling-max-level: An integer indicating the largest + Type: unsigned cooling state accepted. + Size: one cell + * Trip points The trip node is a node to describe a point in the temperature domain @@ -225,8 +226,8 @@ cpus { 396000 950000 198000 850000 >; - cooling-min-state = <0>; - cooling-max-state = <3>; + cooling-min-level = <0>; + cooling-max-level = <3>; #cooling-cells = <2>; /* min followed by max */ }; ... @@ -240,8 +241,8 @@ cpus { */ fan0: fan@0x48 { ... - cooling-min-state = <0>; - cooling-max-state = <9>; + cooling-min-level = <0>; + cooling-max-level = <9>; #cooling-cells = <2>; /* min followed by max */ }; }; diff --git a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt index d71ef07bca5d..a057b75ba4b5 100644 --- a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt +++ b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt @@ -6,6 +6,7 @@ Required properties: "lsi,zevio-usb" "qcom,ci-hdrc" "chipidea,usb2" + "xlnx,zynq-usb-2.20a" - reg: base address and length of the registers - interrupts: interrupt for the USB controller diff --git a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt index 64a4ca6cf96f..7d48f63db44e 100644 --- a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt +++ b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt @@ -5,6 +5,7 @@ Required properties: - "renesas,usbhs-r8a7790" - "renesas,usbhs-r8a7791" - "renesas,usbhs-r8a7794" + - "renesas,usbhs-r8a7795" - reg: Base address and length of the register for the USBHS - interrupts: Interrupt specifier for the USBHS - clocks: A list of phandle + clock specifier pairs diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index ac5f0c34ae00..82d2ac97af74 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -203,6 +203,7 @@ sitronix Sitronix Technology Corporation skyworks Skyworks Solutions, Inc. smsc Standard Microsystems Corporation snps Synopsys, Inc. +socionext Socionext Inc. solidrun SolidRun solomon Solomon Systech Limited sony Sony Corporation diff --git a/Documentation/gpio/board.txt b/Documentation/gpio/board.txt index b80606de545a..f59c43b6411b 100644 --- a/Documentation/gpio/board.txt +++ b/Documentation/gpio/board.txt @@ -21,8 +21,8 @@ exact way to do it depends on the GPIO controller providing the GPIOs, see the device tree bindings for your controller. GPIOs mappings are defined in the consumer device's node, in a property named -<function>-gpios, where <function> is the function the driver will request -through gpiod_get(). For example: +either <function>-gpios or <function>-gpio, where <function> is the function +the driver will request through gpiod_get(). For example: foo_device { compatible = "acme,foo"; @@ -31,7 +31,7 @@ through gpiod_get(). 
For example: <&gpio 16 GPIO_ACTIVE_HIGH>, /* green */ <&gpio 17 GPIO_ACTIVE_HIGH>; /* blue */ - power-gpios = <&gpio 1 GPIO_ACTIVE_LOW>; + power-gpio = <&gpio 1 GPIO_ACTIVE_LOW>; }; This property will make GPIOs 15, 16 and 17 available to the driver under the @@ -39,15 +39,24 @@ This property will make GPIOs 15, 16 and 17 available to the driver under the struct gpio_desc *red, *green, *blue, *power; - red = gpiod_get_index(dev, "led", 0); - green = gpiod_get_index(dev, "led", 1); - blue = gpiod_get_index(dev, "led", 2); + red = gpiod_get_index(dev, "led", 0, GPIOD_OUT_HIGH); + green = gpiod_get_index(dev, "led", 1, GPIOD_OUT_HIGH); + blue = gpiod_get_index(dev, "led", 2, GPIOD_OUT_HIGH); - power = gpiod_get(dev, "power"); + power = gpiod_get(dev, "power", GPIOD_OUT_HIGH); The led GPIOs will be active-high, while the power GPIO will be active-low (i.e. gpiod_is_active_low(power) will be true). +The second parameter of the gpiod_get() functions, the con_id string, has to be +the <function>-prefix of the GPIO suffixes ("gpios" or "gpio", automatically +looked up by the gpiod functions internally) used in the device tree. With above +"led-gpios" example, use the prefix without the "-" as con_id parameter: "led". + +Internally, the GPIO subsystem prefixes the GPIO suffix ("gpios" or "gpio") +with the string passed in con_id to get the resulting string +(snprintf(... "%s-%s", con_id, gpio_suffixes[]). + ACPI ---- ACPI also supports function names for GPIOs in a similar fashion to DT. @@ -142,13 +151,14 @@ The driver controlling "foo.0" will then be able to obtain its GPIOs as follows: struct gpio_desc *red, *green, *blue, *power; - red = gpiod_get_index(dev, "led", 0); - green = gpiod_get_index(dev, "led", 1); - blue = gpiod_get_index(dev, "led", 2); + red = gpiod_get_index(dev, "led", 0, GPIOD_OUT_HIGH); + green = gpiod_get_index(dev, "led", 1, GPIOD_OUT_HIGH); + blue = gpiod_get_index(dev, "led", 2, GPIOD_OUT_HIGH); - power = gpiod_get(dev, "power"); - gpiod_direction_output(power, 1); + power = gpiod_get(dev, "power", GPIOD_OUT_HIGH); -Since the "power" GPIO is mapped as active-low, its actual signal will be 0 -after this code. Contrary to the legacy integer GPIO interface, the active-low -property is handled during mapping and is thus transparent to GPIO consumers. +Since the "led" GPIOs are mapped as active-high, this example will switch their +signals to 1, i.e. enabling the LEDs. And for the "power" GPIO, which is mapped +as active-low, its actual signal will be 0 after this code. Contrary to the legacy +integer GPIO interface, the active-low property is handled during mapping and is +thus transparent to GPIO consumers. diff --git a/Documentation/gpio/consumer.txt b/Documentation/gpio/consumer.txt index a206639454ab..e000502fde20 100644 --- a/Documentation/gpio/consumer.txt +++ b/Documentation/gpio/consumer.txt @@ -39,6 +39,9 @@ device that displays digits), an additional index argument can be specified: const char *con_id, unsigned int idx, enum gpiod_flags flags) +For a more detailed description of the con_id parameter in the DeviceTree case +see Documentation/gpio/board.txt + The flags parameter is used to optionally specify a direction and initial value for the GPIO. 
Values can be: diff --git a/Documentation/hwmon/nct6775 b/Documentation/hwmon/nct6775 index f0dd3d2fec96..76add4c9cd68 100644 --- a/Documentation/hwmon/nct6775 +++ b/Documentation/hwmon/nct6775 @@ -32,6 +32,10 @@ Supported chips: Prefix: 'nct6792' Addresses scanned: ISA address retrieved from Super I/O registers Datasheet: Available from Nuvoton upon request + * Nuvoton NCT6793D + Prefix: 'nct6793' + Addresses scanned: ISA address retrieved from Super I/O registers + Datasheet: Available from Nuvoton upon request Authors: Guenter Roeck <linux@roeck-us.net> diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt index b85d000faeb4..c51f1146f3bd 100644 --- a/Documentation/input/multi-touch-protocol.txt +++ b/Documentation/input/multi-touch-protocol.txt @@ -361,7 +361,7 @@ For win8 devices with both T and C coordinates, the position mapping is ABS_MT_POSITION_X := T_X ABS_MT_POSITION_Y := T_Y ABS_MT_TOOL_X := C_X - ABS_MT_TOOL_X := C_Y + ABS_MT_TOOL_Y := C_Y Unfortunately, there is not enough information to specify both the touching ellipse and the tool ellipse, so one has to resort to approximations. One diff --git a/Documentation/networking/vrf.txt b/Documentation/networking/vrf.txt new file mode 100644 index 000000000000..031ef4a63485 --- /dev/null +++ b/Documentation/networking/vrf.txt @@ -0,0 +1,96 @@ +Virtual Routing and Forwarding (VRF) +==================================== +The VRF device combined with ip rules provides the ability to create virtual +routing and forwarding domains (aka VRFs, VRF-lite to be specific) in the +Linux network stack. One use case is the multi-tenancy problem where each +tenant has their own unique routing tables and in the very least need +different default gateways. + +Processes can be "VRF aware" by binding a socket to the VRF device. Packets +through the socket then use the routing table associated with the VRF +device. An important feature of the VRF device implementation is that it +impacts only Layer 3 and above so L2 tools (e.g., LLDP) are not affected +(ie., they do not need to be run in each VRF). The design also allows +the use of higher priority ip rules (Policy Based Routing, PBR) to take +precedence over the VRF device rules directing specific traffic as desired. + +In addition, VRF devices allow VRFs to be nested within namespaces. For +example network namespaces provide separation of network interfaces at L1 +(Layer 1 separation), VLANs on the interfaces within a namespace provide +L2 separation and then VRF devices provide L3 separation. + +Design +------ +A VRF device is created with an associated route table. Network interfaces +are then enslaved to a VRF device: + + +-----------------------------+ + | vrf-blue | ===> route table 10 + +-----------------------------+ + | | | + +------+ +------+ +-------------+ + | eth1 | | eth2 | ... | bond1 | + +------+ +------+ +-------------+ + | | + +------+ +------+ + | eth8 | | eth9 | + +------+ +------+ + +Packets received on an enslaved device and are switched to the VRF device +using an rx_handler which gives the impression that packets flow through +the VRF device. Similarly on egress routing rules are used to send packets +to the VRF device driver before getting sent out the actual interface. This +allows tcpdump on a VRF device to capture all packets into and out of the +VRF as a whole.[1] Similiarly, netfilter [2] and tc rules can be applied +using the VRF device to specify rules that apply to the VRF domain as a whole. 
+ +[1] Packets in the forwarded state do not flow through the device, so those + packets are not seen by tcpdump. Will revisit this limitation in a + future release. + +[2] Iptables on ingress is limited to NF_INET_PRE_ROUTING only with skb->dev + set to real ingress device and egress is limited to NF_INET_POST_ROUTING. + Will revisit this limitation in a future release. + + +Setup +----- +1. VRF device is created with an association to a FIB table. + e.g, ip link add vrf-blue type vrf table 10 + ip link set dev vrf-blue up + +2. Rules are added that send lookups to the associated FIB table when the + iif or oif is the VRF device. e.g., + ip ru add oif vrf-blue table 10 + ip ru add iif vrf-blue table 10 + + Set the default route for the table (and hence default route for the VRF). + e.g, ip route add table 10 prohibit default + +3. Enslave L3 interfaces to a VRF device. + e.g, ip link set dev eth1 master vrf-blue + + Local and connected routes for enslaved devices are automatically moved to + the table associated with VRF device. Any additional routes depending on + the enslaved device will need to be reinserted following the enslavement. + +4. Additional VRF routes are added to associated table. + e.g., ip route add table 10 ... + + +Applications +------------ +Applications that are to work within a VRF need to bind their socket to the +VRF device: + + setsockopt(sd, SOL_SOCKET, SO_BINDTODEVICE, dev, strlen(dev)+1); + +or to specify the output device using cmsg and IP_PKTINFO. + + +Limitations +----------- +VRF device currently only works for IPv4. Support for IPv6 is under development. + +Index of original ingress interface is not available via cmsg. Will address +soon. diff --git a/Documentation/power/pci.txt b/Documentation/power/pci.txt index 62328d76b55b..b0e911e0e8f5 100644 --- a/Documentation/power/pci.txt +++ b/Documentation/power/pci.txt @@ -979,20 +979,45 @@ every time right after the runtime_resume() callback has returned (alternatively, the runtime_suspend() callback will have to check if the device should really be suspended and return -EAGAIN if that is not the case). -The runtime PM of PCI devices is disabled by default. It is also blocked by -pci_pm_init() that runs the pm_runtime_forbid() helper function. If a PCI -driver implements the runtime PM callbacks and intends to use the runtime PM -framework provided by the PM core and the PCI subsystem, it should enable this -feature by executing the pm_runtime_enable() helper function. However, the -driver should not call the pm_runtime_allow() helper function unblocking -the runtime PM of the device. Instead, it should allow user space or some -platform-specific code to do that (user space can do it via sysfs), although -once it has called pm_runtime_enable(), it must be prepared to handle the +The runtime PM of PCI devices is enabled by default by the PCI core. PCI +device drivers do not need to enable it and should not attempt to do so. +However, it is blocked by pci_pm_init() that runs the pm_runtime_forbid() +helper function. In addition to that, the runtime PM usage counter of +each PCI device is incremented by local_pci_probe() before executing the +probe callback provided by the device's driver. + +If a PCI driver implements the runtime PM callbacks and intends to use the +runtime PM framework provided by the PM core and the PCI subsystem, it needs +to decrement the device's runtime PM usage counter in its probe callback +function. 
If it doesn't do that, the counter will always be different from +zero for the device and it will never be runtime-suspended. The simplest +way to do that is by calling pm_runtime_put_noidle(), but if the driver +wants to schedule an autosuspend right away, for example, it may call +pm_runtime_put_autosuspend() instead for this purpose. Generally, it +just needs to call a function that decrements the devices usage counter +from its probe routine to make runtime PM work for the device. + +It is important to remember that the driver's runtime_suspend() callback +may be executed right after the usage counter has been decremented, because +user space may already have cuased the pm_runtime_allow() helper function +unblocking the runtime PM of the device to run via sysfs, so the driver must +be prepared to cope with that. + +The driver itself should not call pm_runtime_allow(), though. Instead, it +should let user space or some platform-specific code do that (user space can +do it via sysfs as stated above), but it must be prepared to handle the runtime PM of the device correctly as soon as pm_runtime_allow() is called -(which may happen at any time). [It also is possible that user space causes -pm_runtime_allow() to be called via sysfs before the driver is loaded, so in -fact the driver has to be prepared to handle the runtime PM of the device as -soon as it calls pm_runtime_enable().] +(which may happen at any time, even before the driver is loaded). + +When the driver's remove callback runs, it has to balance the decrementation +of the device's runtime PM usage counter at the probe time. For this reason, +if it has decremented the counter in its probe callback, it must run +pm_runtime_get_noresume() in its remove callback. [Since the core carries +out a runtime resume of the device and bumps up the device's usage counter +before running the driver's remove callback, the runtime PM of the device +is effectively disabled for the duration of the remove execution and all +runtime PM helper functions incrementing the device's usage counter are +then effectively equivalent to pm_runtime_get_noresume().] The runtime PM framework works by processing requests to suspend or resume devices, or to check if they are idle (in which cases it is reasonable to diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c index 2bc8abc57fa0..6c6247aaa7b9 100644 --- a/Documentation/ptp/testptp.c +++ b/Documentation/ptp/testptp.c @@ -18,6 +18,7 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define _GNU_SOURCE +#define __SANE_USERSPACE_TYPES__ /* For PPC64, to get LL64 types */ #include <errno.h> #include <fcntl.h> #include <inttypes.h> diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt index f4cb0b2d5cd7..477927becacb 100644 --- a/Documentation/static-keys.txt +++ b/Documentation/static-keys.txt @@ -15,8 +15,8 @@ The updated API replacements are: DEFINE_STATIC_KEY_TRUE(key); DEFINE_STATIC_KEY_FALSE(key); -static_key_likely() -statick_key_unlikely() +static_branch_likely() +static_branch_unlikely() 0) Abstract diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index 6294b5186ae5..809ab6efcc74 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -54,13 +54,15 @@ default_qdisc -------------- The default queuing discipline to use for network devices. This allows -overriding the default queue discipline of pfifo_fast with an -alternative. 
Since the default queuing discipline is created with the -no additional parameters so is best suited to queuing disciplines that -work well without configuration like stochastic fair queue (sfq), -CoDel (codel) or fair queue CoDel (fq_codel). Don't use queuing disciplines -like Hierarchical Token Bucket or Deficit Round Robin which require setting -up classes and bandwidths. +overriding the default of pfifo_fast with an alternative. Since the default +queuing discipline is created without additional parameters so is best suited +to queuing disciplines that work well without configuration like stochastic +fair queue (sfq), CoDel (codel) or fair queue CoDel (fq_codel). Don't use +queuing disciplines like Hierarchical Token Bucket or Deficit Round Robin +which require setting up classes and bandwidths. Note that physical multiqueue +interfaces still use mq as root qdisc, which in turn uses this default for its +leaves. Virtual devices (like e.g. lo or veth) ignore this setting and instead +default to noqueue. Default: pfifo_fast busy_read diff --git a/Documentation/thermal/power_allocator.txt b/Documentation/thermal/power_allocator.txt index c3797b529991..a1ce2235f121 100644 --- a/Documentation/thermal/power_allocator.txt +++ b/Documentation/thermal/power_allocator.txt @@ -4,7 +4,7 @@ Power allocator governor tunables Trip points ----------- -The governor requires the following two passive trip points: +The governor works optimally with the following two passive trip points: 1. "switch on" trip point: temperature above which the governor control loop starts operating. This is the first passive trip diff --git a/MAINTAINERS b/MAINTAINERS index 7ba7ab749c85..9de185da5f5b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -615,9 +615,8 @@ F: Documentation/hwmon/fam15h_power F: drivers/hwmon/fam15h_power.c AMD GEODE CS5536 USB DEVICE CONTROLLER DRIVER -M: Thomas Dahlmann <dahlmann.thomas@arcor.de> L: linux-geode@lists.infradead.org (moderated for non-subscribers) -S: Supported +S: Orphan F: drivers/usb/gadget/udc/amd5536udc.* AMD GEODE PROCESSOR/CHIPSET SUPPORT @@ -808,6 +807,13 @@ S: Maintained F: drivers/video/fbdev/arcfb.c F: drivers/video/fbdev/core/fb_defio.c +ARCNET NETWORK LAYER +M: Michael Grzeschik <m.grzeschik@pengutronix.de> +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/arcnet/ +F: include/uapi/linux/if_arcnet.h + ARM MFM AND FLOPPY DRIVERS M: Ian Molton <spyro@f2s.com> S: Maintained @@ -888,11 +894,12 @@ M: Lennert Buytenhek <kernel@wantstofly.org> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -ARM/Allwinner A1X SoC support +ARM/Allwinner sunXi SoC support M: Maxime Ripard <maxime.ripard@free-electrons.com> +M: Chen-Yu Tsai <wens@csie.org> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -N: sun[x4567]i +N: sun[x456789]i ARM/Allwinner SoC Clock Support M: Emilio López <emilio@elopez.com.ar> @@ -3394,7 +3401,6 @@ F: drivers/staging/dgnc/ DIGI EPCA PCI PRODUCTS M: Lidza Louina <lidza.louina@gmail.com> -M: Mark Hounschell <markh@compro.net> M: Daeseok Youn <daeseok.youn@gmail.com> L: driverdev-devel@linuxdriverproject.org S: Maintained @@ -3586,6 +3592,13 @@ F: drivers/gpu/drm/i915/ F: include/drm/i915* F: include/uapi/drm/i915* +DRM DRIVERS FOR ATMEL HLCDC +M: Boris Brezillon <boris.brezillon@free-electrons.com> +L: dri-devel@lists.freedesktop.org +S: Supported +F: drivers/gpu/drm/atmel-hlcdc/ +F: Documentation/devicetree/bindings/drm/atmel/ + DRM DRIVERS FOR EXYNOS M: Inki Dae <inki.dae@samsung.com> M: 
Joonyoung Shim <jy0922.shim@samsung.com> @@ -3614,6 +3627,14 @@ S: Maintained F: drivers/gpu/drm/imx/ F: Documentation/devicetree/bindings/drm/imx/ +DRM DRIVERS FOR GMA500 (Poulsbo, Moorestown and derivative chipsets) +M: Patrik Jakobsson <patrik.r.jakobsson@gmail.com> +L: dri-devel@lists.freedesktop.org +T: git git://github.com/patjak/drm-gma500 +S: Maintained +F: drivers/gpu/drm/gma500 +F: include/drm/gma500* + DRM DRIVERS FOR NVIDIA TEGRA M: Thierry Reding <thierry.reding@gmail.com> M: Terje Bergström <tbergstrom@nvidia.com> @@ -3998,7 +4019,7 @@ S: Maintained F: sound/usb/misc/ua101.c EXTENSIBLE FIRMWARE INTERFACE (EFI) -M: Matt Fleming <matt.fleming@intel.com> +M: Matt Fleming <matt@codeblueprint.co.uk> L: linux-efi@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git S: Maintained @@ -4013,7 +4034,7 @@ F: include/linux/efi*.h EFI VARIABLE FILESYSTEM M: Matthew Garrett <matthew.garrett@nebula.com> M: Jeremy Kerr <jk@ozlabs.org> -M: Matt Fleming <matt.fleming@intel.com> +M: Matt Fleming <matt@codeblueprint.co.uk> T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git L: linux-efi@vger.kernel.org S: Maintained @@ -5952,7 +5973,7 @@ F: virt/kvm/ KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V M: Joerg Roedel <joro@8bytes.org> L: kvm@vger.kernel.org -W: http://kvm.qumranet.com +W: http://www.linux-kvm.org/ S: Maintained F: arch/x86/include/asm/svm.h F: arch/x86/kvm/svm.c @@ -5960,7 +5981,7 @@ F: arch/x86/kvm/svm.c KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC M: Alexander Graf <agraf@suse.com> L: kvm-ppc@vger.kernel.org -W: http://kvm.qumranet.com +W: http://www.linux-kvm.org/ T: git git://github.com/agraf/linux-2.6.git S: Supported F: arch/powerpc/include/asm/kvm* @@ -6452,11 +6473,11 @@ F: drivers/hwmon/ltc4261.c LTP (Linux Test Project) M: Mike Frysinger <vapier@gentoo.org> M: Cyril Hrubis <chrubis@suse.cz> -M: Wanlong Gao <gaowanlong@cn.fujitsu.com> +M: Wanlong Gao <wanlong.gao@gmail.com> M: Jan Stancek <jstancek@redhat.com> M: Stanislav Kholmanskikh <stanislav.kholmanskikh@oracle.com> M: Alexey Kodanev <alexey.kodanev@oracle.com> -L: ltp-list@lists.sourceforge.net (subscribers-only) +L: ltp@lists.linux.it (subscribers-only) W: http://linux-test-project.github.io/ T: git git://github.com/linux-test-project/ltp.git S: Maintained @@ -6773,7 +6794,6 @@ F: drivers/scsi/megaraid/ MELLANOX ETHERNET DRIVER (mlx4_en) M: Amir Vadai <amirv@mellanox.com> -M: Ido Shamay <idos@mellanox.com> L: netdev@vger.kernel.org S: Supported W: http://www.mellanox.com @@ -8500,7 +8520,6 @@ F: Documentation/networking/LICENSE.qla3xxx F: drivers/net/ethernet/qlogic/qla3xxx.* QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER -M: Shahed Shaikh <shahed.shaikh@qlogic.com> M: Dept-GELinuxNICDev@qlogic.com L: netdev@vger.kernel.org S: Supported @@ -9097,6 +9116,15 @@ S: Supported F: Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt F: drivers/net/ethernet/synopsys/dwc_eth_qos.c +SYNOPSYS DESIGNWARE I2C DRIVER +M: Andy Shevchenko <andriy.shevchenko@linux.intel.com> +M: Jarkko Nikula <jarkko.nikula@linux.intel.com> +M: Mika Westerberg <mika.westerberg@linux.intel.com> +L: linux-i2c@vger.kernel.org +S: Maintained +F: drivers/i2c/busses/i2c-designware-* +F: include/linux/platform_data/i2c-designware.h + SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER M: Seungwon Jeon <tgih.jun@samsung.com> M: Jaehoon Chung <jh80.chung@samsung.com> @@ -9904,13 +9932,12 @@ F: drivers/staging/media/lirc/ STAGING - LUSTRE PARALLEL FILESYSTEM M: Oleg Drokin <oleg.drokin@intel.com> M: Andreas Dilger 
<andreas.dilger@intel.com> -L: HPDD-discuss@lists.01.org (moderated for non-subscribers) -W: http://lustre.opensfs.org/ +L: lustre-devel@lists.lustre.org (moderated for non-subscribers) +W: http://wiki.lustre.org/ S: Maintained F: drivers/staging/lustre STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec) -M: Julian Andres Klode <jak@jak-linux.org> M: Marc Dietrich <marvin24@gmx.de> L: ac100@lists.launchpad.net (moderated for non-subscribers) L: linux-tegra@vger.kernel.org @@ -10338,6 +10365,16 @@ F: include/uapi/linux/thermal.h F: include/linux/cpu_cooling.h F: Documentation/devicetree/bindings/thermal/ +THERMAL/CPU_COOLING +M: Amit Daniel Kachhap <amit.kachhap@gmail.com> +M: Viresh Kumar <viresh.kumar@linaro.org> +M: Javi Merino <javi.merino@arm.com> +L: linux-pm@vger.kernel.org +S: Supported +F: Documentation/thermal/cpu-cooling-api.txt +F: drivers/thermal/cpu_cooling.c +F: include/linux/cpu_cooling.h + THINGM BLINK(1) USB RGB LED DRIVER M: Vivien Didelot <vivien.didelot@savoirfairelinux.com> S: Maintained @@ -11187,7 +11224,7 @@ F: drivers/vlynq/vlynq.c F: include/linux/vlynq.h VME SUBSYSTEM -M: Martyn Welch <martyn.welch@ge.com> +M: Martyn Welch <martyn@welchs.me.uk> M: Manohar Vanga <manohar.vanga@gmail.com> M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> L: devel@driverdev.osuosl.org @@ -11239,7 +11276,6 @@ VOLTAGE AND CURRENT REGULATOR FRAMEWORK M: Liam Girdwood <lgirdwood@gmail.com> M: Mark Brown <broonie@kernel.org> L: linux-kernel@vger.kernel.org -W: http://opensource.wolfsonmicro.com/node/15 W: http://www.slimlogic.co.uk/?p=48 T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator.git S: Supported @@ -11253,6 +11289,7 @@ L: netdev@vger.kernel.org S: Maintained F: drivers/net/vrf.c F: include/net/vrf.h +F: Documentation/networking/vrf.txt VT1211 HARDWARE MONITOR DRIVER M: Juerg Haefliger <juergh@gmail.com> @@ -11364,21 +11401,10 @@ W: http://oops.ghostprotocols.net:81/blog S: Maintained F: drivers/net/wireless/wl3501* -WM97XX TOUCHSCREEN DRIVERS -M: Mark Brown <broonie@kernel.org> -M: Liam Girdwood <lrg@slimlogic.co.uk> -L: linux-input@vger.kernel.org -T: git git://opensource.wolfsonmicro.com/linux-2.6-touch -W: http://opensource.wolfsonmicro.com/node/7 -S: Supported -F: drivers/input/touchscreen/*wm97* -F: include/linux/wm97xx.h - WOLFSON MICROELECTRONICS DRIVERS L: patches@opensource.wolfsonmicro.com -T: git git://opensource.wolfsonmicro.com/linux-2.6-asoc -T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus -W: http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices +T: git https://github.com/CirrusLogic/linux-drivers.git +W: https://github.com/CirrusLogic/linux-drivers/wiki S: Supported F: Documentation/hwmon/wm83?? 
F: arch/arm/mach-s3c64xx/mach-crag6410* @@ -11649,6 +11675,7 @@ F: drivers/tty/serial/zs.* ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR M: Minchan Kim <minchan@kernel.org> M: Nitin Gupta <ngupta@vflare.org> +R: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com> L: linux-mm@kvack.org S: Maintained F: mm/zsmalloc.c @@ -1,8 +1,8 @@ VERSION = 4 PATCHLEVEL = 3 SUBLEVEL = 0 -EXTRAVERSION = -rc1 -NAME = Hurr durr I'ma sheep +EXTRAVERSION = -rc7 +NAME = Blurry Fish Butt # *DOCUMENTATION* # To see a list of typical targets execute "make help" diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h index f05bdb4b1cb9..ff4049155c84 100644 --- a/arch/alpha/include/asm/io.h +++ b/arch/alpha/include/asm/io.h @@ -297,7 +297,9 @@ static inline void __iomem * ioremap_nocache(unsigned long offset, unsigned long size) { return ioremap(offset, size); -} +} + +#define ioremap_uc ioremap_nocache static inline void iounmap(volatile void __iomem *addr) { diff --git a/arch/alpha/include/asm/word-at-a-time.h b/arch/alpha/include/asm/word-at-a-time.h index 6b340d0f1521..902e6ab00a06 100644 --- a/arch/alpha/include/asm/word-at-a-time.h +++ b/arch/alpha/include/asm/word-at-a-time.h @@ -52,4 +52,6 @@ static inline unsigned long find_zero(unsigned long bits) #endif } +#define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1) + #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 2804648c8ff4..2d6efcff3bf3 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -117,6 +117,6 @@ handle_irq(int irq) } irq_enter(); - generic_handle_irq_desc(irq, desc); + generic_handle_irq_desc(desc); irq_exit(); } diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c index cded02c890aa..5f387ee5b5c5 100644 --- a/arch/alpha/kernel/pci.c +++ b/arch/alpha/kernel/pci.c @@ -242,7 +242,12 @@ pci_restore_srm_config(void) void pcibios_fixup_bus(struct pci_bus *bus) { - struct pci_dev *dev; + struct pci_dev *dev = bus->self; + + if (pci_has_flag(PCI_PROBE_ONLY) && dev && + (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { + pci_read_bridge_bases(bus); + } list_for_each_entry(dev, &bus->devices, bus_list) { pdev_save_srm_config(dev); diff --git a/arch/alpha/lib/udelay.c b/arch/alpha/lib/udelay.c index 69d52aa37bae..f2d81ff38aa6 100644 --- a/arch/alpha/lib/udelay.c +++ b/arch/alpha/lib/udelay.c @@ -30,6 +30,7 @@ __delay(int loops) " bgt %0,1b" : "=&r" (tmp), "=r" (loops) : "1"(loops)); } +EXPORT_SYMBOL(__delay); #ifdef CONFIG_SMP #define LPJ cpu_data[smp_processor_id()].loops_per_jiffy diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index 7611b10a2d23..0b10ef2a4372 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild @@ -48,4 +48,5 @@ generic-y += types.h generic-y += ucontext.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c index d9e44b62df05..4ffd1855f1bd 100644 --- a/arch/arc/kernel/mcip.c +++ b/arch/arc/kernel/mcip.c @@ -252,7 +252,7 @@ static struct irq_chip idu_irq_chip = { static int idu_first_irq; -static void idu_cascade_isr(unsigned int __core_irq, struct irq_desc *desc) +static void idu_cascade_isr(struct irq_desc *desc) { struct irq_domain *domain = irq_desc_get_handler_data(desc); unsigned int core_irq = irq_desc_get_irq(desc); diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 7451b447cc2d..2c2b28ee4811 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -54,6 +54,14 @@ AS += -EL LD += -EL 
endif +# +# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and +# later may result in code being generated that handles signed short and signed +# char struct members incorrectly. So disable it. +# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932) +# +KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra) + # This selects which instruction set is used. # Note that GCC does not numerically define an architecture version # macro, but instead defines a whole series of macros which makes diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index 233159d2eaab..bb8fa023d574 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -578,7 +578,7 @@ dtb-$(CONFIG_MACH_SUN4I) += \ sun4i-a10-hackberry.dtb \ sun4i-a10-hyundai-a7hd.dtb \ sun4i-a10-inet97fv2.dtb \ - sun4i-a10-itead-iteaduino-plus.dts \ + sun4i-a10-itead-iteaduino-plus.dtb \ sun4i-a10-jesurun-q5.dtb \ sun4i-a10-marsboard.dtb \ sun4i-a10-mini-xplus.dtb \ diff --git a/arch/arm/boot/dts/am335x-phycore-som.dtsi b/arch/arm/boot/dts/am335x-phycore-som.dtsi index 4d28fc3aac69..5dd084f3c81c 100644 --- a/arch/arm/boot/dts/am335x-phycore-som.dtsi +++ b/arch/arm/boot/dts/am335x-phycore-som.dtsi @@ -252,10 +252,10 @@ }; vdd1_reg: regulator@2 { - /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */ + /* VDD_MPU voltage limits 0.95V - 1.325V with +/-4% tolerance */ regulator-name = "vdd_mpu"; regulator-min-microvolt = <912500>; - regulator-max-microvolt = <1312500>; + regulator-max-microvolt = <1378000>; regulator-boot-on; regulator-always-on; }; diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts index 3a05b94f59ed..d55e3ea89fda 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts @@ -98,13 +98,6 @@ pinctrl-0 = <&extcon_usb1_pins>; }; - extcon_usb2: extcon_usb2 { - compatible = "linux,extcon-usb-gpio"; - id-gpio = <&gpio7 24 GPIO_ACTIVE_HIGH>; - pinctrl-names = "default"; - pinctrl-0 = <&extcon_usb2_pins>; - }; - hdmi0: connector { compatible = "hdmi-connector"; label = "hdmi"; @@ -326,12 +319,6 @@ >; }; - extcon_usb2_pins: extcon_usb2_pins { - pinctrl-single,pins = < - 0x3e8 (PIN_INPUT_PULLUP | MUX_MODE14) /* uart1_ctsn.gpio7_24 */ - >; - }; - tpd12s015_pins: pinmux_tpd12s015_pins { pinctrl-single,pins = < 0x3b0 (PIN_OUTPUT | MUX_MODE14) /* gpio7_10 CT_CP_HPD */ @@ -415,11 +402,12 @@ /* SMPS9 unused */ ldo1_reg: ldo1 { - /* VDD_SD */ + /* VDD_SD / VDDSHV8 */ regulator-name = "ldo1"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <3300000>; regulator-boot-on; + regulator-always-on; }; ldo2_reg: ldo2 { @@ -432,7 +420,7 @@ }; ldo3_reg: ldo3 { - /* VDDA_1V8_PHY */ + /* VDDA_1V8_PHYA */ regulator-name = "ldo3"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; @@ -440,6 +428,15 @@ regulator-boot-on; }; + ldo4_reg: ldo4 { + /* VDDA_1V8_PHYB */ + regulator-name = "ldo4"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + }; + ldo9_reg: ldo9 { /* VDD_RTC */ regulator-name = "ldo9"; @@ -495,6 +492,14 @@ gpio-controller; #gpio-cells = <2>; }; + + extcon_usb2: tps659038_usb { + compatible = "ti,palmas-usb-vid"; + ti,enable-vbus-detection; + ti,enable-id-detection; + id-gpios = <&gpio7 24 GPIO_ACTIVE_HIGH>; + }; + }; tmp102: tmp102@48 { @@ -517,7 +522,8 @@ mcp_rtc: rtc@6f { compatible = "microchip,mcp7941x"; reg = <0x6f>; - interrupts = <GIC_SPI 2 IRQ_TYPE_EDGE_RISING>; /* IRQ_SYS_1N */ + 
interrupts-extended = <&crossbar_mpu GIC_SPI 2 IRQ_TYPE_EDGE_RISING>, + <&dra7_pmx_core 0x424>; pinctrl-names = "default"; pinctrl-0 = <&mcp79410_pins_default>; @@ -579,7 +585,6 @@ pinctrl-0 = <&mmc1_pins_default>; vmmc-supply = <&ldo1_reg>; - vmmc_aux-supply = <&vdd_3v3>; bus-width = <4>; cd-gpios = <&gpio6 27 0>; /* gpio 219 */ }; @@ -623,6 +628,14 @@ }; &usb2 { + /* + * Stand alone usage is peripheral only. + * However, with some resistor modifications + * this port can be used via expansion connectors + * as "host" or "dual-role". If so, provide + * the necessary dr_mode override in the expansion + * board's DT. + */ dr_mode = "peripheral"; }; @@ -681,7 +694,7 @@ &hdmi { status = "ok"; - vdda-supply = <&ldo3_reg>; + vdda-supply = <&ldo4_reg>; pinctrl-names = "default"; pinctrl-0 = <&hdmi_pins>; diff --git a/arch/arm/boot/dts/armada-385-db-ap.dts b/arch/arm/boot/dts/armada-385-db-ap.dts index 89f5a95954ed..4047621b137e 100644 --- a/arch/arm/boot/dts/armada-385-db-ap.dts +++ b/arch/arm/boot/dts/armada-385-db-ap.dts @@ -46,7 +46,7 @@ / { model = "Marvell Armada 385 Access Point Development Board"; - compatible = "marvell,a385-db-ap", "marvell,armada385", "marvell,armada38x"; + compatible = "marvell,a385-db-ap", "marvell,armada385", "marvell,armada380"; chosen { stdout-path = "serial1:115200n8"; diff --git a/arch/arm/boot/dts/berlin2q.dtsi b/arch/arm/boot/dts/berlin2q.dtsi index 63a48490e2f9..d4dbd28d348c 100644 --- a/arch/arm/boot/dts/berlin2q.dtsi +++ b/arch/arm/boot/dts/berlin2q.dtsi @@ -152,7 +152,7 @@ }; usb_phy2: phy@a2f400 { - compatible = "marvell,berlin2-usb-phy"; + compatible = "marvell,berlin2cd-usb-phy"; reg = <0xa2f400 0x128>; #phy-cells = <0>; resets = <&chip_rst 0x104 14>; @@ -170,7 +170,7 @@ }; usb_phy0: phy@b74000 { - compatible = "marvell,berlin2-usb-phy"; + compatible = "marvell,berlin2cd-usb-phy"; reg = <0xb74000 0x128>; #phy-cells = <0>; resets = <&chip_rst 0x104 12>; @@ -178,7 +178,7 @@ }; usb_phy1: phy@b78000 { - compatible = "marvell,berlin2-usb-phy"; + compatible = "marvell,berlin2cd-usb-phy"; reg = <0xb78000 0x128>; #phy-cells = <0>; resets = <&chip_rst 0x104 13>; diff --git a/arch/arm/boot/dts/dm8148-evm.dts b/arch/arm/boot/dts/dm8148-evm.dts index 92bacd3c8fab..109fd4711647 100644 --- a/arch/arm/boot/dts/dm8148-evm.dts +++ b/arch/arm/boot/dts/dm8148-evm.dts @@ -19,10 +19,10 @@ &cpsw_emac0 { phy_id = <&davinci_mdio>, <0>; - phy-mode = "mii"; + phy-mode = "rgmii"; }; &cpsw_emac1 { phy_id = <&davinci_mdio>, <1>; - phy-mode = "mii"; + phy-mode = "rgmii"; }; diff --git a/arch/arm/boot/dts/dm8148-t410.dts b/arch/arm/boot/dts/dm8148-t410.dts index 8c4bbc7573df..79838dd8dee7 100644 --- a/arch/arm/boot/dts/dm8148-t410.dts +++ b/arch/arm/boot/dts/dm8148-t410.dts @@ -8,7 +8,7 @@ #include "dm814x.dtsi" / { - model = "DM8148 EVM"; + model = "HP t410 Smart Zero Client"; compatible = "hp,t410", "ti,dm8148"; memory { @@ -19,10 +19,10 @@ &cpsw_emac0 { phy_id = <&davinci_mdio>, <0>; - phy-mode = "mii"; + phy-mode = "rgmii"; }; &cpsw_emac1 { phy_id = <&davinci_mdio>, <1>; - phy-mode = "mii"; + phy-mode = "rgmii"; }; diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi index 972c9c9e885b..7988b42e5764 100644 --- a/arch/arm/boot/dts/dm814x.dtsi +++ b/arch/arm/boot/dts/dm814x.dtsi @@ -181,9 +181,9 @@ ti,hwmods = "timer3"; }; - control: control@160000 { + control: control@140000 { compatible = "ti,dm814-scm", "simple-bus"; - reg = <0x160000 0x16d000>; + reg = <0x140000 0x16d000>; #address-cells = <1>; #size-cells = <1>; ranges = <0 0x160000 0x16d000>; @@ -321,9 
+321,9 @@ mac-address = [ 00 00 00 00 00 00 ]; }; - phy_sel: cpsw-phy-sel@0x48160650 { + phy_sel: cpsw-phy-sel@48140650 { compatible = "ti,am3352-cpsw-phy-sel"; - reg= <0x48160650 0x4>; + reg= <0x48140650 0x4>; reg-names = "gmii-sel"; }; }; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 5d65db9ebc2b..e289c706d27d 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -120,9 +120,10 @@ reg = <0x0 0x1400>; #address-cells = <1>; #size-cells = <1>; + ranges = <0 0x0 0x1400>; pbias_regulator: pbias_regulator { - compatible = "ti,pbias-omap"; + compatible = "ti,pbias-dra7", "ti,pbias-omap"; reg = <0xe00 0x4>; syscon = <&scm_conf>; pbias_mmc_reg: pbias_mmc_omap5 { @@ -1417,7 +1418,7 @@ ti,irqs-safe-map = <0>; }; - mac: ethernet@4a100000 { + mac: ethernet@48484000 { compatible = "ti,dra7-cpsw","ti,cpsw"; ti,hwmods = "gmac"; clocks = <&dpll_gmac_ck>, <&gmac_gmii_ref_clk_div>; diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi index ca0e3c15977f..294cfe40388d 100644 --- a/arch/arm/boot/dts/exynos4412.dtsi +++ b/arch/arm/boot/dts/exynos4412.dtsi @@ -98,6 +98,7 @@ opp-hz = /bits/ 64 <800000000>; opp-microvolt = <1000000>; clock-latency-ns = <200000>; + opp-suspend; }; opp07 { opp-hz = /bits/ 64 <900000000>; diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts index 15aea760c1da..c625e71217aa 100644 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts @@ -197,6 +197,7 @@ regulator-name = "P1.8V_LDO_OUT10"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; }; ldo11_reg: LDO11 { diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts index 8f4d76c5e11c..1b95da79293c 100644 --- a/arch/arm/boot/dts/exynos5420-peach-pit.dts +++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts @@ -915,6 +915,11 @@ }; }; +&pmu_system_controller { + assigned-clocks = <&pmu_system_controller 0>; + assigned-clock-parents = <&clock CLK_FIN_PLL>; +}; + &rtc { status = "okay"; clocks = <&clock CLK_RTC>, <&max77802 MAX77802_CLK_32K_AP>; diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi index df9aee92ecf4..1b3d6c769a3c 100644 --- a/arch/arm/boot/dts/exynos5420.dtsi +++ b/arch/arm/boot/dts/exynos5420.dtsi @@ -1117,7 +1117,7 @@ interrupt-parent = <&combiner>; interrupts = <3 0>; clock-names = "sysmmu", "master"; - clocks = <&clock CLK_SMMU_FIMD1M0>, <&clock CLK_FIMD1>; + clocks = <&clock CLK_SMMU_FIMD1M1>, <&clock CLK_FIMD1>; power-domains = <&disp_pd>; #iommu-cells = <0>; }; diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi index 79ffdfe712aa..3b43e57845ae 100644 --- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi +++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi @@ -472,7 +472,6 @@ */ pinctrl-0 = <&pwm0_out &pwm1_out &pwm2_out &pwm3_out>; pinctrl-names = "default"; - samsung,pwm-outputs = <0>; status = "okay"; }; diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts index 7d5b386b5ae6..8f40c7e549bd 100644 --- a/arch/arm/boot/dts/exynos5800-peach-pi.dts +++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts @@ -878,6 +878,11 @@ }; }; +&pmu_system_controller { + assigned-clocks = <&pmu_system_controller 0>; + assigned-clock-parents = <&clock CLK_FIN_PLL>; +}; + &rtc { status = "okay"; clocks = <&clock CLK_RTC>, <&max77802 
MAX77802_CLK_32K_AP>; diff --git a/arch/arm/boot/dts/imx53-qsrb.dts b/arch/arm/boot/dts/imx53-qsrb.dts index 66e47de5e826..96d7eede412e 100644 --- a/arch/arm/boot/dts/imx53-qsrb.dts +++ b/arch/arm/boot/dts/imx53-qsrb.dts @@ -36,7 +36,7 @@ pinctrl-0 = <&pinctrl_pmic>; reg = <0x08>; interrupt-parent = <&gpio5>; - interrupts = <23 0x8>; + interrupts = <23 IRQ_TYPE_LEVEL_HIGH>; regulators { sw1_reg: sw1a { regulator-name = "SW1"; diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index c3e3ca9362fb..cd170376eaca 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi @@ -15,6 +15,7 @@ #include <dt-bindings/clock/imx5-clock.h> #include <dt-bindings/gpio/gpio.h> #include <dt-bindings/input/input.h> +#include <dt-bindings/interrupt-controller/irq.h> / { aliases { diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi index 3373fd958e95..a50356243888 100644 --- a/arch/arm/boot/dts/imx6qdl-rex.dtsi +++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi @@ -35,7 +35,6 @@ compatible = "regulator-fixed"; reg = <1>; pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_usbh1>; regulator-name = "usbh1_vbus"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; @@ -47,7 +46,6 @@ compatible = "regulator-fixed"; reg = <2>; pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_usbotg>; regulator-name = "usb_otg_vbus"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi index b738ce0f9d9b..6e444bb873f9 100644 --- a/arch/arm/boot/dts/imx7d.dtsi +++ b/arch/arm/boot/dts/imx7d.dtsi @@ -588,10 +588,10 @@ status = "disabled"; }; - uart2: serial@30870000 { + uart2: serial@30890000 { compatible = "fsl,imx7d-uart", "fsl,imx6q-uart"; - reg = <0x30870000 0x10000>; + reg = <0x30890000 0x10000>; interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clks IMX7D_UART2_ROOT_CLK>, <&clks IMX7D_UART2_ROOT_CLK>; diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts index 91146c318798..5b0430041ec6 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts +++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts @@ -12,7 +12,7 @@ / { model = "LogicPD Zoom DM3730 Torpedo Development Kit"; - compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap36xx"; + compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap3630", "ti,omap3"; gpio_keys { compatible = "gpio-keys"; diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi index 548441384d2a..8c77c87660cd 100644 --- a/arch/arm/boot/dts/meson.dtsi +++ b/arch/arm/boot/dts/meson.dtsi @@ -67,7 +67,7 @@ timer@c1109940 { compatible = "amlogic,meson6-timer"; - reg = <0xc1109940 0x14>; + reg = <0xc1109940 0x18>; interrupts = <0 10 1>; }; @@ -80,36 +80,37 @@ wdt: watchdog@c1109900 { compatible = "amlogic,meson6-wdt"; reg = <0xc1109900 0x8>; + interrupts = <0 0 1>; }; uart_AO: serial@c81004c0 { compatible = "amlogic,meson-uart"; - reg = <0xc81004c0 0x14>; + reg = <0xc81004c0 0x18>; interrupts = <0 90 1>; clocks = <&clk81>; status = "disabled"; }; - uart_A: serial@c81084c0 { + uart_A: serial@c11084c0 { compatible = "amlogic,meson-uart"; - reg = <0xc81084c0 0x14>; - interrupts = <0 90 1>; + reg = <0xc11084c0 0x18>; + interrupts = <0 26 1>; clocks = <&clk81>; status = "disabled"; }; - uart_B: serial@c81084dc { + uart_B: serial@c11084dc { compatible = "amlogic,meson-uart"; - reg = <0xc81084dc 0x14>; - interrupts = <0 90 1>; + reg = <0xc11084dc 0x18>; 
+ interrupts = <0 75 1>; clocks = <&clk81>; status = "disabled"; }; - uart_C: serial@c8108700 { + uart_C: serial@c1108700 { compatible = "amlogic,meson-uart"; - reg = <0xc8108700 0x14>; - interrupts = <0 90 1>; + reg = <0xc1108700 0x18>; + interrupts = <0 93 1>; clocks = <&clk81>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi index 2390f387c271..798dda072b2a 100644 --- a/arch/arm/boot/dts/omap2430.dtsi +++ b/arch/arm/boot/dts/omap2430.dtsi @@ -56,6 +56,7 @@ reg = <0x270 0x240>; #address-cells = <1>; #size-cells = <1>; + ranges = <0 0x270 0x240>; scm_clocks: clocks { #address-cells = <1>; @@ -63,7 +64,7 @@ }; pbias_regulator: pbias_regulator { - compatible = "ti,pbias-omap"; + compatible = "ti,pbias-omap2", "ti,pbias-omap"; reg = <0x230 0x4>; syscon = <&scm_conf>; pbias_mmc_reg: pbias_mmc_omap2430 { diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts index a5474113cd50..67659a0ed13e 100644 --- a/arch/arm/boot/dts/omap3-beagle.dts +++ b/arch/arm/boot/dts/omap3-beagle.dts @@ -202,7 +202,7 @@ tfp410_pins: pinmux_tfp410_pins { pinctrl-single,pins = < - 0x194 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */ + 0x196 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */ >; }; diff --git a/arch/arm/boot/dts/omap3-evm-37xx.dts b/arch/arm/boot/dts/omap3-evm-37xx.dts index 16e8ce350dda..bb339d1648e0 100644 --- a/arch/arm/boot/dts/omap3-evm-37xx.dts +++ b/arch/arm/boot/dts/omap3-evm-37xx.dts @@ -13,7 +13,7 @@ / { model = "TI OMAP37XX EVM (TMDSEVM3730)"; - compatible = "ti,omap3-evm-37xx", "ti,omap36xx"; + compatible = "ti,omap3-evm-37xx", "ti,omap3630", "ti,omap3"; memory { device_type = "memory"; diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi index d5e5cd449b16..2230e1c03320 100644 --- a/arch/arm/boot/dts/omap3-igep.dtsi +++ b/arch/arm/boot/dts/omap3-igep.dtsi @@ -78,12 +78,6 @@ >; }; - smsc9221_pins: pinmux_smsc9221_pins { - pinctrl-single,pins = < - 0x1a2 (PIN_INPUT | MUX_MODE4) /* mcspi1_cs2.gpio_176 */ - >; - }; - i2c1_pins: pinmux_i2c1_pins { pinctrl-single,pins = < 0x18a (PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */ diff --git a/arch/arm/boot/dts/omap3-igep0020-common.dtsi b/arch/arm/boot/dts/omap3-igep0020-common.dtsi index e458c2185e3c..5ad688c57a00 100644 --- a/arch/arm/boot/dts/omap3-igep0020-common.dtsi +++ b/arch/arm/boot/dts/omap3-igep0020-common.dtsi @@ -156,6 +156,12 @@ OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */ >; }; + + smsc9221_pins: pinmux_smsc9221_pins { + pinctrl-single,pins = < + OMAP3_CORE1_IOPAD(0x21d2, PIN_INPUT | MUX_MODE4) /* mcspi1_cs2.gpio_176 */ + >; + }; }; &omap3_pmx_core2 { diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi index 69a40cfc1f29..8a2b25332b8c 100644 --- a/arch/arm/boot/dts/omap3.dtsi +++ b/arch/arm/boot/dts/omap3.dtsi @@ -113,10 +113,22 @@ }; scm_conf: scm_conf@270 { - compatible = "syscon"; + compatible = "syscon", "simple-bus"; reg = <0x270 0x330>; #address-cells = <1>; #size-cells = <1>; + ranges = <0 0x270 0x330>; + + pbias_regulator: pbias_regulator { + compatible = "ti,pbias-omap3", "ti,pbias-omap"; + reg = <0x2b0 0x4>; + syscon = <&scm_conf>; + pbias_mmc_reg: pbias_mmc_omap2430 { + regulator-name = "pbias_mmc_omap2430"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3000000>; + }; + }; scm_clocks: clocks { #address-cells = <1>; @@ -202,17 +214,6 @@ dma-requests = <96>; }; - pbias_regulator: pbias_regulator { - compatible = "ti,pbias-omap"; - reg = <0x2b0 
0x4>; - syscon = <&scm_conf>; - pbias_mmc_reg: pbias_mmc_omap2430 { - regulator-name = "pbias_mmc_omap2430"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <3000000>; - }; - }; - gpio1: gpio@48310000 { compatible = "ti,omap3-gpio"; reg = <0x48310000 0x200>; diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi index abc4473e6f8a..5a206c100ce2 100644 --- a/arch/arm/boot/dts/omap4.dtsi +++ b/arch/arm/boot/dts/omap4.dtsi @@ -196,9 +196,10 @@ reg = <0x5a0 0x170>; #address-cells = <1>; #size-cells = <1>; + ranges = <0 0x5a0 0x170>; pbias_regulator: pbias_regulator { - compatible = "ti,pbias-omap"; + compatible = "ti,pbias-omap4", "ti,pbias-omap"; reg = <0x60 0x4>; syscon = <&omap4_padconf_global>; pbias_mmc_reg: pbias_mmc_omap4 { diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts index 3cc8f357d5b8..3cb030f9d2c4 100644 --- a/arch/arm/boot/dts/omap5-uevm.dts +++ b/arch/arm/boot/dts/omap5-uevm.dts @@ -174,8 +174,8 @@ i2c5_pins: pinmux_i2c5_pins { pinctrl-single,pins = < - 0x184 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */ - 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */ + 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */ + 0x188 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */ >; }; diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index 4205a8ac9ddb..4c04389dab32 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi @@ -185,9 +185,10 @@ reg = <0x5a0 0xec>; #address-cells = <1>; #size-cells = <1>; + ranges = <0 0x5a0 0xec>; pbias_regulator: pbias_regulator { - compatible = "ti,pbias-omap"; + compatible = "ti,pbias-omap5", "ti,pbias-omap"; reg = <0x60 0x4>; syscon = <&omap5_padconf_global>; pbias_mmc_reg: pbias_mmc_omap5 { diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi index a0b2a79cbfbd..4624d0f2a754 100644 --- a/arch/arm/boot/dts/r8a7790.dtsi +++ b/arch/arm/boot/dts/r8a7790.dtsi @@ -1627,6 +1627,7 @@ "mix.0", "mix.1", "dvc.0", "dvc.1", "clk_a", "clk_b", "clk_c", "clk_i"; + power-domains = <&cpg_clocks>; status = "disabled"; diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi index 831525dd39a6..1666c8a6b143 100644 --- a/arch/arm/boot/dts/r8a7791.dtsi +++ b/arch/arm/boot/dts/r8a7791.dtsi @@ -1677,6 +1677,7 @@ "mix.0", "mix.1", "dvc.0", "dvc.1", "clk_a", "clk_b", "clk_c", "clk_i"; + power-domains = <&cpg_clocks>; status = "disabled"; diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi index 2fa7a0dc83f7..275c78ccc0f3 100644 --- a/arch/arm/boot/dts/rk3288-veyron.dtsi +++ b/arch/arm/boot/dts/rk3288-veyron.dtsi @@ -158,6 +158,7 @@ }; &hdmi { + ddc-i2c-bus = <&i2c5>; status = "okay"; }; diff --git a/arch/arm/boot/dts/ste-hrefv60plus.dtsi b/arch/arm/boot/dts/ste-hrefv60plus.dtsi index 810cda743b6d..9c2387b34d0c 100644 --- a/arch/arm/boot/dts/ste-hrefv60plus.dtsi +++ b/arch/arm/boot/dts/ste-hrefv60plus.dtsi @@ -56,7 +56,7 @@ /* VMMCI level-shifter enable */ default_hrefv60_cfg2 { pins = "GPIO169_D22"; - ste,config = <&gpio_out_lo>; + ste,config = <&gpio_out_hi>; }; /* VMMCI level-shifter voltage select */ default_hrefv60_cfg3 { diff --git a/arch/arm/boot/dts/stih407.dtsi b/arch/arm/boot/dts/stih407.dtsi index 3efa3b2ebe90..6b914e4bb099 100644 --- a/arch/arm/boot/dts/stih407.dtsi +++ b/arch/arm/boot/dts/stih407.dtsi @@ -103,48 +103,46 @@ <&clk_s_d0_quadfs 0>, <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 0>; - ranges; - - sti-hdmi@8d04000 { - compatible = "st,stih407-hdmi"; - reg = <0x8d04000 0x1000>; - reg-names = "hdmi-reg"; - 
interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>; - interrupt-names = "irq"; - clock-names = "pix", - "tmds", - "phy", - "audio", - "main_parent", - "aux_parent"; - - clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>, - <&clk_s_d2_flexgen CLK_TMDS_HDMI>, - <&clk_s_d2_flexgen CLK_REF_HDMIPHY>, - <&clk_s_d0_flexgen CLK_PCM_0>, - <&clk_s_d2_quadfs 0>, - <&clk_s_d2_quadfs 1>; - - hdmi,hpd-gpio = <&pio5 3>; - reset-names = "hdmi"; - resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; - ddc = <&hdmiddc>; - - }; - - sti-hda@8d02000 { - compatible = "st,stih407-hda"; - reg = <0x8d02000 0x400>, <0x92b0120 0x4>; - reg-names = "hda-reg", "video-dacs-ctrl"; - clock-names = "pix", - "hddac", - "main_parent", - "aux_parent"; - clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>, - <&clk_s_d2_flexgen CLK_HDDAC>, - <&clk_s_d2_quadfs 0>, - <&clk_s_d2_quadfs 1>; - }; + }; + + sti-hdmi@8d04000 { + compatible = "st,stih407-hdmi"; + reg = <0x8d04000 0x1000>; + reg-names = "hdmi-reg"; + interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>; + interrupt-names = "irq"; + clock-names = "pix", + "tmds", + "phy", + "audio", + "main_parent", + "aux_parent"; + + clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>, + <&clk_s_d2_flexgen CLK_TMDS_HDMI>, + <&clk_s_d2_flexgen CLK_REF_HDMIPHY>, + <&clk_s_d0_flexgen CLK_PCM_0>, + <&clk_s_d2_quadfs 0>, + <&clk_s_d2_quadfs 1>; + + hdmi,hpd-gpio = <&pio5 3>; + reset-names = "hdmi"; + resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; + ddc = <&hdmiddc>; + }; + + sti-hda@8d02000 { + compatible = "st,stih407-hda"; + reg = <0x8d02000 0x400>, <0x92b0120 0x4>; + reg-names = "hda-reg", "video-dacs-ctrl"; + clock-names = "pix", + "hddac", + "main_parent", + "aux_parent"; + clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>, + <&clk_s_d2_flexgen CLK_HDDAC>, + <&clk_s_d2_quadfs 0>, + <&clk_s_d2_quadfs 1>; }; }; }; diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi index 6f40bc99c22f..8c6e61a27234 100644 --- a/arch/arm/boot/dts/stih410.dtsi +++ b/arch/arm/boot/dts/stih410.dtsi @@ -178,48 +178,46 @@ <&clk_s_d0_quadfs 0>, <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 0>; - ranges; - - sti-hdmi@8d04000 { - compatible = "st,stih407-hdmi"; - reg = <0x8d04000 0x1000>; - reg-names = "hdmi-reg"; - interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>; - interrupt-names = "irq"; - clock-names = "pix", - "tmds", - "phy", - "audio", - "main_parent", - "aux_parent"; - - clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>, - <&clk_s_d2_flexgen CLK_TMDS_HDMI>, - <&clk_s_d2_flexgen CLK_REF_HDMIPHY>, - <&clk_s_d0_flexgen CLK_PCM_0>, - <&clk_s_d2_quadfs 0>, - <&clk_s_d2_quadfs 1>; - - hdmi,hpd-gpio = <&pio5 3>; - reset-names = "hdmi"; - resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; - ddc = <&hdmiddc>; - - }; - - sti-hda@8d02000 { - compatible = "st,stih407-hda"; - reg = <0x8d02000 0x400>, <0x92b0120 0x4>; - reg-names = "hda-reg", "video-dacs-ctrl"; - clock-names = "pix", - "hddac", - "main_parent", - "aux_parent"; - clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>, - <&clk_s_d2_flexgen CLK_HDDAC>, - <&clk_s_d2_quadfs 0>, - <&clk_s_d2_quadfs 1>; - }; + }; + + sti-hdmi@8d04000 { + compatible = "st,stih407-hdmi"; + reg = <0x8d04000 0x1000>; + reg-names = "hdmi-reg"; + interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>; + interrupt-names = "irq"; + clock-names = "pix", + "tmds", + "phy", + "audio", + "main_parent", + "aux_parent"; + + clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>, + <&clk_s_d2_flexgen CLK_TMDS_HDMI>, + <&clk_s_d2_flexgen CLK_REF_HDMIPHY>, + <&clk_s_d0_flexgen CLK_PCM_0>, + <&clk_s_d2_quadfs 0>, + <&clk_s_d2_quadfs 1>; + + hdmi,hpd-gpio = <&pio5 3>; + 
reset-names = "hdmi"; + resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; + ddc = <&hdmiddc>; + }; + + sti-hda@8d02000 { + compatible = "st,stih407-hda"; + reg = <0x8d02000 0x400>, <0x92b0120 0x4>; + reg-names = "hda-reg", "video-dacs-ctrl"; + clock-names = "pix", + "hddac", + "main_parent", + "aux_parent"; + clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>, + <&clk_s_d2_flexgen CLK_HDDAC>, + <&clk_s_d2_quadfs 0>, + <&clk_s_d2_quadfs 1>; }; }; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 2bebaa286f9a..391230c3dc93 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -107,7 +107,7 @@ 720000 1200000 528000 1100000 312000 1000000 - 144000 900000 + 144000 1000000 >; #cooling-cells = <2>; cooling-min-level = <0>; diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi index 9d4f86e9c50a..d845bd1448b5 100644 --- a/arch/arm/boot/dts/tegra114.dtsi +++ b/arch/arm/boot/dts/tegra114.dtsi @@ -234,7 +234,9 @@ gpio-controller; #interrupt-cells = <2>; interrupt-controller; + /* gpio-ranges = <&pinmux 0 0 246>; + */ }; apbmisc@70000800 { diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi index 1e204a6de12c..819e2ae2cabe 100644 --- a/arch/arm/boot/dts/tegra124.dtsi +++ b/arch/arm/boot/dts/tegra124.dtsi @@ -258,7 +258,9 @@ gpio-controller; #interrupt-cells = <2>; interrupt-controller; + /* gpio-ranges = <&pinmux 0 0 251>; + */ }; apbdma: dma@0,60020000 { diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi index e058709e6d98..969b828505ae 100644 --- a/arch/arm/boot/dts/tegra20.dtsi +++ b/arch/arm/boot/dts/tegra20.dtsi @@ -244,7 +244,9 @@ gpio-controller; #interrupt-cells = <2>; interrupt-controller; + /* gpio-ranges = <&pinmux 0 0 224>; + */ }; apbmisc@70000800 { diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi index fe04fb5e155f..c6938ad1b543 100644 --- a/arch/arm/boot/dts/tegra30.dtsi +++ b/arch/arm/boot/dts/tegra30.dtsi @@ -349,7 +349,9 @@ gpio-controller; #interrupt-cells = <2>; interrupt-controller; + /* gpio-ranges = <&pinmux 0 0 248>; + */ }; apbmisc@70000800 { diff --git a/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts b/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts index 33963acd7e8f..f80f772d99fb 100644 --- a/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts +++ b/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts @@ -85,7 +85,7 @@ }; ðsc { - interrupts = <0 50 4>; + interrupts = <0 52 4>; }; &serial0 { diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c index 96dabcb6c621..996aed3b4eee 100644 --- a/arch/arm/common/it8152.c +++ b/arch/arm/common/it8152.c @@ -95,7 +95,7 @@ void it8152_init_irq(void) } } -void it8152_irq_demux(unsigned int irq, struct irq_desc *desc) +void it8152_irq_demux(struct irq_desc *desc) { int bits_pd, bits_lp, bits_ld; int i; diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c index 304adea4bc52..0e97b4b871f9 100644 --- a/arch/arm/common/locomo.c +++ b/arch/arm/common/locomo.c @@ -138,7 +138,7 @@ static struct locomo_dev_info locomo_devices[] = { }, }; -static void locomo_handler(unsigned int __irq, struct irq_desc *desc) +static void locomo_handler(struct irq_desc *desc) { struct locomo *lchip = irq_desc_get_chip_data(desc); int req, i; diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index 4f290250fa93..3d224941b541 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c @@ -196,10 +196,8 @@ static struct sa1111_dev_info sa1111_devices[] = { * active IRQs causes 
the interrupt output to pulse, the upper levels * will call us again if there are more interrupts to process. */ -static void -sa1111_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void sa1111_irq_handler(struct irq_desc *desc) { - unsigned int irq = irq_desc_get_irq(desc); unsigned int stat0, stat1, i; struct sa1111 *sachip = irq_desc_get_handler_data(desc); void __iomem *mapbase = sachip->base + SA1111_INTC; @@ -214,7 +212,7 @@ sa1111_irq_handler(unsigned int __irq, struct irq_desc *desc) sa1111_writel(stat1, mapbase + SA1111_INTSTATCLR1); if (stat0 == 0 && stat1 == 0) { - do_bad_IRQ(irq, desc); + do_bad_IRQ(desc); return; } diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 50c84e1876fc..3f15a5cae167 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -240,7 +240,8 @@ CONFIG_SSI_PROTOCOL=m CONFIG_PINCTRL_SINGLE=y CONFIG_DEBUG_GPIO=y CONFIG_GPIO_SYSFS=y -CONFIG_GPIO_PCF857X=m +CONFIG_GPIO_PCA953X=m +CONFIG_GPIO_PCF857X=y CONFIG_GPIO_TWL4030=y CONFIG_GPIO_PALMAS=y CONFIG_W1=m @@ -350,6 +351,8 @@ CONFIG_USB_MUSB_HDRC=m CONFIG_USB_MUSB_OMAP2PLUS=m CONFIG_USB_MUSB_AM35X=m CONFIG_USB_MUSB_DSPS=m +CONFIG_USB_INVENTRA_DMA=y +CONFIG_USB_TI_CPPI41_DMA=y CONFIG_USB_DWC3=m CONFIG_USB_TEST=m CONFIG_AM335X_PHY_USB=y diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 7bbf325a4f31..b2bc8e11471d 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -491,11 +491,6 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) #endif .endm - .macro uaccess_save_and_disable, tmp - uaccess_save \tmp - uaccess_disable \tmp - .endm - .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo .macro ret\c, reg #if __LINUX_ARM_ARCH__ < 6 diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h index b274bde24905..e7335a92144e 100644 --- a/arch/arm/include/asm/bug.h +++ b/arch/arm/include/asm/bug.h @@ -40,6 +40,7 @@ do { \ "2:\t.asciz " #__file "\n" \ ".popsection\n" \ ".pushsection __bug_table,\"a\"\n" \ + ".align 2\n" \ "3:\t.word 1b, 2b\n" \ "\t.hword " #__line ", 0\n" \ ".popsection"); \ diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h index e878129f2fee..fc8ba1663601 100644 --- a/arch/arm/include/asm/domain.h +++ b/arch/arm/include/asm/domain.h @@ -12,6 +12,7 @@ #ifndef __ASSEMBLY__ #include <asm/barrier.h> +#include <asm/thread_info.h> #endif /* @@ -89,7 +90,8 @@ static inline unsigned int get_domain(void) asm( "mrc p15, 0, %0, c3, c0 @ get domain" - : "=r" (domain)); + : "=r" (domain) + : "m" (current_thread_info()->cpu_domain)); return domain; } @@ -98,7 +100,7 @@ static inline void set_domain(unsigned val) { asm volatile( "mcr p15, 0, %0, c3, c0 @ set domain" - : : "r" (val)); + : : "r" (val) : "memory"); isb(); } diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h index d36a73d7c0e8..076777ff3daa 100644 --- a/arch/arm/include/asm/hardware/it8152.h +++ b/arch/arm/include/asm/hardware/it8152.h @@ -106,7 +106,7 @@ extern void __iomem *it8152_base_address; struct pci_dev; struct pci_sys_data; -extern void it8152_irq_demux(unsigned int irq, struct irq_desc *desc); +extern void it8152_irq_demux(struct irq_desc *desc); extern void it8152_init_irq(void); extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); extern int it8152_pci_setup(int nr, struct pci_sys_data *sys); diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h index 
af79da40af2a..9beb92914f4d 100644 --- a/arch/arm/include/asm/hw_irq.h +++ b/arch/arm/include/asm/hw_irq.h @@ -11,12 +11,6 @@ static inline void ack_bad_irq(int irq) pr_crit("unexpected IRQ trap at vector %02x\n", irq); } -void set_irq_flags(unsigned int irq, unsigned int flags); - -#define IRQF_VALID (1 << 0) -#define IRQF_PROBE (1 << 1) -#define IRQF_NOAUTOEN (1 << 2) - #define ARCH_IRQ_INIT_FLAGS (IRQ_NOREQUEST | IRQ_NOPROBE) #endif diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index dcba0fa5176e..c4072d9f32c7 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -29,21 +29,18 @@ #define __KVM_HAVE_ARCH_INTC_INITIALIZED -#if defined(CONFIG_KVM_ARM_MAX_VCPUS) -#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS -#else -#define KVM_MAX_VCPUS 0 -#endif - #define KVM_USER_MEM_SLOTS 32 #define KVM_PRIVATE_MEM_SLOTS 4 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #define KVM_HAVE_ONE_REG +#define KVM_HALT_POLL_NS_DEFAULT 500000 #define KVM_VCPU_MAX_FEATURES 2 #include <kvm/arm_vgic.h> +#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS + u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); int __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); @@ -148,6 +145,7 @@ struct kvm_vm_stat { struct kvm_vcpu_stat { u32 halt_successful_poll; + u32 halt_attempted_poll; u32 halt_wakeup; }; diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h index 2092ee1e1300..de4634b51456 100644 --- a/arch/arm/include/asm/mach/irq.h +++ b/arch/arm/include/asm/mach/irq.h @@ -23,10 +23,10 @@ extern int show_fiq_list(struct seq_file *, int); /* * This is for easy migration, but should be changed in the source */ -#define do_bad_IRQ(irq,desc) \ +#define do_bad_IRQ(desc) \ do { \ raw_spin_lock(&desc->lock); \ - handle_bad_irq(irq, desc); \ + handle_bad_irq(desc); \ raw_spin_unlock(&desc->lock); \ } while(0) diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index d0a1119dcaf3..776757d1604a 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -25,7 +25,6 @@ struct task_struct; #include <asm/types.h> -#include <asm/domain.h> typedef unsigned long mm_segment_t; diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 32640c431a08..7cba573c2cc9 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -19,7 +19,7 @@ * This may need to be greater than __NR_last_syscall+1 in order to * account for the padding in the syscall table */ -#define __NR_syscalls (388) +#define __NR_syscalls (392) /* * *NOTE*: This is a ghost syscall private to the kernel. Only the diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h index 0c3f5a0dafd3..7a2a32a1d5a8 100644 --- a/arch/arm/include/uapi/asm/unistd.h +++ b/arch/arm/include/uapi/asm/unistd.h @@ -414,6 +414,8 @@ #define __NR_memfd_create (__NR_SYSCALL_BASE+385) #define __NR_bpf (__NR_SYSCALL_BASE+386) #define __NR_execveat (__NR_SYSCALL_BASE+387) +#define __NR_userfaultfd (__NR_SYSCALL_BASE+388) +#define __NR_membarrier (__NR_SYSCALL_BASE+389) /* * The following SWIs are ARM private. 
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index 05745eb838c5..fde6c88d560c 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -397,6 +397,8 @@ /* 385 */ CALL(sys_memfd_create) CALL(sys_bpf) CALL(sys_execveat) + CALL(sys_userfaultfd) + CALL(sys_membarrier) #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 5ff4826cb154..2766183e69df 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -79,26 +79,6 @@ asm_do_IRQ(unsigned int irq, struct pt_regs *regs) handle_IRQ(irq, regs); } -void set_irq_flags(unsigned int irq, unsigned int iflags) -{ - unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; - - if (irq >= nr_irqs) { - pr_err("Trying to set irq flags for IRQ%d\n", irq); - return; - } - - if (iflags & IRQF_VALID) - clr |= IRQ_NOREQUEST; - if (iflags & IRQF_PROBE) - clr |= IRQ_NOPROBE; - if (!(iflags & IRQF_NOAUTOEN)) - clr |= IRQ_NOAUTOEN; - /* Order is clear bits in "clr" then set bits in "set" */ - irq_modify_status(irq, clr, set & ~clr); -} -EXPORT_SYMBOL_GPL(set_irq_flags); - void __init init_IRQ(void) { int ret; diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c index a6ad93c9bce3..fd9eefce0a7b 100644 --- a/arch/arm/kernel/kgdb.c +++ b/arch/arm/kernel/kgdb.c @@ -259,15 +259,17 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) if (err) return err; - patch_text((void *)bpt->bpt_addr, - *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr); + /* Machine is already stopped, so we can use __patch_text() directly */ + __patch_text((void *)bpt->bpt_addr, + *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr); return err; } int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) { - patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr); + /* Machine is already stopped, so we can use __patch_text() directly */ + __patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr); return 0; } diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index a3089bacb8d8..7a7c4cea5523 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -226,6 +226,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); +#ifdef CONFIG_CPU_USE_DOMAINS /* * Copy the initial value of the domain access control register * from the current thread: thread->addr_limit will have been @@ -233,6 +234,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, * kernel/fork.c */ thread->cpu_domain = get_domain(); +#endif if (likely(!(p->flags & PF_KTHREAD))) { *childregs = *current_pt_regs(); diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index b6cda06b455f..7b8f2141427b 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -343,15 +343,18 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, */ thumb = handler & 1; -#if __LINUX_ARM_ARCH__ >= 7 /* - * Clear the If-Then Thumb-2 execution state - * ARM spec requires this to be all 000s in ARM mode - * Snapdragon S4/Krait misbehaves on a Thumb=>ARM - * signal transition without this. + * Clear the If-Then Thumb-2 execution state. ARM spec + * requires this to be all 000s in ARM mode. Snapdragon + * S4/Krait misbehaves on a Thumb=>ARM signal transition + * without this. + * + * We must do this whenever we are running on a Thumb-2 + * capable CPU, which includes ARMv6T2. 
However, we elect + * to always do this to simplify the code; this field is + * marked UNK/SBZP for older architectures. */ cpsr &= ~PSR_IT_MASK; -#endif if (thumb) { cpsr |= PSR_T_BIT; diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig index bfb915d05665..356970f3b25e 100644 --- a/arch/arm/kvm/Kconfig +++ b/arch/arm/kvm/Kconfig @@ -21,6 +21,7 @@ config KVM depends on MMU && OF select PREEMPT_NOTIFIERS select ANON_INODES + select ARM_GIC select HAVE_KVM_CPU_RELAX_INTERCEPT select HAVE_KVM_ARCH_TLB_FLUSH_ALL select KVM_MMIO @@ -45,15 +46,4 @@ config KVM_ARM_HOST ---help--- Provides host support for ARM processors. -config KVM_ARM_MAX_VCPUS - int "Number maximum supported virtual CPUs per VM" - depends on KVM_ARM_HOST - default 4 - help - Static number of max supported virtual CPUs per VM. - - If you choose a high number, the vcpu structures will be quite - large, so only choose a reasonable number that you expect to - actually use. - endif # VIRTUALIZATION diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index ce404a5c3062..78b286994577 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -446,7 +446,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) * Map the VGIC hardware resources before running a vcpu the first * time on this VM. */ - if (unlikely(!vgic_ready(kvm))) { + if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) { ret = kvm_vgic_map_resources(kvm); if (ret) return ret; @@ -1080,7 +1080,7 @@ static int init_hyp_mode(void) */ err = kvm_timer_hyp_init(); if (err) - goto out_free_mappings; + goto out_free_context; #ifndef CONFIG_HOTPLUG_CPU free_boot_hyp_pgd(); diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S index 702740d37465..51a59504bef4 100644 --- a/arch/arm/kvm/interrupts_head.S +++ b/arch/arm/kvm/interrupts_head.S @@ -515,8 +515,7 @@ ARM_BE8(rev r6, r6 ) mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL str r2, [vcpu, #VCPU_TIMER_CNTV_CTL] - bic r2, #1 @ Clear ENABLE - mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL + isb mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL @@ -529,6 +528,9 @@ ARM_BE8(rev r6, r6 ) mcrr p15, 4, r2, r2, c14 @ CNTVOFF 1: + mov r2, #0 @ Clear ENABLE + mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL + @ Allow physical timer/counter access for the host mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN) diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 7b4201294187..6984342da13d 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -1792,8 +1792,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, if (vma->vm_flags & VM_PFNMAP) { gpa_t gpa = mem->guest_phys_addr + (vm_start - mem->userspace_addr); - phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) + - vm_start - vma->vm_start; + phys_addr_t pa; + + pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; + pa += vm_start - vma->vm_start; /* IO region dirty page logging not allowed */ if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c index 4b94b513168d..ad6f6424f1d1 100644 --- a/arch/arm/kvm/psci.c +++ b/arch/arm/kvm/psci.c @@ -126,7 +126,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) { - int i; + int i, matching_cpus = 0; unsigned long mpidr; unsigned long target_affinity; unsigned long target_affinity_mask; @@ -151,12 +151,16 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) */ kvm_for_each_vcpu(i, tmp, kvm) { mpidr = kvm_vcpu_get_mpidr_aff(tmp); - if 
(((mpidr & target_affinity_mask) == target_affinity) && - !tmp->arch.pause) { - return PSCI_0_2_AFFINITY_LEVEL_ON; + if ((mpidr & target_affinity_mask) == target_affinity) { + matching_cpus++; + if (!tmp->arch.pause) + return PSCI_0_2_AFFINITY_LEVEL_ON; } } + if (!matching_cpus) + return PSCI_RET_INVALID_PARAMS; + return PSCI_0_2_AFFINITY_LEVEL_OFF; } diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c index 305d7c6242bb..bfb3703357c5 100644 --- a/arch/arm/mach-dove/irq.c +++ b/arch/arm/mach-dove/irq.c @@ -69,14 +69,14 @@ static struct irq_chip pmu_irq_chip = { .irq_ack = pmu_irq_ack, }; -static void pmu_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void pmu_irq_handler(struct irq_desc *desc) { - unsigned int irq = irq_desc_get_irq(desc); unsigned long cause = readl(PMU_INTERRUPT_CAUSE); + unsigned int irq; cause &= readl(PMU_INTERRUPT_MASK); if (cause == 0) { - do_bad_IRQ(irq, desc); + do_bad_IRQ(desc); return; } diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c index 9bdf54795f05..56978199c479 100644 --- a/arch/arm/mach-exynos/mcpm-exynos.c +++ b/arch/arm/mach-exynos/mcpm-exynos.c @@ -20,6 +20,7 @@ #include <asm/cputype.h> #include <asm/cp15.h> #include <asm/mcpm.h> +#include <asm/smp_plat.h> #include "regs-pmu.h" #include "common.h" @@ -70,7 +71,31 @@ static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster) cluster >= EXYNOS5420_NR_CLUSTERS) return -EINVAL; - exynos_cpu_power_up(cpunr); + if (!exynos_cpu_power_state(cpunr)) { + exynos_cpu_power_up(cpunr); + + /* + * This assumes the cluster number of the big cores(Cortex A15) + * is 0 and the Little cores(Cortex A7) is 1. + * When the system was booted from the Little core, + * they should be reset during power up cpu. + */ + if (cluster && + cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) { + /* + * Before we reset the Little cores, we should wait + * the SPARE2 register is set to 1 because the init + * codes of the iROM will set the register after + * initialization. 
+ */ + while (!pmu_raw_readl(S5P_PMU_SPARE2)) + udelay(10); + + pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu), + EXYNOS_SWRESET); + } + } + return 0; } diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c index 4a87e86dec45..7c21760f590f 100644 --- a/arch/arm/mach-exynos/pm_domains.c +++ b/arch/arm/mach-exynos/pm_domains.c @@ -200,15 +200,15 @@ no_clk: args.args_count = 0; child_domain = of_genpd_get_from_provider(&args); if (IS_ERR(child_domain)) - goto next_pd; + continue; if (of_parse_phandle_with_args(np, "power-domains", "#power-domain-cells", 0, &args) != 0) - goto next_pd; + continue; parent_domain = of_genpd_get_from_provider(&args); if (IS_ERR(parent_domain)) - goto next_pd; + continue; if (pm_genpd_add_subdomain(parent_domain, child_domain)) pr_warn("%s failed to add subdomain: %s\n", @@ -216,8 +216,6 @@ no_clk: else pr_info("%s has as child subdomain: %s.\n", parent_domain->name, child_domain->name); -next_pd: - of_node_put(np); } return 0; diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h index b7614333d296..fba9068ed260 100644 --- a/arch/arm/mach-exynos/regs-pmu.h +++ b/arch/arm/mach-exynos/regs-pmu.h @@ -513,6 +513,12 @@ static inline unsigned int exynos_pmu_cpunr(unsigned int mpidr) #define SPREAD_ENABLE 0xF #define SPREAD_USE_STANDWFI 0xF +#define EXYNOS5420_KFC_CORE_RESET0 BIT(8) +#define EXYNOS5420_KFC_ETM_RESET0 BIT(20) + +#define EXYNOS5420_KFC_CORE_RESET(_nr) \ + ((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr)) + #define EXYNOS5420_BB_CON1 0x0784 #define EXYNOS5420_BB_SEL_EN BIT(31) #define EXYNOS5420_BB_PMOS_EN BIT(7) diff --git a/arch/arm/mach-footbridge/isa-irq.c b/arch/arm/mach-footbridge/isa-irq.c index fcd79bc3a3e1..c01fca11b224 100644 --- a/arch/arm/mach-footbridge/isa-irq.c +++ b/arch/arm/mach-footbridge/isa-irq.c @@ -87,13 +87,12 @@ static struct irq_chip isa_hi_chip = { .irq_unmask = isa_unmask_pic_hi_irq, }; -static void -isa_irq_handler(unsigned int irq, struct irq_desc *desc) +static void isa_irq_handler(struct irq_desc *desc) { unsigned int isa_irq = *(unsigned char *)PCIIACK_BASE; if (isa_irq < _ISA_IRQ(0) || isa_irq >= _ISA_IRQ(16)) { - do_bad_IRQ(isa_irq, desc); + do_bad_IRQ(desc); return; } diff --git a/arch/arm/mach-gemini/gpio.c b/arch/arm/mach-gemini/gpio.c index 220333ed741d..2478d9f4d92d 100644 --- a/arch/arm/mach-gemini/gpio.c +++ b/arch/arm/mach-gemini/gpio.c @@ -126,7 +126,7 @@ static int gpio_set_irq_type(struct irq_data *d, unsigned int type) return 0; } -static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void gpio_irq_handler(struct irq_desc *desc) { unsigned int port = (unsigned int)irq_desc_get_handler_data(desc); unsigned int gpio_irq_no, irq_stat; diff --git a/arch/arm/mach-imx/3ds_debugboard.c b/arch/arm/mach-imx/3ds_debugboard.c index 45903be6e7b3..16496a071ecb 100644 --- a/arch/arm/mach-imx/3ds_debugboard.c +++ b/arch/arm/mach-imx/3ds_debugboard.c @@ -85,7 +85,7 @@ static struct platform_device smsc_lan9217_device = { .resource = smsc911x_resources, }; -static void mxc_expio_irq_handler(u32 irq, struct irq_desc *desc) +static void mxc_expio_irq_handler(struct irq_desc *desc) { u32 imr_val; u32 int_valid; diff --git a/arch/arm/mach-imx/mach-mx31ads.c b/arch/arm/mach-imx/mach-mx31ads.c index 2c0853560bd2..2b147e4bf9c9 100644 --- a/arch/arm/mach-imx/mach-mx31ads.c +++ b/arch/arm/mach-imx/mach-mx31ads.c @@ -154,7 +154,7 @@ static inline void mxc_init_imx_uart(void) imx31_add_imx_uart0(&uart_pdata); } -static void 
mx31ads_expio_irq_handler(u32 irq, struct irq_desc *desc) +static void mx31ads_expio_irq_handler(struct irq_desc *desc) { u32 imr_val; u32 int_valid; diff --git a/arch/arm/mach-iop13xx/msi.c b/arch/arm/mach-iop13xx/msi.c index 9f89e76dfbb9..f6235b28578c 100644 --- a/arch/arm/mach-iop13xx/msi.c +++ b/arch/arm/mach-iop13xx/msi.c @@ -91,7 +91,7 @@ static void (*write_imipr[])(u32) = { write_imipr_3, }; -static void iop13xx_msi_handler(unsigned int irq, struct irq_desc *desc) +static void iop13xx_msi_handler(struct irq_desc *desc) { int i, j; unsigned long status; diff --git a/arch/arm/mach-lpc32xx/irq.c b/arch/arm/mach-lpc32xx/irq.c index cce4cef12b6e..2ae431e8bc1b 100644 --- a/arch/arm/mach-lpc32xx/irq.c +++ b/arch/arm/mach-lpc32xx/irq.c @@ -370,7 +370,7 @@ static struct irq_chip lpc32xx_irq_chip = { .irq_set_wake = lpc32xx_irq_wake }; -static void lpc32xx_sic1_handler(unsigned int irq, struct irq_desc *desc) +static void lpc32xx_sic1_handler(struct irq_desc *desc) { unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC1_BASE)); @@ -383,7 +383,7 @@ static void lpc32xx_sic1_handler(unsigned int irq, struct irq_desc *desc) } } -static void lpc32xx_sic2_handler(unsigned int irq, struct irq_desc *desc) +static void lpc32xx_sic2_handler(struct irq_desc *desc) { unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC2_BASE)); diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c index 6373e2bff203..842302df99c1 100644 --- a/arch/arm/mach-netx/generic.c +++ b/arch/arm/mach-netx/generic.c @@ -69,8 +69,7 @@ static struct platform_device *devices[] __initdata = { #define DEBUG_IRQ(fmt...) while (0) {} #endif -static void -netx_hif_demux_handler(unsigned int irq_unused, struct irq_desc *desc) +static void netx_hif_demux_handler(struct irq_desc *desc) { unsigned int irq = NETX_IRQ_HIF_CHAINED(0); unsigned int stat; diff --git a/arch/arm/mach-omap1/fpga.c b/arch/arm/mach-omap1/fpga.c index dfec671b1639..39e20d0ead08 100644 --- a/arch/arm/mach-omap1/fpga.c +++ b/arch/arm/mach-omap1/fpga.c @@ -87,7 +87,7 @@ static void fpga_mask_ack_irq(struct irq_data *d) fpga_ack_irq(d); } -static void innovator_fpga_IRQ_demux(unsigned int irq, struct irq_desc *desc) +static void innovator_fpga_IRQ_demux(struct irq_desc *desc) { u32 stat; int fpga_irq; diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 07d2e100caab..33d1460a5639 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -44,10 +44,12 @@ config SOC_OMAP5 select ARM_CPU_SUSPEND if PM select ARM_GIC select HAVE_ARM_SCU if SMP - select HAVE_ARM_TWD if SMP select HAVE_ARM_ARCH_TIMER select ARM_ERRATA_798181 if SMP + select OMAP_INTERCONNECT select OMAP_INTERCONNECT_BARRIER + select PM_OPP if PM + select ZONE_DMA if ARM_LPAE config SOC_AM33XX bool "TI AM33XX" @@ -70,10 +72,14 @@ config SOC_DRA7XX select ARCH_OMAP2PLUS select ARM_CPU_SUSPEND if PM select ARM_GIC + select HAVE_ARM_SCU if SMP select HAVE_ARM_ARCH_TIMER select IRQ_CROSSBAR select ARM_ERRATA_798181 if SMP + select OMAP_INTERCONNECT select OMAP_INTERCONNECT_BARRIER + select PM_OPP if PM + select ZONE_DMA if ARM_LPAE config ARCH_OMAP2PLUS bool diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c index 24c9afc9e8a7..fb219a30c10c 100644 --- a/arch/arm/mach-omap2/board-generic.c +++ b/arch/arm/mach-omap2/board-generic.c @@ -20,13 +20,6 @@ #include "common.h" -#if !(defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)) -#define intc_of_init NULL -#endif -#ifndef CONFIG_ARCH_OMAP4 -#define 
gic_of_init NULL -#endif - static const struct of_device_id omap_dt_match_table[] __initconst = { { .compatible = "simple-bus", }, { .compatible = "ti,omap-infra", }, @@ -113,6 +106,7 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)") MACHINE_END static const char *const omap36xx_boards_compat[] __initconst = { + "ti,omap3630", "ti,omap36xx", NULL, }; @@ -250,6 +244,9 @@ static const char *const omap5_boards_compat[] __initconst = { }; DT_MACHINE_START(OMAP5_DT, "Generic OMAP5 (Flattened Device Tree)") +#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE) + .dma_zone_size = SZ_2G, +#endif .reserve = omap_reserve, .smp = smp_ops(omap4_smp_ops), .map_io = omap5_map_io, @@ -295,6 +292,9 @@ static const char *const dra74x_boards_compat[] __initconst = { }; DT_MACHINE_START(DRA74X_DT, "Generic DRA74X (Flattened Device Tree)") +#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE) + .dma_zone_size = SZ_2G, +#endif .reserve = omap_reserve, .smp = smp_ops(omap4_smp_ops), .map_io = dra7xx_map_io, @@ -315,6 +315,9 @@ static const char *const dra72x_boards_compat[] __initconst = { }; DT_MACHINE_START(DRA72X_DT, "Generic DRA72X (Flattened Device Tree)") +#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE) + .dma_zone_size = SZ_2G, +#endif .reserve = omap_reserve, .map_io = dra7xx_map_io, .init_early = dra7xx_init_early, diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index e3f713ffb06b..54a5ba54d2ff 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c @@ -653,8 +653,12 @@ void __init dra7xxx_check_revision(void) omap_revision = DRA752_REV_ES1_0; break; case 1: - default: omap_revision = DRA752_REV_ES1_1; + break; + case 2: + default: + omap_revision = DRA752_REV_ES2_0; + break; } break; @@ -674,7 +678,7 @@ void __init dra7xxx_check_revision(void) /* Unknown default to latest silicon rev as default*/ pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%x)\n", __func__, idcode, hawkeye, rev); - omap_revision = DRA752_REV_ES1_1; + omap_revision = DRA752_REV_ES2_0; } sprintf(soc_name, "DRA%03x", omap_rev() >> 16); diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 980c9372e6fd..3eaeaca5da05 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -676,6 +676,7 @@ void __init am43xx_init_early(void) void __init am43xx_init_late(void) { omap_common_late_init(); + omap2_clk_enable_autoidle_all(); } #endif diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index 4cb8fd9f741f..72ebc4c16bae 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -901,7 +901,8 @@ static int __init omap_device_late_idle(struct device *dev, void *data) if (od->hwmods[i]->flags & HWMOD_INIT_NO_IDLE) return 0; - if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) { + if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER && + od->_driver_status != BUS_NOTIFY_BIND_DRIVER) { if (od->_state == OMAP_DEVICE_STATE_ENABLED) { dev_warn(dev, "%s: enabled but no driver. 
Idling\n", __func__); diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index ea56397599c2..1dfe34654c43 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -559,7 +559,14 @@ static void pdata_quirks_check(struct pdata_init *quirks) void __init pdata_quirks_init(const struct of_device_id *omap_dt_match_table) { - omap_sdrc_init(NULL, NULL); + /* + * We still need this for omap2420 and omap3 PM to work, others are + * using drivers/misc/sram.c already. + */ + if (of_machine_is_compatible("ti,omap2420") || + of_machine_is_compatible("ti,omap3")) + omap_sdrc_init(NULL, NULL); + pdata_quirks_check(auxdata_quirks); of_platform_populate(NULL, omap_dt_match_table, omap_auxdata_lookup, NULL); diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h index 425bfcd67db6..b668719b9b25 100644 --- a/arch/arm/mach-omap2/pm.h +++ b/arch/arm/mach-omap2/pm.h @@ -103,7 +103,8 @@ static inline void enable_omap3630_toggle_l2_on_restore(void) { } #define PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD (1 << 0) #define PM_OMAP4_CPU_OSWR_DISABLE (1 << 1) -#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP4) +#if defined(CONFIG_PM) && (defined(CONFIG_ARCH_OMAP4) ||\ + defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX)) extern u16 pm44xx_errata; #define IS_PM44XX_ERRATUM(id) (pm44xx_errata & (id)) #else diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c index 257e98c26618..3fc2cbe52113 100644 --- a/arch/arm/mach-omap2/prm_common.c +++ b/arch/arm/mach-omap2/prm_common.c @@ -102,7 +102,7 @@ static void omap_prcm_events_filter_priority(unsigned long *events, * dispatched accordingly. Clearing of the wakeup events should be * done by the SoC specific individual handlers. */ -static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc) +static void omap_prcm_irq_handler(struct irq_desc *desc) { unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG]; unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG]; diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h index f97654d11ea5..2d1d3845253c 100644 --- a/arch/arm/mach-omap2/soc.h +++ b/arch/arm/mach-omap2/soc.h @@ -469,6 +469,8 @@ IS_OMAP_TYPE(3430, 0x3430) #define DRA7XX_CLASS 0x07000000 #define DRA752_REV_ES1_0 (DRA7XX_CLASS | (0x52 << 16) | (0x10 << 8)) #define DRA752_REV_ES1_1 (DRA7XX_CLASS | (0x52 << 16) | (0x11 << 8)) +#define DRA752_REV_ES2_0 (DRA7XX_CLASS | (0x52 << 16) | (0x20 << 8)) +#define DRA722_REV_ES1_0 (DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8)) #define DRA722_REV_ES1_0 (DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8)) void omap2xxx_check_revision(void); diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index e4d8701f99f9..a55655127ef2 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -297,12 +297,8 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer, if (IS_ERR(src)) return PTR_ERR(src); - r = clk_set_parent(timer->fclk, src); - if (r < 0) { - pr_warn("%s: %s cannot set source\n", __func__, oh->name); - clk_put(src); - return r; - } + WARN(clk_set_parent(timer->fclk, src) < 0, + "Cannot set timer parent clock, no PLL clock driver?"); clk_put(src); diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c index e5a35f6b83a7..d44d311704ba 100644 --- a/arch/arm/mach-omap2/vc.c +++ b/arch/arm/mach-omap2/vc.c @@ -300,7 +300,7 @@ static void __init omap3_vc_init_pmic_signaling(struct voltagedomain *voltdm) val = voltdm->read(OMAP3_PRM_POLCTRL_OFFSET); if 
(!(val & OMAP3430_PRM_POLCTRL_CLKREQ_POL) || - (val & OMAP3430_PRM_POLCTRL_CLKREQ_POL)) { + (val & OMAP3430_PRM_POLCTRL_OFFMODE_POL)) { val |= OMAP3430_PRM_POLCTRL_CLKREQ_POL; val &= ~OMAP3430_PRM_POLCTRL_OFFMODE_POL; pr_debug("PM: fixing sys_clkreq and sys_off_mode polarity to 0x%x\n", diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c index 70366b35d299..a727282bfa99 100644 --- a/arch/arm/mach-pxa/balloon3.c +++ b/arch/arm/mach-pxa/balloon3.c @@ -496,13 +496,13 @@ static struct irq_chip balloon3_irq_chip = { .irq_unmask = balloon3_unmask_irq, }; -static void balloon3_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void balloon3_irq_handler(struct irq_desc *desc) { unsigned long pending = __raw_readl(BALLOON3_INT_CONTROL_REG) & balloon3_irq_enabled; do { struct irq_data *d = irq_desc_get_irq_data(desc); - struct irq_chip *chip = irq_data_get_chip(d); + struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int irq; /* clear useless edge notification */ diff --git a/arch/arm/mach-pxa/cm-x2xx-pci.c b/arch/arm/mach-pxa/cm-x2xx-pci.c index 1fa79f1f832d..3221ae15bef7 100644 --- a/arch/arm/mach-pxa/cm-x2xx-pci.c +++ b/arch/arm/mach-pxa/cm-x2xx-pci.c @@ -29,13 +29,12 @@ void __iomem *it8152_base_address; static int cmx2xx_it8152_irq_gpio; -static void cmx2xx_it8152_irq_demux(unsigned int __irq, struct irq_desc *desc) +static void cmx2xx_it8152_irq_demux(struct irq_desc *desc) { - unsigned int irq = irq_desc_get_irq(desc); /* clear our parent irq */ desc->irq_data.chip->irq_ack(&desc->irq_data); - it8152_irq_demux(irq, desc); + it8152_irq_demux(desc); } void __cmx2xx_pci_init_irq(int irq_gpio) diff --git a/arch/arm/mach-pxa/include/mach/addr-map.h b/arch/arm/mach-pxa/include/mach/addr-map.h index d28fe291233a..07b93fd24474 100644 --- a/arch/arm/mach-pxa/include/mach/addr-map.h +++ b/arch/arm/mach-pxa/include/mach/addr-map.h @@ -44,6 +44,13 @@ */ /* + * DFI Bus for NAND, PXA3xx only + */ +#define NAND_PHYS 0x43100000 +#define NAND_VIRT IOMEM(0xf6300000) +#define NAND_SIZE 0x00100000 + +/* * Internal Memory Controller (PXA27x and later) */ #define IMEMC_PHYS 0x58000000 diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c index b070167deef2..4823d972e647 100644 --- a/arch/arm/mach-pxa/lpd270.c +++ b/arch/arm/mach-pxa/lpd270.c @@ -120,7 +120,7 @@ static struct irq_chip lpd270_irq_chip = { .irq_unmask = lpd270_unmask_irq, }; -static void lpd270_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void lpd270_irq_handler(struct irq_desc *desc) { unsigned int irq; unsigned long pending; diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c index 9a0c8affdadb..d8319b54299a 100644 --- a/arch/arm/mach-pxa/pcm990-baseboard.c +++ b/arch/arm/mach-pxa/pcm990-baseboard.c @@ -284,7 +284,7 @@ static struct irq_chip pcm990_irq_chip = { .irq_unmask = pcm990_unmask_irq, }; -static void pcm990_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void pcm990_irq_handler(struct irq_desc *desc) { unsigned int irq; unsigned long pending; diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c index ce0f8d6242e2..20ce2d386f17 100644 --- a/arch/arm/mach-pxa/pxa3xx.c +++ b/arch/arm/mach-pxa/pxa3xx.c @@ -42,6 +42,14 @@ #define PECR_IS(n) ((1 << ((n) * 2)) << 29) extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int)); + +/* + * NAND NFC: DFI bus arbitration subset + */ +#define NDCR (*(volatile u32 __iomem*)(NAND_VIRT + 0)) +#define NDCR_ND_ARB_EN (1 << 12) +#define 
NDCR_ND_ARB_CNTL (1 << 19) + #ifdef CONFIG_PM #define ISRAM_START 0x5c000000 @@ -362,7 +370,12 @@ static struct map_desc pxa3xx_io_desc[] __initdata = { .pfn = __phys_to_pfn(PXA3XX_SMEMC_BASE), .length = SMEMC_SIZE, .type = MT_DEVICE - } + }, { + .virtual = (unsigned long)NAND_VIRT, + .pfn = __phys_to_pfn(NAND_PHYS), + .length = NAND_SIZE, + .type = MT_DEVICE + }, }; void __init pxa3xx_map_io(void) @@ -419,6 +432,13 @@ static int __init pxa3xx_init(void) */ ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S); + /* + * Disable DFI bus arbitration, to prevent a system bus lock if + * somebody disables the NAND clock (unused clock) while this + * bit remains set. + */ + NDCR = (NDCR & ~NDCR_ND_ARB_EN) | NDCR_ND_ARB_CNTL; + if ((ret = pxa_init_dma(IRQ_DMA, 32))) return ret; diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c index 4841d6cefe76..8ab26370107e 100644 --- a/arch/arm/mach-pxa/viper.c +++ b/arch/arm/mach-pxa/viper.c @@ -276,7 +276,7 @@ static inline unsigned long viper_irq_pending(void) viper_irq_enabled_mask; } -static void viper_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void viper_irq_handler(struct irq_desc *desc) { unsigned int irq; unsigned long pending; diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index 6f94dd7b4dee..30e62a3f0701 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c @@ -105,7 +105,7 @@ static inline unsigned long zeus_irq_pending(void) return __raw_readw(ZEUS_CPLD_ISA_IRQ) & zeus_irq_enabled_mask; } -static void zeus_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void zeus_irq_handler(struct irq_desc *desc) { unsigned int irq; unsigned long pending; diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c index f726d4c4e6dd..dc67a7fb3831 100644 --- a/arch/arm/mach-rpc/ecard.c +++ b/arch/arm/mach-rpc/ecard.c @@ -551,8 +551,7 @@ static void ecard_check_lockup(struct irq_desc *desc) } } -static void -ecard_irq_handler(unsigned int irq, struct irq_desc *desc) +static void ecard_irq_handler(struct irq_desc *desc) { ecard_t *ec; int called = 0; diff --git a/arch/arm/mach-s3c24xx/bast-irq.c b/arch/arm/mach-s3c24xx/bast-irq.c index ced1ab86ac83..2bb08961e934 100644 --- a/arch/arm/mach-s3c24xx/bast-irq.c +++ b/arch/arm/mach-s3c24xx/bast-irq.c @@ -100,9 +100,7 @@ static struct irq_chip bast_pc104_chip = { .irq_ack = bast_pc104_maskack }; -static void -bast_irq_pc104_demux(unsigned int irq, - struct irq_desc *desc) +static void bast_irq_pc104_demux(struct irq_desc *desc) { unsigned int stat; unsigned int irqno; diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c index fd63ecfb2f81..ddb30b8434c5 100644 --- a/arch/arm/mach-s3c64xx/common.c +++ b/arch/arm/mach-s3c64xx/common.c @@ -388,22 +388,22 @@ static inline void s3c_irq_demux_eint(unsigned int start, unsigned int end) } } -static void s3c_irq_demux_eint0_3(unsigned int irq, struct irq_desc *desc) +static void s3c_irq_demux_eint0_3(struct irq_desc *desc) { s3c_irq_demux_eint(0, 3); } -static void s3c_irq_demux_eint4_11(unsigned int irq, struct irq_desc *desc) +static void s3c_irq_demux_eint4_11(struct irq_desc *desc) { s3c_irq_demux_eint(4, 11); } -static void s3c_irq_demux_eint12_19(unsigned int irq, struct irq_desc *desc) +static void s3c_irq_demux_eint12_19(struct irq_desc *desc) { s3c_irq_demux_eint(12, 19); } -static void s3c_irq_demux_eint20_27(unsigned int irq, struct irq_desc *desc) +static void s3c_irq_demux_eint20_27(struct irq_desc *desc) { s3c_irq_demux_eint(20, 27); } diff --git 
a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c index 6d237b4f7a8e..8411985af9ff 100644 --- a/arch/arm/mach-sa1100/neponset.c +++ b/arch/arm/mach-sa1100/neponset.c @@ -166,7 +166,7 @@ static struct sa1100_port_fns neponset_port_fns = { * ensure that the IRQ signal is deasserted before returning. This * is rather unfortunate. */ -static void neponset_irq_handler(unsigned int irq, struct irq_desc *desc) +static void neponset_irq_handler(struct irq_desc *desc) { struct neponset_drvdata *d = irq_desc_get_handler_data(desc); unsigned int irr; diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 9769f1eefe3b..00b7f7de28a1 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -365,15 +365,21 @@ do_alignment_ldrhstrh(unsigned long addr, unsigned long instr, struct pt_regs *r user: if (LDST_L_BIT(instr)) { unsigned long val; + unsigned int __ua_flags = uaccess_save_and_enable(); + get16t_unaligned_check(val, addr); + uaccess_restore(__ua_flags); /* signed half-word? */ if (instr & 0x40) val = (signed long)((signed short) val); regs->uregs[rd] = val; - } else + } else { + unsigned int __ua_flags = uaccess_save_and_enable(); put16t_unaligned_check(regs->uregs[rd], addr); + uaccess_restore(__ua_flags); + } return TYPE_LDST; @@ -420,14 +426,21 @@ do_alignment_ldrdstrd(unsigned long addr, unsigned long instr, user: if (load) { - unsigned long val; + unsigned long val, val2; + unsigned int __ua_flags = uaccess_save_and_enable(); + get32t_unaligned_check(val, addr); + get32t_unaligned_check(val2, addr + 4); + + uaccess_restore(__ua_flags); + regs->uregs[rd] = val; - get32t_unaligned_check(val, addr + 4); - regs->uregs[rd2] = val; + regs->uregs[rd2] = val2; } else { + unsigned int __ua_flags = uaccess_save_and_enable(); put32t_unaligned_check(regs->uregs[rd], addr); put32t_unaligned_check(regs->uregs[rd2], addr + 4); + uaccess_restore(__ua_flags); } return TYPE_LDST; @@ -458,10 +471,15 @@ do_alignment_ldrstr(unsigned long addr, unsigned long instr, struct pt_regs *reg trans: if (LDST_L_BIT(instr)) { unsigned int val; + unsigned int __ua_flags = uaccess_save_and_enable(); get32t_unaligned_check(val, addr); + uaccess_restore(__ua_flags); regs->uregs[rd] = val; - } else + } else { + unsigned int __ua_flags = uaccess_save_and_enable(); put32t_unaligned_check(regs->uregs[rd], addr); + uaccess_restore(__ua_flags); + } return TYPE_LDST; fault: @@ -531,6 +549,7 @@ do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *reg #endif if (user_mode(regs)) { + unsigned int __ua_flags = uaccess_save_and_enable(); for (regbits = REGMASK_BITS(instr), rd = 0; regbits; regbits >>= 1, rd += 1) if (regbits & 1) { @@ -542,6 +561,7 @@ do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *reg put32t_unaligned_check(regs->uregs[rd], eaddr); eaddr += 4; } + uaccess_restore(__ua_flags); } else { for (regbits = REGMASK_BITS(instr), rd = 0; regbits; regbits >>= 1, rd += 1) diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index e62604384945..1a7815e5421b 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -1249,7 +1249,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size) struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; dma_addr_t dma_addr, iova; - int i, ret = DMA_ERROR_CODE; + int i; dma_addr = __alloc_iova(mapping, size); if (dma_addr == DMA_ERROR_CODE) @@ -1257,6 +1257,8 @@ __iommu_create_mapping(struct 
device *dev, struct page **pages, size_t size) iova = dma_addr; for (i = 0; i < count; ) { + int ret; + unsigned int next_pfn = page_to_pfn(pages[i]) + 1; phys_addr_t phys = page_to_phys(pages[i]); unsigned int len, j; diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 876060bcceeb..b8efb8cd1f73 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -614,6 +614,7 @@ load_common: case BPF_LD | BPF_B | BPF_IND: load_order = 0; load_ind: + update_on_xread(ctx); OP_IMM3(ARM_ADD, r_off, r_X, k, ctx); goto load_common; case BPF_LDX | BPF_IMM: diff --git a/arch/arm/nwfpe/entry.S b/arch/arm/nwfpe/entry.S index 71df43547659..39c20afad7ed 100644 --- a/arch/arm/nwfpe/entry.S +++ b/arch/arm/nwfpe/entry.S @@ -95,9 +95,10 @@ emulate: reteq r4 @ no, return failure next: + uaccess_enable r3 .Lx1: ldrt r6, [r5], #4 @ get the next instruction and @ increment PC - + uaccess_disable r3 and r2, r6, #0x0F000000 @ test for FP insns teq r2, #0x0C000000 teqne r2, #0x0D000000 diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index 2235081a04ee..8861c367d061 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -495,7 +495,7 @@ void __init orion_ge00_switch_init(struct dsa_platform_data *d, int irq) d->netdev = &orion_ge00.dev; for (i = 0; i < d->nr_chips; i++) - d->chip[i].host_dev = &orion_ge00_shared.dev; + d->chip[i].host_dev = &orion_ge_mvmdio.dev; orion_switch_device.dev.platform_data = d; platform_device_register(&orion_switch_device); diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c index 79c33eca09a3..7bd22d8e5b11 100644 --- a/arch/arm/plat-orion/gpio.c +++ b/arch/arm/plat-orion/gpio.c @@ -407,7 +407,7 @@ static int gpio_irq_set_type(struct irq_data *d, u32 type) return 0; } -static void gpio_irq_handler(unsigned __irq, struct irq_desc *desc) +static void gpio_irq_handler(struct irq_desc *desc) { struct orion_gpio_chip *ochip = irq_desc_get_handler_data(desc); u32 cause, type; diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c index ad9529cc4203..daa1a65f2eb7 100644 --- a/arch/arm/plat-pxa/ssp.c +++ b/arch/arm/plat-pxa/ssp.c @@ -107,7 +107,6 @@ static const struct of_device_id pxa_ssp_of_ids[] = { { .compatible = "mvrl,pxa168-ssp", .data = (void *) PXA168_SSP }, { .compatible = "mrvl,pxa910-ssp", .data = (void *) PXA910_SSP }, { .compatible = "mrvl,ce4100-ssp", .data = (void *) CE4100_SSP }, - { .compatible = "mrvl,lpss-ssp", .data = (void *) LPSS_SSP }, { }, }; MODULE_DEVICE_TABLE(of, pxa_ssp_of_ids); diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S index f00e08075938..10fd99c568c6 100644 --- a/arch/arm/xen/hypercall.S +++ b/arch/arm/xen/hypercall.S @@ -98,8 +98,23 @@ ENTRY(privcmd_call) mov r1, r2 mov r2, r3 ldr r3, [sp, #8] + /* + * Privcmd calls are issued by the userspace. We need to allow the + * kernel to access the userspace memory before issuing the hypercall. + */ + uaccess_enable r4 + + /* r4 is loaded now as we use it as scratch register before */ ldr r4, [sp, #4] __HVC(XEN_IMM) + + /* + * Disable userspace access from kernel. This is fine to do it + * unconditionally as no set_fs(KERNEL_DS)/set_fs(get_ds()) is + * called before. 
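The alignment-fixup and privcmd hunks above share one pattern: code that must legitimately touch user memory now opens an explicit user-access window with uaccess_save_and_enable() (or the uaccess_enable assembler macro) and closes it again with uaccess_restore()/uaccess_disable once the access is done. A minimal C sketch of that bracket follows; fixup_user_word() and its contents are hypothetical and only illustrate the shape, they are not part of the patch.

/*
 * Sketch only: bracket a deliberate user-space read the way the
 * do_alignment_* hunks above do.  fixup_user_word() is hypothetical.
 */
static int fixup_user_word(unsigned long __user *uaddr, unsigned long *out)
{
	unsigned long val;
	unsigned int __ua_flags = uaccess_save_and_enable();	/* open the user-access window */
	int err = __get_user(val, uaddr);			/* the actual user read */

	uaccess_restore(__ua_flags);				/* put the previous state back */
	if (err)
		return err;
	*out = val;
	return 0;
}

The property visible in all of these hunks is that the enable/restore pair tightly wraps only the user access itself, so the window is never left open across unrelated kernel work.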
+ */ + uaccess_disable r4 + ldm sp!, {r4} ret lr ENDPROC(privcmd_call); diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 7d95663c0160..07d1811aa03f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -32,6 +32,7 @@ config ARM64 select GENERIC_CLOCKEVENTS_BROADCAST select GENERIC_CPU_AUTOPROBE select GENERIC_EARLY_IOREMAP + select GENERIC_IDLE_POLL_SETUP select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW_LEVEL @@ -331,6 +332,22 @@ config ARM64_ERRATUM_845719 If unsure, say Y. +config ARM64_ERRATUM_843419 + bool "Cortex-A53: 843419: A load or store might access an incorrect address" + depends on MODULES + default y + help + This option builds kernel modules using the large memory model in + order to avoid the use of the ADRP instruction, which can cause + a subsequent memory access to use an incorrect address on Cortex-A53 + parts up to r0p4. + + Note that the kernel itself must be linked with a version of ld + which fixes potentially affected ADRP instructions through the + use of veneers. + + If unsure, say Y. + endmenu diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 15ff5b4156fd..d10b5d483022 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -41,6 +41,10 @@ endif CHECKFLAGS += -D__aarch64__ +ifeq ($(CONFIG_ARM64_ERRATUM_843419), y) +KBUILD_CFLAGS_MODULE += -mcmodel=large +endif + # Default value head-y := arch/arm64/kernel/head.o diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi index d18ee4259ee5..06a15644be38 100644 --- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi @@ -81,7 +81,7 @@ }; idle-states { - entry-method = "arm,psci"; + entry-method = "psci"; CPU_SLEEP_0: cpu-sleep-0 { compatible = "arm,idle-state"; diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi index a712bea3bf2c..cc093a482aa4 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi @@ -106,7 +106,7 @@ }; idle-states { - entry-method = "arm,psci"; + entry-method = "psci"; cpu_sleep: cpu-sleep-0 { compatible = "arm,idle-state"; diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h index 2bb7009bdac7..a57601f9d17c 100644 --- a/arch/arm64/include/asm/hardirq.h +++ b/arch/arm64/include/asm/hardirq.h @@ -43,9 +43,4 @@ static inline void ack_bad_irq(unsigned int irq) irq_err_count++; } -/* - * No arch-specific IRQ flags. 
- */ -#define set_irq_flags(irq, flags) - #endif /* __ASM_HARDIRQ_H */ diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 7605e095217f..9694f2654593 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -95,6 +95,7 @@ SCTLR_EL2_SA | SCTLR_EL2_I) /* TCR_EL2 Registers bits */ +#define TCR_EL2_RES1 ((1 << 31) | (1 << 23)) #define TCR_EL2_TBI (1 << 20) #define TCR_EL2_PS (7 << 16) #define TCR_EL2_PS_40B (2 << 16) @@ -106,9 +107,10 @@ #define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \ TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ) -#define TCR_EL2_FLAGS (TCR_EL2_PS_40B) +#define TCR_EL2_FLAGS (TCR_EL2_RES1 | TCR_EL2_PS_40B) /* VTCR_EL2 Registers bits */ +#define VTCR_EL2_RES1 (1 << 31) #define VTCR_EL2_PS_MASK (7 << 16) #define VTCR_EL2_TG0_MASK (1 << 14) #define VTCR_EL2_TG0_4K (0 << 14) @@ -147,7 +149,8 @@ */ #define VTCR_EL2_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \ VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \ - VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B) + VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \ + VTCR_EL2_RES1) #define VTTBR_X (38 - VTCR_EL2_T0SZ_40B) #else /* @@ -158,7 +161,8 @@ */ #define VTCR_EL2_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \ VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \ - VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B) + VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \ + VTCR_EL2_RES1) #define VTTBR_X (37 - VTCR_EL2_T0SZ_40B) #endif @@ -168,7 +172,6 @@ #define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT) /* Hyp System Trap Register */ -#define HSTR_EL2_TTEE (1 << 16) #define HSTR_EL2_T(x) (1 << x) /* Hyp Coproccessor Trap Register Shifts */ diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 67fa0de3d483..5e377101f919 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -53,9 +53,7 @@ #define IFSR32_EL2 25 /* Instruction Fault Status Register */ #define FPEXC32_EL2 26 /* Floating-Point Exception Control Register */ #define DBGVCR32_EL2 27 /* Debug Vector Catch Register */ -#define TEECR32_EL1 28 /* ThumbEE Configuration Register */ -#define TEEHBR32_EL1 29 /* ThumbEE Handler Base Register */ -#define NR_SYS_REGS 30 +#define NR_SYS_REGS 28 /* 32bit mapping */ #define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 415938dc45cf..ed039688c221 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -30,19 +30,16 @@ #define __KVM_HAVE_ARCH_INTC_INITIALIZED -#if defined(CONFIG_KVM_ARM_MAX_VCPUS) -#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS -#else -#define KVM_MAX_VCPUS 0 -#endif - #define KVM_USER_MEM_SLOTS 32 #define KVM_PRIVATE_MEM_SLOTS 4 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 +#define KVM_HALT_POLL_NS_DEFAULT 500000 #include <kvm/arm_vgic.h> #include <kvm/arm_arch_timer.h> +#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS + #define KVM_VCPU_MAX_FEATURES 3 int __attribute_const__ kvm_target_cpu(void); @@ -195,6 +192,7 @@ struct kvm_vm_stat { struct kvm_vcpu_stat { u32 halt_successful_poll; + u32 halt_attempted_poll; u32 halt_wakeup; }; diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 6900b2d95371..26b066690593 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -26,13 +26,9 @@ * Software defined PTE bits definition. 
*/ #define PTE_VALID (_AT(pteval_t, 1) << 0) +#define PTE_WRITE (PTE_DBM) /* same as DBM (51) */ #define PTE_DIRTY (_AT(pteval_t, 1) << 55) #define PTE_SPECIAL (_AT(pteval_t, 1) << 56) -#ifdef CONFIG_ARM64_HW_AFDBM -#define PTE_WRITE (PTE_DBM) /* same as DBM */ -#else -#define PTE_WRITE (_AT(pteval_t, 1) << 57) -#endif #define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */ /* @@ -83,7 +79,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); #define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) -#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN) +#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN) #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) #define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) @@ -146,7 +142,7 @@ extern struct page *empty_zero_page; #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) #ifdef CONFIG_ARM64_HW_AFDBM -#define pte_hw_dirty(pte) (!(pte_val(pte) & PTE_RDONLY)) +#define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY)) #else #define pte_hw_dirty(pte) (0) #endif @@ -238,7 +234,7 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr); * When hardware DBM is not present, the sofware PTE_DIRTY bit is updated via * the page fault mechanism. Checking the dirty status of a pte becomes: * - * PTE_DIRTY || !PTE_RDONLY + * PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY) */ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) @@ -500,10 +496,10 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | - PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK; + PTE_PROT_NONE | PTE_VALID | PTE_WRITE; /* preserve the hardware dirty information */ if (pte_hw_dirty(pte)) - newprot |= PTE_DIRTY; + pte = pte_mkdirty(pte); pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); return pte; } diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 3bc498c250dc..41e58fe3c041 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -44,7 +44,7 @@ #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) -#define __NR_compat_syscalls 388 +#define __NR_compat_syscalls 390 #endif #define __ARCH_WANT_SYS_CLONE diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index cef934a90f17..5b925b761a2a 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -797,3 +797,12 @@ __SYSCALL(__NR_memfd_create, sys_memfd_create) __SYSCALL(__NR_bpf, sys_bpf) #define __NR_execveat 387 __SYSCALL(__NR_execveat, compat_sys_execveat) +#define __NR_userfaultfd 388 +__SYSCALL(__NR_userfaultfd, sys_userfaultfd) +#define __NR_membarrier 389 +__SYSCALL(__NR_membarrier, sys_membarrier) + +/* + * Please add new compat syscalls above this comment and update + * __NR_compat_syscalls in asm/unistd.h. 
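Two new compat entries are wired up above, userfaultfd as 388 and membarrier as 389, and the matching asm/unistd.h hunk bumps __NR_compat_syscalls from 388 to 390 so the count stays one past the highest number in the table. As a rough userspace sketch of exercising the new membarrier entry (standard glibc headers assumed to be new enough to define __NR_membarrier; nothing here comes from the patch itself), with MEMBARRIER_CMD_QUERY being command 0:

/* Sketch: probe membarrier support through the raw syscall interface. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	long cmds = syscall(__NR_membarrier, 0 /* MEMBARRIER_CMD_QUERY */, 0);

	if (cmds < 0)
		perror("membarrier");	/* ENOSYS if the syscall table lacks the entry */
	else
		printf("supported membarrier commands: %#lx\n", cmds);
	return 0;
}

A 32-bit binary run on an arm64 kernel goes through exactly this compat table, which is why the new entries and the __NR_compat_syscalls bump have to land together.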
+ */ diff --git a/arch/arm64/include/uapi/asm/signal.h b/arch/arm64/include/uapi/asm/signal.h index 8d1e7236431b..991bf5db2ca1 100644 --- a/arch/arm64/include/uapi/asm/signal.h +++ b/arch/arm64/include/uapi/asm/signal.h @@ -19,6 +19,9 @@ /* Required for AArch32 compatibility. */ #define SA_RESTORER 0x04000000 +#define MINSIGSTKSZ 5120 +#define SIGSTKSZ 16384 + #include <asm-generic/signal.h> #endif diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 9b3b62ac9c24..253021ef2769 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -134,7 +134,7 @@ static int os_lock_notify(struct notifier_block *self, unsigned long action, void *data) { int cpu = (unsigned long)data; - if (action == CPU_ONLINE) + if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE) smp_call_function_single(cpu, clear_os_lock, NULL, 1); return NOTIFY_OK; } @@ -201,7 +201,7 @@ void unregister_step_hook(struct step_hook *hook) } /* - * Call registered single step handers + * Call registered single step handlers * There is no Syndrome info to check for determining the handler. * So we call all the registered handlers, until the right handler is * found which returns zero. @@ -271,20 +271,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr, * Use reader/writer locks instead of plain spinlock. */ static LIST_HEAD(break_hook); -static DEFINE_RWLOCK(break_hook_lock); +static DEFINE_SPINLOCK(break_hook_lock); void register_break_hook(struct break_hook *hook) { - write_lock(&break_hook_lock); - list_add(&hook->node, &break_hook); - write_unlock(&break_hook_lock); + spin_lock(&break_hook_lock); + list_add_rcu(&hook->node, &break_hook); + spin_unlock(&break_hook_lock); } void unregister_break_hook(struct break_hook *hook) { - write_lock(&break_hook_lock); - list_del(&hook->node); - write_unlock(&break_hook_lock); + spin_lock(&break_hook_lock); + list_del_rcu(&hook->node); + spin_unlock(&break_hook_lock); + synchronize_rcu(); } static int call_break_hook(struct pt_regs *regs, unsigned int esr) @@ -292,11 +293,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr) struct break_hook *hook; int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL; - read_lock(&break_hook_lock); - list_for_each_entry(hook, &break_hook, node) + rcu_read_lock(); + list_for_each_entry_rcu(hook, &break_hook, node) if ((esr & hook->esr_mask) == hook->esr_val) fn = hook->fn; - read_unlock(&break_hook_lock); + rcu_read_unlock(); return fn ? 
fn(regs, esr) : DBG_HOOK_ERROR; } diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index e8ca6eaedd02..13671a9cf016 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -258,7 +258,8 @@ static bool __init efi_virtmap_init(void) */ if (!is_normal_ram(md)) prot = __pgprot(PROT_DEVICE_nGnRE); - else if (md->type == EFI_RUNTIME_SERVICES_CODE) + else if (md->type == EFI_RUNTIME_SERVICES_CODE || + !PAGE_ALIGNED(md->phys_addr)) prot = PAGE_KERNEL_EXEC; else prot = PAGE_KERNEL; diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index 08cafc518b9a..0f03a8fe2314 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -178,6 +178,24 @@ ENTRY(ftrace_stub) ENDPROC(ftrace_stub) #ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* save return value regs*/ + .macro save_return_regs + sub sp, sp, #64 + stp x0, x1, [sp] + stp x2, x3, [sp, #16] + stp x4, x5, [sp, #32] + stp x6, x7, [sp, #48] + .endm + + /* restore return value regs*/ + .macro restore_return_regs + ldp x0, x1, [sp] + ldp x2, x3, [sp, #16] + ldp x4, x5, [sp, #32] + ldp x6, x7, [sp, #48] + add sp, sp, #64 + .endm + /* * void ftrace_graph_caller(void) * @@ -204,11 +222,11 @@ ENDPROC(ftrace_graph_caller) * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled. */ ENTRY(return_to_handler) - str x0, [sp, #-16]! + save_return_regs mov x0, x29 // parent's fp bl ftrace_return_to_handler// addr = ftrace_return_to_hander(fp); mov x30, x0 // restore the original return address - ldr x0, [sp], #16 + restore_return_regs ret END(return_to_handler) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index a055be6125cf..90d09eddd5b2 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -523,6 +523,11 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems msr hstr_el2, xzr // Disable CP15 traps to EL2 #endif + /* EL2 debug */ + mrs x0, pmcr_el0 // Disable debug access traps + ubfx x0, x0, #11, #5 // to EL2 and allow access to + msr mdcr_el2, x0 // all PMU counters from EL1 + /* Stage-2 translation */ msr vttbr_el2, xzr diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index c97040ecf838..bba85c8f8037 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -872,7 +872,7 @@ static int hw_breakpoint_reset_notify(struct notifier_block *self, void *hcpu) { int cpu = (long)hcpu; - if (action == CPU_ONLINE) + if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE) smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1); return NOTIFY_OK; } diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index f341866aa810..c08b9ad6f429 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -85,7 +85,7 @@ bool aarch64_insn_is_branch_imm(u32 insn) aarch64_insn_is_bcond(insn)); } -static DEFINE_SPINLOCK(patch_lock); +static DEFINE_RAW_SPINLOCK(patch_lock); static void __kprobes *patch_map(void *addr, int fixmap) { @@ -131,13 +131,13 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn) unsigned long flags = 0; int ret; - spin_lock_irqsave(&patch_lock, flags); + raw_spin_lock_irqsave(&patch_lock, flags); waddr = patch_map(addr, FIX_TEXT_POKE0); ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE); patch_unmap(FIX_TEXT_POKE0); - spin_unlock_irqrestore(&patch_lock, flags); + raw_spin_unlock_irqrestore(&patch_lock, flags); return ret; } diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c 
index 67bf4107f6ef..876eb8df50bf 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -332,12 +332,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21, AARCH64_INSN_IMM_ADR); break; +#ifndef CONFIG_ARM64_ERRATUM_843419 case R_AARCH64_ADR_PREL_PG_HI21_NC: overflow_check = false; case R_AARCH64_ADR_PREL_PG_HI21: ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21, AARCH64_INSN_IMM_ADR); break; +#endif case R_AARCH64_ADD_ABS_LO12_NC: case R_AARCH64_LDST8_ABS_LO12_NC: overflow_check = false; diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 6bab21f84a9f..232247945b1c 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -364,6 +364,8 @@ static void __init relocate_initrd(void) to_free = ram_end - orig_start; size = orig_end - orig_start; + if (!size) + return; /* initrd needs to be relocated completely inside linear mapping */ new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn), diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 948f0ad2de23..71ef6dc89ae5 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -212,14 +212,32 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) /* * VFP save/restore code. + * + * We have to be careful with endianness, since the fpsimd context-switch + * code operates on 128-bit (Q) register values whereas the compat ABI + * uses an array of 64-bit (D) registers. Consequently, we need to swap + * the two halves of each Q register when running on a big-endian CPU. */ +union __fpsimd_vreg { + __uint128_t raw; + struct { +#ifdef __AARCH64EB__ + u64 hi; + u64 lo; +#else + u64 lo; + u64 hi; +#endif + }; +}; + static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) { struct fpsimd_state *fpsimd = ¤t->thread.fpsimd_state; compat_ulong_t magic = VFP_MAGIC; compat_ulong_t size = VFP_STORAGE_SIZE; compat_ulong_t fpscr, fpexc; - int err = 0; + int i, err = 0; /* * Save the hardware registers to the fpsimd_state structure. @@ -235,10 +253,15 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) /* * Now copy the FP registers. Since the registers are packed, * we can copy the prefix we want (V0-V15) as it is. - * FIXME: Won't work if big endian. */ - err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs, - sizeof(frame->ufp.fpregs)); + for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) { + union __fpsimd_vreg vreg = { + .raw = fpsimd->vregs[i >> 1], + }; + + __put_user_error(vreg.lo, &frame->ufp.fpregs[i], err); + __put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err); + } /* Create an AArch32 fpscr from the fpsr and the fpcr. */ fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) | @@ -263,7 +286,7 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame) compat_ulong_t magic = VFP_MAGIC; compat_ulong_t size = VFP_STORAGE_SIZE; compat_ulong_t fpscr; - int err = 0; + int i, err = 0; __get_user_error(magic, &frame->magic, err); __get_user_error(size, &frame->size, err); @@ -273,12 +296,14 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame) if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) return -EINVAL; - /* - * Copy the FP registers into the start of the fpsimd_state. - * FIXME: Won't work if big endian. - */ - err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs, - sizeof(frame->ufp.fpregs)); + /* Copy the FP registers into the start of the fpsimd_state. 
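The compat VFP hunks above replace the old bulk __copy_to_user()/__copy_from_user() of the register file (flagged with a "Won't work if big endian" FIXME) with per-register copies through a union, so each 128-bit Q register is split into the two 64-bit D halves in the order the AArch32 frame expects, regardless of kernel endianness. A small stand-alone sketch of the same idea, reusing the union shape from the patch but outside any kernel context:

/* Sketch: split a 128-bit register value into its D-register halves. */
#include <stdint.h>

union vreg_halves {
	__uint128_t raw;
	struct {
#ifdef __AARCH64EB__
		uint64_t hi;	/* big-endian kernel: high half stored first */
		uint64_t lo;
#else
		uint64_t lo;	/* little-endian kernel: low half stored first */
		uint64_t hi;
#endif
	};
};

static inline void split_vreg(__uint128_t q, uint64_t *d_lo, uint64_t *d_hi)
{
	union vreg_halves v = { .raw = q };

	*d_lo = v.lo;	/* always the architecturally low 64 bits */
	*d_hi = v.hi;	/* always the architecturally high 64 bits */
}

Writing d_lo then d_hi into consecutive 64-bit slots therefore produces the layout compat userspace expects on either endianness, which is what the __put_user_error() loop in the hunk does.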
*/ + for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) { + union __fpsimd_vreg vreg; + + __get_user_error(vreg.lo, &frame->ufp.fpregs[i], err); + __get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err); + fpsimd.vregs[i >> 1] = vreg.raw; + } /* Extract the fpsr and the fpcr from the fpscr */ __get_user_error(fpscr, &frame->ufp.fpscr, err); diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index bfffe8f4bd53..5c7e920e4861 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -41,15 +41,4 @@ config KVM_ARM_HOST ---help--- Provides host support for ARM processors. -config KVM_ARM_MAX_VCPUS - int "Number maximum supported virtual CPUs per VM" - depends on KVM_ARM_HOST - default 4 - help - Static number of max supported virtual CPUs per VM. - - If you choose a high number, the vcpu structures will be quite - large, so only choose a reasonable number that you expect to - actually use. - endif # VIRTUALIZATION diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 37c89ea2c572..e5836138ec42 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -433,20 +433,13 @@ mrs x5, ifsr32_el2 stp x4, x5, [x3] - skip_fpsimd_state x8, 3f + skip_fpsimd_state x8, 2f mrs x6, fpexc32_el2 str x6, [x3, #16] -3: - skip_debug_state x8, 2f +2: + skip_debug_state x8, 1f mrs x7, dbgvcr32_el2 str x7, [x3, #24] -2: - skip_tee_state x8, 1f - - add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1) - mrs x4, teecr32_el1 - mrs x5, teehbr32_el1 - stp x4, x5, [x3] 1: .endm @@ -466,16 +459,9 @@ msr dacr32_el2, x4 msr ifsr32_el2, x5 - skip_debug_state x8, 2f + skip_debug_state x8, 1f ldr x7, [x3, #24] msr dbgvcr32_el2, x7 -2: - skip_tee_state x8, 1f - - add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1) - ldp x4, x5, [x3] - msr teecr32_el1, x4 - msr teehbr32_el1, x5 1: .endm @@ -570,8 +556,6 @@ alternative_endif mrs x3, cntv_ctl_el0 and x3, x3, #3 str w3, [x0, #VCPU_TIMER_CNTV_CTL] - bic x3, x3, #1 // Clear Enable - msr cntv_ctl_el0, x3 isb @@ -579,6 +563,9 @@ alternative_endif str x3, [x0, #VCPU_TIMER_CNTV_CVAL] 1: + // Disable the virtual timer + msr cntv_ctl_el0, xzr + // Allow physical timer/counter access for the host mrs x2, cnthctl_el2 orr x2, x2, #3 @@ -753,6 +740,9 @@ ENTRY(__kvm_vcpu_run) // Guest context add x2, x0, #VCPU_CONTEXT + // We must restore the 32-bit state before the sysregs, thanks + // to Cortex-A57 erratum #852523. + restore_guest_32bit_state bl __restore_sysregs skip_debug_state x3, 1f @@ -760,7 +750,6 @@ ENTRY(__kvm_vcpu_run) kern_hyp_va x3 bl __restore_debug 1: - restore_guest_32bit_state restore_guest_regs // That's it, no more messing around. 
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index b41607d270ac..d03d3af17e7e 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -272,7 +272,7 @@ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, { __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; - if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) + if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; return 0; } @@ -314,7 +314,7 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, { __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; - if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) + if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; return 0; @@ -358,7 +358,7 @@ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, { __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; - if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) + if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; return 0; } @@ -400,7 +400,7 @@ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, { __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; - if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) + if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; return 0; } @@ -539,13 +539,6 @@ static const struct sys_reg_desc sys_reg_descs[] = { { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110), trap_dbgauthstatus_el1 }, - /* TEECR32_EL1 */ - { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000), - NULL, reset_val, TEECR32_EL1, 0 }, - /* TEEHBR32_EL1 */ - { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000), - NULL, reset_val, TEEHBR32_EL1, 0 }, - /* MDCCSR_EL1 */ { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000), trap_raz_wi }, diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 0bcc4bc94b4a..99224dcebdc5 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -100,7 +100,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, if (IS_ENABLED(CONFIG_ZONE_DMA) && dev->coherent_dma_mask <= DMA_BIT_MASK(32)) flags |= GFP_DMA; - if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) { + if (dev_get_cma_area(dev) && (flags & __GFP_WAIT)) { struct page *page; void *addr; diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index aba9ead1384c..9fadf6d7039b 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -287,6 +287,7 @@ retry: * starvation. 
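The four sys_regs.c hunks above are a plain argument-order fix: the set_{bvr,bcr,wvr,wcr} helpers were calling copy_from_user(uaddr, r, ...), copying kernel state out through a user pointer, when a "set" operation has to copy from user space into the kernel buffer. The convention is destination first, source second, so the corrected shape looks like this (register and helper names here are illustrative, not the KVM ones):

/* Sketch: 'set' copies user -> kernel, 'get' copies kernel -> user. */
static int set_debug_reg(__u64 *kreg, const void __user *uaddr, size_t len)
{
	if (copy_from_user(kreg, uaddr, len) != 0)	/* dst, src, len */
		return -EFAULT;
	return 0;
}

static int get_debug_reg(const __u64 *kreg, void __user *uaddr, size_t len)
{
	if (copy_to_user(uaddr, kreg, len) != 0)	/* dst, src, len */
		return -EFAULT;
	return 0;
}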
*/ mm_flags &= ~FAULT_FLAG_ALLOW_RETRY; + mm_flags |= FAULT_FLAG_TRIED; goto retry; } } diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild index f61f2dd67464..241b9b9729d8 100644 --- a/arch/avr32/include/asm/Kbuild +++ b/arch/avr32/include/asm/Kbuild @@ -20,4 +20,5 @@ generic-y += sections.h generic-y += topology.h generic-y += trace_clock.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c index d51ff8f1c541..96cabad68489 100644 --- a/arch/avr32/mach-at32ap/extint.c +++ b/arch/avr32/mach-at32ap/extint.c @@ -144,7 +144,7 @@ static struct irq_chip eic_chip = { .irq_set_type = eic_set_irq_type, }; -static void demux_eic_irq(unsigned int irq, struct irq_desc *desc) +static void demux_eic_irq(struct irq_desc *desc) { struct eic *eic = irq_desc_get_handler_data(desc); unsigned long status, pending; diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c index 157a5e0e789f..4f61378c3453 100644 --- a/arch/avr32/mach-at32ap/pio.c +++ b/arch/avr32/mach-at32ap/pio.c @@ -281,7 +281,7 @@ static struct irq_chip gpio_irqchip = { .irq_set_type = gpio_irq_type, }; -static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) +static void gpio_irq_handler(struct irq_desc *desc) { struct pio_device *pio = irq_desc_get_chip_data(desc); unsigned gpio_irq; diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild index 61cd1e786a14..91d49c0a3118 100644 --- a/arch/blackfin/include/asm/Kbuild +++ b/arch/blackfin/include/asm/Kbuild @@ -46,4 +46,5 @@ generic-y += types.h generic-y += ucontext.h generic-y += unaligned.h generic-y += user.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/blackfin/include/asm/irq_handler.h b/arch/blackfin/include/asm/irq_handler.h index 4b2a992794d7..d2f90c72378e 100644 --- a/arch/blackfin/include/asm/irq_handler.h +++ b/arch/blackfin/include/asm/irq_handler.h @@ -60,7 +60,7 @@ extern void bfin_internal_mask_irq(unsigned int irq); extern void bfin_internal_unmask_irq(unsigned int irq); struct irq_desc; -extern void bfin_demux_mac_status_irq(unsigned int, struct irq_desc *); -extern void bfin_demux_gpio_irq(unsigned int, struct irq_desc *); +extern void bfin_demux_mac_status_irq(struct irq_desc *); +extern void bfin_demux_gpio_irq(struct irq_desc *); #endif diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c index 0ba25764b8c0..052cde5ed2e4 100644 --- a/arch/blackfin/kernel/irqchip.c +++ b/arch/blackfin/kernel/irqchip.c @@ -107,7 +107,7 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs) * than crashing, do something sensible. 
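A large share of the hunks in this section are the same mechanical conversion: chained-IRQ flow handlers drop their redundant "unsigned int irq" parameter and receive only the struct irq_desc pointer (viper, zeus, ecard, bast, s3c64xx, neponset, orion, avr32, blackfin, c6x, m68k and so on). Handler data and chip data still come from the descriptor, and where the parent interrupt number is genuinely needed it is recovered with irq_desc_get_irq(), as the bfin_demux_gpio_irq() hunk below does. A hedged sketch of a demux handler in the new one-argument form, with hypothetical names (my_chip, my_chip_pending, irq_base) standing in for real driver state:

/* Sketch: a cascaded-interrupt demux handler in the one-argument form. */
static void board_demux_handler(struct irq_desc *desc)
{
	struct my_chip *chip = irq_desc_get_handler_data(desc);	/* installed at setup time */
	unsigned long pending = my_chip_pending(chip);			/* hypothetical status read */

	/* irq_desc_get_irq(desc) recovers the parent number if it is still wanted. */
	while (pending) {
		unsigned int bit = __ffs(pending);

		generic_handle_irq(chip->irq_base + bit);		/* hand off each child interrupt */
		pending &= ~BIT(bit);
	}
}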
*/ if (irq >= NR_IRQS) - handle_bad_irq(irq, &bad_irq_desc); + handle_bad_irq(&bad_irq_desc); else generic_handle_irq(irq); diff --git a/arch/blackfin/mach-bf537/ints-priority.c b/arch/blackfin/mach-bf537/ints-priority.c index 14b2f74554dc..a48baae4384d 100644 --- a/arch/blackfin/mach-bf537/ints-priority.c +++ b/arch/blackfin/mach-bf537/ints-priority.c @@ -89,8 +89,7 @@ static struct irq_chip bf537_generic_error_irqchip = { .irq_unmask = bf537_generic_error_unmask_irq, }; -static void bf537_demux_error_irq(unsigned int int_err_irq, - struct irq_desc *inta_desc) +static void bf537_demux_error_irq(struct irq_desc *inta_desc) { int irq = 0; @@ -182,15 +181,12 @@ static struct irq_chip bf537_mac_rx_irqchip = { .irq_unmask = bf537_mac_rx_unmask_irq, }; -static void bf537_demux_mac_rx_irq(unsigned int __int_irq, - struct irq_desc *desc) +static void bf537_demux_mac_rx_irq(struct irq_desc *desc) { - unsigned int int_irq = irq_desc_get_irq(desc); - if (bfin_read_DMA1_IRQ_STATUS() & (DMA_DONE | DMA_ERR)) bfin_handle_irq(IRQ_MAC_RX); else - bfin_demux_gpio_irq(int_irq, desc); + bfin_demux_gpio_irq(desc); } #endif diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c index a6d1b03cdf36..e8d4d748d0fd 100644 --- a/arch/blackfin/mach-common/ints-priority.c +++ b/arch/blackfin/mach-common/ints-priority.c @@ -656,8 +656,7 @@ static struct irq_chip bfin_mac_status_irqchip = { .irq_set_wake = bfin_mac_status_set_wake, }; -void bfin_demux_mac_status_irq(unsigned int int_err_irq, - struct irq_desc *inta_desc) +void bfin_demux_mac_status_irq(struct irq_desc *inta_desc) { int i, irq = 0; u32 status = bfin_read_EMAC_SYSTAT(); @@ -825,7 +824,7 @@ static void bfin_demux_gpio_block(unsigned int irq) } } -void bfin_demux_gpio_irq(unsigned int __inta_irq, struct irq_desc *desc) +void bfin_demux_gpio_irq(struct irq_desc *desc) { unsigned int inta_irq = irq_desc_get_irq(desc); unsigned int irq; diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index f17c4dc6050c..945544ec603e 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild @@ -59,4 +59,5 @@ generic-y += types.h generic-y += ucontext.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/c6x/platforms/megamod-pic.c b/arch/c6x/platforms/megamod-pic.c index d487698e978a..ddcb45d7dfa7 100644 --- a/arch/c6x/platforms/megamod-pic.c +++ b/arch/c6x/platforms/megamod-pic.c @@ -93,7 +93,7 @@ static struct irq_chip megamod_chip = { .irq_unmask = unmask_megamod, }; -static void megamod_irq_cascade(unsigned int __irq, struct irq_desc *desc) +static void megamod_irq_cascade(struct irq_desc *desc) { struct megamod_cascade_data *cascade; struct megamod_pic *pic; diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index b7f68192d15b..1778805f6380 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild @@ -43,4 +43,5 @@ generic-y += topology.h generic-y += trace_clock.h generic-y += types.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild index 8e47b832cc76..1fa084cf1a43 100644 --- a/arch/frv/include/asm/Kbuild +++ b/arch/frv/include/asm/Kbuild @@ -7,3 +7,4 @@ generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c index f9c86c475bbd..f211839e2cae 100644 
--- a/arch/frv/mb93090-mb00/pci-vdk.c +++ b/arch/frv/mb93090-mb00/pci-vdk.c @@ -294,6 +294,8 @@ void pcibios_fixup_bus(struct pci_bus *bus) printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number); #endif + pci_read_bridge_bases(bus); + if (bus->number == 0) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild index 70e6ae1e7006..373cb23301e3 100644 --- a/arch/h8300/include/asm/Kbuild +++ b/arch/h8300/include/asm/Kbuild @@ -73,4 +73,5 @@ generic-y += uaccess.h generic-y += ucontext.h generic-y += unaligned.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index daee37bd0999..db8ddabc6bd2 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -58,4 +58,5 @@ generic-y += types.h generic-y += ucontext.h generic-y += unaligned.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild index 9de3ba12f6b9..502a91d8dbbd 100644 --- a/arch/ia64/include/asm/Kbuild +++ b/arch/ia64/include/asm/Kbuild @@ -8,3 +8,4 @@ generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += trace_clock.h generic-y += vtime.h +generic-y += word-at-a-time.h diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index 95c39b95e97e..99c96a5e6016 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h @@ -11,7 +11,7 @@ -#define NR_syscalls 319 /* length of syscall table */ +#define NR_syscalls 321 /* length of syscall table */ /* * The following defines stop scripts/checksyscalls.sh from complaining about diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h index 461079560c78..98e94e19a5a0 100644 --- a/arch/ia64/include/uapi/asm/unistd.h +++ b/arch/ia64/include/uapi/asm/unistd.h @@ -332,5 +332,7 @@ #define __NR_memfd_create 1340 #define __NR_bpf 1341 #define __NR_execveat 1342 +#define __NR_userfaultfd 1343 +#define __NR_membarrier 1344 #endif /* _UAPI_ASM_IA64_UNISTD_H */ diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index ae0de7bf5525..37cc7a65cd3e 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -1768,5 +1768,7 @@ sys_call_table: data8 sys_memfd_create // 1340 data8 sys_bpf data8 sys_execveat + data8 sys_userfaultfd + data8 sys_membarrier .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index d89b6013c941..7cc3be9fa7c6 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -533,9 +533,10 @@ void pcibios_fixup_bus(struct pci_bus *b) { struct pci_dev *dev; - if (b->self) + if (b->self) { + pci_read_bridge_bases(b); pcibios_fixup_bridge_resources(b->self); - + } list_for_each_entry(dev, &b->devices, bus_list) pcibios_fixup_device_resources(dev); platform_pci_fixup_bus(b); diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild index e0eb704ca1fa..fd104bd221ce 100644 --- a/arch/m32r/include/asm/Kbuild +++ b/arch/m32r/include/asm/Kbuild @@ -9,3 +9,4 @@ generic-y += module.h generic-y += preempt.h generic-y += sections.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/m68k/amiga/amiints.c b/arch/m68k/amiga/amiints.c index 47b5f90002ab..7ff739e94896 100644 --- a/arch/m68k/amiga/amiints.c +++ b/arch/m68k/amiga/amiints.c @@ -46,7 +46,7 @@ static struct irq_chip 
amiga_irq_chip = { * The builtin Amiga hardware interrupt handlers. */ -static void ami_int1(unsigned int irq, struct irq_desc *desc) +static void ami_int1(struct irq_desc *desc) { unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; @@ -69,7 +69,7 @@ static void ami_int1(unsigned int irq, struct irq_desc *desc) } } -static void ami_int3(unsigned int irq, struct irq_desc *desc) +static void ami_int3(struct irq_desc *desc) { unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; @@ -92,7 +92,7 @@ static void ami_int3(unsigned int irq, struct irq_desc *desc) } } -static void ami_int4(unsigned int irq, struct irq_desc *desc) +static void ami_int4(struct irq_desc *desc) { unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; @@ -121,7 +121,7 @@ static void ami_int4(unsigned int irq, struct irq_desc *desc) } } -static void ami_int5(unsigned int irq, struct irq_desc *desc) +static void ami_int5(struct irq_desc *desc) { unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; diff --git a/arch/m68k/coldfire/intc-5272.c b/arch/m68k/coldfire/intc-5272.c index 47371de60427..b0a19e207a63 100644 --- a/arch/m68k/coldfire/intc-5272.c +++ b/arch/m68k/coldfire/intc-5272.c @@ -143,12 +143,10 @@ static int intc_irq_set_type(struct irq_data *d, unsigned int type) * We need to be careful with the masking/acking due to the side effects * of masking an interrupt. */ -static void intc_external_irq(unsigned int __irq, struct irq_desc *desc) +static void intc_external_irq(struct irq_desc *desc) { - unsigned int irq = irq_desc_get_irq(desc); - irq_desc_get_chip(desc)->irq_ack(&desc->irq_data); - handle_simple_irq(irq, desc); + handle_simple_irq(desc); } static struct irq_chip intc_irq_chip = { diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 0b6b40d37b95..5b4ec541ba7c 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -57,7 +58,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -67,10 +67,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -179,6 +181,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -206,6 +209,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -370,6 +375,7 @@ CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set 
CONFIG_PPP=m @@ -537,6 +543,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index eeb3a8991fc4..6e5198e2c124 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -344,6 +349,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -495,6 +501,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index 3a7006654ce9..f75600b0ca23 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m 
CONFIG_NFT_REDIR_IPV6=m @@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -355,6 +360,7 @@ CONFIG_NE2000=y # CONFIG_NET_VENDOR_SEEQ is not set CONFIG_SMC91X=y # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -517,6 +523,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index 0586b323a673..a42d91c389a6 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -343,6 +348,7 @@ CONFIG_BVME6000_NET=y # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index ad1dbce07aa4..77f4a11083e9 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m 
CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -345,6 +350,7 @@ CONFIG_HPLANCE=y # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -497,6 +503,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index b44acacaecf4..5a329f77329b 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -54,7 +55,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -64,10 +64,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -176,6 +178,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -203,6 +206,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -364,6 +369,7 @@ CONFIG_MAC8390=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -519,6 +525,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 8afca3753db1..83c80d2030ec 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y 
CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -64,7 +65,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -74,10 +74,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -186,6 +188,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -213,6 +216,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -281,6 +285,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -410,6 +415,7 @@ CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_SEEQ is not set CONFIG_SMC91X=y # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m @@ -599,6 +605,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index ef00875994d9..6cb42c3bf5a2 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -52,7 +53,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -62,10 +62,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -174,6 +176,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -201,6 +204,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -266,6 +270,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -343,6 +348,7 @@ CONFIG_MVME147_NET=y # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m 
CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 387c2bd90ff1..c7508c30330c 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -343,6 +348,7 @@ CONFIG_MVME16x_NET=y # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index 35355c1bc714..64b71664a303 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m 
CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -354,6 +359,7 @@ CONFIG_NE2000=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m @@ -510,6 +516,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 8442d267b877..9a4cab78a2ea 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -341,6 +346,7 @@ CONFIG_SUN3_82586=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 0e1b542e1555..1a2eaac13dbd 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m 
CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -341,6 +346,7 @@ CONFIG_SUN3LANCE=y # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h index 81ca118d58af..a644f4a53b94 100644 --- a/arch/m68k/include/asm/irq.h +++ b/arch/m68k/include/asm/irq.h @@ -64,8 +64,7 @@ extern void m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_regs *)); extern void m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt); extern void m68k_setup_irq_controller(struct irq_chip *, - void (*handle)(unsigned int irq, - struct irq_desc *desc), + void (*handle)(struct irq_desc *desc), unsigned int irq, unsigned int cnt); extern unsigned int irq_canonicalize(unsigned int irq); diff --git a/arch/m68k/include/asm/linkage.h b/arch/m68k/include/asm/linkage.h index 5a822bb790f7..066e74f666ae 100644 --- a/arch/m68k/include/asm/linkage.h +++ b/arch/m68k/include/asm/linkage.h @@ -4,4 +4,34 @@ #define __ALIGN .align 4 #define __ALIGN_STR ".align 4" +/* + * Make sure the compiler doesn't do anything stupid with the + * arguments on the stack - they are owned by the *caller*, not + * the callee. This just fools gcc into not spilling into them, + * and keeps it from doing tailcall recursion and/or using the + * stack slots for temporaries, since they are live and "used" + * all the way to the end of the function. + */ +#define asmlinkage_protect(n, ret, args...) \ + __asmlinkage_protect##n(ret, ##args) +#define __asmlinkage_protect_n(ret, args...) 
\ + __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args) +#define __asmlinkage_protect0(ret) \ + __asmlinkage_protect_n(ret) +#define __asmlinkage_protect1(ret, arg1) \ + __asmlinkage_protect_n(ret, "m" (arg1)) +#define __asmlinkage_protect2(ret, arg1, arg2) \ + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2)) +#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \ + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3)) +#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \ + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ + "m" (arg4)) +#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \ + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ + "m" (arg4), "m" (arg5)) +#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \ + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ + "m" (arg4), "m" (arg5), "m" (arg6)) + #endif diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h index fe3fc9ae1b69..53c632c85b03 100644 --- a/arch/m68k/include/asm/mac_via.h +++ b/arch/m68k/include/asm/mac_via.h @@ -261,7 +261,7 @@ extern void via_irq_enable(int); extern void via_irq_disable(int); extern void via_nubus_irq_startup(int irq); extern void via_nubus_irq_shutdown(int irq); -extern void via1_irq(unsigned int irq, struct irq_desc *desc); +extern void via1_irq(struct irq_desc *desc); extern void via1_set_head(int); extern int via2_scsi_drq_pending(void); diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 244e0dbe45db..0793a7f17417 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -4,7 +4,7 @@ #include <uapi/asm/unistd.h> -#define NR_syscalls 356 +#define NR_syscalls 375 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index 61fb6cb9d2ae..5e6fae6c275f 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h @@ -361,5 +361,24 @@ #define __NR_memfd_create 353 #define __NR_bpf 354 #define __NR_execveat 355 +#define __NR_socket 356 +#define __NR_socketpair 357 +#define __NR_bind 358 +#define __NR_connect 359 +#define __NR_listen 360 +#define __NR_accept4 361 +#define __NR_getsockopt 362 +#define __NR_setsockopt 363 +#define __NR_getsockname 364 +#define __NR_getpeername 365 +#define __NR_sendto 366 +#define __NR_sendmsg 367 +#define __NR_recvfrom 368 +#define __NR_recvmsg 369 +#define __NR_shutdown 370 +#define __NR_recvmmsg 371 +#define __NR_sendmmsg 372 +#define __NR_userfaultfd 373 +#define __NR_membarrier 374 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index a0ec4303f2c8..5dd0e80042f5 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S @@ -376,4 +376,22 @@ ENTRY(sys_call_table) .long sys_memfd_create .long sys_bpf .long sys_execveat /* 355 */ - + .long sys_socket + .long sys_socketpair + .long sys_bind + .long sys_connect + .long sys_listen /* 360 */ + .long sys_accept4 + .long sys_getsockopt + .long sys_setsockopt + .long sys_getsockname + .long sys_getpeername /* 365 */ + .long sys_sendto + .long sys_sendmsg + .long sys_recvfrom + .long sys_recvmsg + .long sys_shutdown /* 370 */ + .long sys_recvmmsg + .long sys_sendmmsg + .long sys_userfaultfd + .long sys_membarrier diff --git a/arch/m68k/mac/baboon.c b/arch/m68k/mac/baboon.c index 3fe0e43d44f6..f6f7d42713ec 100644 --- 
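The asmlinkage_protect() macros added to the m68k <asm/linkage.h> above pin the caller-owned argument slots so gcc cannot reuse them as temporaries or tail-call through them. A hedged usage sketch; sys_frob() and do_frob() are hypothetical, only the macro itself comes from the hunk:

```c
/* Hypothetical two-argument syscall body illustrating asmlinkage_protect(). */
asmlinkage long sys_frob(unsigned long a, unsigned long b)
{
	long ret = do_frob(a, b);	/* hypothetical helper */

	/* Keep the on-stack copies of a and b "live" until we return. */
	asmlinkage_protect(2, ret, a, b);
	return ret;
}
```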
a/arch/m68k/mac/baboon.c +++ b/arch/m68k/mac/baboon.c @@ -45,7 +45,7 @@ void __init baboon_init(void) * Baboon interrupt handler. This works a lot like a VIA. */ -static void baboon_irq(unsigned int irq, struct irq_desc *desc) +static void baboon_irq(struct irq_desc *desc) { int irq_bit, irq_num; unsigned char events; diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c index 191610d97689..55d6592783f5 100644 --- a/arch/m68k/mac/oss.c +++ b/arch/m68k/mac/oss.c @@ -63,7 +63,7 @@ void __init oss_nubus_init(void) * Handle miscellaneous OSS interrupts. */ -static void oss_irq(unsigned int __irq, struct irq_desc *desc) +static void oss_irq(struct irq_desc *desc) { int events = oss->irq_pending & (OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM); @@ -99,7 +99,7 @@ static void oss_irq(unsigned int __irq, struct irq_desc *desc) * Unlike the VIA/RBV this is on its own autovector interrupt level. */ -static void oss_nubus_irq(unsigned int irq, struct irq_desc *desc) +static void oss_nubus_irq(struct irq_desc *desc) { int events, irq_bit, i; diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c index 3b9e302e7a37..cd38f29955c8 100644 --- a/arch/m68k/mac/psc.c +++ b/arch/m68k/mac/psc.c @@ -113,7 +113,7 @@ void __init psc_init(void) * PSC interrupt handler. It's a lot like the VIA interrupt handler. */ -static void psc_irq(unsigned int __irq, struct irq_desc *desc) +static void psc_irq(struct irq_desc *desc) { unsigned int offset = (unsigned int)irq_desc_get_handler_data(desc); unsigned int irq = irq_desc_get_irq(desc); diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c index e198dec868e4..ce56e04386e7 100644 --- a/arch/m68k/mac/via.c +++ b/arch/m68k/mac/via.c @@ -446,7 +446,7 @@ void via_nubus_irq_shutdown(int irq) * via6522.c :-), disable/pending masks added. */ -void via1_irq(unsigned int irq, struct irq_desc *desc) +void via1_irq(struct irq_desc *desc) { int irq_num; unsigned char irq_bit, events; @@ -467,7 +467,7 @@ void via1_irq(unsigned int irq, struct irq_desc *desc) } while (events >= irq_bit); } -static void via2_irq(unsigned int irq, struct irq_desc *desc) +static void via2_irq(struct irq_desc *desc) { int irq_num; unsigned char irq_bit, events; @@ -493,7 +493,7 @@ static void via2_irq(unsigned int irq, struct irq_desc *desc) * VIA2 dispatcher as a fast interrupt handler. 
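A recurring change in this series is that chained flow handlers lose their unsigned int irq argument and receive only the irq_desc; handlers that still need the number read it back with irq_desc_get_irq(), as psc_irq() above now does. A hedged sketch of the new shape, with MY_IRQ_BASE and my_demux_bit() as made-up demux details:

```c
#include <linux/irq.h>
#include <linux/irqdesc.h>

static void my_chained_handler(struct irq_desc *desc)
{
	/* The irq number is no longer passed in; recover it when needed. */
	unsigned int irq = irq_desc_get_irq(desc);

	generic_handle_irq(MY_IRQ_BASE + my_demux_bit(irq));	/* hypothetical demux */
}
```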
*/ -void via_nubus_irq(unsigned int irq, struct irq_desc *desc) +static void via_nubus_irq(struct irq_desc *desc) { int slot_irq; unsigned char slot_bit, events; diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild index df31353fd200..29acb89daaaa 100644 --- a/arch/metag/include/asm/Kbuild +++ b/arch/metag/include/asm/Kbuild @@ -54,4 +54,5 @@ generic-y += ucontext.h generic-y += unaligned.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/metag/kernel/irq.c b/arch/metag/kernel/irq.c index a336094a7a6c..3074b64793e6 100644 --- a/arch/metag/kernel/irq.c +++ b/arch/metag/kernel/irq.c @@ -94,13 +94,11 @@ void do_IRQ(int irq, struct pt_regs *regs) "MOV D0.5,%0\n" "MOV D1Ar1,%1\n" "MOV D1RtP,%2\n" - "MOV D0Ar2,%3\n" "SWAP A0StP,D0.5\n" "SWAP PC,D1RtP\n" "MOV A0StP,D0.5\n" : - : "r" (isp), "r" (irq), "r" (desc->handle_irq), - "r" (desc) + : "r" (isp), "r" (desc), "r" (desc->handle_irq) : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4", "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP", "D0.5" diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 2f222f355c4b..b0ae88c9fed9 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -10,3 +10,4 @@ generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += syscalls.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index 6b8b75266801..ae838ed5fcf2 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c @@ -863,7 +863,14 @@ void pcibios_setup_bus_devices(struct pci_bus *bus) void pcibios_fixup_bus(struct pci_bus *bus) { - /* Fixup the bus */ + /* When called from the generic PCI probe, read PCI<->PCI bridge + * bases. This is -not- called when generating the PCI tree from + * the OF device-tree. 
+ */ + if (bus->self != NULL) + pci_read_bridge_bases(bus); + + /* Now fixup the bus bus */ pcibios_setup_bus_self(bus); /* Now fixup devices on that bus */ diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c index 4c496c50edf6..da9f9220048f 100644 --- a/arch/mips/alchemy/common/irq.c +++ b/arch/mips/alchemy/common/irq.c @@ -851,7 +851,7 @@ static struct syscore_ops alchemy_gpic_pmops = { /* create chained handlers for the 4 IC requests to the MIPS IRQ ctrl */ #define DISP(name, base, addr) \ -static void au1000_##name##_dispatch(unsigned int irq, struct irq_desc *d) \ +static void au1000_##name##_dispatch(struct irq_desc *d) \ { \ unsigned long r = __raw_readl((void __iomem *)KSEG1ADDR(addr)); \ if (likely(r)) \ @@ -865,7 +865,7 @@ DISP(ic0r1, AU1000_INTC0_INT_BASE, AU1000_IC0_PHYS_ADDR + IC_REQ1INT) DISP(ic1r0, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ0INT) DISP(ic1r1, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ1INT) -static void alchemy_gpic_dispatch(unsigned int irq, struct irq_desc *d) +static void alchemy_gpic_dispatch(struct irq_desc *d) { int i = __raw_readl(AU1300_GPIC_ADDR + AU1300_GPIC_PRIENC); generic_handle_irq(ALCHEMY_GPIC_INT_BASE + i); diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c index 324ad72d7c36..faeddf119fd4 100644 --- a/arch/mips/alchemy/devboards/bcsr.c +++ b/arch/mips/alchemy/devboards/bcsr.c @@ -86,7 +86,7 @@ EXPORT_SYMBOL_GPL(bcsr_mod); /* * DB1200/PB1200 CPLD IRQ muxer */ -static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d) +static void bcsr_csc_handler(struct irq_desc *d) { unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT); struct irq_chip *chip = irq_desc_get_chip(d); diff --git a/arch/mips/ath25/ar2315.c b/arch/mips/ath25/ar2315.c index ec9a371f1e62..8da996142d6a 100644 --- a/arch/mips/ath25/ar2315.c +++ b/arch/mips/ath25/ar2315.c @@ -69,7 +69,7 @@ static struct irqaction ar2315_ahb_err_interrupt = { .name = "ar2315-ahb-error", }; -static void ar2315_misc_irq_handler(unsigned irq, struct irq_desc *desc) +static void ar2315_misc_irq_handler(struct irq_desc *desc) { u32 pending = ar2315_rst_reg_read(AR2315_ISR) & ar2315_rst_reg_read(AR2315_IMR); diff --git a/arch/mips/ath25/ar5312.c b/arch/mips/ath25/ar5312.c index e63e38fa4880..acd55a9cffe3 100644 --- a/arch/mips/ath25/ar5312.c +++ b/arch/mips/ath25/ar5312.c @@ -73,7 +73,7 @@ static struct irqaction ar5312_ahb_err_interrupt = { .name = "ar5312-ahb-error", }; -static void ar5312_misc_irq_handler(unsigned irq, struct irq_desc *desc) +static void ar5312_misc_irq_handler(struct irq_desc *desc) { u32 pending = ar5312_rst_reg_read(AR5312_ISR) & ar5312_rst_reg_read(AR5312_IMR); diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c index 807132b838b2..eeb3953ed8ac 100644 --- a/arch/mips/ath79/irq.c +++ b/arch/mips/ath79/irq.c @@ -26,7 +26,7 @@ #include "common.h" #include "machtypes.h" -static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc) +static void ath79_misc_irq_handler(struct irq_desc *desc) { void __iomem *base = ath79_reset_base; u32 pending; @@ -119,7 +119,7 @@ static void __init ath79_misc_irq_init(void) irq_set_chained_handler(ATH79_CPU_IRQ(6), ath79_misc_irq_handler); } -static void ar934x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc) +static void ar934x_ip2_irq_dispatch(struct irq_desc *desc) { u32 status; @@ -148,7 +148,7 @@ static void ar934x_ip2_irq_init(void) irq_set_chained_handler(ATH79_CPU_IRQ(2), ar934x_ip2_irq_dispatch); } -static void 
qca955x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc) +static void qca955x_ip2_irq_dispatch(struct irq_desc *desc) { u32 status; @@ -171,7 +171,7 @@ static void qca955x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc) } } -static void qca955x_ip3_irq_dispatch(unsigned int irq, struct irq_desc *desc) +static void qca955x_ip3_irq_dispatch(struct irq_desc *desc) { u32 status; @@ -293,8 +293,26 @@ static int __init ath79_misc_intc_of_init( return 0; } -IRQCHIP_DECLARE(ath79_misc_intc, "qca,ar7100-misc-intc", - ath79_misc_intc_of_init); + +static int __init ar7100_misc_intc_of_init( + struct device_node *node, struct device_node *parent) +{ + ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask; + return ath79_misc_intc_of_init(node, parent); +} + +IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc", + ar7100_misc_intc_of_init); + +static int __init ar7240_misc_intc_of_init( + struct device_node *node, struct device_node *parent) +{ + ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack; + return ath79_misc_intc_of_init(node, parent); +} + +IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc", + ar7240_misc_intc_of_init); static int __init ar79_cpu_intc_of_init( struct device_node *node, struct device_node *parent) diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index f26c3c661cca..0352bc8d56b3 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -2221,7 +2221,7 @@ static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data) if (irqd_get_trigger_type(irq_data) & IRQ_TYPE_EDGE_BOTH) cvmx_write_csr(host_data->raw_reg, 1ull << i); - generic_handle_irq_desc(irq, desc); + generic_handle_irq_desc(desc); } } diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 89a628455bc2..bd634259eab9 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -933,7 +933,7 @@ void __init plat_mem_setup(void) while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX) && (total < MAX_MEMORY)) { memory = cvmx_bootmem_phy_alloc(mem_alloc_size, - __pa_symbol(&__init_end), -1, + __pa_symbol(&_end), -1, 0x100000, CVMX_BOOTMEM_FLAG_NO_LOCKING); if (memory >= 0) { diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 40ec4ca3f946..c7fe4d01e79c 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -17,4 +17,5 @@ generic-y += segment.h generic-y += serial.h generic-y += trace_clock.h generic-y += user.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 9801ac982655..fe67f12ac239 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -20,6 +20,9 @@ #ifndef cpu_has_tlb #define cpu_has_tlb (cpu_data[0].options & MIPS_CPU_TLB) #endif +#ifndef cpu_has_ftlb +#define cpu_has_ftlb (cpu_data[0].options & MIPS_CPU_FTLB) +#endif #ifndef cpu_has_tlbinv #define cpu_has_tlbinv (cpu_data[0].options & MIPS_CPU_TLBINV) #endif diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index cd89e9855775..82ad15f11049 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -385,6 +385,7 @@ enum cpu_type_enum { #define MIPS_CPU_CDMM 0x4000000000ull /* CPU has Common Device Memory Map */ #define MIPS_CPU_BP_GHIST 0x8000000000ull /* R12K+ Branch Prediction Global History */ #define MIPS_CPU_SP 0x10000000000ull /* Small (1KB) page support */ +#define MIPS_CPU_FTLB 
0x20000000000ull /* CPU has Fixed-page-size TLB */ /* * CPU ASE encodings diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 9e777cd42b67..d10fd80dbb7e 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -256,6 +256,7 @@ static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long si */ #define ioremap_nocache(offset, size) \ __ioremap_mode((offset), (size), _CACHE_UNCACHED) +#define ioremap_uc ioremap_nocache /* * ioremap_cachable - map bus memory into CPU space diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index e8c8d9d0c45f..5a1a882e0a75 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -61,6 +61,7 @@ #define KVM_PRIVATE_MEM_SLOTS 0 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 +#define KVM_HALT_POLL_NS_DEFAULT 500000 @@ -128,6 +129,7 @@ struct kvm_vcpu_stat { u32 msa_disabled_exits; u32 flush_dcache_exits; u32 halt_successful_poll; + u32 halt_attempted_poll; u32 halt_wakeup; }; diff --git a/arch/mips/include/asm/maar.h b/arch/mips/include/asm/maar.h index b02891f9caaf..21d9607c80d7 100644 --- a/arch/mips/include/asm/maar.h +++ b/arch/mips/include/asm/maar.h @@ -66,6 +66,15 @@ static inline void write_maar_pair(unsigned idx, phys_addr_t lower, } /** + * maar_init() - initialise MAARs + * + * Performs initialisation of MAARs for the current CPU, making use of the + * platforms implementation of platform_maar_init where necessary and + * duplicating the setup it provides on secondary CPUs. + */ +extern void maar_init(void); + +/** * struct maar_config - MAAR configuration data * @lower: The lowest address that the MAAR pair will affect. Must be * aligned to a 2^16 byte boundary. diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h index d75b75e78ebb..1f1927ab4269 100644 --- a/arch/mips/include/asm/mips-cm.h +++ b/arch/mips/include/asm/mips-cm.h @@ -194,6 +194,7 @@ BUILD_CM_RW(reg3_mask, MIPS_CM_GCB_OFS + 0xc8) BUILD_CM_R_(gic_status, MIPS_CM_GCB_OFS + 0xd0) BUILD_CM_R_(cpc_status, MIPS_CM_GCB_OFS + 0xf0) BUILD_CM_RW(l2_config, MIPS_CM_GCB_OFS + 0x130) +BUILD_CM_RW(sys_config2, MIPS_CM_GCB_OFS + 0x150) /* Core Local & Core Other register accessor functions */ BUILD_CM_Cx_RW(reset_release, 0x00) @@ -316,6 +317,10 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80) #define CM_GCR_L2_CONFIG_ASSOC_SHF 0 #define CM_GCR_L2_CONFIG_ASSOC_MSK (_ULCAST_(0xff) << 0) +/* GCR_SYS_CONFIG2 register fields */ +#define CM_GCR_SYS_CONFIG2_MAXVPW_SHF 0 +#define CM_GCR_SYS_CONFIG2_MAXVPW_MSK (_ULCAST_(0xf) << 0) + /* GCR_Cx_COHERENCE register fields */ #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF 0 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK (_ULCAST_(0xff) << 0) @@ -405,4 +410,38 @@ static inline int mips_cm_revision(void) return read_gcr_rev(); } +/** + * mips_cm_max_vp_width() - return the width in bits of VP indices + * + * Return: the width, in bits, of VP indices in fields that combine core & VP + * indices. + */ +static inline unsigned int mips_cm_max_vp_width(void) +{ + extern int smp_num_siblings; + + if (mips_cm_revision() >= CM_REV_CM3) + return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK; + + return smp_num_siblings; +} + +/** + * mips_cm_vp_id() - calculate the hardware VP ID for a CPU + * @cpu: the CPU whose VP ID to calculate + * + * Hardware such as the GIC uses identifiers for VPs which may not match the + * CPU numbers used by Linux. This function calculates the hardware VP + * identifier corresponding to a given CPU. 
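mips_cm_max_vp_width() above and the mips_cm_vp_id() helper completed just below combine a core index and a VP index into the hardware VP identifier. A standalone numeric illustration of the same arithmetic (a mock, not kernel code):

```c
#include <stdio.h>

/* Mirrors mips_cm_vp_id(): core * max_vp_width + vp. */
static unsigned int vp_id(unsigned int core, unsigned int vp,
			  unsigned int max_vp_width)
{
	return core * max_vp_width + vp;
}

int main(void)
{
	/* With 2 VPs per core, core 3 / VP 1 is hardware VP 7. */
	printf("%u\n", vp_id(3, 1, 2));
	return 0;
}
```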
+ * + * Return: the VP ID for the CPU. + */ +static inline unsigned int mips_cm_vp_id(unsigned int cpu) +{ + unsigned int core = cpu_data[cpu].core; + unsigned int vp = cpu_vpe_id(&cpu_data[cpu]); + + return (core * mips_cm_max_vp_width()) + vp; +} + #endif /* __MIPS_ASM_MIPS_CM_H__ */ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index d3cd8eac81e3..c64781cf649f 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -487,6 +487,8 @@ /* Bits specific to the MIPS32/64 PRA. */ #define MIPS_CONF_MT (_ULCAST_(7) << 7) +#define MIPS_CONF_MT_TLB (_ULCAST_(1) << 7) +#define MIPS_CONF_MT_FTLB (_ULCAST_(4) << 7) #define MIPS_CONF_AR (_ULCAST_(7) << 10) #define MIPS_CONF_AT (_ULCAST_(3) << 13) #define MIPS_CONF_M (_ULCAST_(1) << 31) diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h index 2a4c128277e4..be52c2125d71 100644 --- a/arch/mips/include/asm/netlogic/common.h +++ b/arch/mips/include/asm/netlogic/common.h @@ -57,8 +57,8 @@ #include <asm/mach-netlogic/multi-node.h> struct irq_desc; -void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc); -void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc); +void nlm_smp_function_ipi_handler(struct irq_desc *desc); +void nlm_smp_resched_ipi_handler(struct irq_desc *desc); void nlm_smp_irq_init(int hwcpuid); void nlm_boot_secondary_cpus(void); int nlm_wakeup_secondary_cpus(void); diff --git a/arch/mips/include/uapi/asm/swab.h b/arch/mips/include/uapi/asm/swab.h index c4ddc4f0d2dc..23cd9b118c9e 100644 --- a/arch/mips/include/uapi/asm/swab.h +++ b/arch/mips/include/uapi/asm/swab.h @@ -13,16 +13,15 @@ #define __SWAB_64_THRU_32__ -#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) || \ - defined(_MIPS_ARCH_LOONGSON3A) +#if !defined(__mips16) && \ + ((defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) || \ + defined(_MIPS_ARCH_LOONGSON3A)) -static inline __attribute__((nomips16)) __attribute_const__ - __u16 __arch_swab16(__u16 x) +static inline __attribute_const__ __u16 __arch_swab16(__u16 x) { __asm__( " .set push \n" " .set arch=mips32r2 \n" - " .set nomips16 \n" " wsbh %0, %1 \n" " .set pop \n" : "=r" (x) @@ -32,13 +31,11 @@ static inline __attribute__((nomips16)) __attribute_const__ } #define __arch_swab16 __arch_swab16 -static inline __attribute__((nomips16)) __attribute_const__ - __u32 __arch_swab32(__u32 x) +static inline __attribute_const__ __u32 __arch_swab32(__u32 x) { __asm__( " .set push \n" " .set arch=mips32r2 \n" - " .set nomips16 \n" " wsbh %0, %1 \n" " rotr %0, %0, 16 \n" " .set pop \n" @@ -54,13 +51,11 @@ static inline __attribute__((nomips16)) __attribute_const__ * 64-bit kernel on r2 CPUs. 
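The wsbh/rotr pair kept in __arch_swab32() above is the MIPS r2 instruction form of a full 32-bit byte swap. The same permutation in portable C, offered as a standalone model rather than anything taken from the patch:

```c
#include <stdio.h>
#include <stdint.h>

/* Portable model of what wsbh followed by rotr 16 computes. */
static uint32_t swab32_model(uint32_t x)
{
	return (x << 24) | (x >> 24) |
	       ((x >> 8) & 0x0000ff00u) |
	       ((x << 8) & 0x00ff0000u);
}

int main(void)
{
	printf("%08x\n", (unsigned)swab32_model(0x12345678));	/* 78563412 */
	return 0;
}
```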
*/ #ifdef __mips64 -static inline __attribute__((nomips16)) __attribute_const__ - __u64 __arch_swab64(__u64 x) +static inline __attribute_const__ __u64 __arch_swab64(__u64 x) { __asm__( " .set push \n" " .set arch=mips64r2 \n" - " .set nomips16 \n" " dsbh %0, %1 \n" " dshd %0, %0 \n" " .set pop \n" @@ -71,5 +66,5 @@ static inline __attribute__((nomips16)) __attribute_const__ } #define __arch_swab64 __arch_swab64 #endif /* __mips64 */ -#endif /* MIPS R2 or newer or Loongson 3A */ +#endif /* (not __mips16) and (MIPS R2 or newer or Loongson 3A) */ #endif /* _ASM_SWAB_H */ diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index c03088f9f514..cfabadb135d9 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -377,16 +377,18 @@ #define __NR_memfd_create (__NR_Linux + 354) #define __NR_bpf (__NR_Linux + 355) #define __NR_execveat (__NR_Linux + 356) +#define __NR_userfaultfd (__NR_Linux + 357) +#define __NR_membarrier (__NR_Linux + 358) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 356 +#define __NR_Linux_syscalls 358 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 356 +#define __NR_O32_Linux_syscalls 358 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -711,16 +713,18 @@ #define __NR_memfd_create (__NR_Linux + 314) #define __NR_bpf (__NR_Linux + 315) #define __NR_execveat (__NR_Linux + 316) +#define __NR_userfaultfd (__NR_Linux + 317) +#define __NR_membarrier (__NR_Linux + 318) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 316 +#define __NR_Linux_syscalls 318 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 316 +#define __NR_64_Linux_syscalls 318 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1049,15 +1053,17 @@ #define __NR_memfd_create (__NR_Linux + 318) #define __NR_bpf (__NR_Linux + 319) #define __NR_execveat (__NR_Linux + 320) +#define __NR_userfaultfd (__NR_Linux + 321) +#define __NR_membarrier (__NR_Linux + 322) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 320 +#define __NR_Linux_syscalls 322 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 320 +#define __NR_N32_Linux_syscalls 322 #endif /* _UAPI_ASM_UNISTD_H */ diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c index 4e62bf85d0b0..459cb017306c 100644 --- a/arch/mips/jz4740/board-qi_lb60.c +++ b/arch/mips/jz4740/board-qi_lb60.c @@ -26,6 +26,7 @@ #include <linux/power/jz4740-battery.h> #include <linux/power/gpio-charger.h> +#include <asm/mach-jz4740/gpio.h> #include <asm/mach-jz4740/jz4740_fb.h> #include <asm/mach-jz4740/jz4740_mmc.h> #include <asm/mach-jz4740/jz4740_nand.h> diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c index 6cd69fdaa1c5..8c6d76c9b2d6 100644 --- a/arch/mips/jz4740/gpio.c +++ b/arch/mips/jz4740/gpio.c @@ -28,6 +28,7 @@ #include <linux/seq_file.h> #include <asm/mach-jz4740/base.h> +#include <asm/mach-jz4740/gpio.h> #define JZ4740_GPIO_BASE_A (32*0) #define JZ4740_GPIO_BASE_B (32*1) @@ -291,7 +292,7 @@ static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int i writel(mask, reg); } -static void jz_gpio_irq_demux_handler(unsigned int irq, struct irq_desc *desc) +static void jz_gpio_irq_demux_handler(struct irq_desc *desc) { uint32_t flag; unsigned int gpio_irq; diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 
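With the o32/n64/n32 numbers above and the table entries added later in these hunks, the new calls become reachable through syscall(2). A hedged example that queries membarrier support; command 0 is MEMBARRIER_CMD_QUERY in the membarrier UAPI:

```c
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	long mask = syscall(__NR_membarrier, 0 /* MEMBARRIER_CMD_QUERY */, 0);

	if (mask < 0)
		perror("membarrier");
	else
		printf("supported membarrier commands: 0x%lx\n", mask);
	return 0;
}
```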
9f71c06aebf6..209ded16806b 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -39,6 +39,7 @@ mfc0 \dest, CP0_CONFIG, 3 andi \dest, \dest, MIPS_CONF3_MT beqz \dest, \nomt + nop .endm .section .text.cps-vec @@ -223,10 +224,9 @@ LEAF(excep_ejtag) END(excep_ejtag) LEAF(mips_cps_core_init) -#ifdef CONFIG_MIPS_MT +#ifdef CONFIG_MIPS_MT_SMP /* Check that the core implements the MT ASE */ has_mt t0, 3f - nop .set push .set mips64r2 @@ -310,8 +310,9 @@ LEAF(mips_cps_boot_vpes) PTR_ADDU t0, t0, t1 /* Calculate this VPEs ID. If the core doesn't support MT use 0 */ + li t9, 0 +#ifdef CONFIG_MIPS_MT_SMP has_mt ta2, 1f - li t9, 0 /* Find the number of VPEs present in the core */ mfc0 t1, CP0_MVPCONF0 @@ -330,6 +331,7 @@ LEAF(mips_cps_boot_vpes) /* Retrieve the VPE ID from EBase.CPUNum */ mfc0 t9, $15, 1 and t9, t9, t1 +#endif 1: /* Calculate a pointer to this VPEs struct vpe_boot_config */ li t1, VPEBOOTCFG_SIZE @@ -337,7 +339,7 @@ LEAF(mips_cps_boot_vpes) PTR_L ta3, COREBOOTCFG_VPECONFIG(t0) PTR_ADDU v0, v0, ta3 -#ifdef CONFIG_MIPS_MT +#ifdef CONFIG_MIPS_MT_SMP /* If the core doesn't support MT then return */ bnez ta2, 1f @@ -451,7 +453,7 @@ LEAF(mips_cps_boot_vpes) 2: .set pop -#endif /* CONFIG_MIPS_MT */ +#endif /* CONFIG_MIPS_MT_SMP */ /* Return */ jr ra diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 571a8e6ea5bd..09a51d091941 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -410,16 +410,18 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, int enable) static inline unsigned int decode_config0(struct cpuinfo_mips *c) { unsigned int config0; - int isa; + int isa, mt; config0 = read_c0_config(); /* * Look for Standard TLB or Dual VTLB and FTLB */ - if ((((config0 & MIPS_CONF_MT) >> 7) == 1) || - (((config0 & MIPS_CONF_MT) >> 7) == 4)) + mt = config0 & MIPS_CONF_MT; + if (mt == MIPS_CONF_MT_TLB) c->options |= MIPS_CPU_TLB; + else if (mt == MIPS_CONF_MT_FTLB) + c->options |= MIPS_CPU_TLB | MIPS_CPU_FTLB; isa = (config0 & MIPS_CONF_AT) >> 13; switch (isa) { @@ -559,15 +561,18 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c) if (cpu_has_tlb) { if (((config4 & MIPS_CONF4_IE) >> 29) == 2) c->options |= MIPS_CPU_TLBINV; + /* - * This is a bit ugly. R6 has dropped that field from - * config4 and the only valid configuration is VTLB+FTLB so - * set a good value for mmuextdef for that case. + * R6 has dropped the MMUExtDef field from config4. + * On R6 the fields always describe the FTLB, and only if it is + * present according to Config.MT. 
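decode_config0() above now compares the whole Config.MT field against named encodings instead of open-coding shifts: 1 means a standard TLB, 4 means the dual VTLB+FTLB arrangement, and only the latter sets MIPS_CPU_FTLB. A small standalone sketch of the same test, with the constants copied from the mipsregs.h hunk earlier in the series:

```c
#include <stdio.h>

#define MIPS_CONF_MT		(7u << 7)
#define MIPS_CONF_MT_TLB	(1u << 7)
#define MIPS_CONF_MT_FTLB	(4u << 7)

static int config_has_ftlb(unsigned int config0)
{
	return (config0 & MIPS_CONF_MT) == MIPS_CONF_MT_FTLB;
}

int main(void)
{
	printf("%d %d\n", config_has_ftlb(MIPS_CONF_MT_TLB),
	       config_has_ftlb(MIPS_CONF_MT_FTLB));	/* prints 0 1 */
	return 0;
}
```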
*/ - if (cpu_has_mips_r6) + if (!cpu_has_mips_r6) + mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; + else if (cpu_has_ftlb) mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT; else - mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; + mmuextdef = 0; switch (mmuextdef) { case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT: diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index 423ae83af1fb..3375745b9198 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S @@ -18,7 +18,7 @@ .set pop /* * task_struct *resume(task_struct *prev, task_struct *next, - * struct thread_info *next_ti, int usedfpu) + * struct thread_info *next_ti) */ .align 7 LEAF(resume) @@ -28,30 +28,6 @@ cpu_save_nonscratch a0 LONG_S ra, THREAD_REG31(a0) - /* - * check if we need to save FPU registers - */ - .set push - .set noreorder - beqz a3, 1f - PTR_L t3, TASK_THREAD_INFO(a0) - .set pop - - /* - * clear saved user stack CU1 bit - */ - LONG_L t0, ST_OFF(t3) - li t1, ~ST0_CU1 - and t0, t0, t1 - LONG_S t0, ST_OFF(t3) - - .set push - .set arch=mips64r2 - fpu_save_double a0 t0 t1 # c0_status passed in t0 - # clobbers t1 - .set pop -1: - #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 /* Check if we need to store CVMSEG state */ dmfc0 t0, $11,7 /* CvmMemCtl */ diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S index 5087a4b72e6b..ac27ef7d4d0e 100644 --- a/arch/mips/kernel/r2300_switch.S +++ b/arch/mips/kernel/r2300_switch.S @@ -31,18 +31,8 @@ #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) /* - * FPU context is saved iff the process has used it's FPU in the current - * time slice as indicated by TIF_USEDFPU. In any case, the CU1 bit for user - * space STATUS register should be 0, so that a process *always* starts its - * userland with FPU disabled after each context switch. - * - * FPU will be enabled as soon as the process accesses FPU again, through - * do_cpu() trap. 
- */ - -/* * task_struct *resume(task_struct *prev, task_struct *next, - * struct thread_info *next_ti, int usedfpu) + * struct thread_info *next_ti) */ LEAF(resume) mfc0 t1, CP0_STATUS @@ -50,22 +40,6 @@ LEAF(resume) cpu_save_nonscratch a0 sw ra, THREAD_REG31(a0) - beqz a3, 1f - - PTR_L t3, TASK_THREAD_INFO(a0) - - /* - * clear saved user stack CU1 bit - */ - lw t0, ST_OFF(t3) - li t1, ~ST0_CU1 - and t0, t0, t1 - sw t0, ST_OFF(t3) - - fpu_save_single a0, t0 # clobbers t0 - -1: - #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) PTR_LA t8, __stack_chk_guard LONG_L t9, TASK_STACK_CANARY(a1) diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 4cc13508d967..65a74e4f0f45 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -36,16 +36,8 @@ NESTED(handle_sys, PT_SIZE, sp) lw t1, PT_EPC(sp) # skip syscall on return subu v0, v0, __NR_O32_Linux # check syscall number - sltiu t0, v0, __NR_O32_Linux_syscalls + 1 addiu t1, 4 # skip to next instruction sw t1, PT_EPC(sp) - beqz t0, illegal_syscall - - sll t0, v0, 2 - la t1, sys_call_table - addu t1, t0 - lw t2, (t1) # syscall routine - beqz t2, illegal_syscall sw a3, PT_R26(sp) # save a3 for syscall restarting @@ -96,6 +88,16 @@ loads_done: li t1, _TIF_WORK_SYSCALL_ENTRY and t0, t1 bnez t0, syscall_trace_entry # -> yes +syscall_common: + sltiu t0, v0, __NR_O32_Linux_syscalls + 1 + beqz t0, illegal_syscall + + sll t0, v0, 2 + la t1, sys_call_table + addu t1, t0 + lw t2, (t1) # syscall routine + + beqz t2, illegal_syscall jalr t2 # Do The Real Thing (TM) @@ -116,7 +118,7 @@ o32_syscall_exit: syscall_trace_entry: SAVE_STATIC - move s0, t2 + move s0, v0 move a0, sp /* @@ -129,27 +131,18 @@ syscall_trace_entry: 1: jal syscall_trace_enter - bltz v0, 2f # seccomp failed? Skip syscall + bltz v0, 1f # seccomp failed? Skip syscall + + move v0, s0 # restore syscall - move t0, s0 RESTORE_STATIC lw a0, PT_R4(sp) # Restore argument registers lw a1, PT_R5(sp) lw a2, PT_R6(sp) lw a3, PT_R7(sp) - jalr t0 - - li t0, -EMAXERRNO - 1 # error? 
- sltu t0, t0, v0 - sw t0, PT_R7(sp) # set error flag - beqz t0, 1f - - lw t1, PT_R2(sp) # syscall number - negu v0 # error - sw t1, PT_R0(sp) # save it for syscall restarting -1: sw v0, PT_R2(sp) # result + j syscall_common -2: j syscall_exit +1: j syscall_exit /* ------------------------------------------------------------------------ */ @@ -599,3 +592,5 @@ EXPORT(sys_call_table) PTR sys_memfd_create PTR sys_bpf /* 4355 */ PTR sys_execveat + PTR sys_userfaultfd + PTR sys_membarrier diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index a6f6b762c47a..e732981cf99f 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -39,18 +39,11 @@ NESTED(handle_sys64, PT_SIZE, sp) .set at #endif - dsubu t0, v0, __NR_64_Linux # check syscall number - sltiu t0, t0, __NR_64_Linux_syscalls + 1 #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) ld t1, PT_EPC(sp) # skip syscall on return daddiu t1, 4 # skip to next instruction sd t1, PT_EPC(sp) #endif - beqz t0, illegal_syscall - - dsll t0, v0, 3 # offset into table - ld t2, (sys_call_table - (__NR_64_Linux * 8))(t0) - # syscall routine sd a3, PT_R26(sp) # save a3 for syscall restarting @@ -59,6 +52,17 @@ NESTED(handle_sys64, PT_SIZE, sp) and t0, t1, t0 bnez t0, syscall_trace_entry +syscall_common: + dsubu t2, v0, __NR_64_Linux + sltiu t0, t2, __NR_64_Linux_syscalls + 1 + beqz t0, illegal_syscall + + dsll t0, t2, 3 # offset into table + dla t2, sys_call_table + daddu t0, t2, t0 + ld t2, (t0) # syscall routine + beqz t2, illegal_syscall + jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? @@ -78,14 +82,14 @@ n64_syscall_exit: syscall_trace_entry: SAVE_STATIC - move s0, t2 + move s0, v0 move a0, sp move a1, v0 jal syscall_trace_enter - bltz v0, 2f # seccomp failed? Skip syscall + bltz v0, 1f # seccomp failed? Skip syscall - move t0, s0 + move v0, s0 RESTORE_STATIC ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) @@ -93,19 +97,9 @@ syscall_trace_entry: ld a3, PT_R7(sp) ld a4, PT_R8(sp) ld a5, PT_R9(sp) - jalr t0 - - li t0, -EMAXERRNO - 1 # error? - sltu t0, t0, v0 - sd t0, PT_R7(sp) # set error flag - beqz t0, 1f - - ld t1, PT_R2(sp) # syscall number - dnegu v0 # error - sd t1, PT_R0(sp) # save it for syscall restarting -1: sd v0, PT_R2(sp) # result + j syscall_common -2: j syscall_exit +1: j syscall_exit illegal_syscall: /* This also isn't a 64-bit syscall, throw an error. */ @@ -436,4 +430,6 @@ EXPORT(sys_call_table) PTR sys_memfd_create PTR sys_bpf /* 5315 */ PTR sys_execveat + PTR sys_userfaultfd + PTR sys_membarrier .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 4b2010654c46..c79484397584 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -52,6 +52,7 @@ NESTED(handle_sysn32, PT_SIZE, sp) and t0, t1, t0 bnez t0, n32_syscall_trace_entry +syscall_common: jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? @@ -75,9 +76,9 @@ n32_syscall_trace_entry: move a1, v0 jal syscall_trace_enter - bltz v0, 2f # seccomp failed? Skip syscall + bltz v0, 1f # seccomp failed? Skip syscall - move t0, s0 + move t2, s0 RESTORE_STATIC ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) @@ -85,19 +86,9 @@ n32_syscall_trace_entry: ld a3, PT_R7(sp) ld a4, PT_R8(sp) ld a5, PT_R9(sp) - jalr t0 + j syscall_common - li t0, -EMAXERRNO - 1 # error? 
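The scall*.S rework above funnels both the fast path and the ptrace/seccomp path through a single syscall_common label that performs the range check, table lookup and call once. Roughly, the control flow now looks like the following C model (a simplification, not the actual code):

```c
#include <errno.h>

/* Model only: nr is the syscall number, table the per-ABI call table. */
static long syscall_common(unsigned long nr, long (*const table[])(void),
			   unsigned long nr_syscalls)
{
	if (nr >= nr_syscalls || !table[nr])
		return -ENOSYS;		/* illegal_syscall */
	return table[nr]();		/* Do The Real Thing (TM) */
}
```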
- sltu t0, t0, v0 - sd t0, PT_R7(sp) # set error flag - beqz t0, 1f - - ld t1, PT_R2(sp) # syscall number - dnegu v0 # error - sd t1, PT_R0(sp) # save it for syscall restarting -1: sd v0, PT_R2(sp) # result - -2: j syscall_exit +1: j syscall_exit not_n32_scall: /* This is not an n32 compatibility syscall, pass it on to @@ -429,4 +420,6 @@ EXPORT(sysn32_call_table) PTR sys_memfd_create PTR sys_bpf PTR compat_sys_execveat /* 6320 */ + PTR sys_userfaultfd + PTR sys_membarrier .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index f543ff4feef9..6369cfd390c6 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -87,6 +87,7 @@ loads_done: and t0, t1, t0 bnez t0, trace_a_syscall +syscall_common: jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? @@ -130,9 +131,9 @@ trace_a_syscall: 1: jal syscall_trace_enter - bltz v0, 2f # seccomp failed? Skip syscall + bltz v0, 1f # seccomp failed? Skip syscall - move t0, s0 + move t2, s0 RESTORE_STATIC ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) @@ -142,19 +143,9 @@ trace_a_syscall: ld a5, PT_R9(sp) ld a6, PT_R10(sp) ld a7, PT_R11(sp) # For indirect syscalls - jalr t0 + j syscall_common - li t0, -EMAXERRNO - 1 # error? - sltu t0, t0, v0 - sd t0, PT_R7(sp) # set error flag - beqz t0, 1f - - ld t1, PT_R2(sp) # syscall number - dnegu v0 # error - sd t1, PT_R0(sp) # save it for syscall restarting -1: sd v0, PT_R2(sp) # result - -2: j syscall_exit +1: j syscall_exit /* ------------------------------------------------------------------------ */ @@ -584,4 +575,6 @@ EXPORT(sys32_call_table) PTR sys_memfd_create PTR sys_bpf /* 4355 */ PTR compat_sys_execveat + PTR sys_userfaultfd + PTR sys_membarrier .size sys32_call_table,.-sys32_call_table diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 35b8316002f8..479515109e5b 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -338,7 +338,7 @@ static void __init bootmem_init(void) if (end <= reserved_end) continue; #ifdef CONFIG_BLK_DEV_INITRD - /* mapstart should be after initrd_end */ + /* Skip zones before initrd and initrd itself */ if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end))) continue; #endif @@ -371,6 +371,14 @@ static void __init bootmem_init(void) max_low_pfn = PFN_DOWN(HIGHMEM_START); } +#ifdef CONFIG_BLK_DEV_INITRD + /* + * mapstart should be after initrd_end + */ + if (initrd_end) + mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end))); +#endif + /* * Initialize the boot-time allocator with low memory only. 
*/ diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index a31896c33716..bd4385a8e6e8 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -42,6 +42,7 @@ #include <asm/mmu_context.h> #include <asm/time.h> #include <asm/setup.h> +#include <asm/maar.h> cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ @@ -157,6 +158,7 @@ asmlinkage void start_secondary(void) mips_clockevent_init(); mp_ops->init_secondary(); cpu_report(); + maar_init(); /* * XXX parity protection should be folded in here when it's converted diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index cd4c129ce743..49ff3bfc007e 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -55,6 +55,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU }, { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU }, { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU }, + { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU }, { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU }, {NULL} }; diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c index f6c44dd332e2..d6d07ad56180 100644 --- a/arch/mips/loongson64/common/env.c +++ b/arch/mips/loongson64/common/env.c @@ -64,6 +64,9 @@ void __init prom_init_env(void) } if (memsize == 0) memsize = 256; + + loongson_sysconf.nr_uarts = 1; + pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize); #else struct boot_params *boot_p; diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index a914dc1cb6d1..d8117be729a2 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -100,7 +100,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) else #endif #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32) - if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) + if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8)) dma_flag = __GFP_DMA; else #endif diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 66d0f49c5bec..8770e619185e 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -44,6 +44,7 @@ #include <asm/pgalloc.h> #include <asm/tlb.h> #include <asm/fixmap.h> +#include <asm/maar.h> /* * We have up to 8 empty zeroed pages so we can map one of the right colour @@ -252,6 +253,119 @@ void __init fixrange_init(unsigned long start, unsigned long end, #endif } +unsigned __weak platform_maar_init(unsigned num_pairs) +{ + struct maar_config cfg[BOOT_MEM_MAP_MAX]; + unsigned i, num_configured, num_cfg = 0; + phys_addr_t skip; + + for (i = 0; i < boot_mem_map.nr_map; i++) { + switch (boot_mem_map.map[i].type) { + case BOOT_MEM_RAM: + case BOOT_MEM_INIT_RAM: + break; + default: + continue; + } + + skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff); + + cfg[num_cfg].lower = boot_mem_map.map[i].addr; + cfg[num_cfg].lower += skip; + + cfg[num_cfg].upper = cfg[num_cfg].lower; + cfg[num_cfg].upper += boot_mem_map.map[i].size - 1; + cfg[num_cfg].upper -= skip; + + cfg[num_cfg].attrs = MIPS_MAAR_S; + num_cfg++; + } + + num_configured = maar_config(cfg, num_cfg, num_pairs); + if (num_configured < num_cfg) + pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n", + num_pairs, num_cfg); + + return num_configured; +} + +void maar_init(void) +{ + unsigned num_maars, used, i; + phys_addr_t lower, upper, attr; + static struct { + struct maar_config cfgs[3]; + unsigned used; + } recorded = { { { 0 } }, 0 }; + + if (!cpu_has_maar) + 
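platform_maar_init() above is declared __weak, so a platform can supply its own MAAR layout instead of the bootmem-derived default; maar_init(), continued just below, then records whatever was configured for reuse on secondary CPUs. A hedged sketch of such an override, with the 256 MB DDR window purely illustrative:

```c
/* Hypothetical board code; maar_config(), struct maar_config and
 * MIPS_MAAR_S are the interfaces shown in the hunks above. */
unsigned platform_maar_init(unsigned num_pairs)
{
	struct maar_config cfg = {
		.lower = 0x00000000,
		.upper = 0x0fffffff,	/* illustrative 256 MB of DDR */
		.attrs = MIPS_MAAR_S,	/* allow speculation */
	};

	return maar_config(&cfg, 1, num_pairs);
}
```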
return; + + /* Detect the number of MAARs */ + write_c0_maari(~0); + back_to_back_c0_hazard(); + num_maars = read_c0_maari() + 1; + + /* MAARs should be in pairs */ + WARN_ON(num_maars % 2); + + /* Set MAARs using values we recorded already */ + if (recorded.used) { + used = maar_config(recorded.cfgs, recorded.used, num_maars / 2); + BUG_ON(used != recorded.used); + } else { + /* Configure the required MAARs */ + used = platform_maar_init(num_maars / 2); + } + + /* Disable any further MAARs */ + for (i = (used * 2); i < num_maars; i++) { + write_c0_maari(i); + back_to_back_c0_hazard(); + write_c0_maar(0); + back_to_back_c0_hazard(); + } + + if (recorded.used) + return; + + pr_info("MAAR configuration:\n"); + for (i = 0; i < num_maars; i += 2) { + write_c0_maari(i); + back_to_back_c0_hazard(); + upper = read_c0_maar(); + + write_c0_maari(i + 1); + back_to_back_c0_hazard(); + lower = read_c0_maar(); + + attr = lower & upper; + lower = (lower & MIPS_MAAR_ADDR) << 4; + upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff; + + pr_info(" [%d]: ", i / 2); + if (!(attr & MIPS_MAAR_V)) { + pr_cont("disabled\n"); + continue; + } + + pr_cont("%pa-%pa", &lower, &upper); + + if (attr & MIPS_MAAR_S) + pr_cont(" speculate"); + + pr_cont("\n"); + + /* Record the setup for use on secondary CPUs */ + if (used <= ARRAY_SIZE(recorded.cfgs)) { + recorded.cfgs[recorded.used].lower = lower; + recorded.cfgs[recorded.used].upper = upper; + recorded.cfgs[recorded.used].attrs = attr; + recorded.used++; + } + } +} + #ifndef CONFIG_NEED_MULTIPLE_NODES int page_is_ram(unsigned long pagenr) { @@ -334,69 +448,6 @@ static inline void mem_init_free_highmem(void) #endif } -unsigned __weak platform_maar_init(unsigned num_pairs) -{ - struct maar_config cfg[BOOT_MEM_MAP_MAX]; - unsigned i, num_configured, num_cfg = 0; - phys_addr_t skip; - - for (i = 0; i < boot_mem_map.nr_map; i++) { - switch (boot_mem_map.map[i].type) { - case BOOT_MEM_RAM: - case BOOT_MEM_INIT_RAM: - break; - default: - continue; - } - - skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff); - - cfg[num_cfg].lower = boot_mem_map.map[i].addr; - cfg[num_cfg].lower += skip; - - cfg[num_cfg].upper = cfg[num_cfg].lower; - cfg[num_cfg].upper += boot_mem_map.map[i].size - 1; - cfg[num_cfg].upper -= skip; - - cfg[num_cfg].attrs = MIPS_MAAR_S; - num_cfg++; - } - - num_configured = maar_config(cfg, num_cfg, num_pairs); - if (num_configured < num_cfg) - pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n", - num_pairs, num_cfg); - - return num_configured; -} - -static void maar_init(void) -{ - unsigned num_maars, used, i; - - if (!cpu_has_maar) - return; - - /* Detect the number of MAARs */ - write_c0_maari(~0); - back_to_back_c0_hazard(); - num_maars = read_c0_maari() + 1; - - /* MAARs should be in pairs */ - WARN_ON(num_maars % 2); - - /* Configure the required MAARs */ - used = platform_maar_init(num_maars / 2); - - /* Disable any further MAARs */ - for (i = (used * 2); i < num_maars; i++) { - write_c0_maari(i); - back_to_back_c0_hazard(); - write_c0_maar(0); - back_to_back_c0_hazard(); - } -} - void __init mem_init(void) { #ifdef CONFIG_HIGHMEM diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S index e92726099be0..5d2e0c8d29c0 100644 --- a/arch/mips/net/bpf_jit_asm.S +++ b/arch/mips/net/bpf_jit_asm.S @@ -57,15 +57,28 @@ LEAF(sk_load_word) is_offset_negative(word) - .globl sk_load_word_positive -sk_load_word_positive: +FEXPORT(sk_load_word_positive) is_offset_in_header(4, word) /* Offset within header boundaries */ PTR_ADDU t1, 
$r_skb_data, offset + .set reorder lw $r_A, 0(t1) + .set noreorder #ifdef CONFIG_CPU_LITTLE_ENDIAN +# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) wsbh t0, $r_A rotr $r_A, t0, 16 +# else + sll t0, $r_A, 24 + srl t1, $r_A, 24 + srl t2, $r_A, 8 + or t0, t0, t1 + andi t2, t2, 0xff00 + andi t1, $r_A, 0xff00 + or t0, t0, t2 + sll t1, t1, 8 + or $r_A, t0, t1 +# endif #endif jr $r_ra move $r_ret, zero @@ -73,15 +86,24 @@ sk_load_word_positive: LEAF(sk_load_half) is_offset_negative(half) - .globl sk_load_half_positive -sk_load_half_positive: +FEXPORT(sk_load_half_positive) is_offset_in_header(2, half) /* Offset within header boundaries */ PTR_ADDU t1, $r_skb_data, offset + .set reorder lh $r_A, 0(t1) + .set noreorder #ifdef CONFIG_CPU_LITTLE_ENDIAN +# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) wsbh t0, $r_A seh $r_A, t0 +# else + sll t0, $r_A, 24 + andi t1, $r_A, 0xff00 + sra t0, t0, 16 + srl t1, t1, 8 + or $r_A, t0, t1 +# endif #endif jr $r_ra move $r_ret, zero @@ -89,8 +111,7 @@ sk_load_half_positive: LEAF(sk_load_byte) is_offset_negative(byte) - .globl sk_load_byte_positive -sk_load_byte_positive: +FEXPORT(sk_load_byte_positive) is_offset_in_header(1, byte) /* Offset within header boundaries */ PTR_ADDU t1, $r_skb_data, offset @@ -148,23 +169,47 @@ sk_load_byte_positive: NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp) bpf_slow_path_common(4) #ifdef CONFIG_CPU_LITTLE_ENDIAN +# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) wsbh t0, $r_s0 jr $r_ra rotr $r_A, t0, 16 -#endif +# else + sll t0, $r_s0, 24 + srl t1, $r_s0, 24 + srl t2, $r_s0, 8 + or t0, t0, t1 + andi t2, t2, 0xff00 + andi t1, $r_s0, 0xff00 + or t0, t0, t2 + sll t1, t1, 8 + jr $r_ra + or $r_A, t0, t1 +# endif +#else jr $r_ra - move $r_A, $r_s0 + move $r_A, $r_s0 +#endif END(bpf_slow_path_word) NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp) bpf_slow_path_common(2) #ifdef CONFIG_CPU_LITTLE_ENDIAN +# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) jr $r_ra wsbh $r_A, $r_s0 -#endif +# else + sll t0, $r_s0, 8 + andi t1, $r_s0, 0xff00 + andi t0, t0, 0xff00 + srl t1, t1, 8 + jr $r_ra + or $r_A, t0, t1 +# endif +#else jr $r_ra move $r_A, $r_s0 +#endif END(bpf_slow_path_half) diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c index 0136b4f9c9cd..10d86d54880a 100644 --- a/arch/mips/netlogic/common/smp.c +++ b/arch/mips/netlogic/common/smp.c @@ -82,7 +82,7 @@ void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action) } /* IRQ_IPI_SMP_FUNCTION Handler */ -void nlm_smp_function_ipi_handler(unsigned int __irq, struct irq_desc *desc) +void nlm_smp_function_ipi_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); clear_c0_eimr(irq); @@ -92,7 +92,7 @@ void nlm_smp_function_ipi_handler(unsigned int __irq, struct irq_desc *desc) } /* IRQ_IPI_SMP_RESCHEDULE handler */ -void nlm_smp_resched_ipi_handler(unsigned int __irq, struct irq_desc *desc) +void nlm_smp_resched_ipi_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); clear_c0_eimr(irq); diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c index f8d0acb4f973..b4fa6413c4e5 100644 --- a/arch/mips/pci/pci-ar2315.c +++ b/arch/mips/pci/pci-ar2315.c @@ -318,7 +318,7 @@ static int ar2315_pci_host_setup(struct ar2315_pci_ctrl *apc) return 0; } -static void ar2315_pci_irq_handler(unsigned irq, struct irq_desc *desc) +static void ar2315_pci_irq_handler(struct irq_desc *desc) { struct ar2315_pci_ctrl *apc = irq_desc_get_handler_data(desc); u32 pending = ar2315_pci_reg_read(apc, 
AR2315_PCI_ISR) & diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c index ad35a5e6a56c..7db963deec73 100644 --- a/arch/mips/pci/pci-ar71xx.c +++ b/arch/mips/pci/pci-ar71xx.c @@ -226,7 +226,7 @@ static struct pci_ops ar71xx_pci_ops = { .write = ar71xx_pci_write_config, }; -static void ar71xx_pci_irq_handler(unsigned int irq, struct irq_desc *desc) +static void ar71xx_pci_irq_handler(struct irq_desc *desc) { struct ar71xx_pci_controller *apc; void __iomem *base = ath79_reset_base; diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c index 907d11dd921b..2013dad700df 100644 --- a/arch/mips/pci/pci-ar724x.c +++ b/arch/mips/pci/pci-ar724x.c @@ -225,7 +225,7 @@ static struct pci_ops ar724x_pci_ops = { .write = ar724x_pci_write, }; -static void ar724x_pci_irq_handler(unsigned int irq, struct irq_desc *desc) +static void ar724x_pci_irq_handler(struct irq_desc *desc) { struct ar724x_pci_controller *apc; void __iomem *base; diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c index 53c8efaf1572..ed6732f9aa87 100644 --- a/arch/mips/pci/pci-rt3883.c +++ b/arch/mips/pci/pci-rt3883.c @@ -129,7 +129,7 @@ static void rt3883_pci_write_cfg32(struct rt3883_pci_controller *rpc, rt3883_pci_w32(rpc, val, RT3883_PCI_REG_CFGDATA); } -static void rt3883_pci_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void rt3883_pci_irq_handler(struct irq_desc *desc) { struct rt3883_pci_controller *rpc; u32 pending; diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c index c6996cf67a5c..b8a0bf5766f2 100644 --- a/arch/mips/pci/pci.c +++ b/arch/mips/pci/pci.c @@ -311,6 +311,12 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) void pcibios_fixup_bus(struct pci_bus *bus) { + struct pci_dev *dev = bus->self; + + if (pci_has_flag(PCI_PROBE_ONLY) && dev && + (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { + pci_read_bridge_bases(bus); + } } EXPORT_SYMBOL(PCIBIOS_MIN_IO); diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c index 8c624a8b9ea2..4cf77f358395 100644 --- a/arch/mips/ralink/irq.c +++ b/arch/mips/ralink/irq.c @@ -96,7 +96,7 @@ unsigned int get_c0_compare_int(void) return CP0_LEGACY_COMPARE_IRQ; } -static void ralink_intc_irq_handler(unsigned int irq, struct irq_desc *desc) +static void ralink_intc_irq_handler(struct irq_desc *desc) { u32 pending = rt_intc_r32(INTC_REG_STATUS0); diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index 6edb9ee6128e..1c8dd0f5cd5d 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild @@ -9,3 +9,4 @@ generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += sections.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c index deaa893efba5..3dfe2d31c67b 100644 --- a/arch/mn10300/unit-asb2305/pci.c +++ b/arch/mn10300/unit-asb2305/pci.c @@ -324,6 +324,7 @@ void pcibios_fixup_bus(struct pci_bus *bus) struct pci_dev *dev; if (bus->self) { + pci_read_bridge_bases(bus); pcibios_fixup_bridge_resources(bus->self); } diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild index 914864eb5a25..d63330e88379 100644 --- a/arch/nios2/include/asm/Kbuild +++ b/arch/nios2/include/asm/Kbuild @@ -61,4 +61,5 @@ generic-y += types.h generic-y += unaligned.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 73eddda53b8e..4eec430d8fa8 100644 --- 
a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -28,6 +28,9 @@ BOOTCFLAGS += -m64 endif ifdef CONFIG_CPU_BIG_ENDIAN BOOTCFLAGS += -mbig-endian +else +BOOTCFLAGS += -mlittle-endian +BOOTCFLAGS += $(call cc-option,-mabi=elfv2) endif BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 6bc0ee4b1070..2c041b535a64 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -111,7 +111,7 @@ CONFIG_SCSI_QLA_FC=m CONFIG_SCSI_QLA_ISCSI=m CONFIG_SCSI_LPFC=m CONFIG_SCSI_VIRTIO=m -CONFIG_SCSI_DH=m +CONFIG_SCSI_DH=y CONFIG_SCSI_DH_RDAC=m CONFIG_SCSI_DH_ALUA=m CONFIG_ATA=y diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 7991f37e5fe2..36871a4bfa54 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -114,7 +114,7 @@ CONFIG_SCSI_QLA_FC=m CONFIG_SCSI_QLA_ISCSI=m CONFIG_SCSI_LPFC=m CONFIG_SCSI_VIRTIO=m -CONFIG_SCSI_DH=m +CONFIG_SCSI_DH=y CONFIG_SCSI_DH_RDAC=m CONFIG_SCSI_DH_ALUA=m CONFIG_ATA=y diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h index 0dc42c5082b7..5f8229e24fe6 100644 --- a/arch/powerpc/include/asm/cache.h +++ b/arch/powerpc/include/asm/cache.h @@ -3,7 +3,6 @@ #ifdef __KERNEL__ -#include <asm/reg.h> /* bytes per L1 cache line */ #if defined(CONFIG_8xx) || defined(CONFIG_403GCX) @@ -40,12 +39,6 @@ struct ppc64_caches { }; extern struct ppc64_caches ppc64_caches; - -static inline void logmpp(u64 x) -{ - asm volatile(PPC_LOGMPP(R1) : : "r" (x)); -} - #endif /* __powerpc64__ && ! __ASSEMBLY__ */ #if defined(__ASSEMBLY__) diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 98eebbf66340..887c259556df 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -44,6 +44,7 @@ #ifdef CONFIG_KVM_MMIO #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #endif +#define KVM_HALT_POLL_NS_DEFAULT 500000 /* These values are internal and can be increased later */ #define KVM_NR_IRQCHIPS 1 @@ -108,6 +109,7 @@ struct kvm_vcpu_stat { u32 dec_exits; u32 ext_intr_exits; u32 halt_successful_poll; + u32 halt_attempted_poll; u32 halt_wakeup; u32 dbell_exits; u32 gdbell_exits; @@ -295,8 +297,6 @@ struct kvmppc_vcore { u32 arch_compat; ulong pcr; ulong dpdes; /* doorbell state (POWER8) */ - void *mpp_buffer; /* Micro Partition Prefetch buffer */ - bool mpp_buffer_is_valid; ulong conferring_threads; }; diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index cab6753f1be5..3f191f573d4f 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -61,8 +61,13 @@ struct machdep_calls { unsigned long addr, unsigned char *hpte_slot_array, int psize, int ssize, int local); - /* special for kexec, to be called in real mode, linear mapping is - * destroyed as well */ + /* + * Special for kexec. + * To be called in real mode with interrupts disabled. No locks are + * taken as such, concurrent access on pre POWER5 hardware could result + * in a deadlock. + * The linear mapping is destroyed as well. 
+ */ void (*hpte_clear_all)(void); void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size, diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 790f5d1d9a46..7ab04fc59e24 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -141,7 +141,6 @@ #define PPC_INST_ISEL 0x7c00001e #define PPC_INST_ISEL_MASK 0xfc00003e #define PPC_INST_LDARX 0x7c0000a8 -#define PPC_INST_LOGMPP 0x7c0007e4 #define PPC_INST_LSWI 0x7c0004aa #define PPC_INST_LSWX 0x7c00042a #define PPC_INST_LWARX 0x7c000028 @@ -285,20 +284,6 @@ #define __PPC_EH(eh) 0 #endif -/* POWER8 Micro Partition Prefetch (MPP) parameters */ -/* Address mask is common for LOGMPP instruction and MPPR SPR */ -#define PPC_MPPE_ADDRESS_MASK 0xffffffffc000ULL - -/* Bits 60 and 61 of MPP SPR should be set to one of the following */ -/* Aborting the fetch is indeed setting 00 in the table size bits */ -#define PPC_MPPR_FETCH_ABORT (0x0ULL << 60) -#define PPC_MPPR_FETCH_WHOLE_TABLE (0x2ULL << 60) - -/* Bits 54 and 55 of register for LOGMPP instruction should be set to: */ -#define PPC_LOGMPP_LOG_L2 (0x02ULL << 54) -#define PPC_LOGMPP_LOG_L2L3 (0x01ULL << 54) -#define PPC_LOGMPP_LOG_ABORT (0x03ULL << 54) - /* Deal with instructions that older assemblers aren't aware of */ #define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \ __PPC_RA(a) | __PPC_RB(b)) @@ -307,8 +292,6 @@ #define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \ ___PPC_RT(t) | ___PPC_RA(a) | \ ___PPC_RB(b) | __PPC_EH(eh)) -#define PPC_LOGMPP(b) stringify_in_c(.long PPC_INST_LOGMPP | \ - __PPC_RB(b)) #define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \ ___PPC_RT(t) | ___PPC_RA(a) | \ ___PPC_RB(b) | __PPC_EH(eh)) diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h index 25784cc959a0..1e155ca6d33c 100644 --- a/arch/powerpc/include/asm/qe_ic.h +++ b/arch/powerpc/include/asm/qe_ic.h @@ -59,14 +59,14 @@ enum qe_ic_grp_id { #ifdef CONFIG_QUICC_ENGINE void qe_ic_init(struct device_node *node, unsigned int flags, - void (*low_handler)(unsigned int irq, struct irq_desc *desc), - void (*high_handler)(unsigned int irq, struct irq_desc *desc)); + void (*low_handler)(struct irq_desc *desc), + void (*high_handler)(struct irq_desc *desc)); unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic); unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic); #else static inline void qe_ic_init(struct device_node *node, unsigned int flags, - void (*low_handler)(unsigned int irq, struct irq_desc *desc), - void (*high_handler)(unsigned int irq, struct irq_desc *desc)) + void (*low_handler)(struct irq_desc *desc), + void (*high_handler)(struct irq_desc *desc)) {} static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) { return 0; } @@ -78,8 +78,7 @@ void qe_ic_set_highest_priority(unsigned int virq, int high); int qe_ic_set_priority(unsigned int virq, unsigned int priority); int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high); -static inline void qe_ic_cascade_low_ipic(unsigned int irq, - struct irq_desc *desc) +static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc) { struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); @@ -88,8 +87,7 @@ static inline void qe_ic_cascade_low_ipic(unsigned int irq, generic_handle_irq(cascade_irq); } -static inline void qe_ic_cascade_high_ipic(unsigned int irq, - struct irq_desc *desc) +static inline void 
qe_ic_cascade_high_ipic(struct irq_desc *desc) { struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); @@ -98,8 +96,7 @@ static inline void qe_ic_cascade_high_ipic(unsigned int irq, generic_handle_irq(cascade_irq); } -static inline void qe_ic_cascade_low_mpic(unsigned int irq, - struct irq_desc *desc) +static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc) { struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); @@ -111,8 +108,7 @@ static inline void qe_ic_cascade_low_mpic(unsigned int irq, chip->irq_eoi(&desc->irq_data); } -static inline void qe_ic_cascade_high_mpic(unsigned int irq, - struct irq_desc *desc) +static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc) { struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); @@ -124,8 +120,7 @@ static inline void qe_ic_cascade_high_mpic(unsigned int irq, chip->irq_eoi(&desc->irq_data); } -static inline void qe_ic_cascade_muxed_mpic(unsigned int irq, - struct irq_desc *desc) +static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc) { struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); unsigned int cascade_irq; diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index aa1cc5f015ee..a908ada8e0a5 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -226,7 +226,6 @@ #define CTRL_TE 0x00c00000 /* thread enable */ #define CTRL_RUNLATCH 0x1 #define SPRN_DAWR 0xB4 -#define SPRN_MPPR 0xB8 /* Micro Partition Prefetch Register */ #define SPRN_RPR 0xBA /* Relative Priority Register */ #define SPRN_CIABR 0xBB #define CIABR_PRIV 0x3 diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 71f2b3f02cf8..126d0c4f9b7d 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -368,3 +368,5 @@ SYSCALL_SPU(memfd_create) SYSCALL_SPU(bpf) COMPAT_SYS(execveat) PPC64ONLY(switch_endian) +SYSCALL_SPU(userfaultfd) +SYSCALL_SPU(membarrier) diff --git a/arch/powerpc/include/asm/tsi108_pci.h b/arch/powerpc/include/asm/tsi108_pci.h index 5653d7cc3e24..ae59d5b672b0 100644 --- a/arch/powerpc/include/asm/tsi108_pci.h +++ b/arch/powerpc/include/asm/tsi108_pci.h @@ -39,7 +39,7 @@ extern int tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary); extern void tsi108_pci_int_init(struct device_node *node); -extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc); +extern void tsi108_irq_cascade(struct irq_desc *desc); extern void tsi108_clear_pci_cfg_error(void); #endif /* _ASM_POWERPC_TSI108_PCI_H */ diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index f4f8b667d75b..13411be86041 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -12,7 +12,7 @@ #include <uapi/asm/unistd.h> -#define __NR_syscalls 364 +#define __NR_syscalls 366 #define __NR__exit __NR_exit #define NR_syscalls __NR_syscalls diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h index 5b3a903adae6..e4396a7d0f7c 100644 --- a/arch/powerpc/include/asm/word-at-a-time.h +++ b/arch/powerpc/include/asm/word-at-a-time.h @@ -40,6 +40,11 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct return (val + c->high_bits) & ~rhs; } +static inline unsigned long zero_bytemask(unsigned long mask) +{ + return ~1ul << __fls(mask); +} + #else #ifdef 
CONFIG_64BIT diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index e4aa173dae62..6337738018aa 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h @@ -386,5 +386,7 @@ #define __NR_bpf 361 #define __NR_execveat 362 #define __NR_switch_endian 363 +#define __NR_userfaultfd 364 +#define __NR_membarrier 365 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 45096033d37b..290559df1e8b 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -441,7 +441,7 @@ void migrate_irqs(void) chip = irq_data_get_irq_chip(data); - cpumask_and(mask, data->affinity, map); + cpumask_and(mask, irq_data_get_affinity_mask(data), map); if (cpumask_any(mask) >= nr_cpu_ids) { pr_warn("Breaking affinity for irq %i\n", irq); cpumask_copy(mask, map); diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index a1d0632d97c6..7587b2ae5f77 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -1032,7 +1032,13 @@ void pcibios_set_master(struct pci_dev *dev) void pcibios_fixup_bus(struct pci_bus *bus) { - /* Fixup the bus */ + /* When called from the generic PCI probe, read PCI<->PCI bridge + * bases. This is -not- called when generating the PCI tree from + * the OF device-tree. + */ + pci_read_bridge_bases(bus); + + /* Now fixup the bus bus */ pcibios_setup_bus_self(bus); /* Now fixup devices on that bus */ diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 84bf934cf748..5a753fae8265 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -1043,6 +1043,9 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs) if (!capable(CAP_SYS_ADMIN)) return -EPERM; + if (!rtas.entry) + return -EINVAL; + if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0) return -EFAULT; diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index bb02e9f6944e..ad8c9db61237 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -38,6 +38,7 @@ #include <asm/udbg.h> #include <asm/mmu_context.h> #include <asm/epapr_hcalls.h> +#include <asm/code-patching.h> #define DBG(fmt...) @@ -109,6 +110,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr) * This is called very early on the boot process, after a minimal * MMU environment has been set up but before MMU_init is called. 
*/ +extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */ + notrace void __init machine_init(u64 dt_ptr) { lockdep_init(); @@ -116,6 +119,9 @@ notrace void __init machine_init(u64 dt_ptr) /* Enable early debugging if any specified (see udbg.h) */ udbg_early_init(); + patch_instruction((unsigned int *)&memcpy, PPC_INST_NOP); + patch_instruction(&memset_nocache_branch, PPC_INST_NOP); + /* Do some early initialization based on the flat device tree */ early_init_devtree(__va(dt_ptr)); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index d75bf325f54a..099c79d8c160 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -53,6 +53,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "ext_intr", VCPU_STAT(ext_intr_exits) }, { "queue_intr", VCPU_STAT(queue_intr) }, { "halt_successful_poll", VCPU_STAT(halt_successful_poll), }, + { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), }, { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "pf_storage", VCPU_STAT(pf_storage) }, { "sp_storage", VCPU_STAT(sp_storage) }, @@ -828,12 +829,15 @@ int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu) unsigned long size = kvmppc_get_gpr(vcpu, 4); unsigned long addr = kvmppc_get_gpr(vcpu, 5); u64 buf; + int srcu_idx; int ret; if (!is_power_of_2(size) || (size > sizeof(buf))) return H_TOO_HARD; + srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf); + srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); if (ret != 0) return H_TOO_HARD; @@ -868,6 +872,7 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu) unsigned long addr = kvmppc_get_gpr(vcpu, 5); unsigned long val = kvmppc_get_gpr(vcpu, 6); u64 buf; + int srcu_idx; int ret; switch (size) { @@ -891,7 +896,9 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu) return H_TOO_HARD; } + srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf); + srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); if (ret != 0) return H_TOO_HARD; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 9754e6815e52..9c26c5a96ea2 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -36,7 +36,6 @@ #include <asm/reg.h> #include <asm/cputable.h> -#include <asm/cache.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/uaccess.h> @@ -75,12 +74,6 @@ static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1); -#if defined(CONFIG_PPC_64K_PAGES) -#define MPP_BUFFER_ORDER 0 -#elif defined(CONFIG_PPC_4K_PAGES) -#define MPP_BUFFER_ORDER 3 -#endif - static int dynamic_mt_modes = 6; module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)"); @@ -1455,13 +1448,6 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core) vcore->kvm = kvm; INIT_LIST_HEAD(&vcore->preempt_list); - vcore->mpp_buffer_is_valid = false; - - if (cpu_has_feature(CPU_FTR_ARCH_207S)) - vcore->mpp_buffer = (void *)__get_free_pages( - GFP_KERNEL|__GFP_ZERO, - MPP_BUFFER_ORDER); - return vcore; } @@ -1894,33 +1880,6 @@ static int on_primary_thread(void) return 1; } -static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc) -{ - phys_addr_t phy_addr, mpp_addr; - - phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer); - mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK; - - mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT); - logmpp(mpp_addr | PPC_LOGMPP_LOG_L2); - - 
vc->mpp_buffer_is_valid = true; -} - -static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc) -{ - phys_addr_t phy_addr, mpp_addr; - - phy_addr = virt_to_phys(vc->mpp_buffer); - mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK; - - /* We must abort any in-progress save operations to ensure - * the table is valid so that prefetch engine knows when to - * stop prefetching. */ - logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT); - mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE); -} - /* * A list of virtual cores for each physical CPU. * These are vcores that could run but their runner VCPU tasks are @@ -2471,14 +2430,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) srcu_idx = srcu_read_lock(&vc->kvm->srcu); - if (vc->mpp_buffer_is_valid) - kvmppc_start_restoring_l2_cache(vc); - __kvmppc_vcore_entry(); - if (vc->mpp_buffer) - kvmppc_start_saving_l2_cache(vc); - srcu_read_unlock(&vc->kvm->srcu, srcu_idx); spin_lock(&vc->lock); @@ -2692,9 +2645,13 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && (vc->vcore_state == VCORE_RUNNING || - vc->vcore_state == VCORE_EXITING)) + vc->vcore_state == VCORE_EXITING || + vc->vcore_state == VCORE_PIGGYBACK)) kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE); + if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) + kvmppc_vcore_end_preempt(vc); + if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { kvmppc_remove_runnable(vc, vcpu); vcpu->stat.signal_exits++; @@ -3069,14 +3026,8 @@ static void kvmppc_free_vcores(struct kvm *kvm) { long int i; - for (i = 0; i < KVM_MAX_VCORES; ++i) { - if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) { - struct kvmppc_vcore *vc = kvm->arch.vcores[i]; - free_pages((unsigned long)vc->mpp_buffer, - MPP_BUFFER_ORDER); - } + for (i = 0; i < KVM_MAX_VCORES; ++i) kfree(kvm->arch.vcores[i]); - } kvm->arch.online_vcores = 0; } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 2273dcacef39..b98889e9851d 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1257,6 +1257,7 @@ mc_cont: bl kvmhv_accumulate_time #endif + mr r3, r12 /* Increment exit count, poke other threads to exit */ bl kvmhv_commence_exit nop diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index ae458f0fd061..fd5875179e5c 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -63,6 +63,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "dec", VCPU_STAT(dec_exits) }, { "ext_intr", VCPU_STAT(ext_intr_exits) }, { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, + { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) }, { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "doorbell", VCPU_STAT(dbell_exits) }, { "guest doorbell", VCPU_STAT(gdbell_exits) }, diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S index 2ef50c629470..c44df2dbedd5 100644 --- a/arch/powerpc/lib/copy_32.S +++ b/arch/powerpc/lib/copy_32.S @@ -73,6 +73,10 @@ CACHELINE_MASK = (L1_CACHE_BYTES-1) * Use dcbz on the complete cache lines in the destination * to set them to zero. This requires that the destination * area is cacheable. -- paulus + * + * During early init, cache might not be active yet, so dcbz cannot be used. + * We therefore skip the optimised bloc that uses dcbz. This jump is + * replaced by a nop once cache is active. 
This is done in machine_init() */ _GLOBAL(memset) rlwimi r4,r4,8,16,23 @@ -88,6 +92,8 @@ _GLOBAL(memset) subf r6,r0,r6 cmplwi 0,r4,0 bne 2f /* Use normal procedure if r4 is not zero */ +_GLOBAL(memset_nocache_branch) + b 2f /* Skip optimised bloc until cache is enabled */ clrlwi r7,r6,32-LG_CACHELINE_BYTES add r8,r7,r5 @@ -128,6 +134,10 @@ _GLOBAL(memset) * the destination area is cacheable. * We only use this version if the source and dest don't overlap. * -- paulus. + * + * During early init, cache might not be active yet, so dcbz cannot be used. + * We therefore jump to generic_memcpy which doesn't use dcbz. This jump is + * replaced by a nop once cache is active. This is done in machine_init() */ _GLOBAL(memmove) cmplw 0,r3,r4 @@ -135,6 +145,7 @@ _GLOBAL(memmove) /* fall through */ _GLOBAL(memcpy) + b generic_memcpy add r7,r3,r5 /* test if the src & dst overlap */ add r8,r4,r5 cmplw 0,r4,r7 diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index 13befa35d8a8..c8822af10a58 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -582,13 +582,21 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot, * be when they isi), and we are the only one left. We rely on our kernel * mapping being 0xC0's and the hardware ignoring those two real bits. * + * This must be called with interrupts disabled. + * + * Taking the native_tlbie_lock is unsafe here due to the possibility of + * lockdep being on. On pre POWER5 hardware, not taking the lock could + * cause deadlock. POWER5 and newer not taking the lock is fine. This only + * gets called during boot before secondary CPUs have come up and during + * crashdump and all bets are off anyway. + * * TODO: add batching support when enabled. remember, no dynamic memory here, * athough there is the control page available... */ static void native_hpte_clear(void) { unsigned long vpn = 0; - unsigned long slot, slots, flags; + unsigned long slot, slots; struct hash_pte *hptep = htab_address; unsigned long hpte_v; unsigned long pteg_count; @@ -596,13 +604,6 @@ static void native_hpte_clear(void) pteg_count = htab_hash_mask + 1; - local_irq_save(flags); - - /* we take the tlbie lock and hold it. Some hardware will - * deadlock if we try to tlbie from two processors at once. - */ - raw_spin_lock(&native_tlbie_lock); - slots = pteg_count * HPTES_PER_GROUP; for (slot = 0; slot < slots; slot++, hptep++) { @@ -614,8 +615,8 @@ static void native_hpte_clear(void) hpte_v = be64_to_cpu(hptep->v); /* - * Call __tlbie() here rather than tlbie() since we - * already hold the native_tlbie_lock. + * Call __tlbie() here rather than tlbie() since we can't take the + * native_tlbie_lock. 
*/ if (hpte_v & HPTE_V_VALID) { hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn); @@ -625,8 +626,6 @@ static void native_hpte_clear(void) } asm volatile("eieio; tlbsync; ptesync":::"memory"); - raw_spin_unlock(&native_tlbie_lock); - local_irq_restore(flags); } /* diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c index 43dafb9d6a46..4d87122cf6a7 100644 --- a/arch/powerpc/mm/hugepage-hash64.c +++ b/arch/powerpc/mm/hugepage-hash64.c @@ -85,7 +85,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, BUG_ON(index >= 4096); vpn = hpt_vpn(ea, vsid, ssize); - hash = hpt_hash(vpn, shift, ssize); hpte_slot_array = get_hpte_slot_array(pmdp); if (psize == MMU_PAGE_4K) { /* @@ -101,6 +100,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, valid = hpte_valid(hpte_slot_array, index); if (valid) { /* update the hpte bits */ + hash = hpt_hash(vpn, shift, ssize); hidx = hpte_hash_index(hpte_slot_array, index); if (hidx & _PTEIDX_SECONDARY) hash = ~hash; @@ -126,6 +126,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, if (!valid) { unsigned long hpte_group; + hash = hpt_hash(vpn, shift, ssize); /* insert new entry */ pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT; new_pmd |= _PAGE_HASHPTE; diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c index 11090ab4bf59..0035d146df73 100644 --- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c +++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c @@ -104,9 +104,10 @@ cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp, return irq_linear_revmap(cpld_pic_host, cpld_irq); } -static void -cpld_pic_cascade(unsigned int irq, struct irq_desc *desc) +static void cpld_pic_cascade(struct irq_desc *desc) { + unsigned int irq; + irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status, &cpld_regs->pci_mask); if (irq != NO_IRQ) { diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index 32cae33c4266..8fb95480fd73 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c @@ -80,7 +80,7 @@ static struct irq_chip media5200_irq_chip = { .irq_mask_ack = media5200_irq_mask, }; -void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) +static void media5200_irq_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); int sub_virq, val; diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index 63016621aff8..78ac19aefa4d 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c @@ -191,7 +191,7 @@ static struct irq_chip mpc52xx_gpt_irq_chip = { .irq_set_type = mpc52xx_gpt_irq_set_type, }; -void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc) +static void mpc52xx_gpt_irq_cascade(struct irq_desc *desc) { struct mpc52xx_gpt_priv *gpt = irq_desc_get_handler_data(desc); int sub_virq; diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index 2944bc84b9d6..4fe2074c88cb 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c @@ -196,7 +196,7 @@ static int mpc52xx_extirq_set_type(struct irq_data *d, unsigned int flow_type) ctrl_reg |= (type << (22 - (l2irq * 2))); out_be32(&intr->ctrl, ctrl_reg); - __irq_set_handler_locked(d->irq, handler); + irq_set_handler_locked(d, handler); 
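
An aside on the pattern recurring throughout the IRQ hunks above: chained flow handlers drop their "unsigned int irq" first argument and receive only "struct irq_desc *desc", recovering the Linux irq number from the descriptor where it is still needed, while __irq_set_handler_locked(d->irq, ...) becomes irq_set_handler_locked(d, ...). A minimal sketch of the resulting handler shape follows; foo_pic_cascade() and foo_pic_get_irq() are hypothetical names, and it uses the chained_irq_enter/exit helpers rather than calling chip->irq_eoi() directly as some of the converted handlers do.

#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>

static unsigned int foo_pic_get_irq(void *pic, unsigned int parent_irq); /* hypothetical helper */

static void foo_pic_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	void *pic = irq_desc_get_handler_data(desc);	/* driver-private data */
	unsigned int irq = irq_desc_get_irq(desc);	/* the old first argument */
	unsigned int cascade_irq;

	chained_irq_enter(chip, desc);			/* mask/ack the parent irq */
	cascade_irq = foo_pic_get_irq(pic, irq);	/* decode the child irq */
	if (cascade_irq)
		generic_handle_irq(cascade_irq);
	chained_irq_exit(chip, desc);			/* eoi/unmask the parent irq */
}
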
return 0; } diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c index 74861a7fb807..60e89fc9c753 100644 --- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c +++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c @@ -78,7 +78,7 @@ static struct irq_chip pq2ads_pci_ic = { .irq_disable = pq2ads_pci_mask_irq }; -static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc) +static void pq2ads_pci_irq_demux(struct irq_desc *desc) { struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc); u32 stat, mask, pend; diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c index 7bfb9b184dd4..23791de7b688 100644 --- a/arch/powerpc/platforms/85xx/common.c +++ b/arch/powerpc/platforms/85xx/common.c @@ -49,7 +49,7 @@ int __init mpc85xx_common_publish_devices(void) return of_platform_bus_probe(NULL, mpc85xx_common_ids, NULL); } #ifdef CONFIG_CPM2 -static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) +static void cpm2_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); int cascade_irq; diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c index b0753e222086..5ac70de3e48a 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c @@ -192,8 +192,7 @@ void mpc85xx_cds_fixup_bus(struct pci_bus *bus) } #ifdef CONFIG_PPC_I8259 -static void mpc85xx_8259_cascade_handler(unsigned int irq, - struct irq_desc *desc) +static void mpc85xx_8259_cascade_handler(struct irq_desc *desc) { unsigned int cascade_irq = i8259_irq(); @@ -202,7 +201,7 @@ static void mpc85xx_8259_cascade_handler(unsigned int irq, generic_handle_irq(cascade_irq); /* check for any interrupts from the shared IRQ line */ - handle_fasteoi_irq(irq, desc); + handle_fasteoi_irq(desc); } static irqreturn_t mpc85xx_8259_cascade_action(int irq, void *dev_id) diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c index ffdf02121a7c..f858306dba6a 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c @@ -46,7 +46,7 @@ #endif #ifdef CONFIG_PPC_I8259 -static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc) +static void mpc85xx_8259_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq = i8259_irq(); diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c index 55a9682b9529..b02d6a5bb035 100644 --- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c +++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c @@ -91,9 +91,10 @@ static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq) (irq_hw_number_t)i); } -void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc) +static void socrates_fpga_pic_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned int irq = irq_desc_get_irq(desc); unsigned int cascade_irq; /* diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c index d5b98c0f958a..845defa1fd19 100644 --- a/arch/powerpc/platforms/86xx/pic.c +++ b/arch/powerpc/platforms/86xx/pic.c @@ -17,7 +17,7 @@ #include <asm/i8259.h> #ifdef CONFIG_PPC_I8259 -static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc) +static void mpc86xx_8259_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int 
cascade_irq = i8259_irq(); diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c index d3037747031d..c289fc77b4ba 100644 --- a/arch/powerpc/platforms/8xx/m8xx_setup.c +++ b/arch/powerpc/platforms/8xx/m8xx_setup.c @@ -214,7 +214,7 @@ void mpc8xx_restart(char *cmd) panic("Restart failed\n"); } -static void cpm_cascade(unsigned int irq, struct irq_desc *desc) +static void cpm_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); int cascade_irq = cpm_get_irq(); diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index 306888acb737..e0e68a1c0d3c 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c @@ -93,7 +93,7 @@ static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val) dcr_write(msic->dcr_host, dcr_n, val); } -static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) +static void axon_msi_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct axon_msic *msic = irq_desc_get_handler_data(desc); diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index a15f1efc295f..9f609fc8d331 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -99,11 +99,12 @@ static void iic_ioexc_eoi(struct irq_data *d) { } -static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc) +static void iic_ioexc_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct cbe_iic_regs __iomem *node_iic = (void __iomem *)irq_desc_get_handler_data(desc); + unsigned int irq = irq_desc_get_irq(desc); unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC; unsigned long bits, ack; int cascade; diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c index 1f72f4ab6353..9d27de62dc62 100644 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ b/arch/powerpc/platforms/cell/spider-pic.c @@ -199,7 +199,7 @@ static const struct irq_domain_ops spider_host_ops = { .xlate = spider_host_xlate, }; -static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc) +static void spider_irq_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct spider_pic *pic = irq_desc_get_handler_data(desc); diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index 15ebc4e8a151..987d1b8d68e3 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c @@ -363,7 +363,7 @@ void __init chrp_setup_arch(void) if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0); } -static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc) +static void chrp_8259_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq = i8259_irq(); diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index 9dd154d6f89a..9b7975706bfc 100644 --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c @@ -120,8 +120,7 @@ static unsigned int __hlwd_pic_get_irq(struct irq_domain *h) return irq_linear_revmap(h, irq); } -static void hlwd_pic_irq_cascade(unsigned int cascade_virq, - struct irq_desc *desc) +static void hlwd_pic_irq_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct irq_domain 
*irq_domain = irq_desc_get_handler_data(desc); diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c index 1613303177e6..8f65aa3747f5 100644 --- a/arch/powerpc/platforms/embedded6xx/mvme5100.c +++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c @@ -42,7 +42,7 @@ static phys_addr_t pci_membase; static u_char *restart; -static void mvme5100_8259_cascade(unsigned int irq, struct irq_desc *desc) +static void mvme5100_8259_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq = i8259_irq(); diff --git a/arch/powerpc/platforms/pasemi/msi.c b/arch/powerpc/platforms/pasemi/msi.c index e66ef1943338..b304a9fe55cc 100644 --- a/arch/powerpc/platforms/pasemi/msi.c +++ b/arch/powerpc/platforms/pasemi/msi.c @@ -63,6 +63,7 @@ static struct irq_chip mpic_pasemi_msi_chip = { static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev) { struct msi_desc *entry; + irq_hw_number_t hwirq; pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev); @@ -70,10 +71,10 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev) if (entry->irq == NO_IRQ) continue; + hwirq = virq_to_hw(entry->irq); irq_set_msi_desc(entry->irq, NULL); - msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, - virq_to_hw(entry->irq), ALLOC_CHUNK); irq_dispose_mapping(entry->irq); + msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, ALLOC_CHUNK); } return; diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 230f3a7cdea4..4296d55e88f3 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -487,9 +487,12 @@ int opal_machine_check(struct pt_regs *regs) * PRD component would have already got notified about this * error through other channels. * - * In any case, let us just fall through. We anyway heading - * down to panic path. + * If hardware marked this as an unrecoverable MCE, we are + * going to panic anyway. Even if it didn't, it's not safe to + * continue at this point, so we should explicitly panic. */ + + panic("PowerNV Unrecovered Machine Check"); return 0; } diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 2927cd5c8303..414fd1a00fda 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -2049,9 +2049,23 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe) struct iommu_table *tbl = NULL; long rc; + /* + * crashkernel= specifies the kdump kernel's maximum memory at + * some offset and there is no guaranteed the result is a power + * of 2, which will cause errors later. + */ + const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max()); + + /* + * In memory constrained environments, e.g. kdump kernel, the + * DMA window can be larger than available memory, which will + * cause errors later. 
+ */ + const u64 window_size = min((u64)pe->table_group.tce32_size, max_memory); + rc = pnv_pci_ioda2_create_table(&pe->table_group, 0, IOMMU_PAGE_SHIFT_4K, - pe->table_group.tce32_size, + window_size, POWERNV_IOMMU_DEFAULT_LEVELS, &tbl); if (rc) { pe_err(pe, "Failed to create 32-bit TCE table, err %ld", diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 9b2480b265c0..f2dd77234240 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -99,6 +99,7 @@ void pnv_teardown_msi_irqs(struct pci_dev *pdev) struct pci_controller *hose = pci_bus_to_host(pdev->bus); struct pnv_phb *phb = hose->private_data; struct msi_desc *entry; + irq_hw_number_t hwirq; if (WARN_ON(!phb)) return; @@ -106,10 +107,10 @@ void pnv_teardown_msi_irqs(struct pci_dev *pdev) for_each_pci_msi_entry(entry, pdev) { if (entry->irq == NO_IRQ) continue; + hwirq = virq_to_hw(entry->irq); irq_set_msi_desc(entry->irq, NULL); - msi_bitmap_free_hwirqs(&phb->msi_bmp, - virq_to_hw(entry->irq) - phb->msi_base, 1); irq_dispose_mapping(entry->irq); + msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1); } } #endif /* CONFIG_PCI_MSI */ diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 8f70ba681a78..ca264833ee64 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -171,7 +171,26 @@ static void pnv_smp_cpu_kill_self(void) * so clear LPCR:PECE1. We keep PECE2 enabled. */ mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1); + + /* + * Hard-disable interrupts, and then clear irq_happened flags + * that we can safely ignore while off-line, since they + * are for things for which we do no processing when off-line + * (or in the case of HMI, all the processing we need to do + * is done in lower-level real-mode code). + */ + hard_irq_disable(); + local_paca->irq_happened &= ~(PACA_IRQ_DEC | PACA_IRQ_HMI); + while (!generic_check_cpu_restart(cpu)) { + /* + * Clear IPI flag, since we don't handle IPIs while + * offline, except for those when changing micro-threading + * mode, which are handled explicitly below, and those + * for coming online, which are handled via + * generic_check_cpu_restart() calls. + */ + kvmppc_set_host_ipi(cpu, 0); ppc64_runlatch_off(); @@ -196,20 +215,20 @@ static void pnv_smp_cpu_kill_self(void) * having finished executing in a KVM guest, then srr1 * contains 0. 
*/ - if ((srr1 & wmask) == SRR1_WAKEEE) { + if (((srr1 & wmask) == SRR1_WAKEEE) || + (local_paca->irq_happened & PACA_IRQ_EE)) { icp_native_flush_interrupt(); - local_paca->irq_happened &= PACA_IRQ_HARD_DIS; - smp_mb(); } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) { unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); asm volatile(PPC_MSGCLR(%0) : : "r" (msg)); - kvmppc_set_host_ipi(cpu, 0); } + local_paca->irq_happened &= ~(PACA_IRQ_EE | PACA_IRQ_DBELL); + smp_mb(); if (cpu_core_split_required()) continue; - if (!generic_check_cpu_restart(cpu)) + if (srr1 && !generic_check_cpu_restart(cpu)) DBG("CPU%d Unexpected exit while offline !\n", cpu); } mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1); diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c index 09787139834d..3db53e8aff92 100644 --- a/arch/powerpc/platforms/ps3/os-area.c +++ b/arch/powerpc/platforms/ps3/os-area.c @@ -194,11 +194,6 @@ static const struct os_area_db_id os_area_db_id_rtc_diff = { .key = OS_AREA_DB_KEY_RTC_DIFF }; -static const struct os_area_db_id os_area_db_id_video_mode = { - .owner = OS_AREA_DB_OWNER_LINUX, - .key = OS_AREA_DB_KEY_VIDEO_MODE -}; - #define SECONDS_FROM_1970_TO_2000 946684800LL /** diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 47d9cebe7159..db17827eb746 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -422,8 +422,10 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count) dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent); of_node_put(parent); - if (!dn) + if (!dn) { + dlpar_release_drc(drc_index); return -EINVAL; + } rc = dlpar_attach_node(dn); if (rc) { diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 39a74fad3e04..9a83eb71b030 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -111,7 +111,7 @@ static void __init fwnmi_init(void) fwnmi_active = 1; } -static void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc) +static void pseries_8259_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq = i8259_irq(); diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c index a11bd1d433ad..9e86074719a9 100644 --- a/arch/powerpc/sysdev/cpm2_pic.c +++ b/arch/powerpc/sysdev/cpm2_pic.c @@ -155,9 +155,9 @@ static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type) irqd_set_trigger_type(d, flow_type); if (flow_type & IRQ_TYPE_LEVEL_LOW) - __irq_set_handler_locked(d->irq, handle_level_irq); + irq_set_handler_locked(d, handle_level_irq); else - __irq_set_handler_locked(d->irq, handle_edge_irq); + irq_set_handler_locked(d, handle_edge_irq); /* internal IRQ senses are LEVEL_LOW * EXT IRQ and Port C IRQ senses are programmable diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 5916da1856a7..48a576aa47b9 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -128,15 +128,16 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev) { struct msi_desc *entry; struct fsl_msi *msi_data; + irq_hw_number_t hwirq; for_each_pci_msi_entry(entry, pdev) { if (entry->irq == NO_IRQ) continue; + hwirq = virq_to_hw(entry->irq); msi_data = irq_get_chip_data(entry->irq); irq_set_msi_desc(entry->irq, NULL); - msi_bitmap_free_hwirqs(&msi_data->bitmap, - virq_to_hw(entry->irq), 1); irq_dispose_mapping(entry->irq); + 
msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1); } return; diff --git a/arch/powerpc/sysdev/ge/ge_pic.c b/arch/powerpc/sysdev/ge/ge_pic.c index 2bcb78bb3a15..d57b77573068 100644 --- a/arch/powerpc/sysdev/ge/ge_pic.c +++ b/arch/powerpc/sysdev/ge/ge_pic.c @@ -91,7 +91,7 @@ static int gef_pic_cascade_irq; * should be masked out. */ -void gef_pic_cascade(unsigned int irq, struct irq_desc *desc) +static void gef_pic_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq; diff --git a/arch/powerpc/sysdev/ge/ge_pic.h b/arch/powerpc/sysdev/ge/ge_pic.h index 908dbd9826b6..5bf7e4b81e36 100644 --- a/arch/powerpc/sysdev/ge/ge_pic.h +++ b/arch/powerpc/sysdev/ge/ge_pic.h @@ -1,8 +1,6 @@ #ifndef __GEF_PIC_H__ #define __GEF_PIC_H__ - -void gef_pic_cascade(unsigned int, struct irq_desc *); unsigned int gef_pic_get_irq(void); void gef_pic_init(struct device_node *); diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index 6b2b68914810..b1297ab1599b 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c @@ -624,10 +624,10 @@ static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type) irqd_set_trigger_type(d, flow_type); if (flow_type & IRQ_TYPE_LEVEL_LOW) { - __irq_set_handler_locked(d->irq, handle_level_irq); + irq_set_handler_locked(d, handle_level_irq); d->chip = &ipic_level_irq_chip; } else { - __irq_set_handler_locked(d->irq, handle_edge_irq); + irq_set_handler_locked(d, handle_edge_irq); d->chip = &ipic_edge_irq_chip; } diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c index d93a78be4346..9a423975853a 100644 --- a/arch/powerpc/sysdev/mpc8xx_pic.c +++ b/arch/powerpc/sysdev/mpc8xx_pic.c @@ -55,7 +55,7 @@ static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type) unsigned int siel = in_be32(&siu_reg->sc_siel); siel |= mpc8xx_irqd_to_bit(d); out_be32(&siu_reg->sc_siel, siel); - __irq_set_handler_locked(d->irq, handle_edge_irq); + irq_set_handler_locked(d, handle_edge_irq); } return 0; } diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 97a8ae8f94dd..537e5db85a06 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -1181,7 +1181,7 @@ static int mpic_host_xlate(struct irq_domain *h, struct device_node *ct, } /* IRQ handler for a secondary MPIC cascaded from another IRQ controller */ -static void mpic_cascade(unsigned int irq, struct irq_desc *desc) +static void mpic_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct mpic *mpic = irq_desc_get_handler_data(desc); diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c index 70fbd5694a8b..2cbc7e29b85f 100644 --- a/arch/powerpc/sysdev/mpic_u3msi.c +++ b/arch/powerpc/sysdev/mpic_u3msi.c @@ -107,15 +107,16 @@ static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq) static void u3msi_teardown_msi_irqs(struct pci_dev *pdev) { struct msi_desc *entry; + irq_hw_number_t hwirq; for_each_pci_msi_entry(entry, pdev) { if (entry->irq == NO_IRQ) continue; + hwirq = virq_to_hw(entry->irq); irq_set_msi_desc(entry->irq, NULL); - msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, - virq_to_hw(entry->irq), 1); irq_dispose_mapping(entry->irq); + msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1); } return; diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c index 24d0470c1698..8fb806135043 100644 --- a/arch/powerpc/sysdev/ppc4xx_msi.c +++ b/arch/powerpc/sysdev/ppc4xx_msi.c @@ 
-124,16 +124,17 @@ void ppc4xx_teardown_msi_irqs(struct pci_dev *dev) { struct msi_desc *entry; struct ppc4xx_msi *msi_data = &ppc4xx_msi; + irq_hw_number_t hwirq; dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n"); for_each_pci_msi_entry(entry, dev) { if (entry->irq == NO_IRQ) continue; + hwirq = virq_to_hw(entry->irq); irq_set_msi_desc(entry->irq, NULL); - msi_bitmap_free_hwirqs(&msi_data->bitmap, - virq_to_hw(entry->irq), 1); irq_dispose_mapping(entry->irq); + msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1); } } diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c index 47b352e4bc74..fbcc1f855a7f 100644 --- a/arch/powerpc/sysdev/qe_lib/qe_ic.c +++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c @@ -311,8 +311,8 @@ unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) } void __init qe_ic_init(struct device_node *node, unsigned int flags, - void (*low_handler)(unsigned int irq, struct irq_desc *desc), - void (*high_handler)(unsigned int irq, struct irq_desc *desc)) + void (*low_handler)(struct irq_desc *desc), + void (*high_handler)(struct irq_desc *desc)) { struct qe_ic *qe_ic; struct resource res; diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c index 57b54476e747..379de955aae3 100644 --- a/arch/powerpc/sysdev/tsi108_pci.c +++ b/arch/powerpc/sysdev/tsi108_pci.c @@ -428,7 +428,7 @@ void __init tsi108_pci_int_init(struct device_node *node) init_pci_source(); } -void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc) +void tsi108_irq_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq = get_pci_source(); diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c index d77345338671..6893d8f236df 100644 --- a/arch/powerpc/sysdev/uic.c +++ b/arch/powerpc/sysdev/uic.c @@ -194,7 +194,7 @@ static const struct irq_domain_ops uic_host_ops = { .xlate = irq_domain_xlate_twocell, }; -void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) +static void uic_irq_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct irq_data *idata = irq_desc_get_irq_data(desc); diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c index 11ac964d5175..27c936c080a6 100644 --- a/arch/powerpc/sysdev/xics/ics-opal.c +++ b/arch/powerpc/sysdev/xics/ics-opal.c @@ -54,7 +54,7 @@ static void ics_opal_unmask_irq(struct irq_data *d) if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) return; - server = xics_get_irq_server(d->irq, d->affinity, 0); + server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0); server = ics_opal_mangle_server(server); rc = opal_set_xive(hw_irq, server, DEFAULT_PRIORITY); diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c index d1c625c4cc5a..3854dd41558d 100644 --- a/arch/powerpc/sysdev/xics/ics-rtas.c +++ b/arch/powerpc/sysdev/xics/ics-rtas.c @@ -47,7 +47,7 @@ static void ics_rtas_unmask_irq(struct irq_data *d) if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) return; - server = xics_get_irq_server(d->irq, d->affinity, 0); + server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0); call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server, DEFAULT_PRIORITY); diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c index 43b8b275bc5c..0f52d7955796 100644 --- a/arch/powerpc/sysdev/xilinx_intc.c +++ b/arch/powerpc/sysdev/xilinx_intc.c @@ -222,7 +222,7 @@ int xilinx_intc_get_irq(void) /* * Support 
code for cascading to 8259 interrupt controllers */ -static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc) +static void xilinx_i8259_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq = i8259_irq(); diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile index d4788111c161..fac6ac9790fa 100644 --- a/arch/s390/boot/compressed/Makefile +++ b/arch/s390/boot/compressed/Makefile @@ -10,7 +10,7 @@ targets += misc.o piggy.o sizes.h head.o KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING -KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks +KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float KBUILD_CFLAGS += $(call cc-option,-mpacked-stack) KBUILD_CFLAGS += $(call cc-option,-ffreestanding) diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index 0c98f1508542..ed7da281df66 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig @@ -381,7 +381,7 @@ CONFIG_ISCSI_TCP=m CONFIG_SCSI_DEBUG=m CONFIG_ZFCP=y CONFIG_SCSI_VIRTIO=m -CONFIG_SCSI_DH=m +CONFIG_SCSI_DH=y CONFIG_SCSI_DH_RDAC=m CONFIG_SCSI_DH_HP_SW=m CONFIG_SCSI_DH_EMC=m diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index 82083e1fbdc4..9858b14cde1e 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig @@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m CONFIG_SCSI_DEBUG=m CONFIG_ZFCP=y CONFIG_SCSI_VIRTIO=m -CONFIG_SCSI_DH=m +CONFIG_SCSI_DH=y CONFIG_SCSI_DH_RDAC=m CONFIG_SCSI_DH_HP_SW=m CONFIG_SCSI_DH_EMC=m diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index c05c9e0821e3..7f14f80717d4 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig @@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m CONFIG_SCSI_DEBUG=m CONFIG_ZFCP=y CONFIG_SCSI_VIRTIO=m -CONFIG_SCSI_DH=m +CONFIG_SCSI_DH=y CONFIG_SCSI_DH_RDAC=m CONFIG_SCSI_DH_HP_SW=m CONFIG_SCSI_DH_EMC=m diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index 1b0184a0f7f2..92805d604173 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -1,7 +1,6 @@ # CONFIG_SWAP is not set CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y -CONFIG_RCU_FAST_NO_HZ=y CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_COMPAT_BRK is not set @@ -54,10 +53,6 @@ CONFIG_RAW_DRIVER=y # CONFIG_MONWRITER is not set # CONFIG_S390_VMUR is not set # CONFIG_HID is not set -CONFIG_MEMSTICK=y -CONFIG_MEMSTICK_DEBUG=y -CONFIG_MEMSTICK_UNSAFE_RESUME=y -CONFIG_MSPRO_BLOCK=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild index 5ad26dd94d77..9043d2e1e2ae 100644 --- a/arch/s390/include/asm/Kbuild +++ b/arch/s390/include/asm/Kbuild @@ -6,3 +6,4 @@ generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 3d012e071647..8ced426091e1 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -35,6 +35,7 @@ */ #define KVM_NR_IRQCHIPS 1 #define KVM_IRQCHIP_NUM_PINS 4096 +#define KVM_HALT_POLL_NS_DEFAULT 0 #define SIGP_CTRL_C 0x80 #define SIGP_CTRL_SCN_MASK 0x3f @@ -210,6 +211,7 @@ struct kvm_vcpu_stat { 
u32 exit_validity; u32 exit_instruction; u32 halt_successful_poll; + u32 halt_attempted_poll; u32 halt_wakeup; u32 instruction_lctl; u32 instruction_lctlg; diff --git a/arch/s390/include/asm/numa.h b/arch/s390/include/asm/numa.h index 2a0efc63b9e5..dc19ee0c92aa 100644 --- a/arch/s390/include/asm/numa.h +++ b/arch/s390/include/asm/numa.h @@ -19,7 +19,7 @@ int numa_pfn_to_nid(unsigned long pfn); int __node_distance(int a, int b); void numa_update_cpu_topology(void); -extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +extern cpumask_t node_to_cpumask_map[MAX_NUMNODES]; extern int numa_debug_enabled; #else diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index 27ebde643933..94fc55fc72ce 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -68,7 +68,7 @@ static inline int cpu_to_node(int cpu) #define cpumask_of_node cpumask_of_node static inline const struct cpumask *cpumask_of_node(int node) { - return node_to_cpumask_map[node]; + return &node_to_cpumask_map[node]; } /* diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index 525cef73b085..02613bad8bbb 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -8,28 +8,8 @@ #include <uapi/asm/unistd.h> - #define __IGNORE_time -/* Ignore system calls that are also reachable via sys_socketcall */ -#define __IGNORE_recvmmsg -#define __IGNORE_sendmmsg -#define __IGNORE_socket -#define __IGNORE_socketpair -#define __IGNORE_bind -#define __IGNORE_connect -#define __IGNORE_listen -#define __IGNORE_accept4 -#define __IGNORE_getsockopt -#define __IGNORE_setsockopt -#define __IGNORE_getsockname -#define __IGNORE_getpeername -#define __IGNORE_sendto -#define __IGNORE_sendmsg -#define __IGNORE_recvfrom -#define __IGNORE_recvmsg -#define __IGNORE_shutdown - #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h index 59d2bb4e2d0c..a848adba1504 100644 --- a/arch/s390/include/uapi/asm/unistd.h +++ b/arch/s390/include/uapi/asm/unistd.h @@ -290,7 +290,26 @@ #define __NR_s390_pci_mmio_write 352 #define __NR_s390_pci_mmio_read 353 #define __NR_execveat 354 -#define NR_syscalls 355 +#define __NR_userfaultfd 355 +#define __NR_membarrier 356 +#define __NR_recvmmsg 357 +#define __NR_sendmmsg 358 +#define __NR_socket 359 +#define __NR_socketpair 360 +#define __NR_bind 361 +#define __NR_connect 362 +#define __NR_listen 363 +#define __NR_accept4 364 +#define __NR_getsockopt 365 +#define __NR_setsockopt 366 +#define __NR_getsockname 367 +#define __NR_getpeername 368 +#define __NR_sendto 369 +#define __NR_sendmsg 370 +#define __NR_recvfrom 371 +#define __NR_recvmsg 372 +#define __NR_shutdown 373 +#define NR_syscalls 374 /* * There are some system calls that are not present on 64 bit, some diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 48c9af7a7683..3aeeb1b562c0 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -176,6 +176,7 @@ int main(void) DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste)); DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area)); DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr)); + DEFINE(__LC_PERCPU_OFFSET, offsetof(struct _lowcore, percpu_offset)); DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); 
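
An aside on the syscall-table hunks above (powerpc gains __NR_userfaultfd 364 and __NR_membarrier 365, s390 gains 355/356 plus the direct socket calls): the patches only wire numbers into the tables; the implementations live in common code. A quick userspace smoke test, assuming kernel/libc headers new enough to define __NR_membarrier and MEMBARRIER_CMD_QUERY, is to invoke the syscall directly; the query returns a bitmask of supported commands, or fails with ENOSYS if the number is not wired up.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/membarrier.h>	/* MEMBARRIER_CMD_QUERY */

int main(void)
{
	/* No glibc wrapper yet, so go through syscall(2). */
	long mask = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0);

	if (mask < 0)
		perror("membarrier");	/* e.g. ENOSYS on an older kernel */
	else
		printf("supported membarrier commands: %#lx\n", (unsigned long)mask);
	return 0;
}
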
DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb)); diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index eb4664238613..e0f9d270b30f 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -48,6 +48,19 @@ typedef struct struct ucontext32 uc; } rt_sigframe32; +static inline void sigset_to_sigset32(unsigned long *set64, + compat_sigset_word *set32) +{ + set32[0] = (compat_sigset_word) set64[0]; + set32[1] = (compat_sigset_word)(set64[0] >> 32); +} + +static inline void sigset32_to_sigset(compat_sigset_word *set32, + unsigned long *set64) +{ + set64[0] = (unsigned long) set32[0] | ((unsigned long) set32[1] << 32); +} + int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) { int err; @@ -281,10 +294,12 @@ COMPAT_SYSCALL_DEFINE0(sigreturn) { struct pt_regs *regs = task_pt_regs(current); sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15]; + compat_sigset_t cset; sigset_t set; - if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32)) + if (__copy_from_user(&cset.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32)) goto badframe; + sigset32_to_sigset(cset.sig, set.sig); set_current_blocked(&set); save_fpu_regs(); if (restore_sigregs32(regs, &frame->sregs)) @@ -302,10 +317,12 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn) { struct pt_regs *regs = task_pt_regs(current); rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15]; + compat_sigset_t cset; sigset_t set; - if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) + if (__copy_from_user(&cset, &frame->uc.uc_sigmask, sizeof(cset))) goto badframe; + sigset32_to_sigset(cset.sig, set.sig); set_current_blocked(&set); if (compat_restore_altstack(&frame->uc.uc_stack)) goto badframe; @@ -377,7 +394,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set, return -EFAULT; /* Create struct sigcontext32 on the signal stack */ - memcpy(&sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32); + sigset_to_sigset32(set->sig, sc.oldmask); sc.sregs = (__u32)(unsigned long __force) &frame->sregs; if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc))) return -EFAULT; @@ -438,6 +455,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set, static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { + compat_sigset_t cset; rt_sigframe32 __user *frame; unsigned long restorer; size_t frame_size; @@ -485,11 +503,12 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set, store_sigregs(); /* Create ucontext on the signal stack. */ + sigset_to_sigset32(set->sig, cset.sig); if (__put_user(uc_flags, &frame->uc.uc_flags) || __put_user(0, &frame->uc.uc_link) || __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) || save_sigregs32(regs, &frame->uc.uc_mcontext) || - __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)) || + __copy_to_user(&frame->uc.uc_sigmask, &cset, sizeof(cset)) || save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext)) return -EFAULT; diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c index f8498dde67b1..09f194052df3 100644 --- a/arch/s390/kernel/compat_wrapper.c +++ b/arch/s390/kernel/compat_wrapper.c @@ -52,15 +52,13 @@ * the regular system call wrappers. */ #define COMPAT_SYSCALL_WRAPx(x, name, ...) 
\ - asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ - asmlinkage long compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__));\ - asmlinkage long compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__)) \ - { \ - return sys##name(__MAP(x,__SC_COMPAT_CAST,__VA_ARGS__)); \ - } +asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ +asmlinkage long notrace compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__));\ +asmlinkage long notrace compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__)) \ +{ \ + return sys##name(__MAP(x,__SC_COMPAT_CAST,__VA_ARGS__)); \ +} -COMPAT_SYSCALL_WRAP1(exit, int, error_code); -COMPAT_SYSCALL_WRAP1(close, unsigned int, fd); COMPAT_SYSCALL_WRAP2(creat, const char __user *, pathname, umode_t, mode); COMPAT_SYSCALL_WRAP2(link, const char __user *, oldname, const char __user *, newname); COMPAT_SYSCALL_WRAP1(unlink, const char __user *, pathname); @@ -68,23 +66,16 @@ COMPAT_SYSCALL_WRAP1(chdir, const char __user *, filename); COMPAT_SYSCALL_WRAP3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev); COMPAT_SYSCALL_WRAP2(chmod, const char __user *, filename, umode_t, mode); COMPAT_SYSCALL_WRAP1(oldumount, char __user *, name); -COMPAT_SYSCALL_WRAP1(alarm, unsigned int, seconds); COMPAT_SYSCALL_WRAP2(access, const char __user *, filename, int, mode); -COMPAT_SYSCALL_WRAP1(nice, int, increment); -COMPAT_SYSCALL_WRAP2(kill, int, pid, int, sig); COMPAT_SYSCALL_WRAP2(rename, const char __user *, oldname, const char __user *, newname); COMPAT_SYSCALL_WRAP2(mkdir, const char __user *, pathname, umode_t, mode); COMPAT_SYSCALL_WRAP1(rmdir, const char __user *, pathname); -COMPAT_SYSCALL_WRAP1(dup, unsigned int, fildes); COMPAT_SYSCALL_WRAP1(pipe, int __user *, fildes); COMPAT_SYSCALL_WRAP1(brk, unsigned long, brk); COMPAT_SYSCALL_WRAP2(signal, int, sig, __sighandler_t, handler); COMPAT_SYSCALL_WRAP1(acct, const char __user *, name); COMPAT_SYSCALL_WRAP2(umount, char __user *, name, int, flags); -COMPAT_SYSCALL_WRAP2(setpgid, pid_t, pid, pid_t, pgid); -COMPAT_SYSCALL_WRAP1(umask, int, mask); COMPAT_SYSCALL_WRAP1(chroot, const char __user *, filename); -COMPAT_SYSCALL_WRAP2(dup2, unsigned int, oldfd, unsigned int, newfd); COMPAT_SYSCALL_WRAP3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask); COMPAT_SYSCALL_WRAP2(sethostname, char __user *, name, int, len); COMPAT_SYSCALL_WRAP2(symlink, const char __user *, old, const char __user *, new); @@ -93,37 +84,23 @@ COMPAT_SYSCALL_WRAP1(uselib, const char __user *, library); COMPAT_SYSCALL_WRAP2(swapon, const char __user *, specialfile, int, swap_flags); COMPAT_SYSCALL_WRAP4(reboot, int, magic1, int, magic2, unsigned int, cmd, void __user *, arg); COMPAT_SYSCALL_WRAP2(munmap, unsigned long, addr, size_t, len); -COMPAT_SYSCALL_WRAP2(fchmod, unsigned int, fd, umode_t, mode); -COMPAT_SYSCALL_WRAP2(getpriority, int, which, int, who); -COMPAT_SYSCALL_WRAP3(setpriority, int, which, int, who, int, niceval); COMPAT_SYSCALL_WRAP3(syslog, int, type, char __user *, buf, int, len); COMPAT_SYSCALL_WRAP1(swapoff, const char __user *, specialfile); -COMPAT_SYSCALL_WRAP1(fsync, unsigned int, fd); COMPAT_SYSCALL_WRAP2(setdomainname, char __user *, name, int, len); COMPAT_SYSCALL_WRAP1(newuname, struct new_utsname __user *, name); COMPAT_SYSCALL_WRAP3(mprotect, unsigned long, start, size_t, len, unsigned long, prot); COMPAT_SYSCALL_WRAP3(init_module, void __user *, umod, unsigned long, len, const char __user *, uargs); COMPAT_SYSCALL_WRAP2(delete_module, const char __user *, name_user, unsigned int, flags); 
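
The sigset_to_sigset32()/sigset32_to_sigset() helpers added to compat_signal.c above split a 64-bit signal mask into two 32-bit compat words (low word first) and join them back together. A minimal userspace model of that conversion, using plain fixed-width types rather than the kernel's sigset_t/compat_sigset_t definitions:

/* Standalone model of the 64 <-> 2x32 bit signal mask conversion. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void sigset_to_sigset32(uint64_t set64, uint32_t set32[2])
{
        set32[0] = (uint32_t)set64;             /* low 32 bits  */
        set32[1] = (uint32_t)(set64 >> 32);     /* high 32 bits */
}

static uint64_t sigset32_to_sigset(const uint32_t set32[2])
{
        return (uint64_t)set32[0] | ((uint64_t)set32[1] << 32);
}

int main(void)
{
        uint64_t mask = 0x0000000480000001ULL;  /* arbitrary example mask */
        uint32_t words[2];

        sigset_to_sigset32(mask, words);
        assert(sigset32_to_sigset(words) == mask);
        printf("round trip ok: %#llx\n", (unsigned long long)mask);
        return 0;
}

The explicit split and join replaces raw byte copies between the 32-bit and 64-bit mask layouts, which is likely why the sigreturn paths above now go through a compat_sigset_t intermediate instead of copying straight into sigset_t.
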
COMPAT_SYSCALL_WRAP4(quotactl, unsigned int, cmd, const char __user *, special, qid_t, id, void __user *, addr); -COMPAT_SYSCALL_WRAP1(getpgid, pid_t, pid); -COMPAT_SYSCALL_WRAP1(fchdir, unsigned int, fd); COMPAT_SYSCALL_WRAP2(bdflush, int, func, long, data); COMPAT_SYSCALL_WRAP3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2); -COMPAT_SYSCALL_WRAP1(s390_personality, unsigned int, personality); COMPAT_SYSCALL_WRAP5(llseek, unsigned int, fd, unsigned long, high, unsigned long, low, loff_t __user *, result, unsigned int, whence); -COMPAT_SYSCALL_WRAP2(flock, unsigned int, fd, unsigned int, cmd); COMPAT_SYSCALL_WRAP3(msync, unsigned long, start, size_t, len, int, flags); -COMPAT_SYSCALL_WRAP1(getsid, pid_t, pid); -COMPAT_SYSCALL_WRAP1(fdatasync, unsigned int, fd); COMPAT_SYSCALL_WRAP2(mlock, unsigned long, start, size_t, len); COMPAT_SYSCALL_WRAP2(munlock, unsigned long, start, size_t, len); -COMPAT_SYSCALL_WRAP1(mlockall, int, flags); COMPAT_SYSCALL_WRAP2(sched_setparam, pid_t, pid, struct sched_param __user *, param); COMPAT_SYSCALL_WRAP2(sched_getparam, pid_t, pid, struct sched_param __user *, param); COMPAT_SYSCALL_WRAP3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param); -COMPAT_SYSCALL_WRAP1(sched_getscheduler, pid_t, pid); -COMPAT_SYSCALL_WRAP1(sched_get_priority_max, int, policy); -COMPAT_SYSCALL_WRAP1(sched_get_priority_min, int, policy); COMPAT_SYSCALL_WRAP5(mremap, unsigned long, addr, unsigned long, old_len, unsigned long, new_len, unsigned long, flags, unsigned long, new_addr); COMPAT_SYSCALL_WRAP3(poll, struct pollfd __user *, ufds, unsigned int, nfds, int, timeout); COMPAT_SYSCALL_WRAP5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, unsigned long, arg4, unsigned long, arg5); @@ -131,20 +108,11 @@ COMPAT_SYSCALL_WRAP2(getcwd, char __user *, buf, unsigned long, size); COMPAT_SYSCALL_WRAP2(capget, cap_user_header_t, header, cap_user_data_t, dataptr); COMPAT_SYSCALL_WRAP2(capset, cap_user_header_t, header, const cap_user_data_t, data); COMPAT_SYSCALL_WRAP3(lchown, const char __user *, filename, uid_t, user, gid_t, group); -COMPAT_SYSCALL_WRAP2(setreuid, uid_t, ruid, uid_t, euid); -COMPAT_SYSCALL_WRAP2(setregid, gid_t, rgid, gid_t, egid); COMPAT_SYSCALL_WRAP2(getgroups, int, gidsetsize, gid_t __user *, grouplist); COMPAT_SYSCALL_WRAP2(setgroups, int, gidsetsize, gid_t __user *, grouplist); -COMPAT_SYSCALL_WRAP3(fchown, unsigned int, fd, uid_t, user, gid_t, group); -COMPAT_SYSCALL_WRAP3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid); COMPAT_SYSCALL_WRAP3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid); -COMPAT_SYSCALL_WRAP3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid); COMPAT_SYSCALL_WRAP3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid); COMPAT_SYSCALL_WRAP3(chown, const char __user *, filename, uid_t, user, gid_t, group); -COMPAT_SYSCALL_WRAP1(setuid, uid_t, uid); -COMPAT_SYSCALL_WRAP1(setgid, gid_t, gid); -COMPAT_SYSCALL_WRAP1(setfsuid, uid_t, uid); -COMPAT_SYSCALL_WRAP1(setfsgid, gid_t, gid); COMPAT_SYSCALL_WRAP2(pivot_root, const char __user *, new_root, const char __user *, put_old); COMPAT_SYSCALL_WRAP3(mincore, unsigned long, start, size_t, len, unsigned char __user *, vec); COMPAT_SYSCALL_WRAP3(madvise, unsigned long, start, size_t, len, int, behavior); @@ -161,23 +129,16 @@ COMPAT_SYSCALL_WRAP3(flistxattr, int, fd, char __user *, list, size_t, size); COMPAT_SYSCALL_WRAP2(removexattr, const char __user *, path, const char __user *, name); 
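
The COMPAT_SYSCALL_WRAPx() macro at the top of compat_wrapper.c exists to re-cast each 31/32-bit argument to the native parameter type before calling the 64-bit handler, and the wrapper lines deleted above appear to cover calls whose arguments need no such conversion, so the syscall table can point them straight at the native entry points. The sketch below only illustrates the underlying widening issue on a two's-complement LP64 system; native_handler() and compat_shim() are invented names, not kernel functions.

/*
 * Standalone illustration (not the kernel macro): a raw 32-bit register
 * image must be re-cast so signed parameters are sign-extended and
 * unsigned ones are zero-extended before reaching a 64-bit handler.
 */
#include <stdint.h>
#include <stdio.h>

static long native_handler(int fd, unsigned long len)
{
        return fd < 0 ? -9 /* EBADF-like */ : (long)len;
}

static long compat_shim(uint32_t fd_reg, uint32_t len_reg)
{
        return native_handler((int)fd_reg, (unsigned long)len_reg);
}

int main(void)
{
        /* 0xffffffff has to become fd == -1, not fd == 4294967295 */
        printf("%ld\n", compat_shim(0xffffffffu, 4096u));       /* -9 */
        printf("%ld\n", compat_shim(3u, 0xffffffffu));          /* 4294967295 */
        return 0;
}
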
COMPAT_SYSCALL_WRAP2(lremovexattr, const char __user *, path, const char __user *, name); COMPAT_SYSCALL_WRAP2(fremovexattr, int, fd, const char __user *, name); -COMPAT_SYSCALL_WRAP1(exit_group, int, error_code); COMPAT_SYSCALL_WRAP1(set_tid_address, int __user *, tidptr); -COMPAT_SYSCALL_WRAP1(epoll_create, int, size); COMPAT_SYSCALL_WRAP4(epoll_ctl, int, epfd, int, op, int, fd, struct epoll_event __user *, event); COMPAT_SYSCALL_WRAP4(epoll_wait, int, epfd, struct epoll_event __user *, events, int, maxevents, int, timeout); -COMPAT_SYSCALL_WRAP1(timer_getoverrun, timer_t, timer_id); -COMPAT_SYSCALL_WRAP1(timer_delete, compat_timer_t, compat_timer_id); COMPAT_SYSCALL_WRAP1(io_destroy, aio_context_t, ctx); COMPAT_SYSCALL_WRAP3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, struct io_event __user *, result); COMPAT_SYSCALL_WRAP1(mq_unlink, const char __user *, name); COMPAT_SYSCALL_WRAP5(add_key, const char __user *, tp, const char __user *, dsc, const void __user *, pld, size_t, len, key_serial_t, id); COMPAT_SYSCALL_WRAP4(request_key, const char __user *, tp, const char __user *, dsc, const char __user *, info, key_serial_t, id); COMPAT_SYSCALL_WRAP5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long, prot, unsigned long, pgoff, unsigned long, flags); -COMPAT_SYSCALL_WRAP3(ioprio_set, int, which, int, who, int, ioprio); -COMPAT_SYSCALL_WRAP2(ioprio_get, int, which, int, who); COMPAT_SYSCALL_WRAP3(inotify_add_watch, int, fd, const char __user *, path, u32, mask); -COMPAT_SYSCALL_WRAP2(inotify_rm_watch, int, fd, __s32, wd); COMPAT_SYSCALL_WRAP3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode); COMPAT_SYSCALL_WRAP4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, unsigned, dev); COMPAT_SYSCALL_WRAP5(fchownat, int, dfd, const char __user *, filename, uid_t, user, gid_t, group, int, flag); @@ -192,23 +153,11 @@ COMPAT_SYSCALL_WRAP1(unshare, unsigned long, unshare_flags); COMPAT_SYSCALL_WRAP6(splice, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags); COMPAT_SYSCALL_WRAP4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags); COMPAT_SYSCALL_WRAP3(getcpu, unsigned __user *, cpu, unsigned __user *, node, struct getcpu_cache __user *, cache); -COMPAT_SYSCALL_WRAP1(eventfd, unsigned int, count); -COMPAT_SYSCALL_WRAP2(timerfd_create, int, clockid, int, flags); -COMPAT_SYSCALL_WRAP2(eventfd2, unsigned int, count, int, flags); -COMPAT_SYSCALL_WRAP1(inotify_init1, int, flags); COMPAT_SYSCALL_WRAP2(pipe2, int __user *, fildes, int, flags); -COMPAT_SYSCALL_WRAP3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags); -COMPAT_SYSCALL_WRAP1(epoll_create1, int, flags); -COMPAT_SYSCALL_WRAP2(tkill, int, pid, int, sig); -COMPAT_SYSCALL_WRAP3(tgkill, int, tgid, int, pid, int, sig); COMPAT_SYSCALL_WRAP5(perf_event_open, struct perf_event_attr __user *, attr_uptr, pid_t, pid, int, cpu, int, group_fd, unsigned long, flags); COMPAT_SYSCALL_WRAP5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, unsigned long, tls); -COMPAT_SYSCALL_WRAP2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags); COMPAT_SYSCALL_WRAP4(prlimit64, pid_t, pid, unsigned int, resource, const struct rlimit64 __user *, new_rlim, struct rlimit64 __user *, old_rlim); COMPAT_SYSCALL_WRAP5(name_to_handle_at, int, dfd, const char __user *, name, struct file_handle __user *, handle, int __user *, mnt_id, int, flag); 
-COMPAT_SYSCALL_WRAP1(syncfs, int, fd); -COMPAT_SYSCALL_WRAP2(setns, int, fd, int, nstype); -COMPAT_SYSCALL_WRAP2(s390_runtime_instr, int, command, int, signum); COMPAT_SYSCALL_WRAP5(kcmp, pid_t, pid1, pid_t, pid2, int, type, unsigned long, idx1, unsigned long, idx2); COMPAT_SYSCALL_WRAP3(finit_module, int, fd, const char __user *, uargs, int, flags); COMPAT_SYSCALL_WRAP3(sched_setattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, flags); @@ -220,3 +169,10 @@ COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, unsigned int, fla COMPAT_SYSCALL_WRAP3(bpf, int, cmd, union bpf_attr *, attr, unsigned int, size); COMPAT_SYSCALL_WRAP3(s390_pci_mmio_write, const unsigned long, mmio_addr, const void __user *, user_buffer, const size_t, length); COMPAT_SYSCALL_WRAP3(s390_pci_mmio_read, const unsigned long, mmio_addr, void __user *, user_buffer, const size_t, length); +COMPAT_SYSCALL_WRAP4(socketpair, int, family, int, type, int, protocol, int __user *, usockvec); +COMPAT_SYSCALL_WRAP3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen); +COMPAT_SYSCALL_WRAP3(connect, int, fd, struct sockaddr __user *, uservaddr, int, addrlen); +COMPAT_SYSCALL_WRAP4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, int __user *, upeer_addrlen, int, flags); +COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len); +COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len); +COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len); diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 247b7aae4c6d..582fe44ab07c 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -733,6 +733,14 @@ ENTRY(psw_idle) stg %r3,__SF_EMPTY(%r15) larl %r1,.Lpsw_idle_lpsw+4 stg %r1,__SF_EMPTY+8(%r15) +#ifdef CONFIG_SMP + larl %r1,smp_cpu_mtid + llgf %r1,0(%r1) + ltgr %r1,%r1 + jz .Lpsw_idle_stcctm + .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15) +.Lpsw_idle_stcctm: +#endif STCK __CLOCK_IDLE_ENTER(%r2) stpt __TIMER_IDLE_ENTER(%r2) .Lpsw_idle_lpsw: @@ -1159,7 +1167,27 @@ cleanup_critical: jhe 1f mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) -1: # account system time going idle +1: # calculate idle cycles +#ifdef CONFIG_SMP + clg %r9,BASED(.Lcleanup_idle_insn) + jl 3f + larl %r1,smp_cpu_mtid + llgf %r1,0(%r1) + ltgr %r1,%r1 + jz 3f + .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15) + larl %r3,mt_cycles + ag %r3,__LC_PERCPU_OFFSET + la %r4,__SF_EMPTY+16(%r15) +2: lg %r0,0(%r3) + slg %r0,0(%r4) + alg %r0,64(%r4) + stg %r0,0(%r3) + la %r3,8(%r3) + la %r4,8(%r4) + brct %r1,2b +#endif +3: # account system time going idle lg %r9,__LC_STEAL_TIMER alg %r9,__CLOCK_IDLE_ENTER(%r2) slg %r9,__LC_LAST_UPDATE_CLOCK @@ -1191,6 +1219,7 @@ cleanup_critical: clg %r9,BASED(.Lcleanup_save_fpu_fpc_end) jhe 1f lg %r2,__LC_CURRENT + aghi %r2,__TASK_thread 0: # Store floating-point controls stfpc __THREAD_FPU_fpc(%r2) 1: # Load register save area and check if VX is active @@ -1252,6 +1281,7 @@ cleanup_critical: clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl) jhe 6f lg %r4,__LC_CURRENT + aghi %r4,__TASK_thread lfpc __THREAD_FPU_fpc(%r4) tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ? 
lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index 56fdad479115..a9563409c36e 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -157,10 +157,14 @@ static int validate_ctr_auth(const struct hw_perf_event *hwc) cpuhw = &get_cpu_var(cpu_hw_events); - /* check authorization for cpu counter sets */ + /* Check authorization for cpu counter sets. + * If the particular CPU counter set is not authorized, + * return with -ENOENT in order to fall back to other + * PMUs that might suffice the event request. + */ ctrs_state = cpumf_state_ctl[hwc->config_base]; if (!(ctrs_state & cpuhw->info.auth_ctl)) - err = -EPERM; + err = -ENOENT; put_cpu_var(cpu_hw_events); return err; @@ -536,7 +540,7 @@ static int cpumf_pmu_add(struct perf_event *event, int flags) */ if (!(cpuhw->flags & PERF_EVENT_TXN)) if (validate_ctr_auth(&event->hw)) - return -EPERM; + return -ENOENT; ctr_set_enable(&cpuhw->state, event->hw.config_base); event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; @@ -611,7 +615,7 @@ static int cpumf_pmu_commit_txn(struct pmu *pmu) state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1); state >>= CPUMF_LCCTL_ENABLE_SHIFT; if ((state & cpuhw->info.auth_ctl) != state) - return -EPERM; + return -ENOENT; cpuhw->flags &= ~PERF_EVENT_TXN; perf_pmu_enable(pmu); diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S index ca6294645dd3..2d6b6e81f812 100644 --- a/arch/s390/kernel/swsusp.S +++ b/arch/s390/kernel/swsusp.S @@ -30,6 +30,9 @@ ENTRY(swsusp_arch_suspend) aghi %r15,-STACK_FRAME_OVERHEAD stg %r1,__SF_BACKCHAIN(%r15) + /* Store FPU registers */ + brasl %r14,save_fpu_regs + /* Deactivate DAT */ stnsm __SF_EMPTY(%r15),0xfb @@ -47,23 +50,6 @@ ENTRY(swsusp_arch_suspend) /* Store registers */ mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */ - stfpc 0x31c(%r1) /* store fpu control */ - std 0,0x200(%r1) /* store f0 */ - std 1,0x208(%r1) /* store f1 */ - std 2,0x210(%r1) /* store f2 */ - std 3,0x218(%r1) /* store f3 */ - std 4,0x220(%r1) /* store f4 */ - std 5,0x228(%r1) /* store f5 */ - std 6,0x230(%r1) /* store f6 */ - std 7,0x238(%r1) /* store f7 */ - std 8,0x240(%r1) /* store f8 */ - std 9,0x248(%r1) /* store f9 */ - std 10,0x250(%r1) /* store f10 */ - std 11,0x258(%r1) /* store f11 */ - std 12,0x260(%r1) /* store f12 */ - std 13,0x268(%r1) /* store f13 */ - std 14,0x270(%r1) /* store f14 */ - std 15,0x278(%r1) /* store f15 */ stam %a0,%a15,0x340(%r1) /* store access registers */ stctg %c0,%c15,0x380(%r1) /* store control registers */ stmg %r0,%r15,0x280(%r1) /* store general registers */ @@ -249,24 +235,6 @@ restore_registers: lctlg %c0,%c15,0x380(%r13) /* load control registers */ lam %a0,%a15,0x340(%r13) /* load access registers */ - lfpc 0x31c(%r13) /* load fpu control */ - ld 0,0x200(%r13) /* load f0 */ - ld 1,0x208(%r13) /* load f1 */ - ld 2,0x210(%r13) /* load f2 */ - ld 3,0x218(%r13) /* load f3 */ - ld 4,0x220(%r13) /* load f4 */ - ld 5,0x228(%r13) /* load f5 */ - ld 6,0x230(%r13) /* load f6 */ - ld 7,0x238(%r13) /* load f7 */ - ld 8,0x240(%r13) /* load f8 */ - ld 9,0x248(%r13) /* load f9 */ - ld 10,0x250(%r13) /* load f10 */ - ld 11,0x258(%r13) /* load f11 */ - ld 12,0x260(%r13) /* load f12 */ - ld 13,0x268(%r13) /* load f13 */ - ld 14,0x270(%r13) /* load f14 */ - ld 15,0x278(%r13) /* load f15 */ - /* Load old stack */ lg %r15,0x2f8(%r13) diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index f3f4a137aef6..8c56929c8d82 
100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -9,12 +9,12 @@ #define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall) NI_SYSCALL /* 0 */ -SYSCALL(sys_exit,compat_sys_exit) +SYSCALL(sys_exit,sys_exit) SYSCALL(sys_fork,sys_fork) SYSCALL(sys_read,compat_sys_s390_read) SYSCALL(sys_write,compat_sys_s390_write) SYSCALL(sys_open,compat_sys_open) /* 5 */ -SYSCALL(sys_close,compat_sys_close) +SYSCALL(sys_close,sys_close) SYSCALL(sys_restart_syscall,sys_restart_syscall) SYSCALL(sys_creat,compat_sys_creat) SYSCALL(sys_link,compat_sys_link) @@ -35,21 +35,21 @@ SYSCALL(sys_ni_syscall,compat_sys_s390_setuid16) /* old setuid16 syscall*/ SYSCALL(sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/ SYSCALL(sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */ SYSCALL(sys_ptrace,compat_sys_ptrace) -SYSCALL(sys_alarm,compat_sys_alarm) +SYSCALL(sys_alarm,sys_alarm) NI_SYSCALL /* old fstat syscall */ SYSCALL(sys_pause,sys_pause) SYSCALL(sys_utime,compat_sys_utime) /* 30 */ NI_SYSCALL /* old stty syscall */ NI_SYSCALL /* old gtty syscall */ SYSCALL(sys_access,compat_sys_access) -SYSCALL(sys_nice,compat_sys_nice) +SYSCALL(sys_nice,sys_nice) NI_SYSCALL /* 35 old ftime syscall */ SYSCALL(sys_sync,sys_sync) -SYSCALL(sys_kill,compat_sys_kill) +SYSCALL(sys_kill,sys_kill) SYSCALL(sys_rename,compat_sys_rename) SYSCALL(sys_mkdir,compat_sys_mkdir) SYSCALL(sys_rmdir,compat_sys_rmdir) /* 40 */ -SYSCALL(sys_dup,compat_sys_dup) +SYSCALL(sys_dup,sys_dup) SYSCALL(sys_pipe,compat_sys_pipe) SYSCALL(sys_times,compat_sys_times) NI_SYSCALL /* old prof syscall */ @@ -65,13 +65,13 @@ NI_SYSCALL /* old lock syscall */ SYSCALL(sys_ioctl,compat_sys_ioctl) SYSCALL(sys_fcntl,compat_sys_fcntl) /* 55 */ NI_SYSCALL /* intel mpx syscall */ -SYSCALL(sys_setpgid,compat_sys_setpgid) +SYSCALL(sys_setpgid,sys_setpgid) NI_SYSCALL /* old ulimit syscall */ NI_SYSCALL /* old uname syscall */ -SYSCALL(sys_umask,compat_sys_umask) /* 60 */ +SYSCALL(sys_umask,sys_umask) /* 60 */ SYSCALL(sys_chroot,compat_sys_chroot) SYSCALL(sys_ustat,compat_sys_ustat) -SYSCALL(sys_dup2,compat_sys_dup2) +SYSCALL(sys_dup2,sys_dup2) SYSCALL(sys_getppid,sys_getppid) SYSCALL(sys_getpgrp,sys_getpgrp) /* 65 */ SYSCALL(sys_setsid,sys_setsid) @@ -102,10 +102,10 @@ SYSCALL(sys_old_mmap,compat_sys_s390_old_mmap) /* 90 */ SYSCALL(sys_munmap,compat_sys_munmap) SYSCALL(sys_truncate,compat_sys_truncate) SYSCALL(sys_ftruncate,compat_sys_ftruncate) -SYSCALL(sys_fchmod,compat_sys_fchmod) +SYSCALL(sys_fchmod,sys_fchmod) SYSCALL(sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/ -SYSCALL(sys_getpriority,compat_sys_getpriority) -SYSCALL(sys_setpriority,compat_sys_setpriority) +SYSCALL(sys_getpriority,sys_getpriority) +SYSCALL(sys_setpriority,sys_setpriority) NI_SYSCALL /* old profil syscall */ SYSCALL(sys_statfs,compat_sys_statfs) SYSCALL(sys_fstatfs,compat_sys_fstatfs) /* 100 */ @@ -126,7 +126,7 @@ SYSCALL(sys_wait4,compat_sys_wait4) SYSCALL(sys_swapoff,compat_sys_swapoff) /* 115 */ SYSCALL(sys_sysinfo,compat_sys_sysinfo) SYSCALL(sys_s390_ipc,compat_sys_s390_ipc) -SYSCALL(sys_fsync,compat_sys_fsync) +SYSCALL(sys_fsync,sys_fsync) SYSCALL(sys_sigreturn,compat_sys_sigreturn) SYSCALL(sys_clone,compat_sys_clone) /* 120 */ SYSCALL(sys_setdomainname,compat_sys_setdomainname) @@ -140,35 +140,35 @@ SYSCALL(sys_init_module,compat_sys_init_module) SYSCALL(sys_delete_module,compat_sys_delete_module) NI_SYSCALL /* 130: old get_kernel_syms */ SYSCALL(sys_quotactl,compat_sys_quotactl) -SYSCALL(sys_getpgid,compat_sys_getpgid) 
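
Looking back at the perf_cpum_cf.c hunks above: returning -ENOENT rather than -EPERM when a counter set is not authorized lets the perf core keep probing other PMUs for the event instead of failing it outright, which is what the new comment in validate_ctr_auth() states. Below is a rough userspace model of that "try the next PMU on -ENOENT" behaviour; the PMU list and function names are stand-ins, not the perf core's real API.

/*
 * Rough model of why -ENOENT is friendlier than -EPERM here: the caller
 * treats -ENOENT as "this PMU cannot take the event, try the next one".
 */
#include <errno.h>
#include <stdio.h>

static int cpum_cf_like_init(void)      /* counter set not authorized */
{
        return -ENOENT;
}

static int software_pmu_init(void)      /* fallback PMU accepts the event */
{
        return 0;
}

static int (*const pmus[])(void) = { cpum_cf_like_init, software_pmu_init };

int main(void)
{
        unsigned int i;
        int err = -ENOENT;

        for (i = 0; i < sizeof(pmus) / sizeof(pmus[0]); i++) {
                err = pmus[i]();
                if (err != -ENOENT)     /* any other result is final */
                        break;
        }
        printf("event init: %d\n", err);        /* 0: the second PMU took it */
        return 0;
}
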
-SYSCALL(sys_fchdir,compat_sys_fchdir) +SYSCALL(sys_getpgid,sys_getpgid) +SYSCALL(sys_fchdir,sys_fchdir) SYSCALL(sys_bdflush,compat_sys_bdflush) SYSCALL(sys_sysfs,compat_sys_sysfs) /* 135 */ -SYSCALL(sys_s390_personality,compat_sys_s390_personality) +SYSCALL(sys_s390_personality,sys_s390_personality) NI_SYSCALL /* for afs_syscall */ SYSCALL(sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */ SYSCALL(sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */ SYSCALL(sys_llseek,compat_sys_llseek) /* 140 */ SYSCALL(sys_getdents,compat_sys_getdents) SYSCALL(sys_select,compat_sys_select) -SYSCALL(sys_flock,compat_sys_flock) +SYSCALL(sys_flock,sys_flock) SYSCALL(sys_msync,compat_sys_msync) SYSCALL(sys_readv,compat_sys_readv) /* 145 */ SYSCALL(sys_writev,compat_sys_writev) -SYSCALL(sys_getsid,compat_sys_getsid) -SYSCALL(sys_fdatasync,compat_sys_fdatasync) +SYSCALL(sys_getsid,sys_getsid) +SYSCALL(sys_fdatasync,sys_fdatasync) SYSCALL(sys_sysctl,compat_sys_sysctl) SYSCALL(sys_mlock,compat_sys_mlock) /* 150 */ SYSCALL(sys_munlock,compat_sys_munlock) -SYSCALL(sys_mlockall,compat_sys_mlockall) +SYSCALL(sys_mlockall,sys_mlockall) SYSCALL(sys_munlockall,sys_munlockall) SYSCALL(sys_sched_setparam,compat_sys_sched_setparam) SYSCALL(sys_sched_getparam,compat_sys_sched_getparam) /* 155 */ SYSCALL(sys_sched_setscheduler,compat_sys_sched_setscheduler) -SYSCALL(sys_sched_getscheduler,compat_sys_sched_getscheduler) +SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler) SYSCALL(sys_sched_yield,sys_sched_yield) -SYSCALL(sys_sched_get_priority_max,compat_sys_sched_get_priority_max) -SYSCALL(sys_sched_get_priority_min,compat_sys_sched_get_priority_min) /* 160 */ +SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max) +SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min) /* 160 */ SYSCALL(sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval) SYSCALL(sys_nanosleep,compat_sys_nanosleep) SYSCALL(sys_mremap,compat_sys_mremap) @@ -211,20 +211,20 @@ SYSCALL(sys_getuid,sys_getuid) SYSCALL(sys_getgid,sys_getgid) /* 200 */ SYSCALL(sys_geteuid,sys_geteuid) SYSCALL(sys_getegid,sys_getegid) -SYSCALL(sys_setreuid,compat_sys_setreuid) -SYSCALL(sys_setregid,compat_sys_setregid) +SYSCALL(sys_setreuid,sys_setreuid) +SYSCALL(sys_setregid,sys_setregid) SYSCALL(sys_getgroups,compat_sys_getgroups) /* 205 */ SYSCALL(sys_setgroups,compat_sys_setgroups) -SYSCALL(sys_fchown,compat_sys_fchown) -SYSCALL(sys_setresuid,compat_sys_setresuid) +SYSCALL(sys_fchown,sys_fchown) +SYSCALL(sys_setresuid,sys_setresuid) SYSCALL(sys_getresuid,compat_sys_getresuid) -SYSCALL(sys_setresgid,compat_sys_setresgid) /* 210 */ +SYSCALL(sys_setresgid,sys_setresgid) /* 210 */ SYSCALL(sys_getresgid,compat_sys_getresgid) SYSCALL(sys_chown,compat_sys_chown) -SYSCALL(sys_setuid,compat_sys_setuid) -SYSCALL(sys_setgid,compat_sys_setgid) -SYSCALL(sys_setfsuid,compat_sys_setfsuid) /* 215 */ -SYSCALL(sys_setfsgid,compat_sys_setfsgid) +SYSCALL(sys_setuid,sys_setuid) +SYSCALL(sys_setgid,sys_setgid) +SYSCALL(sys_setfsuid,sys_setfsuid) /* 215 */ +SYSCALL(sys_setfsgid,sys_setfsgid) SYSCALL(sys_pivot_root,compat_sys_pivot_root) SYSCALL(sys_mincore,compat_sys_mincore) SYSCALL(sys_madvise,compat_sys_madvise) @@ -245,19 +245,19 @@ SYSCALL(sys_removexattr,compat_sys_removexattr) SYSCALL(sys_lremovexattr,compat_sys_lremovexattr) SYSCALL(sys_fremovexattr,compat_sys_fremovexattr) /* 235 */ SYSCALL(sys_gettid,sys_gettid) -SYSCALL(sys_tkill,compat_sys_tkill) +SYSCALL(sys_tkill,sys_tkill) SYSCALL(sys_futex,compat_sys_futex) 
SYSCALL(sys_sched_setaffinity,compat_sys_sched_setaffinity) SYSCALL(sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */ -SYSCALL(sys_tgkill,compat_sys_tgkill) +SYSCALL(sys_tgkill,sys_tgkill) NI_SYSCALL /* reserved for TUX */ SYSCALL(sys_io_setup,compat_sys_io_setup) SYSCALL(sys_io_destroy,compat_sys_io_destroy) SYSCALL(sys_io_getevents,compat_sys_io_getevents) /* 245 */ SYSCALL(sys_io_submit,compat_sys_io_submit) SYSCALL(sys_io_cancel,compat_sys_io_cancel) -SYSCALL(sys_exit_group,compat_sys_exit_group) -SYSCALL(sys_epoll_create,compat_sys_epoll_create) +SYSCALL(sys_exit_group,sys_exit_group) +SYSCALL(sys_epoll_create,sys_epoll_create) SYSCALL(sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */ SYSCALL(sys_epoll_wait,compat_sys_epoll_wait) SYSCALL(sys_set_tid_address,compat_sys_set_tid_address) @@ -265,8 +265,8 @@ SYSCALL(sys_fadvise64_64,compat_sys_s390_fadvise64) SYSCALL(sys_timer_create,compat_sys_timer_create) SYSCALL(sys_timer_settime,compat_sys_timer_settime) /* 255 */ SYSCALL(sys_timer_gettime,compat_sys_timer_gettime) -SYSCALL(sys_timer_getoverrun,compat_sys_timer_getoverrun) -SYSCALL(sys_timer_delete,compat_sys_timer_delete) +SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun) +SYSCALL(sys_timer_delete,sys_timer_delete) SYSCALL(sys_clock_settime,compat_sys_clock_settime) SYSCALL(sys_clock_gettime,compat_sys_clock_gettime) /* 260 */ SYSCALL(sys_clock_getres,compat_sys_clock_getres) @@ -290,11 +290,11 @@ SYSCALL(sys_add_key,compat_sys_add_key) SYSCALL(sys_request_key,compat_sys_request_key) SYSCALL(sys_keyctl,compat_sys_keyctl) /* 280 */ SYSCALL(sys_waitid,compat_sys_waitid) -SYSCALL(sys_ioprio_set,compat_sys_ioprio_set) -SYSCALL(sys_ioprio_get,compat_sys_ioprio_get) +SYSCALL(sys_ioprio_set,sys_ioprio_set) +SYSCALL(sys_ioprio_get,sys_ioprio_get) SYSCALL(sys_inotify_init,sys_inotify_init) SYSCALL(sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */ -SYSCALL(sys_inotify_rm_watch,compat_sys_inotify_rm_watch) +SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch) SYSCALL(sys_migrate_pages,compat_sys_migrate_pages) SYSCALL(sys_openat,compat_sys_openat) SYSCALL(sys_mkdirat,compat_sys_mkdirat) @@ -326,31 +326,31 @@ SYSCALL(sys_fallocate,compat_sys_s390_fallocate) SYSCALL(sys_utimensat,compat_sys_utimensat) /* 315 */ SYSCALL(sys_signalfd,compat_sys_signalfd) NI_SYSCALL /* 317 old sys_timer_fd */ -SYSCALL(sys_eventfd,compat_sys_eventfd) -SYSCALL(sys_timerfd_create,compat_sys_timerfd_create) +SYSCALL(sys_eventfd,sys_eventfd) +SYSCALL(sys_timerfd_create,sys_timerfd_create) SYSCALL(sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */ SYSCALL(sys_timerfd_gettime,compat_sys_timerfd_gettime) SYSCALL(sys_signalfd4,compat_sys_signalfd4) -SYSCALL(sys_eventfd2,compat_sys_eventfd2) -SYSCALL(sys_inotify_init1,compat_sys_inotify_init1) +SYSCALL(sys_eventfd2,sys_eventfd2) +SYSCALL(sys_inotify_init1,sys_inotify_init1) SYSCALL(sys_pipe2,compat_sys_pipe2) /* 325 */ -SYSCALL(sys_dup3,compat_sys_dup3) -SYSCALL(sys_epoll_create1,compat_sys_epoll_create1) +SYSCALL(sys_dup3,sys_dup3) +SYSCALL(sys_epoll_create1,sys_epoll_create1) SYSCALL(sys_preadv,compat_sys_preadv) SYSCALL(sys_pwritev,compat_sys_pwritev) SYSCALL(sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */ SYSCALL(sys_perf_event_open,compat_sys_perf_event_open) -SYSCALL(sys_fanotify_init,compat_sys_fanotify_init) +SYSCALL(sys_fanotify_init,sys_fanotify_init) SYSCALL(sys_fanotify_mark,compat_sys_fanotify_mark) SYSCALL(sys_prlimit64,compat_sys_prlimit64) SYSCALL(sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */ 
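
The syscalls.S changes in this stretch edit a two-column dispatch table: each SYSCALL(native, compat) entry names the handler for 64-bit tasks and the one used for 31-bit compat tasks, and entries whose arguments need no conversion now put the native handler in the compat column as well. A minimal sketch of such a table-driven dispatcher follows; the handlers, NR_MAX bound and do_syscall() are invented for illustration and do not match the real s390 entry code.

/*
 * Illustrative table-driven syscall dispatch: one row per syscall number,
 * one column per ABI, with a "not implemented" handler filling the gaps.
 */
#include <stdio.h>

typedef long (*syscall_fn)(long a, long b);

static long sys_ni_syscall(long a, long b) { (void)a; (void)b; return -38; /* -ENOSYS */ }
static long sys_dup_like(long fd, long unused) { (void)unused; return fd + 100; }

#define NR_MAX 2

struct entry { syscall_fn native; syscall_fn compat; };

static const struct entry table[NR_MAX] = {
        [0] = { sys_ni_syscall, sys_ni_syscall },       /* NI_SYSCALL        */
        [1] = { sys_dup_like,   sys_dup_like   },       /* no compat wrapper */
};

static long do_syscall(unsigned long nr, int compat, long a, long b)
{
        if (nr >= NR_MAX)                               /* bounds check */
                return -38;
        return compat ? table[nr].compat(a, b) : table[nr].native(a, b);
}

int main(void)
{
        printf("%ld\n", do_syscall(1, 0, 3, 0));        /* 103               */
        printf("%ld\n", do_syscall(1, 1, 3, 0));        /* same handler: 103 */
        printf("%ld\n", do_syscall(99, 0, 0, 0));       /* out of range: -38 */
        return 0;
}

Adding the new entries 355-373 and bumping NR_syscalls in unistd.h keeps this kind of bounds check and the table size in step.
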
SYSCALL(sys_open_by_handle_at,compat_sys_open_by_handle_at) SYSCALL(sys_clock_adjtime,compat_sys_clock_adjtime) -SYSCALL(sys_syncfs,compat_sys_syncfs) -SYSCALL(sys_setns,compat_sys_setns) +SYSCALL(sys_syncfs,sys_syncfs) +SYSCALL(sys_setns,sys_setns) SYSCALL(sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */ SYSCALL(sys_process_vm_writev,compat_sys_process_vm_writev) -SYSCALL(sys_s390_runtime_instr,compat_sys_s390_runtime_instr) +SYSCALL(sys_s390_runtime_instr,sys_s390_runtime_instr) SYSCALL(sys_kcmp,compat_sys_kcmp) SYSCALL(sys_finit_module,compat_sys_finit_module) SYSCALL(sys_sched_setattr,compat_sys_sched_setattr) /* 345 */ @@ -363,3 +363,22 @@ SYSCALL(sys_bpf,compat_sys_bpf) SYSCALL(sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write) SYSCALL(sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read) SYSCALL(sys_execveat,compat_sys_execveat) +SYSCALL(sys_userfaultfd,sys_userfaultfd) /* 355 */ +SYSCALL(sys_membarrier,sys_membarrier) +SYSCALL(sys_recvmmsg,compat_sys_recvmmsg) +SYSCALL(sys_sendmmsg,compat_sys_sendmmsg) +SYSCALL(sys_socket,sys_socket) +SYSCALL(sys_socketpair,compat_sys_socketpair) /* 360 */ +SYSCALL(sys_bind,sys_bind) +SYSCALL(sys_connect,sys_connect) +SYSCALL(sys_listen,sys_listen) +SYSCALL(sys_accept4,sys_accept4) +SYSCALL(sys_getsockopt,compat_sys_getsockopt) /* 365 */ +SYSCALL(sys_setsockopt,compat_sys_setsockopt) +SYSCALL(sys_getsockname,compat_sys_getsockname) +SYSCALL(sys_getpeername,compat_sys_getpeername) +SYSCALL(sys_sendto,compat_sys_sendto) +SYSCALL(sys_sendmsg,compat_sys_sendmsg) /* 370 */ +SYSCALL(sys_recvfrom,compat_sys_recvfrom) +SYSCALL(sys_recvmsg,compat_sys_recvmsg) +SYSCALL(sys_shutdown,sys_shutdown) diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index b9ce650e9e99..dafc44f519c3 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -25,7 +25,7 @@ static DEFINE_SPINLOCK(virt_timer_lock); static atomic64_t virt_timer_current; static atomic64_t virt_timer_elapsed; -static DEFINE_PER_CPU(u64, mt_cycles[32]); +DEFINE_PER_CPU(u64, mt_cycles[8]); static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 }; static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 }; static DEFINE_PER_CPU(u64, mt_scaling_jiffies); @@ -60,6 +60,34 @@ static inline int virt_timer_forward(u64 elapsed) return elapsed >= atomic64_read(&virt_timer_current); } +static void update_mt_scaling(void) +{ + u64 cycles_new[8], *cycles_old; + u64 delta, fac, mult, div; + int i; + + stcctm5(smp_cpu_mtid + 1, cycles_new); + cycles_old = this_cpu_ptr(mt_cycles); + fac = 1; + mult = div = 0; + for (i = 0; i <= smp_cpu_mtid; i++) { + delta = cycles_new[i] - cycles_old[i]; + div += delta; + mult *= i + 1; + mult += delta * fac; + fac *= i + 1; + } + div *= fac; + if (div > 0) { + /* Update scaling factor */ + __this_cpu_write(mt_scaling_mult, mult); + __this_cpu_write(mt_scaling_div, div); + memcpy(cycles_old, cycles_new, + sizeof(u64) * (smp_cpu_mtid + 1)); + } + __this_cpu_write(mt_scaling_jiffies, jiffies_64); +} + /* * Update process times based on virtual cpu times stored by entry.S * to the lowcore fields user_timer, system_timer & steal_clock. 
@@ -69,7 +97,6 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) struct thread_info *ti = task_thread_info(tsk); u64 timer, clock, user, system, steal; u64 user_scaled, system_scaled; - int i; timer = S390_lowcore.last_update_timer; clock = S390_lowcore.last_update_clock; @@ -85,30 +112,10 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock; - /* Do MT utilization calculation */ + /* Update MT utilization calculation */ if (smp_cpu_mtid && - time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) { - u64 cycles_new[32], *cycles_old; - u64 delta, mult, div; - - cycles_old = this_cpu_ptr(mt_cycles); - if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) { - mult = div = 0; - for (i = 0; i <= smp_cpu_mtid; i++) { - delta = cycles_new[i] - cycles_old[i]; - mult += delta; - div += (i + 1) * delta; - } - if (mult > 0) { - /* Update scaling factor */ - __this_cpu_write(mt_scaling_mult, mult); - __this_cpu_write(mt_scaling_div, div); - memcpy(cycles_old, cycles_new, - sizeof(u64) * (smp_cpu_mtid + 1)); - } - } - __this_cpu_write(mt_scaling_jiffies, jiffies_64); - } + time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies))) + update_mt_scaling(); user = S390_lowcore.user_timer - ti->user_timer; S390_lowcore.steal_timer -= user; @@ -177,6 +184,11 @@ void vtime_account_irq_enter(struct task_struct *tsk) S390_lowcore.last_update_timer = get_vtimer(); S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; + /* Update MT utilization calculation */ + if (smp_cpu_mtid && + time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies))) + update_mt_scaling(); + system = S390_lowcore.system_timer - ti->system_timer; S390_lowcore.steal_timer -= system; ti->system_timer = S390_lowcore.system_timer; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index c91eb941b444..0a67c40eece9 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -63,6 +63,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "exit_program_interruption", VCPU_STAT(exit_program_interruption) }, { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, + { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) }, { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "instruction_lctlg", VCPU_STAT(instruction_lctlg) }, { "instruction_lctl", VCPU_STAT(instruction_lctl) }, @@ -1574,7 +1575,7 @@ static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu) static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu) { - atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); + atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); } /* diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c index 7de4e2f780d7..30b2698a28e2 100644 --- a/arch/s390/numa/mode_emu.c +++ b/arch/s390/numa/mode_emu.c @@ -368,7 +368,7 @@ static void topology_add_core(struct toptree *core) cpumask_copy(&top->thread_mask, &core->mask); cpumask_copy(&top->core_mask, &core_mc(core)->mask); cpumask_copy(&top->book_mask, &core_book(core)->mask); - cpumask_set_cpu(cpu, node_to_cpumask_map[core_node(core)->id]); + cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]); top->node_id = core_node(core)->id; } } @@ -383,7 +383,7 @@ static void toptree_to_topology(struct toptree *numa) /* Clear all node masks */ for (i = 0; i < MAX_NUMNODES; i++) - 
cpumask_clear(node_to_cpumask_map[i]); + cpumask_clear(&node_to_cpumask_map[i]); /* Rebuild all masks */ toptree_for_each(core, numa, CORE) diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c index 09b1d2355bd9..43f32ce60aa3 100644 --- a/arch/s390/numa/numa.c +++ b/arch/s390/numa/numa.c @@ -23,7 +23,7 @@ pg_data_t *node_data[MAX_NUMNODES]; EXPORT_SYMBOL(node_data); -cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +cpumask_t node_to_cpumask_map[MAX_NUMNODES]; EXPORT_SYMBOL(node_to_cpumask_map); const struct numa_mode numa_mode_plain = { @@ -144,7 +144,7 @@ void __init numa_setup(void) static int __init numa_init_early(void) { /* Attach all possible CPUs to node 0 for now. */ - cpumask_copy(node_to_cpumask_map[0], cpu_possible_mask); + cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask); return 0; } early_initcall(numa_init_early); diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index 92ffe397b893..a05218ff3fe4 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild @@ -13,3 +13,4 @@ generic-y += sections.h generic-y += trace_clock.h generic-y += xor.h generic-y += serial.h +generic-y += word-at-a-time.h diff --git a/arch/sh/boards/mach-se/7343/irq.c b/arch/sh/boards/mach-se/7343/irq.c index 6f97a8f0d0d6..6129aef6db76 100644 --- a/arch/sh/boards/mach-se/7343/irq.c +++ b/arch/sh/boards/mach-se/7343/irq.c @@ -29,7 +29,7 @@ static void __iomem *se7343_irq_regs; struct irq_domain *se7343_irq_domain; -static void se7343_irq_demux(unsigned int irq, struct irq_desc *desc) +static void se7343_irq_demux(struct irq_desc *desc) { struct irq_data *data = irq_desc_get_irq_data(desc); struct irq_chip *chip = irq_data_get_irq_chip(data); diff --git a/arch/sh/boards/mach-se/7722/irq.c b/arch/sh/boards/mach-se/7722/irq.c index 60aebd14ccf8..24c74a88290c 100644 --- a/arch/sh/boards/mach-se/7722/irq.c +++ b/arch/sh/boards/mach-se/7722/irq.c @@ -28,7 +28,7 @@ static void __iomem *se7722_irq_regs; struct irq_domain *se7722_irq_domain; -static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc) +static void se7722_irq_demux(struct irq_desc *desc) { struct irq_data *data = irq_desc_get_irq_data(desc); struct irq_chip *chip = irq_data_get_irq_chip(data); diff --git a/arch/sh/boards/mach-se/7724/irq.c b/arch/sh/boards/mach-se/7724/irq.c index 9f2033898652..64e681e66c57 100644 --- a/arch/sh/boards/mach-se/7724/irq.c +++ b/arch/sh/boards/mach-se/7724/irq.c @@ -92,7 +92,7 @@ static struct irq_chip se7724_irq_chip __read_mostly = { .irq_unmask = enable_se7724_irq, }; -static void se7724_irq_demux(unsigned int __irq, struct irq_desc *desc) +static void se7724_irq_demux(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct fpga_irq set = get_fpga_irq(irq); diff --git a/arch/sh/boards/mach-x3proto/gpio.c b/arch/sh/boards/mach-x3proto/gpio.c index 24555c364d5b..1fb2cbee25f2 100644 --- a/arch/sh/boards/mach-x3proto/gpio.c +++ b/arch/sh/boards/mach-x3proto/gpio.c @@ -60,7 +60,7 @@ static int x3proto_gpio_to_irq(struct gpio_chip *chip, unsigned gpio) return virq; } -static void x3proto_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void x3proto_gpio_irq_handler(struct irq_desc *desc) { struct irq_data *data = irq_desc_get_irq_data(desc); struct irq_chip *chip = irq_data_get_irq_chip(data); diff --git a/arch/sh/cchips/hd6446x/hd64461.c b/arch/sh/cchips/hd6446x/hd64461.c index e9735616bdc8..8180092502f7 100644 --- a/arch/sh/cchips/hd6446x/hd64461.c +++ b/arch/sh/cchips/hd6446x/hd64461.c @@ -56,7 +56,7 @@ static 
struct irq_chip hd64461_irq_chip = { .irq_unmask = hd64461_unmask_irq, }; -static void hd64461_irq_demux(unsigned int irq, struct irq_desc *desc) +static void hd64461_irq_demux(struct irq_desc *desc) { unsigned short intv = __raw_readw(HD64461_NIRR); unsigned int ext_irq = HD64461_IRQBASE; diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h index fe20d14ae051..ceb5201a30ed 100644 --- a/arch/sh/include/asm/page.h +++ b/arch/sh/include/asm/page.h @@ -59,6 +59,7 @@ pages_do_alias(unsigned long addr1, unsigned long addr2) #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) extern void copy_page(void *to, void *from); +#define copy_user_page(to, from, vaddr, pg) __copy_user(to, from, PAGE_SIZE) struct page; struct vm_area_struct; diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c index 2e48eb8813ff..c90930de76ba 100644 --- a/arch/sparc/crypto/aes_glue.c +++ b/arch/sparc/crypto/aes_glue.c @@ -433,6 +433,7 @@ static struct crypto_alg algs[] = { { .blkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, .setkey = aes_set_key, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, @@ -452,6 +453,7 @@ static struct crypto_alg algs[] = { { .blkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, .setkey = aes_set_key, .encrypt = ctr_crypt, .decrypt = ctr_crypt, diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c index 6bf2479a12fb..561a84d93cf6 100644 --- a/arch/sparc/crypto/camellia_glue.c +++ b/arch/sparc/crypto/camellia_glue.c @@ -274,6 +274,7 @@ static struct crypto_alg algs[] = { { .blkcipher = { .min_keysize = CAMELLIA_MIN_KEY_SIZE, .max_keysize = CAMELLIA_MAX_KEY_SIZE, + .ivsize = CAMELLIA_BLOCK_SIZE, .setkey = camellia_set_key, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c index dd6a34fa6e19..61af794aa2d3 100644 --- a/arch/sparc/crypto/des_glue.c +++ b/arch/sparc/crypto/des_glue.c @@ -429,6 +429,7 @@ static struct crypto_alg algs[] = { { .blkcipher = { .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, .setkey = des_set_key, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, @@ -485,6 +486,7 @@ static struct crypto_alg algs[] = { { .blkcipher = { .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, .setkey = des3_ede_set_key, .encrypt = cbc3_encrypt, .decrypt = cbc3_decrypt, diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c index 0299f052a2ef..42efcf85f721 100644 --- a/arch/sparc/kernel/leon_kernel.c +++ b/arch/sparc/kernel/leon_kernel.c @@ -53,7 +53,7 @@ static inline unsigned int leon_eirq_get(int cpu) } /* Handle one or multiple IRQs from the extended interrupt controller */ -static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc) +static void leon_handle_ext_irq(struct irq_desc *desc) { unsigned int eirq; struct irq_bucket *p; diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c index 3382f7b3eeef..1e77128a8f88 100644 --- a/arch/sparc/kernel/leon_pci_grpci1.c +++ b/arch/sparc/kernel/leon_pci_grpci1.c @@ -357,7 +357,7 @@ static struct irq_chip grpci1_irq = { }; /* Handle one or multiple IRQs from the PCI core */ -static void grpci1_pci_flow_irq(unsigned int irq, struct irq_desc *desc) +static void grpci1_pci_flow_irq(struct irq_desc *desc) { struct grpci1_priv *priv = grpci1priv; int 
i, ack = 0; diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c index 814fb1729b12..f727c4de1316 100644 --- a/arch/sparc/kernel/leon_pci_grpci2.c +++ b/arch/sparc/kernel/leon_pci_grpci2.c @@ -498,7 +498,7 @@ static struct irq_chip grpci2_irq = { }; /* Handle one or multiple IRQs from the PCI core */ -static void grpci2_pci_flow_irq(unsigned int irq, struct irq_desc *desc) +static void grpci2_pci_flow_irq(struct irq_desc *desc) { struct grpci2_priv *priv = grpci2priv; int i, ack = 0; diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c index ee186e13dfe6..f102048d9c0e 100644 --- a/arch/tile/gxio/mpipe.c +++ b/arch/tile/gxio/mpipe.c @@ -19,6 +19,7 @@ #include <linux/errno.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/string.h> #include <gxio/iorpc_globals.h> #include <gxio/iorpc_mpipe.h> @@ -29,32 +30,6 @@ /* HACK: Avoid pointless "shadow" warnings. */ #define link link_shadow -/** - * strscpy - Copy a C-string into a sized buffer, but only if it fits - * @dest: Where to copy the string to - * @src: Where to copy the string from - * @size: size of destination buffer - * - * Use this routine to avoid copying too-long strings. - * The routine returns the total number of bytes copied - * (including the trailing NUL) or zero if the buffer wasn't - * big enough. To ensure that programmers pay attention - * to the return code, the destination has a single NUL - * written at the front (if size is non-zero) when the - * buffer is not big enough. - */ -static size_t strscpy(char *dest, const char *src, size_t size) -{ - size_t len = strnlen(src, size) + 1; - if (len > size) { - if (size) - dest[0] = '\0'; - return 0; - } - memcpy(dest, src, len); - return len; -} - int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index) { char file[32]; @@ -540,7 +515,7 @@ int gxio_mpipe_link_instance(const char *link_name) if (!context) return GXIO_ERR_NO_DEVICE; - if (strscpy(name.name, link_name, sizeof(name.name)) == 0) + if (strscpy(name.name, link_name, sizeof(name.name)) < 0) return GXIO_ERR_NO_DEVICE; return gxio_mpipe_info_instance_aux(context, name); @@ -559,7 +534,7 @@ int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac) rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac); if (rv >= 0) { - if (strscpy(link_name, name.name, sizeof(name.name)) == 0) + if (strscpy(link_name, name.name, sizeof(name.name)) < 0) return GXIO_ERR_INVAL_MEMORY_SIZE; memcpy(link_mac, mac.mac, sizeof(mac.mac)); } @@ -576,7 +551,7 @@ int gxio_mpipe_link_open(gxio_mpipe_link_t *link, _gxio_mpipe_link_name_t name; int rv; - if (strscpy(name.name, link_name, sizeof(name.name)) == 0) + if (strscpy(name.name, link_name, sizeof(name.name)) < 0) return GXIO_ERR_NO_DEVICE; rv = gxio_mpipe_link_open_aux(context, name, flags); diff --git a/arch/tile/include/asm/word-at-a-time.h b/arch/tile/include/asm/word-at-a-time.h index 9e5ce0d7b292..b66a693c2c34 100644 --- a/arch/tile/include/asm/word-at-a-time.h +++ b/arch/tile/include/asm/word-at-a-time.h @@ -6,7 +6,7 @@ struct word_at_a_time { /* unused */ }; #define WORD_AT_A_TIME_CONSTANTS {} -/* Generate 0x01 byte values for non-zero bytes using a SIMD instruction. */ +/* Generate 0x01 byte values for zero bytes using a SIMD instruction. 
*/ static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) { @@ -33,4 +33,10 @@ static inline long find_zero(unsigned long mask) #endif } +#ifdef __BIG_ENDIAN +#define zero_bytemask(mask) (~1ul << (63 - __builtin_clzl(mask))) +#else +#define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1) +#endif + #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c index b3f73fd764a3..4c017d0d2de8 100644 --- a/arch/tile/kernel/pci_gx.c +++ b/arch/tile/kernel/pci_gx.c @@ -304,17 +304,16 @@ static struct irq_chip tilegx_legacy_irq_chip = { * to Linux which just calls handle_level_irq() after clearing the * MAC INTx Assert status bit associated with this interrupt. */ -static void trio_handle_level_irq(unsigned int __irq, struct irq_desc *desc) +static void trio_handle_level_irq(struct irq_desc *desc) { struct pci_controller *controller = irq_desc_get_handler_data(desc); gxio_trio_context_t *trio_context = controller->trio; uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc); - unsigned int irq = irq_desc_get_irq(desc); int mac = controller->mac; unsigned int reg_offset; uint64_t level_mask; - handle_level_irq(irq, desc); + handle_level_irq(desc); /* * Clear the INTx Level status, otherwise future interrupts are diff --git a/arch/tile/kernel/usb.c b/arch/tile/kernel/usb.c index f0da5a237e94..9f1e05e12255 100644 --- a/arch/tile/kernel/usb.c +++ b/arch/tile/kernel/usb.c @@ -22,6 +22,7 @@ #include <linux/platform_device.h> #include <linux/usb/tilegx.h> #include <linux/init.h> +#include <linux/module.h> #include <linux/types.h> static u64 ehci_dmamask = DMA_BIT_MASK(32); diff --git a/arch/um/Makefile b/arch/um/Makefile index 098ab3333e7c..e3abe6f3156d 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile @@ -70,8 +70,8 @@ KBUILD_AFLAGS += $(ARCH_INCLUDE) USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -I%,,$(KBUILD_CFLAGS))) \ $(ARCH_INCLUDE) $(MODE_INCLUDE) $(filter -I%,$(CFLAGS)) \ - -D_FILE_OFFSET_BITS=64 -idirafter include \ - -D__KERNEL__ -D__UM_HOST__ + -D_FILE_OFFSET_BITS=64 -idirafter $(srctree)/include \ + -idirafter $(obj)/include -D__KERNEL__ -D__UM_HOST__ #This will adjust *FLAGS accordingly to the platform. 
include $(ARCH_DIR)/Makefile-os-$(OS) diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 149ec55f9c46..904f3ebf4220 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild @@ -25,4 +25,5 @@ generic-y += preempt.h generic-y += switch_to.h generic-y += topology.h generic-y += trace_clock.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index d8a9fce6ee2e..98783dd0fa2e 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -220,7 +220,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, show_regs(container_of(regs, struct pt_regs, regs)); panic("Segfault with no mm"); } - else if (!is_user && address < TASK_SIZE) { + else if (!is_user && address > PAGE_SIZE && address < TASK_SIZE) { show_regs(container_of(regs, struct pt_regs, regs)); panic("Kernel tried to access user memory at addr 0x%lx, ip 0x%lx", address, ip); diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c index e3ee4a51ef63..3f02d4232812 100644 --- a/arch/um/os-Linux/helper.c +++ b/arch/um/os-Linux/helper.c @@ -96,7 +96,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv) "ret = %d\n", -n); ret = n; } - CATCH_EINTR(waitpid(pid, NULL, __WCLONE)); + CATCH_EINTR(waitpid(pid, NULL, __WALL)); } out_free2: @@ -129,7 +129,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags, return err; } if (stack_out == NULL) { - CATCH_EINTR(pid = waitpid(pid, &status, __WCLONE)); + CATCH_EINTR(pid = waitpid(pid, &status, __WALL)); if (pid < 0) { err = -errno; printk(UM_KERN_ERR "run_helper_thread - wait failed, " @@ -148,7 +148,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags, int helper_wait(int pid) { int ret, status; - int wflags = __WCLONE; + int wflags = __WALL; CATCH_EINTR(ret = waitpid(pid, &status, wflags)); if (ret < 0) { diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index 1fc7a286dc6f..256c45b3ae34 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild @@ -62,4 +62,5 @@ generic-y += ucontext.h generic-y += unaligned.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/unicore32/kernel/irq.c b/arch/unicore32/kernel/irq.c index c53729d92e8d..eb1fd0030359 100644 --- a/arch/unicore32/kernel/irq.c +++ b/arch/unicore32/kernel/irq.c @@ -112,7 +112,7 @@ static struct irq_chip puv3_low_gpio_chip = { * irq_controller_lock held, and IRQs disabled. Decode the IRQ * and call the handler. */ -static void puv3_gpio_handler(unsigned int __irq, struct irq_desc *desc) +static void puv3_gpio_handler(struct irq_desc *desc) { unsigned int mask, irq; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7aef2d52daa0..96d058a87100 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1006,7 +1006,7 @@ config X86_THERMAL_VECTOR depends on X86_MCE_INTEL config X86_LEGACY_VM86 - bool "Legacy VM86 support (obsolete)" + bool "Legacy VM86 support" default n depends on X86_32 ---help--- @@ -1018,19 +1018,20 @@ config X86_LEGACY_VM86 available to accelerate real mode DOS programs. However, any recent version of DOSEMU, X, or vbetool should be fully functional even without kernel VM86 support, as they will all - fall back to (pretty well performing) software emulation. + fall back to software emulation. 
Nevertheless, if you are using + a 16-bit DOS program where 16-bit performance matters, vm86 + mode might be faster than emulation and you might want to + enable this option. - Anything that works on a 64-bit kernel is unlikely to need - this option, as 64-bit kernels don't, and can't, support V8086 - mode. This option is also unrelated to 16-bit protected mode - and is not needed to run most 16-bit programs under Wine. + Note that any app that works on a 64-bit kernel is unlikely to + need this option, as 64-bit kernels don't, and can't, support + V8086 mode. This option is also unrelated to 16-bit protected + mode and is not needed to run most 16-bit programs under Wine. - Enabling this option adds considerable attack surface to the - kernel and slows down system calls and exception handling. + Enabling this option increases the complexity of the kernel + and slows down exception handling a tiny bit. - Unless you use very old userspace or need the last drop of - performance in your real mode DOS games and can't use KVM, - say N here. + If unsure, say N here. config VM86 bool @@ -1307,6 +1308,7 @@ config HIGHMEM config X86_PAE bool "PAE (Physical Address Extension) Support" depends on X86_32 && !HIGHMEM4G + select SWIOTLB ---help--- PAE is required for NX support, and furthermore enables larger swapspace support for non-overcommit purposes. It diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index ee1b6d346b98..db51c1f27446 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -667,6 +667,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto, bool conout_found = false; void *dummy = NULL; u32 h = handles[i]; + u32 current_fb_base; status = efi_call_early(handle_protocol, h, proto, (void **)&gop32); @@ -678,7 +679,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto, if (status == EFI_SUCCESS) conout_found = true; - status = __gop_query32(gop32, &info, &size, &fb_base); + status = __gop_query32(gop32, &info, &size, ¤t_fb_base); if (status == EFI_SUCCESS && (!first_gop || conout_found)) { /* * Systems that use the UEFI Console Splitter may @@ -692,6 +693,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto, pixel_format = info->pixel_format; pixel_info = info->pixel_information; pixels_per_scan_line = info->pixels_per_scan_line; + fb_base = current_fb_base; /* * Once we've found a GOP supporting ConOut, @@ -770,6 +772,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto, bool conout_found = false; void *dummy = NULL; u64 h = handles[i]; + u32 current_fb_base; status = efi_call_early(handle_protocol, h, proto, (void **)&gop64); @@ -781,7 +784,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto, if (status == EFI_SUCCESS) conout_found = true; - status = __gop_query64(gop64, &info, &size, &fb_base); + status = __gop_query64(gop64, &info, &size, ¤t_fb_base); if (status == EFI_SUCCESS && (!first_gop || conout_found)) { /* * Systems that use the UEFI Console Splitter may @@ -795,6 +798,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto, pixel_format = info->pixel_format; pixel_info = info->pixel_information; pixels_per_scan_line = info->pixels_per_scan_line; + fb_base = current_fb_base; /* * Once we've found a GOP supporting ConOut, diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c index 80a0e4389c9a..bacaa13acac5 100644 --- a/arch/x86/crypto/camellia_aesni_avx_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx_glue.c @@ -554,6 +554,11 @@ static int 
__init camellia_aesni_init(void) { const char *feature_name; + if (!cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) { + pr_info("AVX or AES-NI instructions are not detected.\n"); + return -ENODEV; + } + if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) { pr_info("CPU feature '%s' is not supported.\n", feature_name); return -ENODEV; diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index d3033183ed70..055a01de7c8d 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -1128,7 +1128,18 @@ END(error_exit) /* Runs on exception stack */ ENTRY(nmi) + /* + * Fix up the exception frame if we're on Xen. + * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most + * one value to the stack on native, so it may clobber the rdx + * scratch slot, but it won't clobber any of the important + * slots past it. + * + * Xen is a different story, because the Xen frame itself overlaps + * the "NMI executing" variable. + */ PARAVIRT_ADJUST_EXCEPTION_FRAME + /* * We allow breakpoints in NMIs. If a breakpoint occurs, then * the iretq it performs will take us out of NMI context. @@ -1179,9 +1190,12 @@ ENTRY(nmi) * we don't want to enable interrupts, because then we'll end * up in an awkward situation in which IRQs are on but NMIs * are off. + * + * We also must not push anything to the stack before switching + * stacks lest we corrupt the "NMI executing" variable. */ - SWAPGS + SWAPGS_UNSAFE_STACK cld movq %rsp, %rdx movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 477fc28050e4..9727b3b48bd1 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -193,7 +193,7 @@ #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ #define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */ -#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ +#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ #define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */ #define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */ #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */ @@ -241,6 +241,7 @@ #define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ #define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ #define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ +#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ /* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ #define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 155162ea0e00..ae68be92f755 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -86,6 +86,18 @@ extern u64 asmlinkage efi_call(void *fp, ...); extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, u32 type, u64 attribute); +#ifdef CONFIG_KASAN +/* + * CONFIG_KASAN may redefine memset to __memset. __memset function is present + * only in kernel binary. Since the EFI stub linked into a separate binary it + * doesn't have __memset(). So we should use standard memset from + * arch/x86/boot/compressed/string.c. The same applies to memcpy and memmove. 
+ */ +#undef memcpy +#undef memset +#undef memmove +#endif + #endif /* CONFIG_X86_32 */ extern struct efi_scratch efi_scratch; diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c12e845f59e6..3a36ee704c30 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -40,6 +40,7 @@ #define KVM_PIO_PAGE_OFFSET 1 #define KVM_COALESCED_MMIO_PAGE_OFFSET 2 +#define KVM_HALT_POLL_NS_DEFAULT 500000 #define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS @@ -711,6 +712,7 @@ struct kvm_vcpu_stat { u32 nmi_window_exits; u32 halt_exits; u32 halt_successful_poll; + u32 halt_attempted_poll; u32 halt_wakeup; u32 request_irq_exits; u32 irq_exits; @@ -1224,10 +1226,8 @@ void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err); int kvm_is_in_guest(void); -int __x86_set_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem); -int x86_set_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem); +int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size); +int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size); bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu); bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu); diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index c1c0a1c14344..b8c14bb7fc8f 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -141,6 +141,8 @@ #define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10) #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11) +#define MSR_PEBS_FRONTEND 0x000003f7 + #define MSR_IA32_POWER_CTL 0x000001fc #define MSR_IA32_MC0_CTL 0x00000400 @@ -331,6 +333,7 @@ /* C1E active bits in int pending message */ #define K8_INTP_C1E_ACTIVE_MASK 0x18000000 #define MSR_K8_TSEG_ADDR 0xc0010112 +#define MSR_K8_TSEG_MASK 0xc0010113 #define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */ #define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */ #define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */ diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index ce029e4fa7c6..31247b5bff7c 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -97,7 +97,6 @@ struct pv_lazy_ops { struct pv_time_ops { unsigned long long (*sched_clock)(void); unsigned long long (*steal_clock)(int cpu); - unsigned long (*get_tsc_khz)(void); }; struct pv_cpu_ops { diff --git a/arch/x86/include/asm/pvclock-abi.h b/arch/x86/include/asm/pvclock-abi.h index 655e07a48f6c..67f08230103a 100644 --- a/arch/x86/include/asm/pvclock-abi.h +++ b/arch/x86/include/asm/pvclock-abi.h @@ -41,6 +41,7 @@ struct pvclock_wall_clock { #define PVCLOCK_TSC_STABLE_BIT (1 << 0) #define PVCLOCK_GUEST_STOPPED (1 << 1) +/* PVCLOCK_COUNTS_FROM_ZERO broke ABI and can't be used anymore. 
*/ #define PVCLOCK_COUNTS_FROM_ZERO (1 << 2) #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_PVCLOCK_ABI_H */ diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h index 9d51fae1cba3..eaba08076030 100644 --- a/arch/x86/include/asm/qspinlock.h +++ b/arch/x86/include/asm/qspinlock.h @@ -39,18 +39,27 @@ static inline void queued_spin_unlock(struct qspinlock *lock) } #endif -#define virt_queued_spin_lock virt_queued_spin_lock - -static inline bool virt_queued_spin_lock(struct qspinlock *lock) +#ifdef CONFIG_PARAVIRT +#define virt_spin_lock virt_spin_lock +static inline bool virt_spin_lock(struct qspinlock *lock) { if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) return false; - while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0) - cpu_relax(); + /* + * On hypervisors without PARAVIRT_SPINLOCKS support we fall + * back to a Test-and-Set spinlock, because fair locks have + * horrible lock 'holder' preemption issues. + */ + + do { + while (atomic_read(&lock->val) != 0) + cpu_relax(); + } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0); return true; } +#endif /* CONFIG_PARAVIRT */ #include <asm-generic/qspinlock.h> diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h index e4661196994e..ff8b9a17dc4b 100644 --- a/arch/x86/include/asm/string_64.h +++ b/arch/x86/include/asm/string_64.h @@ -27,12 +27,11 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t function. */ #define __HAVE_ARCH_MEMCPY 1 +extern void *memcpy(void *to, const void *from, size_t len); extern void *__memcpy(void *to, const void *from, size_t len); #ifndef CONFIG_KMEMCHECK -#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4 -extern void *memcpy(void *to, const void *from, size_t len); -#else +#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4 #define memcpy(dst, src, len) \ ({ \ size_t __len = (len); \ diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index 83aea8055119..4c20dd333412 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -336,10 +336,10 @@ HYPERVISOR_update_descriptor(u64 ma, u64 desc) return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); } -static inline int +static inline long HYPERVISOR_memory_op(unsigned int cmd, void *arg) { - return _hypercall2(int, memory_op, cmd, arg); + return _hypercall2(long, memory_op, cmd, arg); } static inline int diff --git a/arch/x86/include/uapi/asm/bitsperlong.h b/arch/x86/include/uapi/asm/bitsperlong.h index b0ae1c4dc791..217909b4d6f5 100644 --- a/arch/x86/include/uapi/asm/bitsperlong.h +++ b/arch/x86/include/uapi/asm/bitsperlong.h @@ -1,7 +1,7 @@ #ifndef __ASM_X86_BITSPERLONG_H #define __ASM_X86_BITSPERLONG_H -#ifdef __x86_64__ +#if defined(__x86_64__) && !defined(__ILP32__) # define __BITS_PER_LONG 64 #else # define __BITS_PER_LONG 32 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index c42827eb86cf..25f909362b7a 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -338,10 +338,15 @@ done: static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr) { + unsigned long flags; + if (instr[0] != 0x90) return; + local_irq_save(flags); add_nops(instr + (a->instrlen - a->padlen), a->padlen); + sync_core(); + local_irq_restore(flags); DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ", instr, a->instrlen - a->padlen, a->padlen); diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c 
index 3ca3e46aa405..24e94ce454e2 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -336,6 +336,13 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) apic_write(APIC_LVTT, lvtt_value); if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) { + /* + * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode, + * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized. + * According to Intel, MFENCE can do the serialization here. + */ + asm volatile("mfence" : : : "memory"); + printk_once(KERN_DEBUG "TSC deadline timer enabled\n"); return; } diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 38a76f826530..bb6bfc01cb82 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2522,6 +2522,7 @@ void __init setup_ioapic_dest(void) int pin, ioapic, irq, irq_entry; const struct cpumask *mask; struct irq_data *idata; + struct irq_chip *chip; if (skip_ioapic_setup == 1) return; @@ -2545,9 +2546,9 @@ void __init setup_ioapic_dest(void) else mask = apic->target_cpus(); - irq_set_affinity(irq, mask); + chip = irq_data_get_irq_chip(idata); + chip->irq_set_affinity(idata, mask, false); } - } #endif @@ -2906,6 +2907,7 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, struct irq_data *irq_data; struct mp_chip_data *data; struct irq_alloc_info *info = arg; + unsigned long flags; if (!info || nr_irqs > 1) return -EINVAL; @@ -2938,11 +2940,14 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, cfg = irqd_cfg(irq_data); add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin); + + local_irq_save(flags); if (info->ioapic_entry) mp_setup_entry(cfg, data, info->ioapic_entry); mp_register_handler(virq, data->trigger); if (virq < nr_legacy_irqs()) legacy_pic->mask(virq); + local_irq_restore(flags); apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i Dest:%d)\n", diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 1bbd0fe2c806..836d11b92811 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -489,10 +489,8 @@ static int apic_set_affinity(struct irq_data *irq_data, err = assign_irq_vector(irq, data, dest); if (err) { - struct irq_data *top = irq_get_irq_data(irq); - if (assign_irq_vector(irq, data, - irq_data_get_affinity_mask(top))) + irq_data_get_affinity_mask(irq_data))) pr_err("Failed to recover vector for irq %d\n", irq); return err; } diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 07ce52c22ec8..de22ea7ff82f 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1110,10 +1110,10 @@ void print_cpu_info(struct cpuinfo_x86 *c) else printk(KERN_CONT "%d86", c->x86); - printk(KERN_CONT " (fam: %02x, model: %02x", c->x86, c->x86_model); + printk(KERN_CONT " (family: 0x%x, model: 0x%x", c->x86, c->x86_model); if (c->x86_mask || c->cpuid_level >= 0) - printk(KERN_CONT ", stepping: %02x)\n", c->x86_mask); + printk(KERN_CONT ", stepping: 0x%x)\n", c->x86_mask); else printk(KERN_CONT ")\n"); diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 381c8b9b3a33..20e242ea1bc4 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -34,11 +34,10 @@ struct ms_hyperv_info ms_hyperv; EXPORT_SYMBOL_GPL(ms_hyperv); -static void (*hv_kexec_handler)(void); -static void (*hv_crash_handler)(struct pt_regs *regs); - #if IS_ENABLED(CONFIG_HYPERV) 
static void (*vmbus_handler)(void); +static void (*hv_kexec_handler)(void); +static void (*hv_crash_handler)(struct pt_regs *regs); void hyperv_vector_handler(struct pt_regs *regs) { @@ -96,8 +95,8 @@ void hv_remove_crash_handler(void) hv_crash_handler = NULL; } EXPORT_SYMBOL_GPL(hv_remove_crash_handler); -#endif +#ifdef CONFIG_KEXEC_CORE static void hv_machine_shutdown(void) { if (kexec_in_progress && hv_kexec_handler) @@ -111,7 +110,8 @@ static void hv_machine_crash_shutdown(struct pt_regs *regs) hv_crash_handler(regs); native_machine_crash_shutdown(regs); } - +#endif /* CONFIG_KEXEC_CORE */ +#endif /* CONFIG_HYPERV */ static uint32_t __init ms_hyperv_platform(void) { @@ -186,8 +186,10 @@ static void __init ms_hyperv_init_platform(void) no_timer_check = 1; #endif +#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE) machine_ops.shutdown = hv_machine_shutdown; machine_ops.crash_shutdown = hv_machine_crash_shutdown; +#endif mark_tsc_unstable("running on Hyper-V"); } diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 5edf6d868fc1..165be83a7fa4 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -47,6 +47,7 @@ enum extra_reg_type { EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ EXTRA_REG_LBR = 2, /* lbr_select */ EXTRA_REG_LDLAT = 3, /* ld_lat_threshold */ + EXTRA_REG_FE = 4, /* fe_* */ EXTRA_REG_MAX /* number of entries needed */ }; diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index cd9b6d0b10bf..f63360be2238 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -205,6 +205,11 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = { INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), + /* + * Note the low 8 bits eventsel code is not a continuous field, containing + * some #GPing bits. These are masked out. 
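The 0x7fff17 value in the MSR_PEBS_FRONTEND entry that follows is the valid-bits mask keeping user-supplied config1 bits away from those #GP-raising positions; conceptually the filtering reduces to the check below (illustrative helper using kernel-style u64, not the perf core's exact code):

    /* Illustrative only: accept an extra-register value only when it
     * stays inside the valid mask, so the eventual wrmsr cannot #GP. */
    static bool extra_reg_value_ok(u64 config1, u64 valid_mask)
    {
            return (config1 & ~valid_mask) == 0;
    }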
+ */ + INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE), EVENT_EXTRA_END }; @@ -250,7 +255,7 @@ struct event_constraint intel_bdw_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */ - INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */ + INTEL_UEVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */ EVENT_CONSTRAINT_END }; @@ -2316,9 +2321,12 @@ static struct event_constraint * intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) { - struct event_constraint *c1 = cpuc->event_constraint[idx]; + struct event_constraint *c1 = NULL; struct event_constraint *c2; + if (idx >= 0) /* fake does < 0 */ + c1 = cpuc->event_constraint[idx]; + /* * first time only * - static constraint: no change across incremental scheduling calls @@ -2888,6 +2896,8 @@ PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); PMU_FORMAT_ATTR(ldlat, "config1:0-15"); +PMU_FORMAT_ATTR(frontend, "config1:0-23"); + static struct attribute *intel_arch3_formats_attr[] = { &format_attr_event.attr, &format_attr_umask.attr, @@ -2904,6 +2914,11 @@ static struct attribute *intel_arch3_formats_attr[] = { NULL, }; +static struct attribute *skl_format_attr[] = { + &format_attr_frontend.attr, + NULL, +}; + static __initconst const struct x86_pmu core_pmu = { .name = "core", .handle_irq = x86_pmu_handle_irq, @@ -3513,7 +3528,8 @@ __init int intel_pmu_init(void) x86_pmu.hw_config = hsw_hw_config; x86_pmu.get_event_constraints = hsw_get_event_constraints; - x86_pmu.cpu_events = hsw_events_attrs; + x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr, + skl_format_attr); WARN_ON(!x86_pmu.format_attrs); x86_pmu.cpu_events = hsw_events_attrs; pr_cont("Skylake events, "); diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/kernel/cpu/perf_event_intel_bts.c index 54690e885759..d1c0f254afbe 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_bts.c +++ b/arch/x86/kernel/cpu/perf_event_intel_bts.c @@ -222,6 +222,7 @@ static void __bts_event_start(struct perf_event *event) if (!buf || bts_buffer_is_full(buf, bts)) return; + event->hw.itrace_started = 1; event->hw.state = 0; if (!buf->snapshot) diff --git a/arch/x86/kernel/cpu/perf_event_msr.c b/arch/x86/kernel/cpu/perf_event_msr.c index 086b12eae794..f32ac13934f2 100644 --- a/arch/x86/kernel/cpu/perf_event_msr.c +++ b/arch/x86/kernel/cpu/perf_event_msr.c @@ -10,12 +10,12 @@ enum perf_msr_id { PERF_MSR_EVENT_MAX, }; -bool test_aperfmperf(int idx) +static bool test_aperfmperf(int idx) { return boot_cpu_has(X86_FEATURE_APERFMPERF); } -bool test_intel(int idx) +static bool test_intel(int idx) { if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || boot_cpu_data.x86 != 6) diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 3d423a101fae..608fb26c7254 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -37,7 +37,7 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c) { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 }, { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 }, - { X86_FEATURE_HWP_NOITFY, CR_EAX, 8, 0x00000006, 0 }, + { X86_FEATURE_HWP_NOTIFY, CR_EAX, 8, 0x00000006, 0 }, { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 }, { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 }, { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 }, diff --git 
a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index e068d6683dba..74ca2fe7a0b3 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -185,10 +185,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs) } #ifdef CONFIG_KEXEC_FILE -static int get_nr_ram_ranges_callback(unsigned long start_pfn, - unsigned long nr_pfn, void *arg) +static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg) { - int *nr_ranges = arg; + unsigned int *nr_ranges = arg; (*nr_ranges)++; return 0; @@ -214,7 +213,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced, ced->image = image; - walk_system_ram_range(0, -1, &nr_ranges, + walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback); ced->max_nr_ranges = nr_ranges; diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index c80cf6699678..38da8f29a9c8 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -68,11 +68,10 @@ static inline void *current_stack(void) return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1)); } -static inline int -execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) +static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) { struct irq_stack *curstk, *irqstk; - u32 *isp, *prev_esp, arg1, arg2; + u32 *isp, *prev_esp, arg1; curstk = (struct irq_stack *) current_stack(); irqstk = __this_cpu_read(hardirq_stack); @@ -98,8 +97,8 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) asm volatile("xchgl %%ebx,%%esp \n" "call *%%edi \n" "movl %%ebx,%%esp \n" - : "=a" (arg1), "=d" (arg2), "=b" (isp) - : "0" (irq), "1" (desc), "2" (isp), + : "=a" (arg1), "=b" (isp) + : "0" (desc), "1" (isp), "D" (desc->handle_irq) : "memory", "cc", "ecx"); return 1; @@ -150,19 +149,15 @@ void do_softirq_own_stack(void) bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) { - unsigned int irq; - int overflow; - - overflow = check_stack_overflow(); + int overflow = check_stack_overflow(); if (IS_ERR_OR_NULL(desc)) return false; - irq = irq_desc_get_irq(desc); - if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) { + if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) { if (unlikely(overflow)) print_stack_overflow(); - generic_handle_irq_desc(irq, desc); + generic_handle_irq_desc(desc); } return true; diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index ff16ccb918f2..c767cf2bc80a 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -75,6 +75,6 @@ bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) if (unlikely(IS_ERR_OR_NULL(desc))) return false; - generic_handle_irq_desc(irq_desc_get_irq(desc), desc); + generic_handle_irq_desc(desc); return true; } diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 2bcc0525f1c1..6acc9dd91f36 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -58,7 +58,7 @@ static struct ldt_struct *alloc_ldt_struct(int size) if (alloc_size > PAGE_SIZE) new_ldt->entries = vzalloc(alloc_size); else - new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL); + new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL); if (!new_ldt->entries) { kfree(new_ldt); @@ -95,7 +95,7 @@ static void free_ldt_struct(struct ldt_struct *ldt) if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE) vfree(ldt->entries); else - kfree(ldt->entries); + free_page((unsigned long)ldt->entries); kfree(ldt); } diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index f68e48f5f6c2..c2130aef3f9d 100644 --- a/arch/x86/kernel/paravirt.c +++ 
b/arch/x86/kernel/paravirt.c @@ -41,10 +41,18 @@ #include <asm/timer.h> #include <asm/special_insns.h> -/* nop stub */ -void _paravirt_nop(void) -{ -} +/* + * nop stub, which must not clobber anything *including the stack* to + * avoid confusing the entry prologues. + */ +extern void _paravirt_nop(void); +asm (".pushsection .entry.text, \"ax\"\n" + ".global _paravirt_nop\n" + "_paravirt_nop:\n\t" + "ret\n\t" + ".size _paravirt_nop, . - _paravirt_nop\n\t" + ".type _paravirt_nop, @function\n\t" + ".popsection"); /* identity function, which can be inlined */ u32 _paravirt_ident_32(u32 x) diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 84b8ef82a159..1b55de1267cf 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -131,8 +131,8 @@ void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr, bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp) { - *gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp); *gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); + *gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp); if (!*dev) *dev = &x86_dma_fallback_dev; diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 6d0e62ae8516..e28db181e4fc 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -506,3 +506,58 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) return randomize_range(mm->brk, range_end, 0) ? : mm->brk; } +/* + * Called from fs/proc with a reference on @p to find the function + * which called into schedule(). This needs to be done carefully + * because the task might wake up and we might look at a stack + * changing under us. + */ +unsigned long get_wchan(struct task_struct *p) +{ + unsigned long start, bottom, top, sp, fp, ip; + int count = 0; + + if (!p || p == current || p->state == TASK_RUNNING) + return 0; + + start = (unsigned long)task_stack_page(p); + if (!start) + return 0; + + /* + * Layout of the stack page: + * + * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long) + * PADDING + * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING + * stack + * ----------- bottom = start + sizeof(thread_info) + * thread_info + * ----------- start + * + * The tasks stack pointer points at the location where the + * framepointer is stored. The data on the stack is: + * ... IP FP ... IP FP + * + * We need to read FP and IP, so we need to adjust the upper + * bound by another unsigned long. 
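The "... IP FP ... IP FP" picture above amounts to a two-word record at every saved frame pointer; a sketch of that layout (the walker that follows reads the two words directly rather than through a struct):

    /* Layout assumed by the frame walk: at a saved frame pointer, the
     * first word is the caller's frame pointer and the second word is
     * the return address inside the caller. */
    struct frame_sketch {
            unsigned long next_fp;  /* == *(unsigned long *)fp */
            unsigned long ret_ip;   /* == *(unsigned long *)(fp + sizeof(unsigned long)) */
    };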
+ */ + top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; + top -= 2 * sizeof(unsigned long); + bottom = start + sizeof(struct thread_info); + + sp = READ_ONCE(p->thread.sp); + if (sp < bottom || sp > top) + return 0; + + fp = READ_ONCE_NOCHECK(*(unsigned long *)sp); + do { + if (fp < bottom || fp > top) + return 0; + ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long))); + if (!in_sched_functions(ip)) + return ip; + fp = READ_ONCE_NOCHECK(*(unsigned long *)fp); + } while (count++ < 16 && p->state != TASK_RUNNING); + return 0; +} diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index c13df2c735f8..737527b40e5b 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -324,31 +324,3 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) return prev_p; } - -#define top_esp (THREAD_SIZE - sizeof(unsigned long)) -#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long)) - -unsigned long get_wchan(struct task_struct *p) -{ - unsigned long bp, sp, ip; - unsigned long stack_page; - int count = 0; - if (!p || p == current || p->state == TASK_RUNNING) - return 0; - stack_page = (unsigned long)task_stack_page(p); - sp = p->thread.sp; - if (!stack_page || sp < stack_page || sp > top_esp+stack_page) - return 0; - /* include/asm-i386/system.h:switch_to() pushes bp last. */ - bp = *(unsigned long *) sp; - do { - if (bp < stack_page || bp > top_ebp+stack_page) - return 0; - ip = *(unsigned long *) (bp+4); - if (!in_sched_functions(ip)) - return ip; - bp = *(unsigned long *) bp; - } while (count++ < 16); - return 0; -} - diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 3c1bbcf12924..b35921a670b2 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -499,30 +499,6 @@ void set_personality_ia32(bool x32) } EXPORT_SYMBOL_GPL(set_personality_ia32); -unsigned long get_wchan(struct task_struct *p) -{ - unsigned long stack; - u64 fp, ip; - int count = 0; - - if (!p || p == current || p->state == TASK_RUNNING) - return 0; - stack = (unsigned long)task_stack_page(p); - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE) - return 0; - fp = *(u64 *)(p->thread.sp); - do { - if (fp < (unsigned long)stack || - fp >= (unsigned long)stack+THREAD_SIZE) - return 0; - ip = *(u64 *)(fp+8); - if (!in_sched_functions(ip)) - return ip; - fp = *(u64 *)fp; - } while (count++ < 16); - return 0; -} - long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) { int ret = 0; diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index fdb7f2a2d328..a3cccbfc5f77 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1173,6 +1173,14 @@ void __init setup_arch(char **cmdline_p) clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, swapper_pg_dir + KERNEL_PGD_BOUNDARY, KERNEL_PGD_PTRS); + + /* + * sync back low identity map too. It is used for example + * in the 32-bit EFI stub. 
+ */ + clone_pgd_range(initial_page_table, + swapper_pg_dir + KERNEL_PGD_BOUNDARY, + KERNEL_PGD_PTRS); #endif tboot_probe(); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index e0c198e5f920..892ee2e5ecbc 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -509,7 +509,7 @@ void __inquire_remote_apic(int apicid) */ #define UDELAY_10MS_DEFAULT 10000 -static unsigned int init_udelay = UDELAY_10MS_DEFAULT; +static unsigned int init_udelay = INT_MAX; static int __init cpu_init_udelay(char *str) { @@ -522,13 +522,16 @@ early_param("cpu_init_udelay", cpu_init_udelay); static void __init smp_quirk_init_udelay(void) { /* if cmdline changed it from default, leave it alone */ - if (init_udelay != UDELAY_10MS_DEFAULT) + if (init_udelay != INT_MAX) return; /* if modern processor, use no delay */ if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) || ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) init_udelay = 0; + + /* else, use legacy delay */ + init_udelay = UDELAY_10MS_DEFAULT; } /* @@ -657,7 +660,9 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) /* * Give the other CPU some time to accept the IPI. */ - if (init_udelay) + if (init_udelay == 0) + udelay(10); + else udelay(300); pr_debug("Startup point 1\n"); @@ -668,7 +673,9 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) /* * Give the other CPU some time to accept the IPI. */ - if (init_udelay) + if (init_udelay == 0) + udelay(10); + else udelay(200); if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index c8d52cb4cb6e..c3f7602cd038 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -21,6 +21,7 @@ #include <asm/hypervisor.h> #include <asm/nmi.h> #include <asm/x86_init.h> +#include <asm/geode.h> unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ EXPORT_SYMBOL(cpu_khz); @@ -1013,15 +1014,17 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable); static void __init check_system_tsc_reliable(void) { -#ifdef CONFIG_MGEODE_LX - /* RTSC counts during suspend */ +#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC) + if (is_geode_lx()) { + /* RTSC counts during suspend */ #define RTSC_SUSP 0x100 - unsigned long res_low, res_high; + unsigned long res_low, res_high; - rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high); - /* Geode_LX - the OLPC CPU has a very reliable TSC */ - if (res_low & RTSC_SUSP) - tsc_clocksource_reliable = 1; + rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high); + /* Geode_LX - the OLPC CPU has a very reliable TSC */ + if (res_low & RTSC_SUSP) + tsc_clocksource_reliable = 1; + } #endif if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) tsc_clocksource_reliable = 1; diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index abd8b856bd2b..524619351961 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -45,6 +45,7 @@ #include <linux/audit.h> #include <linux/stddef.h> #include <linux/slab.h> +#include <linux/security.h> #include <asm/uaccess.h> #include <asm/io.h> @@ -232,6 +233,32 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) struct pt_regs *regs = current_pt_regs(); unsigned long err = 0; + err = security_mmap_addr(0); + if (err) { + /* + * vm86 cannot virtualize the address space, so vm86 users + * need to manage the low 1MB themselves using mmap. 
Given + * that BIOS places important data in the first page, vm86 + * is essentially useless if mmap_min_addr != 0. DOSEMU, + * for example, won't even bother trying to use vm86 if it + * can't map a page at virtual address 0. + * + * To reduce the available kernel attack surface, simply + * disallow vm86(old) for users who cannot mmap at va 0. + * + * The implementation of security_mmap_addr will allow + * suitably privileged users to map va 0 even if + * vm.mmap_min_addr is set above 0, and we want this + * behavior for vm86 as well, as it ensures that legacy + * tools like vbetool will not fail just because of + * vm.mmap_min_addr. + */ + pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d). Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n", + current->comm, task_pid_nr(current), + from_kuid_munged(&init_user_ns, current_uid())); + return -EPERM; + } + if (!vm86) { if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL))) return -ENOMEM; diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index b372a7557c16..9da95b9daf8d 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2418,7 +2418,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) u64 val, cr0, cr4; u32 base3; u16 selector; - int i; + int i, r; for (i = 0; i < 16; i++) *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8); @@ -2460,13 +2460,17 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) dt.address = GET_SMSTATE(u64, smbase, 0x7e68); ctxt->ops->set_gdt(ctxt, &dt); + r = rsm_enter_protected_mode(ctxt, cr0, cr4); + if (r != X86EMUL_CONTINUE) + return r; + for (i = 0; i < 6; i++) { - int r = rsm_load_seg_64(ctxt, smbase, i); + r = rsm_load_seg_64(ctxt, smbase, i); if (r != X86EMUL_CONTINUE) return r; } - return rsm_enter_protected_mode(ctxt, cr0, cr4); + return X86EMUL_CONTINUE; } static int em_rsm(struct x86_emulate_ctxt *ctxt) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 69088a1ba509..ff606f507913 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3322,7 +3322,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) break; reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte, - leaf); + iterator.level); } walk_shadow_page_lockless_end(vcpu); @@ -3614,7 +3614,7 @@ static void __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, struct rsvd_bits_validate *rsvd_check, int maxphyaddr, int level, bool nx, bool gbpages, - bool pse) + bool pse, bool amd) { u64 exb_bit_rsvd = 0; u64 gbpages_bit_rsvd = 0; @@ -3631,7 +3631,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for * leaf entries) on AMD CPUs only. */ - if (guest_cpuid_is_amd(vcpu)) + if (amd) nonleaf_bit8_rsvd = rsvd_bits(8, 8); switch (level) { @@ -3699,7 +3699,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check, cpuid_maxphyaddr(vcpu), context->root_level, context->nx, guest_cpuid_has_gbpages(vcpu), - is_pse(vcpu)); + is_pse(vcpu), guest_cpuid_is_amd(vcpu)); } static void @@ -3749,13 +3749,24 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) { + /* + * Passing "true" to the last argument is okay; it adds a check + * on bit 8 of the SPTEs which KVM doesn't use anyway. 
+ */ __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check, boot_cpu_data.x86_phys_bits, context->shadow_root_level, context->nx, - guest_cpuid_has_gbpages(vcpu), is_pse(vcpu)); + guest_cpuid_has_gbpages(vcpu), is_pse(vcpu), + true); } EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask); +static inline bool boot_cpu_is_amd(void) +{ + WARN_ON_ONCE(!tdp_enabled); + return shadow_x_mask == 0; +} + /* * the direct page table on host, use as much mmu features as * possible, however, kvm currently does not do execution-protection. @@ -3764,11 +3775,11 @@ static void reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) { - if (guest_cpuid_is_amd(vcpu)) + if (boot_cpu_is_amd()) __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check, boot_cpu_data.x86_phys_bits, context->shadow_root_level, false, - cpu_has_gbpages, true); + cpu_has_gbpages, true, true); else __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, boot_cpu_data.x86_phys_bits, diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index fdb8cb63a6c0..2f9ed1ff0632 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -202,6 +202,7 @@ module_param(npt, int, S_IRUGO); static int nested = true; module_param(nested, int, S_IRUGO); +static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); static void svm_flush_tlb(struct kvm_vcpu *vcpu); static void svm_complete_interrupts(struct vcpu_svm *svm); @@ -513,7 +514,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) struct vcpu_svm *svm = to_svm(vcpu); if (svm->vmcb->control.next_rip != 0) { - WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS)); + WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS)); svm->next_rip = svm->vmcb->control.next_rip; } @@ -865,64 +866,6 @@ static void svm_disable_lbrv(struct vcpu_svm *svm) set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0); } -#define MTRR_TYPE_UC_MINUS 7 -#define MTRR2PROTVAL_INVALID 0xff - -static u8 mtrr2protval[8]; - -static u8 fallback_mtrr_type(int mtrr) -{ - /* - * WT and WP aren't always available in the host PAT. Treat - * them as UC and UC- respectively. Everything else should be - * there. - */ - switch (mtrr) - { - case MTRR_TYPE_WRTHROUGH: - return MTRR_TYPE_UNCACHABLE; - case MTRR_TYPE_WRPROT: - return MTRR_TYPE_UC_MINUS; - default: - BUG(); - } -} - -static void build_mtrr2protval(void) -{ - int i; - u64 pat; - - for (i = 0; i < 8; i++) - mtrr2protval[i] = MTRR2PROTVAL_INVALID; - - /* Ignore the invalid MTRR types. */ - mtrr2protval[2] = 0; - mtrr2protval[3] = 0; - - /* - * Use host PAT value to figure out the mapping from guest MTRR - * values to nested page table PAT/PCD/PWT values. We do not - * want to change the host PAT value every time we enter the - * guest. 
- */ - rdmsrl(MSR_IA32_CR_PAT, pat); - for (i = 0; i < 8; i++) { - u8 mtrr = pat >> (8 * i); - - if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID) - mtrr2protval[mtrr] = __cm_idx2pte(i); - } - - for (i = 0; i < 8; i++) { - if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) { - u8 fallback = fallback_mtrr_type(i); - mtrr2protval[i] = mtrr2protval[fallback]; - BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID); - } - } -} - static __init int svm_hardware_setup(void) { int cpu; @@ -989,7 +932,6 @@ static __init int svm_hardware_setup(void) } else kvm_disable_tdp(); - build_mtrr2protval(); return 0; err: @@ -1144,43 +1086,6 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) return target_tsc - tsc; } -static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat) -{ - struct kvm_vcpu *vcpu = &svm->vcpu; - - /* Unlike Intel, AMD takes the guest's CR0.CD into account. - * - * AMD does not have IPAT. To emulate it for the case of guests - * with no assigned devices, just set everything to WB. If guests - * have assigned devices, however, we cannot force WB for RAM - * pages only, so use the guest PAT directly. - */ - if (!kvm_arch_has_assigned_device(vcpu->kvm)) - *g_pat = 0x0606060606060606; - else - *g_pat = vcpu->arch.pat; -} - -static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) -{ - u8 mtrr; - - /* - * 1. MMIO: trust guest MTRR, so same as item 3. - * 2. No passthrough: always map as WB, and force guest PAT to WB as well - * 3. Passthrough: can't guarantee the result, try to trust guest. - */ - if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm)) - return 0; - - if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED) && - kvm_read_cr0(vcpu) & X86_CR0_CD) - return _PAGE_NOCACHE; - - mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn); - return mtrr2protval[mtrr]; -} - static void init_vmcb(struct vcpu_svm *svm, bool init_event) { struct vmcb_control_area *control = &svm->vmcb->control; @@ -1263,7 +1168,8 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event) * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0. * It also updates the guest-visible cr0 value. */ - (void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET); + svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET); + kvm_mmu_reset_context(&svm->vcpu); save->cr4 = X86_CR4_PAE; /* rdx = ?? */ @@ -1276,7 +1182,6 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event) clr_cr_intercept(svm, INTERCEPT_CR3_READ); clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); save->g_pat = svm->vcpu.arch.pat; - svm_set_guest_pat(svm, &save->g_pat); save->cr3 = 0; save->cr4 = 0; } @@ -1671,10 +1576,13 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) if (!vcpu->fpu_active) cr0 |= X86_CR0_TS; - - /* These are emulated via page tables. 
*/ - cr0 &= ~(X86_CR0_CD | X86_CR0_NW); - + /* + * re-enable caching here because the QEMU bios + * does not do it - this results in some delay at + * reboot + */ + if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) + cr0 &= ~(X86_CR0_CD | X86_CR0_NW); svm->vmcb->save.cr0 = cr0; mark_dirty(svm->vmcb, VMCB_CR); update_cr0_intercept(svm); @@ -3349,16 +3257,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_VM_IGNNE: vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); break; - case MSR_IA32_CR_PAT: - if (npt_enabled) { - if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) - return 1; - vcpu->arch.pat = data; - svm_set_guest_pat(svm, &svm->vmcb->save.g_pat); - mark_dirty(svm->vmcb, VMCB_NPT); - break; - } - /* fall through */ default: return kvm_set_msr_common(vcpu, msr); } @@ -4193,6 +4091,11 @@ static bool svm_has_high_real_mode_segbase(void) return true; } +static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) +{ + return 0; +} + static void svm_cpuid_update(struct kvm_vcpu *vcpu) { } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index d01986832afc..6a8bc64566ab 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -4105,17 +4105,13 @@ static void seg_setup(int seg) static int alloc_apic_access_page(struct kvm *kvm) { struct page *page; - struct kvm_userspace_memory_region kvm_userspace_mem; int r = 0; mutex_lock(&kvm->slots_lock); if (kvm->arch.apic_access_page_done) goto out; - kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; - kvm_userspace_mem.flags = 0; - kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE; - kvm_userspace_mem.memory_size = PAGE_SIZE; - r = __x86_set_memory_region(kvm, &kvm_userspace_mem); + r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, + APIC_DEFAULT_PHYS_BASE, PAGE_SIZE); if (r) goto out; @@ -4140,17 +4136,12 @@ static int alloc_identity_pagetable(struct kvm *kvm) { /* Called with kvm->slots_lock held. */ - struct kvm_userspace_memory_region kvm_userspace_mem; int r = 0; BUG_ON(kvm->arch.ept_identity_pagetable_done); - kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; - kvm_userspace_mem.flags = 0; - kvm_userspace_mem.guest_phys_addr = - kvm->arch.ept_identity_map_addr; - kvm_userspace_mem.memory_size = PAGE_SIZE; - r = __x86_set_memory_region(kvm, &kvm_userspace_mem); + r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, + kvm->arch.ept_identity_map_addr, PAGE_SIZE); return r; } @@ -4949,14 +4940,9 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) { int ret; - struct kvm_userspace_memory_region tss_mem = { - .slot = TSS_PRIVATE_MEMSLOT, - .guest_phys_addr = addr, - .memory_size = PAGE_SIZE * 3, - .flags = 0, - }; - ret = x86_set_memory_region(kvm, &tss_mem); + ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, + PAGE_SIZE * 3); if (ret) return ret; kvm->arch.tss_addr = addr; @@ -6064,6 +6050,8 @@ static __init int hardware_setup(void) memcpy(vmx_msr_bitmap_longmode_x2apic, vmx_msr_bitmap_longmode, PAGE_SIZE); + set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ + if (enable_apicv) { for (msr = 0x800; msr <= 0x8ff; msr++) vmx_disable_intercept_msr_read_x2apic(msr); @@ -8615,17 +8603,22 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) u64 ipat = 0; /* For VT-d and EPT combination - * 1. MMIO: guest may want to apply WC, trust it. + * 1. MMIO: always map as UC * 2. EPT with VT-d: * a. 
VT-d without snooping control feature: can't guarantee the - * result, try to trust guest. So the same as item 1. + * result, try to trust guest. * b. VT-d with snooping control feature: snooping control feature of * VT-d engine can guarantee the cache correctness. Just set it * to WB to keep consistent with host. So the same as item 3. * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep * consistent with host MTRR */ - if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) { + if (is_mmio) { + cache = MTRR_TYPE_UNCACHABLE; + goto exit; + } + + if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { ipat = VMX_EPT_IPAT_BIT; cache = MTRR_TYPE_WRBACK; goto exit; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a60bdbccff51..9a9a19830321 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -149,6 +149,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "nmi_window", VCPU_STAT(nmi_window_exits) }, { "halt_exits", VCPU_STAT(halt_exits) }, { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, + { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) }, { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "hypercalls", VCPU_STAT(hypercalls) }, { "request_irq", VCPU_STAT(request_irq_exits) }, @@ -1707,8 +1708,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) vcpu->pvclock_set_guest_stopped_request = false; } - pvclock_flags |= PVCLOCK_COUNTS_FROM_ZERO; - /* If the host uses TSC clocksource, then it is stable */ if (use_master_clock) pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; @@ -2006,8 +2005,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) &vcpu->requests); ka->boot_vcpu_runs_old_kvmclock = tmp; - - ka->kvmclock_offset = -get_kernel_ns(); } vcpu->arch.time = data; @@ -2189,6 +2186,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_LASTINTFROMIP: case MSR_IA32_LASTINTTOIP: case MSR_K8_SYSCFG: + case MSR_K8_TSEG_ADDR: + case MSR_K8_TSEG_MASK: case MSR_K7_HWCR: case MSR_VM_HSAVE_PA: case MSR_K8_INT_PENDING_MSG: @@ -6454,6 +6453,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) return 1; } +static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) +{ + return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && + !vcpu->arch.apf.halted); +} + static int vcpu_run(struct kvm_vcpu *vcpu) { int r; @@ -6462,8 +6467,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu) vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); for (;;) { - if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && - !vcpu->arch.apf.halted) + if (kvm_vcpu_running(vcpu)) r = vcpu_enter_guest(vcpu); else r = vcpu_block(kvm, vcpu); @@ -7475,34 +7479,66 @@ void kvm_arch_sync_events(struct kvm *kvm) kvm_free_pit(kvm); } -int __x86_set_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem) +int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) { int i, r; + unsigned long hva; + struct kvm_memslots *slots = kvm_memslots(kvm); + struct kvm_memory_slot *slot, old; /* Called with kvm->slots_lock held. */ - BUG_ON(mem->slot >= KVM_MEM_SLOTS_NUM); + if (WARN_ON(id >= KVM_MEM_SLOTS_NUM)) + return -EINVAL; + slot = id_to_memslot(slots, id); + if (size) { + if (WARN_ON(slot->npages)) + return -EEXIST; + + /* + * MAP_SHARED to prevent internal slot pages from being moved + * by fork()/COW. 
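The MAP_SHARED choice mirrors what a userspace program would do to keep an anonymous mapping backed by the same pages across fork(); a minimal userspace analogue of the vm_mmap() call that follows (sketch only, assuming a Linux userspace build):

    #include <stddef.h>
    #include <sys/mman.h>

    /* A shared anonymous mapping keeps pointing at the same pages after
     * fork(); a MAP_PRIVATE mapping would be copied on write and the
     * physical pages backing it could change under the parent. */
    static void *map_shared_anon(size_t size)
    {
            void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                           MAP_SHARED | MAP_ANONYMOUS, -1, 0);
            return p == MAP_FAILED ? NULL : p;
    }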
+ */ + hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, 0); + if (IS_ERR((void *)hva)) + return PTR_ERR((void *)hva); + } else { + if (!slot->npages) + return 0; + + hva = 0; + } + + old = *slot; for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { - struct kvm_userspace_memory_region m = *mem; + struct kvm_userspace_memory_region m; - m.slot |= i << 16; + m.slot = id | (i << 16); + m.flags = 0; + m.guest_phys_addr = gpa; + m.userspace_addr = hva; + m.memory_size = size; r = __kvm_set_memory_region(kvm, &m); if (r < 0) return r; } + if (!size) { + r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE); + WARN_ON(r < 0); + } + return 0; } EXPORT_SYMBOL_GPL(__x86_set_memory_region); -int x86_set_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem) +int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) { int r; mutex_lock(&kvm->slots_lock); - r = __x86_set_memory_region(kvm, mem); + r = __x86_set_memory_region(kvm, id, gpa, size); mutex_unlock(&kvm->slots_lock); return r; @@ -7517,16 +7553,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm) * unless the the memory map has changed due to process exit * or fd copying. */ - struct kvm_userspace_memory_region mem; - memset(&mem, 0, sizeof(mem)); - mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; - x86_set_memory_region(kvm, &mem); - - mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; - x86_set_memory_region(kvm, &mem); - - mem.slot = TSS_PRIVATE_MEMSLOT; - x86_set_memory_region(kvm, &mem); + x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0); + x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0); + x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0); } kvm_iommu_unmap_guest(kvm); kfree(kvm->arch.vpic); @@ -7629,27 +7658,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, enum kvm_mr_change change) { - /* - * Only private memory slots need to be mapped here since - * KVM_SET_MEMORY_REGION ioctl is no longer supported. - */ - if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) { - unsigned long userspace_addr; - - /* - * MAP_SHARED to prevent internal slot pages from being moved - * by fork()/COW. 
- */ - userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE, - PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, 0); - - if (IS_ERR((void *)userspace_addr)) - return PTR_ERR((void *)userspace_addr); - - memslot->userspace_addr = userspace_addr; - } - return 0; } @@ -7711,17 +7719,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, { int nr_mmu_pages = 0; - if (change == KVM_MR_DELETE && old->id >= KVM_USER_MEM_SLOTS) { - int ret; - - ret = vm_munmap(old->userspace_addr, - old->npages * PAGE_SIZE); - if (ret < 0) - printk(KERN_WARNING - "kvm_vm_ioctl_set_memory_region: " - "failed to munmap memory\n"); - } - if (!kvm->arch.n_requested_mmu_pages) nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); @@ -7770,19 +7767,36 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, kvm_mmu_invalidate_zap_all_pages(kvm); } +static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) +{ + if (!list_empty_careful(&vcpu->async_pf.done)) + return true; + + if (kvm_apic_has_events(vcpu)) + return true; + + if (vcpu->arch.pv.pv_unhalted) + return true; + + if (atomic_read(&vcpu->arch.nmi_queued)) + return true; + + if (test_bit(KVM_REQ_SMI, &vcpu->requests)) + return true; + + if (kvm_arch_interrupt_allowed(vcpu) && + kvm_cpu_has_interrupt(vcpu)) + return true; + + return false; +} + int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) kvm_x86_ops->check_nested_events(vcpu, false); - return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && - !vcpu->arch.apf.halted) - || !list_empty_careful(&vcpu->async_pf.done) - || kvm_apic_has_events(vcpu) - || vcpu->arch.pv.pv_unhalted - || atomic_read(&vcpu->arch.nmi_queued) || - (kvm_arch_interrupt_allowed(vcpu) && - kvm_cpu_has_interrupt(vcpu)); + return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); } int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 161804de124a..a0d09f6c6533 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -1015,7 +1015,7 @@ static struct clock_event_device lguest_clockevent = { * This is the Guest timer interrupt handler (hardware interrupt 0). We just * call the clockevent infrastructure and it does whatever needs doing. */ -static void lguest_time_irq(unsigned int irq, struct irq_desc *desc) +static void lguest_time_irq(struct irq_desc *desc) { unsigned long flags; diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 30564e2752d3..df48430c279b 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1132,7 +1132,7 @@ void mark_rodata_ro(void) * has been zapped already via cleanup_highmem(). */ all_end = roundup((unsigned long)_brk_end, PMD_SIZE); - set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT); + set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT); rodata_test(); diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c index 66338a60aa6e..c2aea63bee20 100644 --- a/arch/x86/mm/srat.c +++ b/arch/x86/mm/srat.c @@ -192,10 +192,11 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) node_set(node, numa_nodes_parsed); - pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s\n", + pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s%s\n", node, pxm, (unsigned long long) start, (unsigned long long) end - 1, - hotpluggable ? " hotplug" : ""); + hotpluggable ? " hotplug" : "", + ma->flags & ACPI_SRAT_MEM_NON_VOLATILE ? " non-volatile" : ""); /* Mark hotplug range in memblock. 
*/ if (hotpluggable && memblock_mark_hotplug(start, ma->length)) diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 09d3afc0a181..dc78a4a9a466 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -166,6 +166,7 @@ void pcibios_fixup_bus(struct pci_bus *b) { struct pci_dev *dev; + pci_read_bridge_bases(b); list_for_each_entry(dev, &b->devices, bus_list) pcibios_fixup_device_resources(dev); } diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 1db84c0758b7..6a28ded74211 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -705,6 +705,70 @@ out: } /* + * Iterate the EFI memory map in reverse order because the regions + * will be mapped top-down. The end result is the same as if we had + * mapped things forward, but doesn't require us to change the + * existing implementation of efi_map_region(). + */ +static inline void *efi_map_next_entry_reverse(void *entry) +{ + /* Initial call */ + if (!entry) + return memmap.map_end - memmap.desc_size; + + entry -= memmap.desc_size; + if (entry < memmap.map) + return NULL; + + return entry; +} + +/* + * efi_map_next_entry - Return the next EFI memory map descriptor + * @entry: Previous EFI memory map descriptor + * + * This is a helper function to iterate over the EFI memory map, which + * we do in different orders depending on the current configuration. + * + * To begin traversing the memory map @entry must be %NULL. + * + * Returns %NULL when we reach the end of the memory map. + */ +static void *efi_map_next_entry(void *entry) +{ + if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) { + /* + * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE + * config table feature requires us to map all entries + * in the same order as they appear in the EFI memory + * map. That is to say, entry N must have a lower + * virtual address than entry N+1. This is because the + * firmware toolchain leaves relative references in + * the code/data sections, which are split and become + * separate EFI memory regions. Mapping things + * out-of-order leads to the firmware accessing + * unmapped addresses. + * + * Since we need to map things this way whether or not + * the kernel actually makes use of + * EFI_PROPERTIES_TABLE, let's just switch to this + * scheme by default for 64-bit. + */ + return efi_map_next_entry_reverse(entry); + } + + /* Initial call */ + if (!entry) + return memmap.map; + + entry += memmap.desc_size; + if (entry >= memmap.map_end) + return NULL; + + return entry; +} + +/* * Map the efi memory ranges of the runtime services and update new_mmap with * virtual addresses. 
*/ @@ -714,7 +778,8 @@ static void * __init efi_map_regions(int *count, int *pg_shift) unsigned long left = 0; efi_memory_desc_t *md; - for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { + p = NULL; + while ((p = efi_map_next_entry(p))) { md = p; if (!(md->attribute & EFI_MEMORY_RUNTIME)) { #ifdef CONFIG_X86_64 diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c index 9701a4fd7bf2..836a1eb5df43 100644 --- a/arch/x86/um/ldt.c +++ b/arch/x86/um/ldt.c @@ -12,7 +12,10 @@ #include <skas.h> #include <sysdep/tls.h> -extern int modify_ldt(int func, void *ptr, unsigned long bytecount); +static inline int modify_ldt (int func, void *ptr, unsigned long bytecount) +{ + return syscall(__NR_modify_ldt, func, ptr, bytecount); +} static long write_ldt_entry(struct mm_id *mm_idp, int func, struct user_desc *desc, void **addr, int done) diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 30d12afe52ed..993b7a71386d 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -33,6 +33,10 @@ #include <linux/memblock.h> #include <linux/edd.h> +#ifdef CONFIG_KEXEC_CORE +#include <linux/kexec.h> +#endif + #include <xen/xen.h> #include <xen/events.h> #include <xen/interface/xen.h> @@ -1077,6 +1081,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) /* Fast syscall setup is all done in hypercalls, so these are all ignored. Stub them out here to stop Xen console noise. */ + break; default: if (!pmu_msr_write(msr, low, high, &ret)) @@ -1807,6 +1812,21 @@ static struct notifier_block xen_hvm_cpu_notifier = { .notifier_call = xen_hvm_cpu_notify, }; +#ifdef CONFIG_KEXEC_CORE +static void xen_hvm_shutdown(void) +{ + native_machine_shutdown(); + if (kexec_in_progress) + xen_reboot(SHUTDOWN_soft_reset); +} + +static void xen_hvm_crash_shutdown(struct pt_regs *regs) +{ + native_machine_crash_shutdown(regs); + xen_reboot(SHUTDOWN_soft_reset); +} +#endif + static void __init xen_hvm_guest_init(void) { if (xen_pv_domain()) @@ -1826,6 +1846,10 @@ static void __init xen_hvm_guest_init(void) x86_init.irqs.intr_init = xen_init_IRQ; xen_hvm_init_time_ops(); xen_hvm_init_mmu_ops(); +#ifdef CONFIG_KEXEC_CORE + machine_ops.shutdown = xen_hvm_shutdown; + machine_ops.crash_shutdown = xen_hvm_crash_shutdown; +#endif } #endif diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index bfc08b13044b..660b3cfef234 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -112,6 +112,15 @@ static unsigned long *p2m_identity; static pte_t *p2m_missing_pte; static pte_t *p2m_identity_pte; +/* + * Hint at last populated PFN. + * + * Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack + * can avoid scanning the whole P2M (which may be sized to account for + * hotplugged memory). 
+ */ +static unsigned long xen_p2m_last_pfn; + static inline unsigned p2m_top_index(unsigned long pfn) { BUG_ON(pfn >= MAX_P2M_PFN); @@ -270,7 +279,7 @@ void xen_setup_mfn_list_list(void) else HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = virt_to_mfn(p2m_top_mfn); - HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn; + HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn; HYPERVISOR_shared_info->arch.p2m_generation = 0; HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr; HYPERVISOR_shared_info->arch.p2m_cr3 = @@ -406,6 +415,8 @@ void __init xen_vmalloc_p2m_tree(void) static struct vm_struct vm; unsigned long p2m_limit; + xen_p2m_last_pfn = xen_max_p2m_pfn; + p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE; vm.flags = VM_ALLOC; vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit), @@ -608,6 +619,12 @@ static bool alloc_p2m(unsigned long pfn) free_p2m_page(p2m); } + /* Expanded the p2m? */ + if (pfn > xen_p2m_last_pfn) { + xen_p2m_last_pfn = pfn; + HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn; + } + return true; } diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index f5ef6746d47a..1c30e4ab1022 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -548,7 +548,7 @@ static unsigned long __init xen_get_max_pages(void) { unsigned long max_pages, limit; domid_t domid = DOMID_SELF; - int ret; + long ret; limit = xen_get_pages_limit(); max_pages = limit; @@ -798,7 +798,7 @@ char * __init xen_memory_setup(void) xen_ignore_unusable(); /* Make sure the Xen-supplied memory map is well-ordered. */ - sanitize_e820_map(xen_e820_map, xen_e820_map_entries, + sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map), &xen_e820_map_entries); max_pages = xen_get_max_pages(); diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 63c223dff5f1..b56855a1382a 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -28,4 +28,5 @@ generic-y += statfs.h generic-y += termios.h generic-y += topology.h generic-y += trace_clock.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c index d27b4dcf221f..b848cc3dc913 100644 --- a/arch/xtensa/kernel/pci.c +++ b/arch/xtensa/kernel/pci.c @@ -210,6 +210,10 @@ subsys_initcall(pcibios_init); void pcibios_fixup_bus(struct pci_bus *bus) { + if (bus->parent) { + /* This is a subordinate bridge */ + pci_read_bridge_bases(bus); + } } void pcibios_set_master(struct pci_dev *dev) diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 4aecca79374a..14b8faf8b09d 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -140,6 +140,11 @@ int bio_integrity_add_page(struct bio *bio, struct page *page, iv = bip->bip_vec + bip->bip_vcnt; + if (bip->bip_vcnt && + bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev), + &bip->bip_vec[bip->bip_vcnt - 1], offset)) + return 0; + iv->bv_page = page; iv->bv_len = len; iv->bv_offset = offset; diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index ac8370cb2515..55512dd62633 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -370,6 +370,9 @@ static void blkg_destroy_all(struct request_queue *q) blkg_destroy(blkg); spin_unlock(&blkcg->lock); } + + q->root_blkg = NULL; + q->root_rl.blkg = NULL; } /* diff --git a/block/blk-core.c b/block/blk-core.c index 2eb722d48773..18e92a6645e2 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -576,7 +576,7 @@ void blk_cleanup_queue(struct request_queue *q) q->queue_lock = 
&q->__queue_lock; spin_unlock_irq(lock); - bdi_destroy(&q->backing_dev_info); + bdi_unregister(&q->backing_dev_info); /* @q is and will stay empty, shutdown and put */ blk_put_queue(q); diff --git a/block/blk-integrity.c b/block/blk-integrity.c index f548b64be092..75f29cf70188 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c @@ -204,6 +204,9 @@ bool blk_integrity_merge_rq(struct request_queue *q, struct request *req, q->limits.max_integrity_segments) return false; + if (integrity_req_gap_back_merge(req, next->bio)) + return false; + return true; } EXPORT_SYMBOL(blk_integrity_merge_rq); diff --git a/block/blk-map.c b/block/blk-map.c index 233841644c9d..f565e11f465a 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -9,6 +9,24 @@ #include "blk.h" +static bool iovec_gap_to_prv(struct request_queue *q, + struct iovec *prv, struct iovec *cur) +{ + unsigned long prev_end; + + if (!queue_virt_boundary(q)) + return false; + + if (prv->iov_base == NULL && prv->iov_len == 0) + /* prv is not set - don't check */ + return false; + + prev_end = (unsigned long)(prv->iov_base + prv->iov_len); + + return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) || + prev_end & queue_virt_boundary(q)); +} + int blk_rq_append_bio(struct request_queue *q, struct request *rq, struct bio *bio) { @@ -67,7 +85,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, struct bio *bio; int unaligned = 0; struct iov_iter i; - struct iovec iov; + struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0}; if (!iter || !iter->count) return -EINVAL; @@ -81,8 +99,12 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, /* * Keep going so we check length of all segments */ - if (uaddr & queue_dma_alignment(q)) + if ((uaddr & queue_dma_alignment(q)) || + iovec_gap_to_prv(q, &prv, &iov)) unaligned = 1; + + prv.iov_base = iov.iov_base; + prv.iov_len = iov.iov_len; } if (unaligned || (q->dma_pad_mask & iter->count) || map_data) diff --git a/block/blk-merge.c b/block/blk-merge.c index d088cffb8105..c4e9c37f3e38 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -66,36 +66,33 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, struct bio *bio, struct bio_set *bs) { - struct bio *split; - struct bio_vec bv, bvprv; + struct bio_vec bv, bvprv, *bvprvp = NULL; struct bvec_iter iter; unsigned seg_size = 0, nsegs = 0, sectors = 0; - int prev = 0; bio_for_each_segment(bv, bio, iter) { - sectors += bv.bv_len >> 9; - - if (sectors > queue_max_sectors(q)) + if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q)) goto split; /* * If the queue doesn't support SG gaps and adding this * offset would create a gap, disallow it. 
*/ - if (prev && bvec_gap_to_prev(q, &bvprv, bv.bv_offset)) + if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset)) goto split; - if (prev && blk_queue_cluster(q)) { + if (bvprvp && blk_queue_cluster(q)) { if (seg_size + bv.bv_len > queue_max_segment_size(q)) goto new_segment; - if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv)) + if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv)) goto new_segment; - if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv)) + if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv)) goto new_segment; seg_size += bv.bv_len; bvprv = bv; - prev = 1; + bvprvp = &bv; + sectors += bv.bv_len >> 9; continue; } new_segment: @@ -104,23 +101,14 @@ new_segment: nsegs++; bvprv = bv; - prev = 1; + bvprvp = &bv; seg_size = bv.bv_len; + sectors += bv.bv_len >> 9; } return NULL; split: - split = bio_clone_bioset(bio, GFP_NOIO, bs); - - split->bi_iter.bi_size -= iter.bi_size; - bio->bi_iter = iter; - - if (bio_integrity(bio)) { - bio_integrity_advance(bio, split->bi_iter.bi_size); - bio_integrity_trim(split, 0, bio_sectors(split)); - } - - return split; + return bio_split(bio, sectors, GFP_NOIO, bs); } void blk_queue_split(struct request_queue *q, struct bio **bio, @@ -439,6 +427,11 @@ no_merge: int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *bio) { + if (req_gap_back_merge(req, bio)) + return 0; + if (blk_integrity_rq(req) && + integrity_req_gap_back_merge(req, bio)) + return 0; if (blk_rq_sectors(req) + bio_sectors(bio) > blk_rq_get_max_sectors(req)) { req->cmd_flags |= REQ_NOMERGE; @@ -457,6 +450,12 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req, int ll_front_merge_fn(struct request_queue *q, struct request *req, struct bio *bio) { + + if (req_gap_front_merge(req, bio)) + return 0; + if (blk_integrity_rq(req) && + integrity_req_gap_front_merge(req, bio)) + return 0; if (blk_rq_sectors(req) + bio_sectors(bio) > blk_rq_get_max_sectors(req)) { req->cmd_flags |= REQ_NOMERGE; @@ -483,14 +482,6 @@ static bool req_no_special_merge(struct request *req) return !q->mq_ops && req->special; } -static int req_gap_to_prev(struct request *req, struct bio *next) -{ - struct bio *prev = req->biotail; - - return bvec_gap_to_prev(req->q, &prev->bi_io_vec[prev->bi_vcnt - 1], - next->bi_io_vec[0].bv_offset); -} - static int ll_merge_requests_fn(struct request_queue *q, struct request *req, struct request *next) { @@ -505,7 +496,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req, if (req_no_special_merge(req) || req_no_special_merge(next)) return 0; - if (req_gap_to_prev(req, next->bio)) + if (req_gap_back_merge(req, next->bio)) return 0; /* @@ -713,10 +704,6 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) !blk_write_same_mergeable(rq->bio, bio)) return false; - /* Only check gaps if the bio carries data */ - if (bio_has_data(bio) && req_gap_to_prev(rq, bio)) - return false; - return true; } diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c index 1e28ddb656b8..8764c241e5bb 100644 --- a/block/blk-mq-cpumap.c +++ b/block/blk-mq-cpumap.c @@ -31,7 +31,8 @@ static int get_first_sibling(unsigned int cpu) return cpu; } -int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) +int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, + const struct cpumask *online_mask) { unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; cpumask_var_t cpus; @@ -41,7 +42,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) cpumask_clear(cpus); nr_cpus = nr_uniq_cpus = 0; - for_each_online_cpu(i) { + 
for_each_cpu(i, online_mask) { nr_cpus++; first_sibling = get_first_sibling(i); if (!cpumask_test_cpu(first_sibling, cpus)) @@ -51,7 +52,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) queue = 0; for_each_possible_cpu(i) { - if (!cpu_online(i)) { + if (!cpumask_test_cpu(i, online_mask)) { map[i] = 0; continue; } @@ -95,7 +96,7 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set) if (!map) return NULL; - if (!blk_mq_update_queue_map(map, set->nr_hw_queues)) + if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask)) return map; kfree(map); diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 279c5d674edf..788fffd9b409 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -229,8 +229,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) unsigned int i, first = 1; ssize_t ret = 0; - blk_mq_disable_hotplug(); - for_each_cpu(i, hctx->cpumask) { if (first) ret += sprintf(ret + page, "%u", i); @@ -240,8 +238,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) first = 0; } - blk_mq_enable_hotplug(); - ret += sprintf(ret + page, "\n"); return ret; } @@ -343,7 +339,7 @@ static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) struct blk_mq_ctx *ctx; int i; - if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) + if (!hctx->nr_ctx) return; hctx_for_each_ctx(hctx, ctx, i) @@ -358,7 +354,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) struct blk_mq_ctx *ctx; int i, ret; - if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) + if (!hctx->nr_ctx) return 0; ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num); @@ -381,6 +377,8 @@ void blk_mq_unregister_disk(struct gendisk *disk) struct blk_mq_ctx *ctx; int i, j; + blk_mq_disable_hotplug(); + queue_for_each_hw_ctx(q, hctx, i) { blk_mq_unregister_hctx(hctx); @@ -395,6 +393,9 @@ void blk_mq_unregister_disk(struct gendisk *disk) kobject_put(&q->mq_kobj); kobject_put(&disk_to_dev(disk)->kobj); + + q->mq_sysfs_init_done = false; + blk_mq_enable_hotplug(); } static void blk_mq_sysfs_init(struct request_queue *q) @@ -425,27 +426,30 @@ int blk_mq_register_disk(struct gendisk *disk) struct blk_mq_hw_ctx *hctx; int ret, i; + blk_mq_disable_hotplug(); + blk_mq_sysfs_init(q); ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); if (ret < 0) - return ret; + goto out; kobject_uevent(&q->mq_kobj, KOBJ_ADD); queue_for_each_hw_ctx(q, hctx, i) { - hctx->flags |= BLK_MQ_F_SYSFS_UP; ret = blk_mq_register_hctx(hctx); if (ret) break; } - if (ret) { + if (ret) blk_mq_unregister_disk(disk); - return ret; - } + else + q->mq_sysfs_init_done = true; +out: + blk_mq_enable_hotplug(); - return 0; + return ret; } EXPORT_SYMBOL_GPL(blk_mq_register_disk); @@ -454,6 +458,9 @@ void blk_mq_sysfs_unregister(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; + if (!q->mq_sysfs_init_done) + return; + queue_for_each_hw_ctx(q, hctx, i) blk_mq_unregister_hctx(hctx); } @@ -463,6 +470,9 @@ int blk_mq_sysfs_register(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i, ret = 0; + if (!q->mq_sysfs_init_done) + return ret; + queue_for_each_hw_ctx(q, hctx, i) { ret = blk_mq_register_hctx(hctx); if (ret) diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 9115c6d59948..ec2d11915142 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -471,17 +471,30 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, } EXPORT_SYMBOL(blk_mq_all_tag_busy_iter); -void 
blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, +void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, void *priv) { - struct blk_mq_tags *tags = hctx->tags; + struct blk_mq_hw_ctx *hctx; + int i; + + + queue_for_each_hw_ctx(q, hctx, i) { + struct blk_mq_tags *tags = hctx->tags; + + /* + * If no software queues are currently mapped to this + * hardware queue, there's nothing to check + */ + if (!blk_mq_hw_queue_mapped(hctx)) + continue; + + if (tags->nr_reserved_tags) + bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true); + bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv, + false); + } - if (tags->nr_reserved_tags) - bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true); - bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv, - false); } -EXPORT_SYMBOL(blk_mq_tag_busy_iter); static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt) { @@ -628,6 +641,7 @@ void blk_mq_free_tags(struct blk_mq_tags *tags) { bt_free(&tags->bitmap_tags); bt_free(&tags->breserved_tags); + free_cpumask_var(tags->cpumask); kfree(tags); } diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 9eb2cf4f01cb..d468a79f2c4a 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -58,6 +58,8 @@ extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); +void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, + void *priv); enum { BLK_MQ_TAG_CACHE_MIN = 1, diff --git a/block/blk-mq.c b/block/blk-mq.c index f2d67b4047a0..85f014327342 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -393,14 +393,16 @@ void __blk_mq_complete_request(struct request *rq) * Ends all I/O on a request. It does not handle partial completions. * The actual completion happens out-of-order, through an IPI handler. **/ -void blk_mq_complete_request(struct request *rq) +void blk_mq_complete_request(struct request *rq, int error) { struct request_queue *q = rq->q; if (unlikely(blk_should_fake_timeout(q))) return; - if (!blk_mark_rq_complete(rq)) + if (!blk_mark_rq_complete(rq)) { + rq->errors = error; __blk_mq_complete_request(rq); + } } EXPORT_SYMBOL(blk_mq_complete_request); @@ -616,10 +618,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, * If a request wasn't started before the queue was * marked dying, kill it here or it'll go unnoticed.
*/ - if (unlikely(blk_queue_dying(rq->q))) { - rq->errors = -EIO; - blk_mq_complete_request(rq); - } + if (unlikely(blk_queue_dying(rq->q))) + blk_mq_complete_request(rq, -EIO); return; } if (rq->cmd_flags & REQ_NO_TIMEOUT) @@ -641,24 +641,16 @@ static void blk_mq_rq_timer(unsigned long priv) .next = 0, .next_set = 0, }; - struct blk_mq_hw_ctx *hctx; int i; - queue_for_each_hw_ctx(q, hctx, i) { - /* - * If not software queues are currently mapped to this - * hardware queue, there's nothing to check - */ - if (!blk_mq_hw_queue_mapped(hctx)) - continue; - - blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data); - } + blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data); if (data.next_set) { data.next = blk_rq_timeout(round_jiffies_up(data.next)); mod_timer(&q->timeout, data.next); } else { + struct blk_mq_hw_ctx *hctx; + queue_for_each_hw_ctx(q, hctx, i) { /* the hctx may be unmapped, so check it here */ if (blk_mq_hw_queue_mapped(hctx)) @@ -1789,13 +1781,19 @@ static void blk_mq_init_cpu_queues(struct request_queue *q, } } -static void blk_mq_map_swqueue(struct request_queue *q) +static void blk_mq_map_swqueue(struct request_queue *q, + const struct cpumask *online_mask) { unsigned int i; struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx; struct blk_mq_tag_set *set = q->tag_set; + /* + * Avoid others reading incomplete hctx->cpumask through sysfs + */ + mutex_lock(&q->sysfs_lock); + queue_for_each_hw_ctx(q, hctx, i) { cpumask_clear(hctx->cpumask); hctx->nr_ctx = 0; @@ -1806,16 +1804,17 @@ static void blk_mq_map_swqueue(struct request_queue *q) */ queue_for_each_ctx(q, ctx, i) { /* If the cpu isn't online, the cpu is mapped to first hctx */ - if (!cpu_online(i)) + if (!cpumask_test_cpu(i, online_mask)) continue; hctx = q->mq_ops->map_queue(q, i); cpumask_set_cpu(i, hctx->cpumask); - cpumask_set_cpu(i, hctx->tags->cpumask); ctx->index_hw = hctx->nr_ctx; hctx->ctxs[hctx->nr_ctx++] = ctx; } + mutex_unlock(&q->sysfs_lock); + queue_for_each_hw_ctx(q, hctx, i) { struct blk_mq_ctxmap *map = &hctx->ctx_map; @@ -1851,6 +1850,14 @@ static void blk_mq_map_swqueue(struct request_queue *q) hctx->next_cpu = cpumask_first(hctx->cpumask); hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; } + + queue_for_each_ctx(q, ctx, i) { + if (!cpumask_test_cpu(i, online_mask)) + continue; + + hctx = q->mq_ops->map_queue(q, i); + cpumask_set_cpu(i, hctx->tags->cpumask); + } } static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set) @@ -1918,6 +1925,9 @@ void blk_mq_release(struct request_queue *q) kfree(hctx); } + kfree(q->mq_map); + q->mq_map = NULL; + kfree(q->queue_hw_ctx); /* ctx kobj stays in queue_ctx */ @@ -2027,13 +2037,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, if (blk_mq_init_hw_queues(q, set)) goto err_hctxs; + get_online_cpus(); mutex_lock(&all_q_mutex); - list_add_tail(&q->all_q_node, &all_q_list); - mutex_unlock(&all_q_mutex); + list_add_tail(&q->all_q_node, &all_q_list); blk_mq_add_queue_tag_set(set, q); + blk_mq_map_swqueue(q, cpu_online_mask); - blk_mq_map_swqueue(q); + mutex_unlock(&all_q_mutex); + put_online_cpus(); return q; @@ -2057,30 +2069,27 @@ void blk_mq_free_queue(struct request_queue *q) { struct blk_mq_tag_set *set = q->tag_set; + mutex_lock(&all_q_mutex); + list_del_init(&q->all_q_node); + mutex_unlock(&all_q_mutex); + blk_mq_del_queue_tag_set(q); blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); blk_mq_free_hw_queues(q, set); percpu_ref_exit(&q->mq_usage_counter); - - kfree(q->mq_map); - - q->mq_map = NULL; - - 
mutex_lock(&all_q_mutex); - list_del_init(&q->all_q_node); - mutex_unlock(&all_q_mutex); } /* Basically redo blk_mq_init_queue with queue frozen */ -static void blk_mq_queue_reinit(struct request_queue *q) +static void blk_mq_queue_reinit(struct request_queue *q, + const struct cpumask *online_mask) { WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth)); blk_mq_sysfs_unregister(q); - blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues); + blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask); /* * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe @@ -2088,7 +2097,7 @@ static void blk_mq_queue_reinit(struct request_queue *q) * involves free and re-allocate memory, worthy doing?) */ - blk_mq_map_swqueue(q); + blk_mq_map_swqueue(q, online_mask); blk_mq_sysfs_register(q); } @@ -2097,16 +2106,43 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, unsigned long action, void *hcpu) { struct request_queue *q; + int cpu = (unsigned long)hcpu; + /* + * New online cpumask which is going to be set in this hotplug event. + * Declare this cpumask as global as cpu-hotplug operation is invoked + * one-by-one and dynamically allocating this could result in a failure. + */ + static struct cpumask online_new; /* - * Before new mappings are established, hotadded cpu might already - * start handling requests. This doesn't break anything as we map - * offline CPUs to first hardware queue. We will re-init the queue - * below to get optimal settings. + * Before hotadded cpu starts handling requests, new mappings must + * be established. Otherwise, these requests in hw queue might + * never be dispatched. + * + * For example, there is a single hw queue (hctx) and two CPU queues + * (ctx0 for CPU0, and ctx1 for CPU1). + * + * Now CPU1 is just onlined and a request is inserted into + * ctx1->rq_list and set bit0 in pending bitmap as ctx1->index_hw is + * still zero. + * + * And then while running hw queue, flush_busy_ctxs() finds bit0 is + * set in pending bitmap and tries to retrieve requests in + * hctx->ctxs[0]->rq_list. But hctx->ctxs[0] is a pointer to ctx0, + * so the request in ctx1->rq_list is ignored. 
*/ - if (action != CPU_DEAD && action != CPU_DEAD_FROZEN && - action != CPU_ONLINE && action != CPU_ONLINE_FROZEN) + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_DEAD: + case CPU_UP_CANCELED: + cpumask_copy(&online_new, cpu_online_mask); + break; + case CPU_UP_PREPARE: + cpumask_copy(&online_new, cpu_online_mask); + cpumask_set_cpu(cpu, &online_new); + break; + default: return NOTIFY_OK; + } mutex_lock(&all_q_mutex); @@ -2130,7 +2166,7 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, } list_for_each_entry(q, &all_q_list, all_q_node) - blk_mq_queue_reinit(q); + blk_mq_queue_reinit(q, &online_new); list_for_each_entry(q, &all_q_list, all_q_node) blk_mq_unfreeze_queue(q); @@ -2260,10 +2296,8 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set) int i; for (i = 0; i < set->nr_hw_queues; i++) { - if (set->tags[i]) { + if (set->tags[i]) blk_mq_free_rq_map(set, set->tags[i], i); - free_cpumask_var(set->tags[i]->cpumask); - } } kfree(set->tags); diff --git a/block/blk-mq.h b/block/blk-mq.h index 6a48c4c0d8a2..f4fea7964910 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -51,7 +51,8 @@ void blk_mq_disable_hotplug(void); * CPU -> queue mappings */ extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); -extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); +extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, + const struct cpumask *online_mask); extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int); /* diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 3e44a9da2a13..07b42f5ad797 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -540,6 +540,7 @@ static void blk_release_queue(struct kobject *kobj) struct request_queue *q = container_of(kobj, struct request_queue, kobj); + bdi_exit(&q->backing_dev_info); blkcg_exit_queue(q); if (q->elevator) { diff --git a/block/bounce.c b/block/bounce.c index 0611aea1cfe9..1cb5dd3a5da1 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -128,12 +128,14 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool) struct bio *bio_orig = bio->bi_private; struct bio_vec *bvec, *org_vec; int i; + int start = bio_orig->bi_iter.bi_idx; /* * free up bounce indirect pages used */ bio_for_each_segment_all(bvec, bio, i) { - org_vec = bio_orig->bi_io_vec + i; + org_vec = bio_orig->bi_io_vec + i + start; + if (bvec->bv_page == org_vec->bv_page) continue; diff --git a/crypto/ahash.c b/crypto/ahash.c index 8acb886032ae..9c1dc8d6106a 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -544,7 +544,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg) struct crypto_alg *base = &alg->halg.base; if (alg->halg.digestsize > PAGE_SIZE / 8 || - alg->halg.statesize > PAGE_SIZE / 8) + alg->halg.statesize > PAGE_SIZE / 8 || + alg->halg.statesize == 0) return -EINVAL; base->cra_type = &crypto_ahash_type; diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index 6d88dd15c98d..197096632412 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -332,10 +332,6 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) srlen = cert->raw_serial_size; q = cert->raw_serial; } - if (srlen > 1 && *q == 0) { - srlen--; - q++; - } ret = -ENOMEM; desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 35c2de136971..fa18753f5c34 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -940,6 +940,7 @@ static int 
__test_skcipher(struct crypto_skcipher *tfm, int enc, char *xbuf[XBUFSIZE]; char *xoutbuf[XBUFSIZE]; int ret = -ENOMEM; + unsigned int ivsize = crypto_skcipher_ivsize(tfm); if (testmgr_alloc_buf(xbuf)) goto out_nobuf; @@ -975,7 +976,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc, continue; if (template[i].iv) - memcpy(iv, template[i].iv, MAX_IVLEN); + memcpy(iv, template[i].iv, ivsize); else memset(iv, 0, MAX_IVLEN); @@ -1051,7 +1052,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc, continue; if (template[i].iv) - memcpy(iv, template[i].iv, MAX_IVLEN); + memcpy(iv, template[i].iv, ivsize); else memset(iv, 0, MAX_IVLEN); diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 09f37b516808..4dde37c3d8fc 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -61,6 +61,7 @@ ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header); ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX); ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX); ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX); +ACPI_INIT_GLOBAL(u32, acpi_gbl_fadt_index, ACPI_INVALID_TABLE_INDEX); #if (!ACPI_REDUCED_HARDWARE) ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS); diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h index f7731f260c31..591ea95319e2 100644 --- a/drivers/acpi/acpica/actables.h +++ b/drivers/acpi/acpica/actables.h @@ -85,7 +85,7 @@ void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded); /* * tbfadt - FADT parse/convert/validate */ -void acpi_tb_parse_fadt(u32 table_index); +void acpi_tb_parse_fadt(void); void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length); @@ -138,8 +138,6 @@ acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id); */ acpi_status acpi_tb_initialize_facs(void); -u8 acpi_tb_tables_loaded(void); - void acpi_tb_print_table_header(acpi_physical_address address, struct acpi_table_header *header); diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index faad911d46b5..10ce48e16ebf 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c @@ -71,7 +71,7 @@ acpi_status acpi_enable(void) /* ACPI tables must be present */ - if (!acpi_tb_tables_loaded()) { + if (acpi_gbl_fadt_index == ACPI_INVALID_TABLE_INDEX) { return_ACPI_STATUS(AE_NO_ACPI_TABLES); } diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c index 455a0700db39..a6454f4a6fb3 100644 --- a/drivers/acpi/acpica/tbfadt.c +++ b/drivers/acpi/acpica/tbfadt.c @@ -298,7 +298,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64) * * FUNCTION: acpi_tb_parse_fadt * - * PARAMETERS: table_index - Index for the FADT + * PARAMETERS: None * * RETURN: None * @@ -307,7 +307,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64) * ******************************************************************************/ -void acpi_tb_parse_fadt(u32 table_index) +void acpi_tb_parse_fadt(void) { u32 length; struct acpi_table_header *table; @@ -319,11 +319,11 @@ void acpi_tb_parse_fadt(u32 table_index) * Get a local copy of the FADT and convert it to a common format * Map entire FADT, assumed to be smaller than one page. 
*/ - length = acpi_gbl_root_table_list.tables[table_index].length; + length = acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index].length; table = - acpi_os_map_memory(acpi_gbl_root_table_list.tables[table_index]. - address, length); + acpi_os_map_memory(acpi_gbl_root_table_list. + tables[acpi_gbl_fadt_index].address, length); if (!table) { return; } diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index 4337990127cc..d8ddef38c947 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -99,29 +99,6 @@ acpi_status acpi_tb_initialize_facs(void) /******************************************************************************* * - * FUNCTION: acpi_tb_tables_loaded - * - * PARAMETERS: None - * - * RETURN: TRUE if required ACPI tables are loaded - * - * DESCRIPTION: Determine if the minimum required ACPI tables are present - * (FADT, FACS, DSDT) - * - ******************************************************************************/ - -u8 acpi_tb_tables_loaded(void) -{ - - if (acpi_gbl_root_table_list.current_table_count >= 4) { - return (TRUE); - } - - return (FALSE); -} - -/******************************************************************************* - * * FUNCTION: acpi_tb_check_dsdt_header * * PARAMETERS: None @@ -392,7 +369,8 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address) ACPI_COMPARE_NAME(&acpi_gbl_root_table_list. tables[table_index].signature, ACPI_SIG_FADT)) { - acpi_tb_parse_fadt(table_index); + acpi_gbl_fadt_index = table_index; + acpi_tb_parse_fadt(); } next_table: diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 46506e7687cd..a212cefae524 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -315,14 +315,10 @@ static void acpi_bus_osc_support(void) capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE; capbuf[OSC_SUPPORT_DWORD] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */ -#if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\ - defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE) - capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT; -#endif - -#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE) - capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT; -#endif + if (IS_ENABLED(CONFIG_ACPI_PROCESSOR_AGGREGATOR)) + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT; + if (IS_ENABLED(CONFIG_ACPI_PROCESSOR)) + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT; capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT; diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 2614a839c60d..42c66b64c12c 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1044,8 +1044,10 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) goto err_exit; mutex_lock(&ec->mutex); + result = -ENODATA; list_for_each_entry(handler, &ec->list, node) { if (value == handler->query_bit) { + result = 0; q->handler = acpi_ec_get_query_handler(handler); ec_dbg_evt("Query(0x%02x) scheduled", q->handler->query_bit); diff --git a/drivers/acpi/int340x_thermal.c b/drivers/acpi/int340x_thermal.c index 9dcf83682e36..33505c651f62 100644 --- a/drivers/acpi/int340x_thermal.c +++ b/drivers/acpi/int340x_thermal.c @@ -33,13 +33,12 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = { static int int340x_thermal_handler_attach(struct acpi_device *adev, const struct acpi_device_id *id) { -#if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE) - acpi_create_platform_device(adev); -#elif defined(INTEL_SOC_DTS_THERMAL) || defined(INTEL_SOC_DTS_THERMAL_MODULE) + if 
(IS_ENABLED(CONFIG_INT340X_THERMAL)) + acpi_create_platform_device(adev); /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */ - if (id->driver_data == INT3401_DEVICE) + else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) && + id->driver_data == INT3401_DEVICE) acpi_create_platform_device(adev); -#endif return 1; } diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 6da0f9beab19..c9336751e5e3 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -372,6 +372,7 @@ static int acpi_isa_register_gsi(struct pci_dev *dev) /* Interrupt Line values above 0xF are forbidden */ if (dev->irq > 0 && (dev->irq <= 0xF) && + acpi_isa_irq_available(dev->irq) && (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) { dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n", pin_name(dev->pin), dev->irq); diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index 3b4ea98e3ea0..7c8408b946ca 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -498,8 +498,7 @@ int __init acpi_irq_penalty_init(void) PIRQ_PENALTY_PCI_POSSIBLE; } } - /* Add a penalty for the SCI */ - acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING; + return 0; } @@ -553,6 +552,13 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) irq = link->irq.possible[i]; } } + if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) { + printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. " + "Try pci=noacpi or acpi=off\n", + acpi_device_name(link->device), + acpi_device_bid(link->device)); + return -ENODEV; + } /* Attempt to enable the link device at this IRQ. */ if (acpi_pci_link_set(link, irq)) { @@ -821,6 +827,12 @@ void acpi_penalize_isa_irq(int irq, int active) } } +bool acpi_isa_irq_available(int irq) +{ + return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) || + acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS); +} + /* * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for diff --git a/drivers/atm/he.c b/drivers/atm/he.c index a8da3a50e374..0f5cb37636bc 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -1578,9 +1578,7 @@ he_stop(struct he_dev *he_dev) kfree(he_dev->rbpl_virt); kfree(he_dev->rbpl_table); - - if (he_dev->rbpl_pool) - dma_pool_destroy(he_dev->rbpl_pool); + dma_pool_destroy(he_dev->rbpl_pool); if (he_dev->rbrq_base) dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), @@ -1594,8 +1592,7 @@ he_stop(struct he_dev *he_dev) dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), he_dev->tpdrq_base, he_dev->tpdrq_phys); - if (he_dev->tpd_pool) - dma_pool_destroy(he_dev->tpd_pool); + dma_pool_destroy(he_dev->tpd_pool); if (he_dev->pci_dev) { pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command); diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 74e18b0a6d89..3d7fb6516f74 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -805,7 +805,12 @@ static void solos_bh(unsigned long card_arg) continue; } - skb = alloc_skb(size + 1, GFP_ATOMIC); + /* Use netdev_alloc_skb() because it adds NET_SKB_PAD of + * headroom, and ensures we can route packets back out an + * Ethernet interface (for example) without having to + * reallocate. Adding NET_IP_ALIGN also ensures that both + * PPPoATM and PPPoEoBR2684 packets end up aligned. 
*/ + skb = netdev_alloc_skb_ip_align(NULL, size + 1); if (!skb) { if (net_ratelimit()) dev_warn(&card->dev->dev, "Failed to allocate sk_buff for RX\n"); @@ -869,7 +874,10 @@ static void solos_bh(unsigned long card_arg) /* Allocate RX skbs for any ports which need them */ if (card->using_dma && card->atmdev[port] && !card->rx_skb[port]) { - struct sk_buff *skb = alloc_skb(RX_DMA_SIZE, GFP_ATOMIC); + /* Unlike the MMIO case (qv) we can't add NET_IP_ALIGN + * here; the FPGA can only DMA to addresses which are + * aligned to 4 bytes. */ + struct sk_buff *skb = dev_alloc_skb(RX_DMA_SIZE); if (skb) { SKB_CB(skb)->dma_addr = dma_map_single(&card->dev->dev, skb->data, diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index 764280a91776..e9fd32e91668 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -148,7 +148,11 @@ static void cache_shared_cpu_map_remove(unsigned int cpu) if (sibling == cpu) /* skip itself */ continue; + sib_cpu_ci = get_cpu_cacheinfo(sibling); + if (!sib_cpu_ci->info_list) + continue; + sib_leaf = sib_cpu_ci->info_list + index; cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map); cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map); @@ -159,6 +163,9 @@ static void cache_shared_cpu_map_remove(unsigned int cpu) static void free_cache_attributes(unsigned int cpu) { + if (!per_cpu_cacheinfo(cpu)) + return; + cache_shared_cpu_map_remove(cpu); kfree(per_cpu_cacheinfo(cpu)); @@ -514,8 +521,7 @@ static int cacheinfo_cpu_callback(struct notifier_block *nfb, break; case CPU_DEAD: cache_remove_dev(cpu); - if (per_cpu_cacheinfo(cpu)) - free_cache_attributes(cpu); + free_cache_attributes(cpu); break; } return notifier_from_errno(rc); diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c index 950fff9ce453..a12ff9863d7e 100644 --- a/drivers/base/dma-contiguous.c +++ b/drivers/base/dma-contiguous.c @@ -187,7 +187,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, * global one. Requires architecture specific dev_get_cma_area() helper * function. */ -struct page *dma_alloc_from_contiguous(struct device *dev, int count, +struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, unsigned int align) { if (align > CONFIG_CMA_ALIGNMENT) diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index 1857a5dd0816..134483daac25 100644 --- a/drivers/base/platform-msi.c +++ b/drivers/base/platform-msi.c @@ -63,20 +63,8 @@ static int platform_msi_init(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq, msi_alloc_info_t *arg) { - struct irq_data *data; - - irq_domain_set_hwirq_and_chip(domain, virq, hwirq, - info->chip, info->chip_data); - - /* - * Save the MSI descriptor in handler_data so that the - * irq_write_msi_msg callback can retrieve it (and the - * associated device). 
- */ - data = irq_domain_get_irq_data(domain, virq); - data->handler_data = arg->desc; - - return 0; + return irq_domain_set_hwirq_and_chip(domain, virq, hwirq, + info->chip, info->chip_data); } #else #define platform_msi_set_desc NULL @@ -97,7 +85,7 @@ static void platform_msi_update_dom_ops(struct msi_domain_info *info) static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg) { - struct msi_desc *desc = irq_data_get_irq_handler_data(data); + struct msi_desc *desc = irq_data_get_msi_desc(data); struct platform_msi_priv_data *priv_data; priv_data = desc->platform.msi_priv_data; diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index 2a4154a09e4d..85e17bacc834 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -77,13 +77,16 @@ static bool default_stop_ok(struct device *dev) dev_update_qos_constraint); if (constraint_ns > 0) { - constraint_ns -= td->start_latency_ns; + constraint_ns -= td->save_state_latency_ns + + td->stop_latency_ns + + td->start_latency_ns + + td->restore_state_latency_ns; if (constraint_ns == 0) return false; } td->effective_constraint_ns = constraint_ns; - td->cached_stop_ok = constraint_ns > td->stop_latency_ns || - constraint_ns == 0; + td->cached_stop_ok = constraint_ns >= 0; + /* * The children have been suspended already, so we don't need to take * their stop latencies into account here. @@ -126,18 +129,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) off_on_time_ns = genpd->power_off_latency_ns + genpd->power_on_latency_ns; - /* - * It doesn't make sense to remove power from the domain if saving - * the state of all devices in it and the power off/power on operations - * take too much time. - * - * All devices in this domain have been stopped already at this point. - */ - list_for_each_entry(pdd, &genpd->dev_list, list_node) { - if (pdd->dev->driver) - off_on_time_ns += - to_gpd_data(pdd)->td.save_state_latency_ns; - } min_off_time_ns = -1; /* @@ -193,7 +184,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) * constraint_ns cannot be negative here, because the device has * been suspended. */ - constraint_ns -= td->restore_state_latency_ns; if (constraint_ns <= off_on_time_ns) return false; diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 28cd75c535b0..7ae7cd990fbf 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -892,10 +892,17 @@ static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev) u32 microvolt[3] = {0}; int count, ret; - count = of_property_count_u32_elems(opp->np, "opp-microvolt"); - if (!count) + /* Missing property isn't a problem, but an invalid entry is */ + if (!of_find_property(opp->np, "opp-microvolt", NULL)) return 0; + count = of_property_count_u32_elems(opp->np, "opp-microvolt"); + if (count < 0) { + dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n", + __func__, count); + return count; + } + /* There can be one or three elements here */ if (count != 1 && count != 3) { dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n", @@ -1063,7 +1070,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add); * share a common logic which is isolated here. * * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modifcation was done OR modification was + * copy operation, returns 0 if no modification was done OR modification was * successful. 
* * Locking: The internal device_opp and opp structures are RCU protected. @@ -1151,7 +1158,7 @@ unlock: * mutex locking or synchronize_rcu() blocking calls cannot be used. * * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modifcation was done OR modification was + * copy operation, returns 0 if no modification was done OR modification was * successful. */ int dev_pm_opp_enable(struct device *dev, unsigned long freq) @@ -1177,7 +1184,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable); * mutex locking or synchronize_rcu() blocking calls cannot be used. * * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modifcation was done OR modification was + * copy operation, returns 0 if no modification was done OR modification was * successful. */ int dev_pm_opp_disable(struct device *dev, unsigned long freq) diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index f42f2bac6466..4c55cfbad19e 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -32,8 +32,7 @@ static DEFINE_MUTEX(regmap_debugfs_early_lock); /* Calculate the length of a fixed format */ static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size) { - snprintf(buf, buf_size, "%x", max_val); - return strlen(buf); + return snprintf(NULL, 0, "%x", max_val); } static ssize_t regmap_name_read_file(struct file *file, @@ -432,7 +431,7 @@ static ssize_t regmap_access_read_file(struct file *file, /* If we're in the region the user is trying to read */ if (p >= *ppos) { /* ...but not beyond it */ - if (buf_pos >= count - 1 - tot_len) + if (buf_pos + tot_len + 1 >= count) break; /* Format the register */ diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f9889b6bc02c..674f800a3b57 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1486,17 +1486,16 @@ static void loop_handle_cmd(struct loop_cmd *cmd) { const bool write = cmd->rq->cmd_flags & REQ_WRITE; struct loop_device *lo = cmd->rq->q->queuedata; - int ret = -EIO; + int ret = 0; - if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) + if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { + ret = -EIO; goto failed; + } ret = do_req_filebacked(lo, cmd->rq); - failed: - if (ret) - cmd->rq->errors = -EIO; - blk_mq_complete_request(cmd->rq); + blk_mq_complete_request(cmd->rq, ret ? 
-EIO : 0); } static void loop_queue_write_work(struct work_struct *work) diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 293495a75d3d..1b87623381e2 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -60,6 +60,7 @@ struct nbd_device { bool disconnect; /* a disconnect has been requested by user */ struct timer_list timeout_timer; + spinlock_t tasks_lock; struct task_struct *task_recv; struct task_struct *task_send; @@ -140,21 +141,23 @@ static void sock_shutdown(struct nbd_device *nbd) static void nbd_xmit_timeout(unsigned long arg) { struct nbd_device *nbd = (struct nbd_device *)arg; - struct task_struct *task; + unsigned long flags; if (list_empty(&nbd->queue_head)) return; nbd->disconnect = true; - task = READ_ONCE(nbd->task_recv); - if (task) - force_sig(SIGKILL, task); + spin_lock_irqsave(&nbd->tasks_lock, flags); + + if (nbd->task_recv) + force_sig(SIGKILL, nbd->task_recv); - task = READ_ONCE(nbd->task_send); - if (task) + if (nbd->task_send) force_sig(SIGKILL, nbd->task_send); + spin_unlock_irqrestore(&nbd->tasks_lock, flags); + dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n"); } @@ -403,17 +406,24 @@ static int nbd_thread_recv(struct nbd_device *nbd) { struct request *req; int ret; + unsigned long flags; BUG_ON(nbd->magic != NBD_MAGIC); sk_set_memalloc(nbd->sock->sk); + spin_lock_irqsave(&nbd->tasks_lock, flags); nbd->task_recv = current; + spin_unlock_irqrestore(&nbd->tasks_lock, flags); ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr); if (ret) { dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n"); + + spin_lock_irqsave(&nbd->tasks_lock, flags); nbd->task_recv = NULL; + spin_unlock_irqrestore(&nbd->tasks_lock, flags); + return ret; } @@ -429,7 +439,9 @@ static int nbd_thread_recv(struct nbd_device *nbd) device_remove_file(disk_to_dev(nbd->disk), &pid_attr); + spin_lock_irqsave(&nbd->tasks_lock, flags); nbd->task_recv = NULL; + spin_unlock_irqrestore(&nbd->tasks_lock, flags); if (signal_pending(current)) { siginfo_t info; @@ -534,8 +546,11 @@ static int nbd_thread_send(void *data) { struct nbd_device *nbd = data; struct request *req; + unsigned long flags; + spin_lock_irqsave(&nbd->tasks_lock, flags); nbd->task_send = current; + spin_unlock_irqrestore(&nbd->tasks_lock, flags); set_user_nice(current, MIN_NICE); while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) { @@ -572,7 +587,15 @@ static int nbd_thread_send(void *data) nbd_handle_req(nbd, req); } + spin_lock_irqsave(&nbd->tasks_lock, flags); nbd->task_send = NULL; + spin_unlock_irqrestore(&nbd->tasks_lock, flags); + + /* Clear maybe pending signals */ + if (signal_pending(current)) { + siginfo_t info; + dequeue_signal_lock(current, ¤t->blocked, &info); + } return 0; } @@ -1052,6 +1075,7 @@ static int __init nbd_init(void) nbd_dev[i].magic = NBD_MAGIC; INIT_LIST_HEAD(&nbd_dev[i].waiting_queue); spin_lock_init(&nbd_dev[i].queue_lock); + spin_lock_init(&nbd_dev[i].tasks_lock); INIT_LIST_HEAD(&nbd_dev[i].queue_head); mutex_init(&nbd_dev[i].tx_lock); init_timer(&nbd_dev[i].timeout_timer); diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 17269a3b85f2..1c9e4fe5aa44 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -289,7 +289,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd) case NULL_IRQ_SOFTIRQ: switch (queue_mode) { case NULL_Q_MQ: - blk_mq_complete_request(cmd->rq); + blk_mq_complete_request(cmd->rq, cmd->rq->errors); break; case NULL_Q_RQ: 
blk_complete_request(cmd->rq); @@ -406,6 +406,22 @@ static struct blk_mq_ops null_mq_ops = { .complete = null_softirq_done_fn, }; +static void cleanup_queue(struct nullb_queue *nq) +{ + kfree(nq->tag_map); + kfree(nq->cmds); +} + +static void cleanup_queues(struct nullb *nullb) +{ + int i; + + for (i = 0; i < nullb->nr_queues; i++) + cleanup_queue(&nullb->queues[i]); + + kfree(nullb->queues); +} + static void null_del_dev(struct nullb *nullb) { list_del_init(&nullb->list); @@ -415,6 +431,7 @@ static void null_del_dev(struct nullb *nullb) if (queue_mode == NULL_Q_MQ) blk_mq_free_tag_set(&nullb->tag_set); put_disk(nullb->disk); + cleanup_queues(nullb); kfree(nullb); } @@ -459,22 +476,6 @@ static int setup_commands(struct nullb_queue *nq) return 0; } -static void cleanup_queue(struct nullb_queue *nq) -{ - kfree(nq->tag_map); - kfree(nq->cmds); -} - -static void cleanup_queues(struct nullb *nullb) -{ - int i; - - for (i = 0; i < nullb->nr_queues; i++) - cleanup_queue(&nullb->queues[i]); - - kfree(nullb->queues); -} - static int setup_queues(struct nullb *nullb) { nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue), @@ -588,8 +589,7 @@ static int null_add_dev(void) blk_queue_physical_block_size(nullb->q, bs); size = gb * 1024 * 1024 * 1024ULL; - sector_div(size, bs); - set_capacity(disk, size); + set_capacity(disk, size >> 9); disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO; disk->major = null_major; diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index b97fc3fe0916..ccc0c1f93daa 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -603,31 +603,34 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, struct nvme_iod *iod = ctx; struct request *req = iod_get_private(iod); struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req); - u16 status = le16_to_cpup(&cqe->status) >> 1; + bool requeue = false; + int error = 0; if (unlikely(status)) { if (!(status & NVME_SC_DNR || blk_noretry_request(req)) && (jiffies - req->start_time) < req->timeout) { unsigned long flags; + requeue = true; blk_mq_requeue_request(req); spin_lock_irqsave(req->q->queue_lock, flags); if (!blk_queue_stopped(req->q)) blk_mq_kick_requeue_list(req->q); spin_unlock_irqrestore(req->q->queue_lock, flags); - return; + goto release_iod; } + if (req->cmd_type == REQ_TYPE_DRV_PRIV) { if (cmd_rq->ctx == CMD_CTX_CANCELLED) - req->errors = -EINTR; + error = -EINTR; else - req->errors = status; + error = status; } else { - req->errors = nvme_error_status(status); + error = nvme_error_status(status); } - } else - req->errors = 0; + } + if (req->cmd_type == REQ_TYPE_DRV_PRIV) { u32 result = le32_to_cpup(&cqe->result); req->special = (void *)(uintptr_t)result; @@ -636,8 +639,9 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, if (cmd_rq->aborted) dev_warn(nvmeq->dev->dev, "completing aborted command with status:%04x\n", - status); + error); +release_iod: if (iod->nents) { dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents, rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); @@ -650,7 +654,8 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, } nvme_free_iod(nvmeq->dev, iod); - blk_mq_complete_request(req); + if (likely(!requeue)) + blk_mq_complete_request(req, error); } /* length is in bytes. gfp flags indicates whether we may sleep. 
*/ @@ -863,8 +868,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, if (ns && ns->ms && !blk_integrity_rq(req)) { if (!(ns->pi_type && ns->ms == 8) && req->cmd_type != REQ_TYPE_DRV_PRIV) { - req->errors = -EFAULT; - blk_mq_complete_request(req); + blk_mq_complete_request(req, -EFAULT); return BLK_MQ_RQ_QUEUE_OK; } } @@ -1806,7 +1810,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) length = (io.nblocks + 1) << ns->lba_shift; meta_len = (io.nblocks + 1) * ns->ms; - metadata = (void __user *)(unsigned long)io.metadata; + metadata = (void __user *)(uintptr_t)io.metadata; write = io.opcode & 1; if (ns->ext) { @@ -1846,7 +1850,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) c.rw.metadata = cpu_to_le64(meta_dma); status = __nvme_submit_sync_cmd(ns->queue, &c, NULL, - (void __user *)io.addr, length, NULL, 0); + (void __user *)(uintptr_t)io.addr, length, NULL, 0); unmap: if (meta) { if (status == NVME_SC_SUCCESS && !write) { @@ -1888,7 +1892,7 @@ static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns, timeout = msecs_to_jiffies(cmd.timeout_ms); status = __nvme_submit_sync_cmd(ns ? ns->queue : dev->admin_q, &c, - NULL, (void __user *)cmd.addr, cmd.data_len, + NULL, (void __user *)(uintptr_t)cmd.addr, cmd.data_len, &cmd.result, timeout); if (status >= 0) { if (put_user(cmd.result, &ucmd->result)) @@ -2439,6 +2443,22 @@ static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn) list_sort(NULL, &dev->namespaces, ns_cmp); } +static void nvme_set_irq_hints(struct nvme_dev *dev) +{ + struct nvme_queue *nvmeq; + int i; + + for (i = 0; i < dev->online_queues; i++) { + nvmeq = dev->queues[i]; + + if (!nvmeq->tags || !(*nvmeq->tags)) + continue; + + irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, + blk_mq_tags_cpumask(*nvmeq->tags)); + } +} + static void nvme_dev_scan(struct work_struct *work) { struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work); @@ -2450,6 +2470,7 @@ static void nvme_dev_scan(struct work_struct *work) return; nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn)); kfree(ctrl); + nvme_set_irq_hints(dev); } /* @@ -2953,22 +2974,6 @@ static const struct file_operations nvme_dev_fops = { .compat_ioctl = nvme_dev_ioctl, }; -static void nvme_set_irq_hints(struct nvme_dev *dev) -{ - struct nvme_queue *nvmeq; - int i; - - for (i = 0; i < dev->online_queues; i++) { - nvmeq = dev->queues[i]; - - if (!nvmeq->tags || !(*nvmeq->tags)) - continue; - - irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, - blk_mq_tags_cpumask(*nvmeq->tags)); - } -} - static int nvme_dev_start(struct nvme_dev *dev) { int result; @@ -3010,8 +3015,6 @@ static int nvme_dev_start(struct nvme_dev *dev) if (result) goto free_tags; - nvme_set_irq_hints(dev); - dev->event_limit = 1; return result; @@ -3062,7 +3065,6 @@ static int nvme_dev_resume(struct nvme_dev *dev) } else { nvme_unfreeze_queues(dev); nvme_dev_add(dev); - nvme_set_irq_hints(dev); } return 0; } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index d93a0372b37b..6f26cf38c6f9 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -96,6 +96,8 @@ static int atomic_dec_return_safe(atomic_t *v) #define RBD_MINORS_PER_MAJOR 256 #define RBD_SINGLE_MAJOR_PART_SHIFT 4 +#define RBD_MAX_PARENT_CHAIN_LEN 16 + #define RBD_SNAP_DEV_NAME_PREFIX "snap_" #define RBD_MAX_SNAP_NAME_LEN \ (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1)) @@ -426,7 +428,7 @@ static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf, size_t 
count); static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf, size_t count); -static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping); +static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth); static void rbd_spec_put(struct rbd_spec *spec); static int rbd_dev_id_to_minor(int dev_id) @@ -1863,9 +1865,11 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, rbd_osd_read_callback(obj_request); break; case CEPH_OSD_OP_SETALLOCHINT: - rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE); + rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE || + osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL); /* fall through */ case CEPH_OSD_OP_WRITE: + case CEPH_OSD_OP_WRITEFULL: rbd_osd_write_callback(obj_request); break; case CEPH_OSD_OP_STAT: @@ -2401,7 +2405,10 @@ static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request, opcode = CEPH_OSD_OP_ZERO; } } else if (op_type == OBJ_OP_WRITE) { - opcode = CEPH_OSD_OP_WRITE; + if (!offset && length == object_size) + opcode = CEPH_OSD_OP_WRITEFULL; + else + opcode = CEPH_OSD_OP_WRITE; osd_req_op_alloc_hint_init(osd_request, num_ops, object_size, object_size); num_ops++; @@ -3760,6 +3767,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) /* set io sizes to object size */ segment_size = rbd_obj_bytes(&rbd_dev->header); blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); + q->limits.max_sectors = queue_max_hw_sectors(q); blk_queue_max_segments(q, segment_size / SECTOR_SIZE); blk_queue_max_segment_size(q, segment_size); blk_queue_io_min(q, segment_size); @@ -5125,44 +5133,51 @@ out_err: return ret; } -static int rbd_dev_probe_parent(struct rbd_device *rbd_dev) +/* + * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() -> + * rbd_dev_image_probe() recursion depth, which means it's also the + * length of the already discovered part of the parent chain. + */ +static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth) { struct rbd_device *parent = NULL; - struct rbd_spec *parent_spec; - struct rbd_client *rbdc; int ret; if (!rbd_dev->parent_spec) return 0; - /* - * We need to pass a reference to the client and the parent - * spec when creating the parent rbd_dev. Images related by - * parent/child relationships always share both. - */ - parent_spec = rbd_spec_get(rbd_dev->parent_spec); - rbdc = __rbd_get_client(rbd_dev->rbd_client); - ret = -ENOMEM; - parent = rbd_dev_create(rbdc, parent_spec, NULL); - if (!parent) + if (++depth > RBD_MAX_PARENT_CHAIN_LEN) { + pr_info("parent chain is too long (%d)\n", depth); + ret = -EINVAL; goto out_err; + } - ret = rbd_dev_image_probe(parent, false); + parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec, + NULL); + if (!parent) { + ret = -ENOMEM; + goto out_err; + } + + /* + * Images related by parent/child relationships always share + * rbd_client and spec/parent_spec, so bump their refcounts. 
+ */ + __rbd_get_client(rbd_dev->rbd_client); + rbd_spec_get(rbd_dev->parent_spec); + + ret = rbd_dev_image_probe(parent, depth); if (ret < 0) goto out_err; + rbd_dev->parent = parent; atomic_set(&rbd_dev->parent_ref, 1); - return 0; + out_err: - if (parent) { - rbd_dev_unparent(rbd_dev); + rbd_dev_unparent(rbd_dev); + if (parent) rbd_dev_destroy(parent); - } else { - rbd_put_client(rbdc); - rbd_spec_put(parent_spec); - } - return ret; } @@ -5280,7 +5295,7 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev) * parent), initiate a watch on its header object before using that * object to get detailed information about the rbd image. */ -static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) +static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) { int ret; @@ -5298,7 +5313,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) if (ret) goto err_out_format; - if (mapping) { + if (!depth) { ret = rbd_dev_header_watch_sync(rbd_dev); if (ret) { if (ret == -ENOENT) @@ -5319,7 +5334,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) * Otherwise this is a parent image, identified by pool, image * and snap ids - need to fill in names for those ids. */ - if (mapping) + if (!depth) ret = rbd_spec_fill_snap_id(rbd_dev); else ret = rbd_spec_fill_names(rbd_dev); @@ -5341,12 +5356,12 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) * Need to warn users if this image is the one being * mapped and has a parent. */ - if (mapping && rbd_dev->parent_spec) + if (!depth && rbd_dev->parent_spec) rbd_warn(rbd_dev, "WARNING: kernel layering is EXPERIMENTAL!"); } - ret = rbd_dev_probe_parent(rbd_dev); + ret = rbd_dev_probe_parent(rbd_dev, depth); if (ret) goto err_out_probe; @@ -5357,7 +5372,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) err_out_probe: rbd_dev_unprobe(rbd_dev); err_out_watch: - if (mapping) + if (!depth) rbd_dev_header_unwatch_sync(rbd_dev); out_header_name: kfree(rbd_dev->header_name); @@ -5420,7 +5435,7 @@ static ssize_t do_rbd_add(struct bus_type *bus, spec = NULL; /* rbd_dev now owns this */ rbd_opts = NULL; /* rbd_dev now owns this */ - rc = rbd_dev_image_probe(rbd_dev, true); + rc = rbd_dev_image_probe(rbd_dev, 0); if (rc < 0) goto err_out_rbd_dev; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index e93899cc6f60..6ca35495a5be 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -144,7 +144,7 @@ static void virtblk_done(struct virtqueue *vq) do { virtqueue_disable_cb(vq); while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { - blk_mq_complete_request(vbr->req); + blk_mq_complete_request(vbr->req, vbr->req->errors); req_done = true; } if (unlikely(virtqueue_is_broken(vq))) diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index deb3f001791f..767657565de6 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -212,6 +212,9 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref, static int xen_blkif_disconnect(struct xen_blkif *blkif) { + struct pending_req *req, *n; + int i = 0, j; + if (blkif->xenblkd) { kthread_stop(blkif->xenblkd); wake_up(&blkif->shutdown_wq); @@ -238,13 +241,28 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) /* Remove all persistent grants and the cache of ballooned pages. 
*/ xen_blkbk_free_caches(blkif); + /* Check that there is no request in use */ + list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { + list_del(&req->free_list); + + for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) + kfree(req->segments[j]); + + for (j = 0; j < MAX_INDIRECT_PAGES; j++) + kfree(req->indirect_pages[j]); + + kfree(req); + i++; + } + + WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); + blkif->nr_ring_pages = 0; + return 0; } static void xen_blkif_free(struct xen_blkif *blkif) { - struct pending_req *req, *n; - int i = 0, j; xen_blkif_disconnect(blkif); xen_vbd_free(&blkif->vbd); @@ -257,22 +275,6 @@ static void xen_blkif_free(struct xen_blkif *blkif) BUG_ON(!list_empty(&blkif->free_pages)); BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); - /* Check that there is no request in use */ - list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { - list_del(&req->free_list); - - for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) - kfree(req->segments[j]); - - for (j = 0; j < MAX_INDIRECT_PAGES; j++) - kfree(req->indirect_pages[j]); - - kfree(req); - i++; - } - - WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); - kmem_cache_free(xen_blkif_cachep, blkif); } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 0823a96902f8..a69c02dadec0 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1142,6 +1142,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; + int error; spin_lock_irqsave(&info->io_lock, flags); @@ -1182,37 +1183,37 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) continue; } - req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; + error = (bret->status == BLKIF_RSP_OKAY) ? 
0 : -EIO; switch (bret->operation) { case BLKIF_OP_DISCARD: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { struct request_queue *rq = info->rq; printk(KERN_WARNING "blkfront: %s: %s op failed\n", info->gd->disk_name, op_name(bret->operation)); - req->errors = -EOPNOTSUPP; + error = -EOPNOTSUPP; info->feature_discard = 0; info->feature_secdiscard = 0; queue_flag_clear(QUEUE_FLAG_DISCARD, rq); queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq); } - blk_mq_complete_request(req); + blk_mq_complete_request(req, error); break; case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_WRITE_BARRIER: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { printk(KERN_WARNING "blkfront: %s: %s op failed\n", info->gd->disk_name, op_name(bret->operation)); - req->errors = -EOPNOTSUPP; + error = -EOPNOTSUPP; } if (unlikely(bret->status == BLKIF_RSP_ERROR && info->shadow[id].req.u.rw.nr_segments == 0)) { printk(KERN_WARNING "blkfront: %s: empty %s op failed\n", info->gd->disk_name, op_name(bret->operation)); - req->errors = -EOPNOTSUPP; + error = -EOPNOTSUPP; } - if (unlikely(req->errors)) { - if (req->errors == -EOPNOTSUPP) - req->errors = 0; + if (unlikely(error)) { + if (error == -EOPNOTSUPP) + error = 0; info->feature_flush = 0; xlvbd_flush(info); } @@ -1223,7 +1224,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " "request: %x\n", bret->status); - blk_mq_complete_request(req); + blk_mq_complete_request(req, error); break; default: BUG(); @@ -1955,7 +1956,8 @@ static void blkback_changed(struct xenbus_device *dev, break; /* Missed the backend's Closing state -- fallthrough */ case XenbusStateClosing: - blkfront_closing(info); + if (info) + blkfront_closing(info); break; } } diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index 965d1afb0eaa..5cb13ca3a3ac 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -330,12 +330,14 @@ void zcomp_destroy(struct zcomp *comp) * allocate new zcomp and initialize it. return compressing * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL) * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in - * case of allocation error. + * case of allocation error, or any other error potentially + * returned by functions zcomp_strm_{multi,single}_create. 
*/ struct zcomp *zcomp_create(const char *compress, int max_strm) { struct zcomp *comp; struct zcomp_backend *backend; + int error; backend = find_backend(compress); if (!backend) @@ -347,12 +349,12 @@ struct zcomp *zcomp_create(const char *compress, int max_strm) comp->backend = backend; if (max_strm > 1) - zcomp_strm_multi_create(comp, max_strm); + error = zcomp_strm_multi_create(comp, max_strm); else - zcomp_strm_single_create(comp); - if (!comp->stream) { + error = zcomp_strm_single_create(comp); + if (error) { kfree(comp); - return ERR_PTR(-ENOMEM); + return ERR_PTR(error); } return comp; } diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 1a82f3a17681..0ebca8ba7bc4 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -36,7 +36,6 @@ config ARM_CCI400_PORT_CTRL config ARM_CCI500_PMU bool "ARM CCI500 PMU support" - default y depends on (ARM && CPU_V7) || ARM64 depends on PERF_EVENTS select ARM_CCI_PMU diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index 7d9879e166cf..7082c7268845 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c @@ -1184,11 +1184,12 @@ static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb, if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu)) break; target = cpumask_any_but(cpu_online_mask, cpu); - if (target < 0) + if (target >= nr_cpu_ids) break; perf_pmu_migrate_context(&dt->pmu, cpu, target); cpumask_set_cpu(target, &dt->cpu); - WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0); + if (ccn->irq) + WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0); default: break; } diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c index c37cf754a985..3c77645405e5 100644 --- a/drivers/char/hw_random/xgene-rng.c +++ b/drivers/char/hw_random/xgene-rng.c @@ -344,11 +344,12 @@ static int xgene_rng_probe(struct platform_device *pdev) if (IS_ERR(ctx->csr_base)) return PTR_ERR(ctx->csr_base); - ctx->irq = platform_get_irq(pdev, 0); - if (ctx->irq < 0) { + rc = platform_get_irq(pdev, 0); + if (rc < 0) { dev_err(&pdev->dev, "No IRQ resource\n"); - return ctx->irq; + return rc; } + ctx->irq = rc; dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d", ctx->csr_base, ctx->irq); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 43e2c3ad6c31..0ebcf449778a 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -2437,7 +2437,8 @@ static int __clk_init(struct device *dev, struct clk *clk_user) hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { if (orphan->num_parents && orphan->ops->get_parent) { i = orphan->ops->get_parent(orphan->hw); - if (!strcmp(core->name, orphan->parent_names[i])) + if (i >= 0 && i < orphan->num_parents && + !strcmp(core->name, orphan->parent_names[i])) clk_core_reparent(orphan, core); continue; } diff --git a/drivers/clk/h8300/clk-h8s2678.c b/drivers/clk/h8300/clk-h8s2678.c index 2a38eb4a2552..6cf38dc1c929 100644 --- a/drivers/clk/h8300/clk-h8s2678.c +++ b/drivers/clk/h8300/clk-h8s2678.c @@ -8,6 +8,7 @@ #include <linux/err.h> #include <linux/device.h> #include <linux/of_address.h> +#include <linux/slab.h> static DEFINE_SPINLOCK(clklock); diff --git a/drivers/clk/hisilicon/Kconfig b/drivers/clk/hisilicon/Kconfig index 2c16807341dc..e43485448612 100644 --- a/drivers/clk/hisilicon/Kconfig +++ b/drivers/clk/hisilicon/Kconfig @@ -1,6 +1,12 @@ config COMMON_CLK_HI6220 bool "Hi6220 Clock Driver" - depends on (ARCH_HISI || COMPILE_TEST) && MAILBOX + depends on ARCH_HISI || COMPILE_TEST default ARCH_HISI help Build the Hisilicon Hi6220 clock driver based on the common 
clock framework. + +config STUB_CLK_HI6220 + bool "Hi6220 Stub Clock Driver" + depends on COMMON_CLK_HI6220 && MAILBOX + help + Build the Hisilicon Hi6220 stub clock driver. diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile index 4a1001a11f04..74dba31590f9 100644 --- a/drivers/clk/hisilicon/Makefile +++ b/drivers/clk/hisilicon/Makefile @@ -7,4 +7,5 @@ obj-y += clk.o clkgate-separated.o clkdivider-hi6220.o obj-$(CONFIG_ARCH_HI3xxx) += clk-hi3620.o obj-$(CONFIG_ARCH_HIP04) += clk-hip04.o obj-$(CONFIG_ARCH_HIX5HD2) += clk-hix5hd2.o -obj-$(CONFIG_COMMON_CLK_HI6220) += clk-hi6220.o clk-hi6220-stub.o +obj-$(CONFIG_COMMON_CLK_HI6220) += clk-hi6220.o +obj-$(CONFIG_STUB_CLK_HI6220) += clk-hi6220-stub.o diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c index 5837eb8a212f..85da8b983256 100644 --- a/drivers/clk/mvebu/clk-cpu.c +++ b/drivers/clk/mvebu/clk-cpu.c @@ -197,6 +197,7 @@ static void __init of_cpu_clk_setup(struct device_node *node) for_each_node_by_type(dn, "cpu") { struct clk_init_data init; struct clk *clk; + struct clk *parent_clk; char *clk_name = kzalloc(5, GFP_KERNEL); int cpu, err; @@ -208,8 +209,9 @@ static void __init of_cpu_clk_setup(struct device_node *node) goto bail_out; sprintf(clk_name, "cpu%d", cpu); + parent_clk = of_clk_get(node, 0); - cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0); + cpuclk[cpu].parent_name = __clk_get_name(parent_clk); cpuclk[cpu].clk_name = clk_name; cpuclk[cpu].cpu = cpu; cpuclk[cpu].reg_base = clock_complex_base; diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c index ed02bbc7b11f..abb47608713b 100644 --- a/drivers/clk/rockchip/clk-rk3188.c +++ b/drivers/clk/rockchip/clk-rk3188.c @@ -716,6 +716,8 @@ static const char *const rk3188_critical_clocks[] __initconst = { "aclk_cpu", "aclk_peri", "hclk_peri", + "pclk_cpu", + "pclk_peri", }; static void __init rk3188_common_clk_init(struct device_node *np) @@ -744,8 +746,6 @@ static void __init rk3188_common_clk_init(struct device_node *np) rockchip_clk_register_branches(common_clk_branches, ARRAY_SIZE(common_clk_branches)); - rockchip_clk_protect_critical(rk3188_critical_clocks, - ARRAY_SIZE(rk3188_critical_clocks)); rockchip_register_softrst(np, 9, reg_base + RK2928_SOFTRST_CON(0), ROCKCHIP_SOFTRST_HIWORD_MASK); @@ -765,6 +765,8 @@ static void __init rk3066a_clk_init(struct device_node *np) mux_armclk_p, ARRAY_SIZE(mux_armclk_p), &rk3066_cpuclk_data, rk3066_cpuclk_rates, ARRAY_SIZE(rk3066_cpuclk_rates)); + rockchip_clk_protect_critical(rk3188_critical_clocks, + ARRAY_SIZE(rk3188_critical_clocks)); } CLK_OF_DECLARE(rk3066a_cru, "rockchip,rk3066a-cru", rk3066a_clk_init); @@ -801,6 +803,9 @@ static void __init rk3188a_clk_init(struct device_node *np) pr_warn("%s: missing clocks to reparent aclk_cpu_pre to gpll\n", __func__); } + + rockchip_clk_protect_critical(rk3188_critical_clocks, + ARRAY_SIZE(rk3188_critical_clocks)); } CLK_OF_DECLARE(rk3188a_cru, "rockchip,rk3188a-cru", rk3188a_clk_init); diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c index 9c5d61e698ef..7e6b783e6eee 100644 --- a/drivers/clk/rockchip/clk-rk3368.c +++ b/drivers/clk/rockchip/clk-rk3368.c @@ -818,6 +818,10 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = { GATE(0, "sclk_timer00", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 0, GFLAGS), }; +static const char *const rk3368_critical_clocks[] __initconst = { + "pclk_pd_pmu", +}; + static void __init rk3368_clk_init(struct device_node *np) { 
void __iomem *reg_base; @@ -862,6 +866,8 @@ static void __init rk3368_clk_init(struct device_node *np) RK3368_GRF_SOC_STATUS0); rockchip_clk_register_branches(rk3368_clk_branches, ARRAY_SIZE(rk3368_clk_branches)); + rockchip_clk_protect_critical(rk3368_critical_clocks, + ARRAY_SIZE(rk3368_critical_clocks)); rockchip_clk_register_armclk(ARMCLKB, "armclkb", mux_armclkb_p, ARRAY_SIZE(mux_armclkb_p), diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c index 7c1e1f58e2da..2fe37f708dc7 100644 --- a/drivers/clk/samsung/clk-cpu.c +++ b/drivers/clk/samsung/clk-cpu.c @@ -164,7 +164,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, * the values for DIV_COPY and DIV_HPM dividers need not be set. */ div0 = cfg_data->div0; - if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) { + if (cpuclk->flags & CLK_CPU_HAS_DIV1) { div1 = cfg_data->div1; if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK) div1 = readl(base + E4210_DIV_CPU1) & @@ -185,7 +185,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1; WARN_ON(alt_div >= MAX_DIV); - if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { + if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) { /* * In Exynos4210, ATB clock parent is also mout_core. So * ATB clock also needs to be mantained at safe speed. @@ -206,7 +206,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, writel(div0, base + E4210_DIV_CPU0); wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL); - if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) { + if (cpuclk->flags & CLK_CPU_HAS_DIV1) { writel(div1, base + E4210_DIV_CPU1); wait_until_divider_stable(base + E4210_DIV_STAT_CPU1, DIV_MASK_ALL); @@ -225,7 +225,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata, unsigned long mux_reg; /* find out the divider values to use for clock data */ - if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { + if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) { while ((cfg_data->prate * 1000) != ndata->new_rate) { if (cfg_data->prate == 0) return -EINVAL; @@ -240,7 +240,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata, writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU); wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1); - if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { + if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) { div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK); div_mask |= E4210_DIV0_ATB_MASK; } diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c index 83ccf142ff2a..576cd0354d48 100644 --- a/drivers/clk/st/clkgen-fsyn.c +++ b/drivers/clk/st/clkgen-fsyn.c @@ -307,7 +307,7 @@ static const struct clkgen_quadfs_data st_fs660c32_F_416 = { .get_rate = clk_fs660c32_dig_get_rate, }; -static const struct clkgen_quadfs_data st_fs660c32_C_407 = { +static const struct clkgen_quadfs_data st_fs660c32_C = { .nrst_present = true, .nrst = { CLKGEN_FIELD(0x2f0, 0x1, 0), CLKGEN_FIELD(0x2f0, 0x1, 1), @@ -350,7 +350,7 @@ static const struct clkgen_quadfs_data st_fs660c32_C_407 = { .get_rate = clk_fs660c32_dig_get_rate, }; -static const struct clkgen_quadfs_data st_fs660c32_D_407 = { +static const struct clkgen_quadfs_data st_fs660c32_D = { .nrst_present = true, .nrst = { CLKGEN_FIELD(0x2a0, 0x1, 0), CLKGEN_FIELD(0x2a0, 0x1, 1), @@ -1077,11 +1077,11 @@ static const struct of_device_id quadfs_of_match[] = { }, { .compatible = "st,stih407-quadfs660-C", - .data = 
&st_fs660c32_C_407 + .data = &st_fs660c32_C }, { .compatible = "st,stih407-quadfs660-D", - .data = &st_fs660c32_D_407 + .data = &st_fs660c32_D }, {} }; diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c index 47a38a994cac..b2a332cf8985 100644 --- a/drivers/clk/st/clkgen-pll.c +++ b/drivers/clk/st/clkgen-pll.c @@ -193,7 +193,7 @@ static const struct clkgen_pll_data st_pll3200c32_407_a0 = { .ops = &stm_pll3200c32_ops, }; -static const struct clkgen_pll_data st_pll3200c32_407_c0_0 = { +static const struct clkgen_pll_data st_pll3200c32_cx_0 = { /* 407 C0 PLL0 */ .pdn_status = CLKGEN_FIELD(0x2a0, 0x1, 8), .locked_status = CLKGEN_FIELD(0x2a0, 0x1, 24), @@ -205,7 +205,7 @@ static const struct clkgen_pll_data st_pll3200c32_407_c0_0 = { .ops = &stm_pll3200c32_ops, }; -static const struct clkgen_pll_data st_pll3200c32_407_c0_1 = { +static const struct clkgen_pll_data st_pll3200c32_cx_1 = { /* 407 C0 PLL1 */ .pdn_status = CLKGEN_FIELD(0x2c8, 0x1, 8), .locked_status = CLKGEN_FIELD(0x2c8, 0x1, 24), @@ -624,12 +624,12 @@ static const struct of_device_id c32_pll_of_match[] = { .data = &st_pll3200c32_407_a0, }, { - .compatible = "st,stih407-plls-c32-c0_0", - .data = &st_pll3200c32_407_c0_0, + .compatible = "st,plls-c32-cx_0", + .data = &st_pll3200c32_cx_0, }, { - .compatible = "st,stih407-plls-c32-c0_1", - .data = &st_pll3200c32_407_c0_1, + .compatible = "st,plls-c32-cx_1", + .data = &st_pll3200c32_cx_1, }, { .compatible = "st,stih407-plls-c32-a9", diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c index c2ff859ee0e8..c4e3a52e225b 100644 --- a/drivers/clk/tegra/clk-dfll.c +++ b/drivers/clk/tegra/clk-dfll.c @@ -682,11 +682,17 @@ static int find_lut_index_for_rate(struct tegra_dfll *td, unsigned long rate) struct dev_pm_opp *opp; int i, uv; + rcu_read_lock(); + opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate); - if (IS_ERR(opp)) + if (IS_ERR(opp)) { + rcu_read_unlock(); return PTR_ERR(opp); + } uv = dev_pm_opp_get_voltage(opp); + rcu_read_unlock(); + for (i = 0; i < td->i2c_lut_size; i++) { if (regulator_list_voltage(td->vdd_reg, td->i2c_lut[i]) == uv) return i; diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c index 676ee8f6d813..8831e1a05367 100644 --- a/drivers/clk/ti/clk-3xxx.c +++ b/drivers/clk/ti/clk-3xxx.c @@ -374,7 +374,6 @@ static struct ti_dt_clk omap3xxx_clks[] = { DT_CLK(NULL, "gpio2_ick", "gpio2_ick"), DT_CLK(NULL, "wdt3_ick", "wdt3_ick"), DT_CLK(NULL, "uart3_ick", "uart3_ick"), - DT_CLK(NULL, "uart4_ick", "uart4_ick"), DT_CLK(NULL, "gpt9_ick", "gpt9_ick"), DT_CLK(NULL, "gpt8_ick", "gpt8_ick"), DT_CLK(NULL, "gpt7_ick", "gpt7_ick"), @@ -519,6 +518,7 @@ static struct ti_dt_clk am35xx_clks[] = { static struct ti_dt_clk omap36xx_clks[] = { DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"), DT_CLK(NULL, "uart4_fck", "uart4_fck"), + DT_CLK(NULL, "uart4_ick", "uart4_ick"), { .node_name = NULL }, }; diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c index 9b5b289e6334..a911d7de3377 100644 --- a/drivers/clk/ti/clk-7xx.c +++ b/drivers/clk/ti/clk-7xx.c @@ -18,7 +18,6 @@ #include "clock.h" -#define DRA7_DPLL_ABE_DEFFREQ 180633600 #define DRA7_DPLL_GMAC_DEFFREQ 1000000000 #define DRA7_DPLL_USB_DEFFREQ 960000000 @@ -313,27 +312,12 @@ static struct ti_dt_clk dra7xx_clks[] = { int __init dra7xx_dt_clk_init(void) { int rc; - struct clk *abe_dpll_mux, *sys_clkin2, *dpll_ck, *hdcp_ck; + struct clk *dpll_ck, *hdcp_ck; ti_dt_clocks_register(dra7xx_clks); omap2_clk_disable_autoidle_all(); - abe_dpll_mux = clk_get_sys(NULL, 
"abe_dpll_sys_clk_mux"); - sys_clkin2 = clk_get_sys(NULL, "sys_clkin2"); - dpll_ck = clk_get_sys(NULL, "dpll_abe_ck"); - - rc = clk_set_parent(abe_dpll_mux, sys_clkin2); - if (!rc) - rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ); - if (rc) - pr_err("%s: failed to configure ABE DPLL!\n", __func__); - - dpll_ck = clk_get_sys(NULL, "dpll_abe_m2x2_ck"); - rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ * 2); - if (rc) - pr_err("%s: failed to configure ABE DPLL m2x2!\n", __func__); - dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck"); rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ); if (rc) diff --git a/drivers/clk/ti/clkt_dflt.c b/drivers/clk/ti/clkt_dflt.c index 90d7d8a21c49..1ddc288fce4e 100644 --- a/drivers/clk/ti/clkt_dflt.c +++ b/drivers/clk/ti/clkt_dflt.c @@ -222,7 +222,7 @@ int omap2_dflt_clk_enable(struct clk_hw *hw) } } - if (unlikely(!clk->enable_reg)) { + if (unlikely(IS_ERR(clk->enable_reg))) { pr_err("%s: %s missing enable_reg\n", __func__, clk_hw_get_name(hw)); ret = -EINVAL; @@ -264,7 +264,7 @@ void omap2_dflt_clk_disable(struct clk_hw *hw) u32 v; clk = to_clk_hw_omap(hw); - if (!clk->enable_reg) { + if (IS_ERR(clk->enable_reg)) { /* * 'independent' here refers to a clock which is not * controlled by its parent. diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c index bb2c2b050964..d3c1742ded1a 100644 --- a/drivers/clocksource/rockchip_timer.c +++ b/drivers/clocksource/rockchip_timer.c @@ -148,7 +148,7 @@ static void __init rk_timer_init(struct device_node *np) bc_timer.freq = clk_get_rate(timer_clk); irq = irq_of_parse_and_map(np, 0); - if (irq == NO_IRQ) { + if (!irq) { pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME); return; } diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c index edacf3902e10..1cea08cf603e 100644 --- a/drivers/clocksource/timer-keystone.c +++ b/drivers/clocksource/timer-keystone.c @@ -152,7 +152,7 @@ static void __init keystone_timer_init(struct device_node *np) int irq, error; irq = irq_of_parse_and_map(np, 0); - if (irq == NO_IRQ) { + if (!irq) { pr_err("%s: failed to map interrupts\n", __func__); return; } diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 15b921a9248c..cec1ee2d2f74 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -149,6 +149,9 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf) { struct acpi_cpufreq_data *data = policy->driver_data; + if (unlikely(!data)) + return -ENODEV; + return cpufreq_show_cpus(data->freqdomain_cpus, buf); } @@ -375,12 +378,11 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) pr_debug("get_cur_freq_on_cpu (%d)\n", cpu); - policy = cpufreq_cpu_get(cpu); + policy = cpufreq_cpu_get_raw(cpu); if (unlikely(!policy)) return 0; data = policy->driver_data; - cpufreq_cpu_put(policy); if (unlikely(!data || !data->freq_table)) return 0; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 6633b3fa996e..25c4c15103a0 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -238,13 +238,13 @@ int cpufreq_generic_init(struct cpufreq_policy *policy, } EXPORT_SYMBOL_GPL(cpufreq_generic_init); -/* Only for cpufreq core internal use */ -static struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) +struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) { struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); return policy && cpumask_test_cpu(cpu, policy->cpus) ? 
policy : NULL; } +EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw); unsigned int cpufreq_generic_get(unsigned int cpu) { @@ -1436,8 +1436,10 @@ static void cpufreq_offline_finish(unsigned int cpu) * since this is a core component, and is essential for the * subsequent light-weight ->init() to succeed. */ - if (cpufreq_driver->exit) + if (cpufreq_driver->exit) { cpufreq_driver->exit(policy); + policy->freq_table = NULL; + } } /** diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 3af9dd7332e6..aa33b92b3e3e 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -776,6 +776,11 @@ static inline void intel_pstate_sample(struct cpudata *cpu) local_irq_save(flags); rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); + if (cpu->prev_mperf == mperf) { + local_irq_restore(flags); + return; + } + tsc = rdtsc(); local_irq_restore(flags); diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 07bc7aa6b224..d234719065a5 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -461,7 +461,7 @@ config CRYPTO_DEV_QCE config CRYPTO_DEV_VMX bool "Support for VMX cryptographic acceleration instructions" - depends on PPC64 + depends on PPC64 && VSX help Support for VMX cryptographic acceleration instructions. diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h index b60698b30d30..bc2a55bc35e4 100644 --- a/drivers/crypto/marvell/cesa.h +++ b/drivers/crypto/marvell/cesa.h @@ -687,6 +687,33 @@ static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine) int mv_cesa_queue_req(struct crypto_async_request *req); +/* + * Helper function that indicates whether a crypto request needs to be + * cleaned up or not after being enqueued using mv_cesa_queue_req(). + */ +static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req, + int ret) +{ + /* + * The queue still had some space, the request was queued + * normally, so there's no need to clean it up. + */ + if (ret == -EINPROGRESS) + return false; + + /* + * The queue had not space left, but since the request is + * flagged with CRYPTO_TFM_REQ_MAY_BACKLOG, it was added to + * the backlog and will be processed later. There's no need to + * clean it up. 
+ */ + if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) + return false; + + /* Request wasn't queued, we need to clean it up */ + return true; +} + /* TDMA functions */ static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter, diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c index 0745cf3b9c0e..3df2f4e7adb2 100644 --- a/drivers/crypto/marvell/cipher.c +++ b/drivers/crypto/marvell/cipher.c @@ -189,7 +189,6 @@ static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req, { struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); - creq->req.base.engine = engine; if (creq->req.base.type == CESA_DMA_REQ) @@ -431,7 +430,7 @@ static int mv_cesa_des_op(struct ablkcipher_request *req, return ret; ret = mv_cesa_queue_req(&req->base); - if (ret && ret != -EINPROGRESS) + if (mv_cesa_req_needs_cleanup(&req->base, ret)) mv_cesa_ablkcipher_cleanup(req); return ret; @@ -551,7 +550,7 @@ static int mv_cesa_des3_op(struct ablkcipher_request *req, return ret; ret = mv_cesa_queue_req(&req->base); - if (ret && ret != -EINPROGRESS) + if (mv_cesa_req_needs_cleanup(&req->base, ret)) mv_cesa_ablkcipher_cleanup(req); return ret; @@ -693,7 +692,7 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req, return ret; ret = mv_cesa_queue_req(&req->base); - if (ret && ret != -EINPROGRESS) + if (mv_cesa_req_needs_cleanup(&req->base, ret)) mv_cesa_ablkcipher_cleanup(req); return ret; diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index ae9272eb9c1a..e8d0d7128137 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c @@ -739,10 +739,8 @@ static int mv_cesa_ahash_update(struct ahash_request *req) return 0; ret = mv_cesa_queue_req(&req->base); - if (ret && ret != -EINPROGRESS) { + if (mv_cesa_req_needs_cleanup(&req->base, ret)) mv_cesa_ahash_cleanup(req); - return ret; - } return ret; } @@ -766,7 +764,7 @@ static int mv_cesa_ahash_final(struct ahash_request *req) return 0; ret = mv_cesa_queue_req(&req->base); - if (ret && ret != -EINPROGRESS) + if (mv_cesa_req_needs_cleanup(&req->base, ret)) mv_cesa_ahash_cleanup(req); return ret; @@ -791,7 +789,7 @@ static int mv_cesa_ahash_finup(struct ahash_request *req) return 0; ret = mv_cesa_queue_req(&req->base); - if (ret && ret != -EINPROGRESS) + if (mv_cesa_req_needs_cleanup(&req->base, ret)) mv_cesa_ahash_cleanup(req); return ret; diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c index a57b4194de28..0a5ca0ba5d64 100644 --- a/drivers/crypto/qat/qat_common/adf_aer.c +++ b/drivers/crypto/qat/qat_common/adf_aer.c @@ -88,6 +88,9 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev) struct pci_dev *parent = pdev->bus->self; uint16_t bridge_ctl = 0; + if (accel_dev->is_vf) + return; + dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n", accel_dev->accel_id); diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c index e070c316e8b7..a19ee127edca 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c @@ -104,7 +104,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq) sg_miter_next(&mo); oo = 0; } - } while (mo.length > 0); + } while (oleft > 0); if (areq->info) { for (i = 0; i < 4 && i < ivsize / 4; i++) { diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index ca1b362d77e2..ca848cc6a8fd 100644 
--- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -53,7 +53,7 @@ static struct devfreq *find_device_devfreq(struct device *dev) { struct devfreq *tmp_devfreq; - if (unlikely(IS_ERR_OR_NULL(dev))) { + if (IS_ERR_OR_NULL(dev)) { pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); return ERR_PTR(-EINVAL); } @@ -133,7 +133,7 @@ static struct devfreq_governor *find_devfreq_governor(const char *name) { struct devfreq_governor *tmp_governor; - if (unlikely(IS_ERR_OR_NULL(name))) { + if (IS_ERR_OR_NULL(name)) { pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); return ERR_PTR(-EINVAL); } @@ -177,10 +177,10 @@ int update_devfreq(struct devfreq *devfreq) return err; /* - * Adjust the freuqency with user freq and QoS. + * Adjust the frequency with user freq and QoS. * - * List from the highest proiority - * max_freq (probably called by thermal when it's too hot) + * List from the highest priority + * max_freq * min_freq */ @@ -482,7 +482,7 @@ struct devfreq *devfreq_add_device(struct device *dev, devfreq->profile->max_state * devfreq->profile->max_state, GFP_KERNEL); - devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) * + devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) * devfreq->profile->max_state, GFP_KERNEL); devfreq->last_stat_updated = jiffies; @@ -492,7 +492,7 @@ struct devfreq *devfreq_add_device(struct device *dev, if (err) { put_device(&devfreq->dev); mutex_unlock(&devfreq->lock); - goto err_dev; + goto err_out; } mutex_unlock(&devfreq->lock); @@ -518,7 +518,6 @@ struct devfreq *devfreq_add_device(struct device *dev, err_init: list_del(&devfreq->node); device_unregister(&devfreq->dev); -err_dev: kfree(devfreq); err_out: return ERR_PTR(err); @@ -795,8 +794,10 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr, ret = PTR_ERR(governor); goto out; } - if (df->governor == governor) + if (df->governor == governor) { + ret = 0; goto out; + } if (df->governor) { ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL); diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c index f9901f52a225..f312485f1451 100644 --- a/drivers/devfreq/event/exynos-ppmu.c +++ b/drivers/devfreq/event/exynos-ppmu.c @@ -319,7 +319,8 @@ static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev, case PPMU_PMNCNT3: pmcnt_high = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_HIGH); pmcnt_low = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_LOW); - load_count = (u64)((pmcnt_high & 0xff) << 32) + (u64)pmcnt_low; + load_count = ((u64)((pmcnt_high & 0xff)) << 32) + + (u64)pmcnt_low; break; } edata->load_count = load_count; diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c index 0720ba84ca92..ae72ba5e78df 100644 --- a/drivers/devfreq/governor_simpleondemand.c +++ b/drivers/devfreq/governor_simpleondemand.c @@ -21,17 +21,20 @@ static int devfreq_simple_ondemand_func(struct devfreq *df, unsigned long *freq) { - struct devfreq_dev_status stat; - int err = df->profile->get_dev_status(df->dev.parent, &stat); + int err; + struct devfreq_dev_status *stat; unsigned long long a, b; unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD; unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL; struct devfreq_simple_ondemand_data *data = df->data; unsigned long max = (df->max_freq) ? 
df->max_freq : UINT_MAX; + err = devfreq_update_stats(df); if (err) return err; + stat = &df->last_status; + if (data) { if (data->upthreshold) dfso_upthreshold = data->upthreshold; @@ -43,41 +46,41 @@ static int devfreq_simple_ondemand_func(struct devfreq *df, return -EINVAL; /* Assume MAX if it is going to be divided by zero */ - if (stat.total_time == 0) { + if (stat->total_time == 0) { *freq = max; return 0; } /* Prevent overflow */ - if (stat.busy_time >= (1 << 24) || stat.total_time >= (1 << 24)) { - stat.busy_time >>= 7; - stat.total_time >>= 7; + if (stat->busy_time >= (1 << 24) || stat->total_time >= (1 << 24)) { + stat->busy_time >>= 7; + stat->total_time >>= 7; } /* Set MAX if it's busy enough */ - if (stat.busy_time * 100 > - stat.total_time * dfso_upthreshold) { + if (stat->busy_time * 100 > + stat->total_time * dfso_upthreshold) { *freq = max; return 0; } /* Set MAX if we do not know the initial frequency */ - if (stat.current_frequency == 0) { + if (stat->current_frequency == 0) { *freq = max; return 0; } /* Keep the current frequency */ - if (stat.busy_time * 100 > - stat.total_time * (dfso_upthreshold - dfso_downdifferential)) { - *freq = stat.current_frequency; + if (stat->busy_time * 100 > + stat->total_time * (dfso_upthreshold - dfso_downdifferential)) { + *freq = stat->current_frequency; return 0; } /* Set the desired frequency based on the load */ - a = stat.busy_time; - a *= stat.current_frequency; - b = div_u64(a, stat.total_time); + a = stat->busy_time; + a *= stat->current_frequency; + b = div_u64(a, stat->total_time); b *= 100; b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2)); *freq = (unsigned long) b; diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c index 13a1a6e8108c..848b93ee930f 100644 --- a/drivers/devfreq/tegra-devfreq.c +++ b/drivers/devfreq/tegra-devfreq.c @@ -541,18 +541,20 @@ static struct devfreq_dev_profile tegra_devfreq_profile = { static int tegra_governor_get_target(struct devfreq *devfreq, unsigned long *freq) { - struct devfreq_dev_status stat; + struct devfreq_dev_status *stat; struct tegra_devfreq *tegra; struct tegra_devfreq_device *dev; unsigned long target_freq = 0; unsigned int i; int err; - err = devfreq->profile->get_dev_status(devfreq->dev.parent, &stat); + err = devfreq_update_stats(devfreq); if (err) return err; - tegra = stat.private_data; + stat = &devfreq->last_status; + + tegra = stat->private_data; for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) { dev = &tegra->devices[i]; diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index a165b4bfd330..dd24375b76dd 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -455,6 +455,15 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan, return desc; } +void at_xdmac_init_used_desc(struct at_xdmac_desc *desc) +{ + memset(&desc->lld, 0, sizeof(desc->lld)); + INIT_LIST_HEAD(&desc->descs_list); + desc->direction = DMA_TRANS_NONE; + desc->xfer_size = 0; + desc->active_xfer = false; +} + /* Call must be protected by lock. 
*/ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) { @@ -466,7 +475,7 @@ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) desc = list_first_entry(&atchan->free_descs_list, struct at_xdmac_desc, desc_node); list_del(&desc->desc_node); - desc->active_xfer = false; + at_xdmac_init_used_desc(desc); } return desc; @@ -875,14 +884,14 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan, if (xt->src_inc) { if (xt->src_sgl) - chan_cc |= AT_XDMAC_CC_SAM_UBS_DS_AM; + chan_cc |= AT_XDMAC_CC_SAM_UBS_AM; else chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM; } if (xt->dst_inc) { if (xt->dst_sgl) - chan_cc |= AT_XDMAC_CC_DAM_UBS_DS_AM; + chan_cc |= AT_XDMAC_CC_DAM_UBS_AM; else chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM; } diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 3ff284c8e3d5..09479d4be4db 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -554,10 +554,18 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) mutex_lock(&dma_list_mutex); if (chan->client_count == 0) { + struct dma_device *device = chan->device; + + dma_cap_set(DMA_PRIVATE, device->cap_mask); + device->privatecnt++; err = dma_chan_get(chan); - if (err) + if (err) { pr_debug("%s: failed to get %s: (%d)\n", __func__, dma_chan_name(chan), err); + chan = NULL; + if (--device->privatecnt == 0) + dma_cap_clear(DMA_PRIVATE, device->cap_mask); + } } else chan = NULL; diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index cf1c87fa1edd..bedce038c6e2 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) INIT_LIST_HEAD(&dw->dma.channels); for (i = 0; i < nr_channels; i++) { struct dw_dma_chan *dwc = &dw->chan[i]; - int r = nr_channels - i - 1; dwc->chan.device = &dw->dma; dma_cookie_init(&dwc->chan); @@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) /* 7 is highest priority & 0 is lowest. 
*/ if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) - dwc->priority = r; + dwc->priority = nr_channels - i - 1; else dwc->priority = i; @@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) /* Hardware configuration */ if (autocfg) { unsigned int dwc_params; + unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; void __iomem *addr = chip->regs + r * sizeof(u32); dwc_params = dma_read_byaddr(addr, DWC_PARAMS); diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 18c14e1f1414..48d6d9e94f67 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -355,23 +355,23 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c) struct idma64_desc *desc = idma64c->desc; struct idma64_hw_desc *hw; size_t bytes = desc->length; - u64 llp; - u32 ctlhi; + u64 llp = channel_readq(idma64c, LLP); + u32 ctlhi = channel_readl(idma64c, CTL_HI); unsigned int i = 0; - llp = channel_readq(idma64c, LLP); do { hw = &desc->hw[i]; - } while ((hw->llp != llp) && (++i < desc->ndesc)); + if (hw->llp == llp) + break; + bytes -= hw->len; + } while (++i < desc->ndesc); if (!i) return bytes; - do { - bytes -= desc->hw[--i].len; - } while (i); + /* The current chunk is not fully transfered yet */ + bytes += desc->hw[--i].len; - ctlhi = channel_readl(idma64c, CTL_HI); return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi); } diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c index 4768a829253a..2bf37e68ad0f 100644 --- a/drivers/dma/ipu/ipu_irq.c +++ b/drivers/dma/ipu/ipu_irq.c @@ -266,7 +266,7 @@ int ipu_irq_unmap(unsigned int source) } /* Chained IRQ handler for IPU function and error interrupt */ -static void ipu_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void ipu_irq_handler(struct irq_desc *desc) { struct ipu *ipu = irq_desc_get_handler_data(desc); u32 status; diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 5cb61ce01036..fc4156afa070 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -473,8 +473,10 @@ static void pxad_free_phy(struct pxad_chan *chan) return; /* clear the channel mapping in DRCMR */ - reg = pxad_drcmr(chan->drcmr); - writel_relaxed(0, chan->phy->base + reg); + if (chan->drcmr <= DRCMR_CHLNUM) { + reg = pxad_drcmr(chan->drcmr); + writel_relaxed(0, chan->phy->base + reg); + } spin_lock_irqsave(&pdev->phy_lock, flags); for (i = 0; i < 32; i++) @@ -516,8 +518,10 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned) "%s(); phy=%p(%d) misaligned=%d\n", __func__, phy, phy->idx, misaligned); - reg = pxad_drcmr(phy->vchan->drcmr); - writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); + if (phy->vchan->drcmr <= DRCMR_CHLNUM) { + reg = pxad_drcmr(phy->vchan->drcmr); + writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); + } dalgn = phy_readl_relaxed(phy, DALGN); if (misaligned) @@ -887,6 +891,7 @@ pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd, struct dma_async_tx_descriptor *tx; struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc); + INIT_LIST_HEAD(&vd->node); tx = vchan_tx_prep(vc, vd, tx_flags); tx->tx_submit = pxad_tx_submit; dev_dbg(&chan->vc.chan.dev->device, @@ -910,14 +915,18 @@ static void pxad_get_config(struct pxad_chan *chan, width = chan->cfg.src_addr_width; dev_addr = chan->cfg.src_addr; *dev_src = dev_addr; - *dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC; + *dcmd |= PXA_DCMD_INCTRGADDR; + if (chan->drcmr <= DRCMR_CHLNUM) + *dcmd |= PXA_DCMD_FLOWSRC; } if (dir == DMA_MEM_TO_DEV) { maxburst = chan->cfg.dst_maxburst; width = 
chan->cfg.dst_addr_width; dev_addr = chan->cfg.dst_addr; *dev_dst = dev_addr; - *dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG; + *dcmd |= PXA_DCMD_INCSRCADDR; + if (chan->drcmr <= DRCMR_CHLNUM) + *dcmd |= PXA_DCMD_FLOWTRG; } if (dir == DMA_MEM_TO_MEM) *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR | @@ -1177,6 +1186,16 @@ static unsigned int pxad_residue(struct pxad_chan *chan, else curr = phy_readl_relaxed(chan->phy, DTADR); + /* + * curr has to be actually read before checking descriptor + * completion, so that a curr inside a status updater + * descriptor implies the following test returns true, and + * preventing reordering of curr load and the test. + */ + rmb(); + if (is_desc_completed(vd)) + goto out; + for (i = 0; i < sw_desc->nb_desc - 1; i++) { hw_desc = sw_desc->hw_desc[i]; if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR) diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c index a1a500d96ff2..1661d518224a 100644 --- a/drivers/dma/sun4i-dma.c +++ b/drivers/dma/sun4i-dma.c @@ -599,13 +599,13 @@ get_next_cyclic_promise(struct sun4i_dma_contract *contract) static void sun4i_dma_free_contract(struct virt_dma_desc *vd) { struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd); - struct sun4i_dma_promise *promise; + struct sun4i_dma_promise *promise, *tmp; /* Free all the demands and completed demands */ - list_for_each_entry(promise, &contract->demands, list) + list_for_each_entry_safe(promise, tmp, &contract->demands, list) kfree(promise); - list_for_each_entry(promise, &contract->completed_demands, list) + list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list) kfree(promise); kfree(contract); diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index b23e8d52d126..8d57b1b12e41 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -59,7 +59,6 @@ #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070 #define XGENE_DMA_RING_BLK_MEM_RDY 0xD074 #define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF -#define XGENE_DMA_RING_DESC_CNT(v) (((v) & 0x0001FFFE) >> 1) #define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num)) #define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v)) #define XGENE_DMA_RING_CMD_OFFSET 0x2C @@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt) return flyby_type[src_cnt]; } -static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring) -{ - u32 __iomem *cmd_base = ring->cmd_base; - u32 ring_state = ioread32(&cmd_base[1]); - - return XGENE_DMA_RING_DESC_CNT(ring_state); -} - static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len, dma_addr_t *paddr) { @@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan, dma_pool_free(chan->desc_pool, desc, desc->tx.phys); } -static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, - struct xgene_dma_desc_sw *desc_sw) +static void xgene_chan_xfer_request(struct xgene_dma_chan *chan, + struct xgene_dma_desc_sw *desc_sw) { + struct xgene_dma_ring *ring = &chan->tx_ring; struct xgene_dma_desc_hw *desc_hw; - /* Check if can push more descriptor to hw for execution */ - if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2)) - return -EBUSY; - /* Get hw descriptor from DMA tx ring */ desc_hw = &ring->desc_hw[ring->head]; @@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw)); } + /* Increment the pending transaction count */ + chan->pending += ((desc_sw->flags & + XGENE_DMA_FLAG_64B_DESC) ? 
2 : 1); + /* Notify the hw that we have descriptor ready for execution */ iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ? 2 : 1, ring->cmd); - - return 0; } /** @@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) { struct xgene_dma_desc_sw *desc_sw, *_desc_sw; - int ret; /* * If the list of pending descriptors is empty, then we @@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) if (chan->pending >= chan->max_outstanding) return; - ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw); - if (ret) - return; + xgene_chan_xfer_request(chan, desc_sw); /* * Delete this element from ld pending queue and append it to * ld running queue */ list_move_tail(&desc_sw->node, &chan->ld_running); - - /* Increment the pending transaction count */ - chan->pending++; } } @@ -821,7 +805,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan) * Decrement the pending transaction count * as we have processed one */ - chan->pending--; + chan->pending -= ((desc_sw->flags & + XGENE_DMA_FLAG_64B_DESC) ? 2 : 1); /* * Delete this node from ld running queue and append it to @@ -1421,15 +1406,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan, struct xgene_dma_ring *ring, enum xgene_dma_ring_cfgsize cfgsize) { + int ret; + /* Setup DMA ring descriptor variables */ ring->pdma = chan->pdma; ring->cfgsize = cfgsize; ring->num = chan->pdma->ring_num++; ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num); - ring->size = xgene_dma_get_ring_size(chan, cfgsize); - if (ring->size <= 0) - return ring->size; + ret = xgene_dma_get_ring_size(chan, cfgsize); + if (ret <= 0) + return ret; + ring->size = ret; /* Allocate memory for DMA ring descriptor */ ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, @@ -1482,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan) tx_ring->id, tx_ring->num, tx_ring->desc_vaddr); /* Set the max outstanding request possible to this channel */ - chan->max_outstanding = rx_ring->slots; + chan->max_outstanding = tx_ring->slots; return ret; } diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c index 39915a6b7986..c017fcd8e07c 100644 --- a/drivers/dma/zx296702_dma.c +++ b/drivers/dma/zx296702_dma.c @@ -739,7 +739,7 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec, struct dma_chan *chan; struct zx_dma_chan *c; - if (request > d->dma_requests) + if (request >= d->dma_requests) return NULL; chan = dma_get_any_slave_channel(&d->slave); diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index a07addde297b..8dd0af1d50bc 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -159,7 +159,7 @@ static int find_cable_index_by_name(struct extcon_dev *edev, const char *name) static bool is_extcon_changed(u32 prev, u32 new, int idx, bool *attached) { if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) { - *attached = new ? true : false; + *attached = ((new >> idx) & 0x1) ? 
true : false; return true; } diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index d8de6a8dd4de..665efca59487 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -139,6 +139,14 @@ config QCOM_SCM bool depends on ARM || ARM64 +config QCOM_SCM_32 + def_bool y + depends on QCOM_SCM && ARM + +config QCOM_SCM_64 + def_bool y + depends on QCOM_SCM && ARM64 + source "drivers/firmware/broadcom/Kconfig" source "drivers/firmware/google/Kconfig" source "drivers/firmware/efi/Kconfig" diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 000830fc6707..2ee83474a3c1 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -13,7 +13,8 @@ obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o obj-$(CONFIG_QCOM_SCM) += qcom_scm.o -obj-$(CONFIG_QCOM_SCM) += qcom_scm-32.o +obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o +obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1) obj-y += broadcom/ diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index e29560e6b40b..950c87f5d279 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -13,6 +13,7 @@ */ #include <linux/efi.h> +#include <linux/sort.h> #include <asm/efi.h> #include "efistub.h" @@ -305,6 +306,44 @@ fail: */ #define EFI_RT_VIRTUAL_BASE 0x40000000 +static int cmp_mem_desc(const void *l, const void *r) +{ + const efi_memory_desc_t *left = l, *right = r; + + return (left->phys_addr > right->phys_addr) ? 1 : -1; +} + +/* + * Returns whether region @left ends exactly where region @right starts, + * or false if either argument is NULL. + */ +static bool regions_are_adjacent(efi_memory_desc_t *left, + efi_memory_desc_t *right) +{ + u64 left_end; + + if (left == NULL || right == NULL) + return false; + + left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE; + + return left_end == right->phys_addr; +} + +/* + * Returns whether region @left and region @right have compatible memory type + * mapping attributes, and are both EFI_MEMORY_RUNTIME regions. + */ +static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left, + efi_memory_desc_t *right) +{ + static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT | + EFI_MEMORY_WC | EFI_MEMORY_UC | + EFI_MEMORY_RUNTIME; + + return ((left->attribute ^ right->attribute) & mem_type_mask) == 0; +} + /* * efi_get_virtmap() - create a virtual mapping for the EFI memory map * @@ -317,33 +356,52 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, int *count) { u64 efi_virt_base = EFI_RT_VIRTUAL_BASE; - efi_memory_desc_t *out = runtime_map; + efi_memory_desc_t *in, *prev = NULL, *out = runtime_map; int l; - for (l = 0; l < map_size; l += desc_size) { - efi_memory_desc_t *in = (void *)memory_map + l; + /* + * To work around potential issues with the Properties Table feature + * introduced in UEFI 2.5, which may split PE/COFF executable images + * in memory into several RuntimeServicesCode and RuntimeServicesData + * regions, we need to preserve the relative offsets between adjacent + * EFI_MEMORY_RUNTIME regions with the same memory type attributes. + * The easiest way to find adjacent regions is to sort the memory map + * before traversing it. 
+ */ + sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL); + + for (l = 0; l < map_size; l += desc_size, prev = in) { u64 paddr, size; + in = (void *)memory_map + l; if (!(in->attribute & EFI_MEMORY_RUNTIME)) continue; + paddr = in->phys_addr; + size = in->num_pages * EFI_PAGE_SIZE; + /* * Make the mapping compatible with 64k pages: this allows * a 4k page size kernel to kexec a 64k page size kernel and * vice versa. */ - paddr = round_down(in->phys_addr, SZ_64K); - size = round_up(in->num_pages * EFI_PAGE_SIZE + - in->phys_addr - paddr, SZ_64K); - - /* - * Avoid wasting memory on PTEs by choosing a virtual base that - * is compatible with section mappings if this region has the - * appropriate size and physical alignment. (Sections are 2 MB - * on 4k granule kernels) - */ - if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M) - efi_virt_base = round_up(efi_virt_base, SZ_2M); + if (!regions_are_adjacent(prev, in) || + !regions_have_compatible_memory_type_attrs(prev, in)) { + + paddr = round_down(in->phys_addr, SZ_64K); + size += in->phys_addr - paddr; + + /* + * Avoid wasting memory on PTEs by choosing a virtual + * base that is compatible with section mappings if this + * region has the appropriate size and physical + * alignment. (Sections are 2 MB on 4k granule kernels) + */ + if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M) + efi_virt_base = round_up(efi_virt_base, SZ_2M); + else + efi_virt_base = round_up(efi_virt_base, SZ_64K); + } in->virt_addr = efi_virt_base + in->phys_addr - paddr; efi_virt_base += size; diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index e334a01cf92f..6b6548fda089 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -5,10 +5,6 @@ /* error code which can't be mistaken for valid address */ #define EFI_ERROR (~0UL) -#undef memcpy -#undef memset -#undef memmove - void efi_char16_printk(efi_system_table_t *, efi_char16_t *); efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image, diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c new file mode 100644 index 000000000000..bb6555f6d63b --- /dev/null +++ b/drivers/firmware/qcom_scm-64.c @@ -0,0 +1,63 @@ +/* Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/io.h> +#include <linux/errno.h> +#include <linux/qcom_scm.h> + +/** + * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus + * @entry: Entry point function for the cpus + * @cpus: The cpumask of cpus that will use the entry point + * + * Set the cold boot address of the cpus. Any cpu outside the supported + * range would be removed from the cpu present mask. 
+ */ +int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) +{ + return -ENOTSUPP; +} + +/** + * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus + * @entry: Entry point function for the cpus + * @cpus: The cpumask of cpus that will use the entry point + * + * Set the Linux entry point for the SCM to transfer control to when coming + * out of a power down. CPU power down may be executed on cpuidle or hotplug. + */ +int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) +{ + return -ENOTSUPP; +} + +/** + * qcom_scm_cpu_power_down() - Power down the cpu + * @flags - Flags to flush cache + * + * This is an end point to power down cpu. If there was a pending interrupt, + * the control would return from this function, otherwise, the cpu jumps to the + * warm boot entry point set for this cpu upon reset. + */ +void __qcom_scm_cpu_power_down(u32 flags) +{ +} + +int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id) +{ + return -ENOTSUPP; +} + +int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) +{ + return -ENOTSUPP; +} diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index b4fc9e4d24c6..8949b3f6f74d 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -356,7 +356,7 @@ config GPIO_PXA config GPIO_RCAR tristate "Renesas R-Car GPIO" - depends on ARM && (ARCH_SHMOBILE || COMPILE_TEST) + depends on ARCH_SHMOBILE || COMPILE_TEST select GPIOLIB_IRQCHIP help Say yes here to support GPIO on Renesas R-Car SoCs. diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c index 9b7e0b3db387..1b44941574fa 100644 --- a/drivers/gpio/gpio-altera.c +++ b/drivers/gpio/gpio-altera.c @@ -201,8 +201,7 @@ static int altera_gpio_direction_output(struct gpio_chip *gc, return 0; } -static void altera_gpio_irq_edge_handler(unsigned int irq, - struct irq_desc *desc) +static void altera_gpio_irq_edge_handler(struct irq_desc *desc) { struct altera_gpio_chip *altera_gc; struct irq_chip *chip; @@ -231,8 +230,7 @@ static void altera_gpio_irq_edge_handler(unsigned int irq, } -static void altera_gpio_irq_leveL_high_handler(unsigned int irq, - struct irq_desc *desc) +static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc) { struct altera_gpio_chip *altera_gc; struct irq_chip *chip; diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c index 31b90ac15204..33a1f9779b86 100644 --- a/drivers/gpio/gpio-bcm-kona.c +++ b/drivers/gpio/gpio-bcm-kona.c @@ -433,7 +433,7 @@ static int bcm_kona_gpio_irq_set_type(struct irq_data *d, unsigned int type) return 0; } -static void bcm_kona_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void bcm_kona_gpio_irq_handler(struct irq_desc *desc) { void __iomem *reg_base; int bit, bank_id; diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c index 9ea86d2ac054..4c64627c6bb5 100644 --- a/drivers/gpio/gpio-brcmstb.c +++ b/drivers/gpio/gpio-brcmstb.c @@ -236,7 +236,7 @@ static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank) } /* Each UPG GIO block has one IRQ for all banks */ -static void brcmstb_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void brcmstb_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc); diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index 94b0ab709721..5e715388803d 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -326,8 
+326,7 @@ static struct irq_chip gpio_irqchip = { .flags = IRQCHIP_SET_TYPE_MASKED, }; -static void -gpio_irq_handler(unsigned __irq, struct irq_desc *desc) +static void gpio_irq_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct davinci_gpio_regs __iomem *g; diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index c5be4b9b8baf..fcd5b0acfc72 100644 --- a/drivers/gpio/gpio-dwapb.c +++ b/drivers/gpio/gpio-dwapb.c @@ -147,7 +147,7 @@ static u32 dwapb_do_irq(struct dwapb_gpio *gpio) return ret; } -static void dwapb_irq_handler(u32 irq, struct irq_desc *desc) +static void dwapb_irq_handler(struct irq_desc *desc) { struct dwapb_gpio *gpio = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c index 9d90366ea259..3e3947b35c83 100644 --- a/drivers/gpio/gpio-ep93xx.c +++ b/drivers/gpio/gpio-ep93xx.c @@ -78,7 +78,7 @@ static void ep93xx_gpio_int_debounce(unsigned int irq, bool enable) EP93XX_GPIO_REG(int_debounce_register_offset[port])); } -static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc) +static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc) { unsigned char status; int i; @@ -100,8 +100,7 @@ static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc) } } -static void ep93xx_gpio_f_irq_handler(unsigned int __irq, - struct irq_desc *desc) +static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc) { /* * map discontiguous hw irq range to continuous sw irq range: diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c index aa28c65eb6b4..70097472b02c 100644 --- a/drivers/gpio/gpio-intel-mid.c +++ b/drivers/gpio/gpio-intel-mid.c @@ -301,7 +301,7 @@ static const struct pci_device_id intel_gpio_ids[] = { }; MODULE_DEVICE_TABLE(pci, intel_gpio_ids); -static void intel_mid_irq_handler(unsigned irq, struct irq_desc *desc) +static void intel_mid_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct intel_mid_gpio *priv = to_intel_gpio_priv(gc); diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c index 153af464c7a7..127c37b380ae 100644 --- a/drivers/gpio/gpio-lynxpoint.c +++ b/drivers/gpio/gpio-lynxpoint.c @@ -234,7 +234,7 @@ static int lp_gpio_direction_output(struct gpio_chip *chip, return 0; } -static void lp_gpio_irq_handler(unsigned hwirq, struct irq_desc *desc) +static void lp_gpio_irq_handler(struct irq_desc *desc) { struct irq_data *data = irq_desc_get_irq_data(desc); struct gpio_chip *gc = irq_desc_get_handler_data(desc); diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index 8ef7a12de983..48ef368347ab 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c @@ -194,7 +194,7 @@ static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset) return -ENXIO; } -static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc) +static void mpc8xxx_gpio_irq_cascade(struct irq_desc *desc) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c index 7bcfb87a5fa6..22523aae8abe 100644 --- a/drivers/gpio/gpio-msic.c +++ b/drivers/gpio/gpio-msic.c @@ -232,7 +232,7 @@ static struct irq_chip msic_irqchip = { .irq_bus_sync_unlock = msic_bus_sync_unlock, }; -static void msic_gpio_irq_handler(unsigned irq, struct irq_desc *desc) +static 
void msic_gpio_irq_handler(struct irq_desc *desc) { struct irq_data *data = irq_desc_get_irq_data(desc); struct msic_gpio *mg = irq_data_get_irq_handler_data(data); diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c index d2012cfb5571..4b4222145f10 100644 --- a/drivers/gpio/gpio-msm-v2.c +++ b/drivers/gpio/gpio-msm-v2.c @@ -305,7 +305,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) * which have been set as summary IRQ lines and which are triggered, * and to call their interrupt handlers. */ -static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc) +static void msm_summary_irq_handler(struct irq_desc *desc) { unsigned long i; struct irq_chip *chip = irq_desc_get_chip(desc); diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index b396bf3bf294..df418b81456d 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -458,7 +458,7 @@ static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type) return 0; } -static void mvebu_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void mvebu_gpio_irq_handler(struct irq_desc *desc) { struct mvebu_gpio_chip *mvchip = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c index b752b560126e..b8dd847443c5 100644 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c @@ -272,7 +272,7 @@ static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat) } /* MX1 and MX3 has one interrupt *per* gpio port */ -static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc) +static void mx3_gpio_irq_handler(struct irq_desc *desc) { u32 irq_stat; struct mxc_gpio_port *port = irq_desc_get_handler_data(desc); @@ -288,7 +288,7 @@ static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc) } /* MX2 has one interrupt *for all* gpio ports */ -static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc) +static void mx2_gpio_irq_handler(struct irq_desc *desc) { u32 irq_msk, irq_stat; struct mxc_gpio_port *port; @@ -339,13 +339,15 @@ static int gpio_set_wake_irq(struct irq_data *d, u32 enable) return 0; } -static void mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base) +static int mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base) { struct irq_chip_generic *gc; struct irq_chip_type *ct; gc = irq_alloc_generic_chip("gpio-mxc", 1, irq_base, port->base, handle_level_irq); + if (!gc) + return -ENOMEM; gc->private = port; ct = gc->chip_types; @@ -360,6 +362,8 @@ static void mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base) irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK, IRQ_NOREQUEST, 0); + + return 0; } static void mxc_gpio_get_hw(struct platform_device *pdev) @@ -477,12 +481,16 @@ static int mxc_gpio_probe(struct platform_device *pdev) } /* gpio-mxc can be a generic irq chip */ - mxc_gpio_init_gc(port, irq_base); + err = mxc_gpio_init_gc(port, irq_base); + if (err < 0) + goto out_irqdomain_remove; list_add_tail(&port->node, &mxc_gpio_ports); return 0; +out_irqdomain_remove: + irq_domain_remove(port->domain); out_irqdesc_free: irq_free_descs(irq_base, 32); out_gpiochip_remove: diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c index b7f383eb18d9..a4288f428819 100644 --- a/drivers/gpio/gpio-mxs.c +++ b/drivers/gpio/gpio-mxs.c @@ -154,7 +154,7 @@ static void mxs_flip_edge(struct mxs_gpio_port *port, u32 gpio) } /* MXS has one interrupt *per* gpio port */ -static void 
mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc) +static void mxs_gpio_irq_handler(struct irq_desc *desc) { u32 irq_stat; struct mxs_gpio_port *port = irq_desc_get_handler_data(desc); @@ -196,13 +196,16 @@ static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable) return 0; } -static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base) +static int __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base) { struct irq_chip_generic *gc; struct irq_chip_type *ct; gc = irq_alloc_generic_chip("gpio-mxs", 1, irq_base, port->base, handle_level_irq); + if (!gc) + return -ENOMEM; + gc->private = port; ct = gc->chip_types; @@ -216,6 +219,8 @@ static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base) irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK, IRQ_NOREQUEST, 0); + + return 0; } static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset) @@ -317,7 +322,9 @@ static int mxs_gpio_probe(struct platform_device *pdev) } /* gpio-mxs can be a generic irq chip */ - mxs_gpio_init_gc(port, irq_base); + err = mxs_gpio_init_gc(port, irq_base); + if (err < 0) + goto out_irqdomain_remove; /* setup one handler for each entry */ irq_set_chained_handler_and_data(port->irq, mxs_gpio_irq_handler, @@ -343,6 +350,8 @@ static int mxs_gpio_probe(struct platform_device *pdev) out_bgpio_remove: bgpio_remove(&port->bgc); +out_irqdomain_remove: + irq_domain_remove(port->domain); out_irqdesc_free: irq_free_descs(irq_base, 32); return err; diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 2ae0d47e9554..5236db161e76 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -709,7 +709,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) * line's interrupt handler has been run, we may miss some nested * interrupts. 
*/ -static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void omap_gpio_irq_handler(struct irq_desc *desc) { void __iomem *isr_reg = NULL; u32 isr; @@ -1098,7 +1098,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) } else { bank->chip.label = "gpio"; bank->chip.base = gpio; - gpio += bank->width; } bank->chip.ngpio = bank->width; @@ -1108,6 +1107,9 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) return ret; } + if (!bank->is_mpuio) + gpio += bank->width; + #ifdef CONFIG_ARCH_OMAP1 /* * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop @@ -1253,8 +1255,11 @@ static int omap_gpio_probe(struct platform_device *pdev) omap_gpio_mod_init(bank); ret = omap_gpio_chip_init(bank, irqc); - if (ret) + if (ret) { + pm_runtime_put_sync(bank->dev); + pm_runtime_disable(bank->dev); return ret; + } omap_gpio_show_rev(bank); diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c index 04756130437f..229ef653e0f8 100644 --- a/drivers/gpio/gpio-pl061.c +++ b/drivers/gpio/gpio-pl061.c @@ -187,7 +187,7 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger) return 0; } -static void pl061_irq_handler(unsigned irq, struct irq_desc *desc) +static void pl061_irq_handler(struct irq_desc *desc) { unsigned long pending; int offset; diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index 55a11de3d5b7..df2ce550f309 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -401,7 +401,7 @@ static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type) return 0; } -static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc) +static void pxa_gpio_demux_handler(struct irq_desc *desc) { struct pxa_gpio_chip *c; int loop, gpio, gpio_base, n; diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c index 67bd2f5d89e8..990fa9023e22 100644 --- a/drivers/gpio/gpio-sa1100.c +++ b/drivers/gpio/gpio-sa1100.c @@ -172,8 +172,7 @@ static struct irq_domain *sa1100_gpio_irqdomain; * irq_controller_lock held, and IRQs disabled. Decode the IRQ * and call the handler. 
*/ -static void -sa1100_gpio_handler(unsigned int __irq, struct irq_desc *desc) +static void sa1100_gpio_handler(struct irq_desc *desc) { unsigned int irq, mask; diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c index 458d9d7952b8..9c6b96707c9f 100644 --- a/drivers/gpio/gpio-sx150x.c +++ b/drivers/gpio/gpio-sx150x.c @@ -706,4 +706,3 @@ module_exit(sx150x_exit); MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>"); MODULE_DESCRIPTION("Driver for Semtech SX150X I2C GPIO Expanders"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("i2c:sx150x"); diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index 9b14aafb576d..027e5f47dd28 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ -266,7 +266,7 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d) gpiochip_unlock_as_irq(&tegra_gpio_chip, gpio); } -static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void tegra_gpio_irq_handler(struct irq_desc *desc) { int port; int pin; diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c index 5a492054589f..30653e6319e9 100644 --- a/drivers/gpio/gpio-timberdale.c +++ b/drivers/gpio/gpio-timberdale.c @@ -192,7 +192,7 @@ out: return ret; } -static void timbgpio_irq(unsigned int irq, struct irq_desc *desc) +static void timbgpio_irq(struct irq_desc *desc) { struct timbgpio *tgpio = irq_desc_get_handler_data(desc); struct irq_data *data = irq_desc_get_irq_data(desc); diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c index bbac92ae4c32..87bb1b1eee8d 100644 --- a/drivers/gpio/gpio-tz1090.c +++ b/drivers/gpio/gpio-tz1090.c @@ -375,7 +375,7 @@ static int gpio_set_irq_wake(struct irq_data *data, unsigned int on) #define gpio_set_irq_wake NULL #endif -static void tz1090_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void tz1090_gpio_irq_handler(struct irq_desc *desc) { irq_hw_number_t hw; unsigned int irq_stat, irq_no; @@ -400,7 +400,7 @@ static void tz1090_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) == IRQ_TYPE_EDGE_BOTH) tz1090_gpio_irq_next_edge(bank, hw); - generic_handle_irq_desc(irq_no, child_desc); + generic_handle_irq_desc(child_desc); } } diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c index 3d5714d4f405..069f9e4b7daa 100644 --- a/drivers/gpio/gpio-vf610.c +++ b/drivers/gpio/gpio-vf610.c @@ -120,7 +120,7 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, return pinctrl_gpio_direction_output(chip->base + gpio); } -static void vf610_gpio_irq_handler(u32 irq, struct irq_desc *desc) +static void vf610_gpio_irq_handler(struct irq_desc *desc) { struct vf610_gpio_port *port = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); @@ -176,9 +176,9 @@ static int vf610_gpio_irq_set_type(struct irq_data *d, u32 type) port->irqc[d->hwirq] = irqc; if (type & IRQ_TYPE_LEVEL_MASK) - __irq_set_handler_locked(d->irq, handle_level_irq); + irq_set_handler_locked(d, handle_level_irq); else - __irq_set_handler_locked(d->irq, handle_edge_irq); + irq_set_handler_locked(d, handle_edge_irq); return 0; } diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c index 12ee1969298c..4b8a26910705 100644 --- a/drivers/gpio/gpio-zx.c +++ b/drivers/gpio/gpio-zx.c @@ -177,7 +177,7 @@ static int zx_irq_type(struct irq_data *d, unsigned trigger) return 0; } -static void zx_irq_handler(unsigned irq, struct irq_desc *desc) +static void zx_irq_handler(struct irq_desc *desc) { unsigned long pending; int offset; 
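The GPIO hunks above are all the same mechanical conversion: chained handlers lose their "unsigned int irq" parameter and receive only the struct irq_desc, recovering the parent Linux IRQ number (when still needed) from irq_desc_get_irq(desc), and generic_handle_irq_desc() likewise drops its irq argument (see the gpio-tz1090 hunk). What follows is a minimal sketch of the resulting handler shape using only generic gpiolib/irq helpers; it is not any one driver's code, and the empty "pending" mask stands in for whatever status register a real driver would read.

#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/gpio/driver.h>

static void example_gpio_irq_handler(struct irq_desc *desc)
{
	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long pending = 0;	/* a real driver reads its IRQ status register here */
	int offset;

	chained_irq_enter(irqchip, desc);
	/* demux each pending line and hand it to the core by virtual IRQ */
	for_each_set_bit(offset, &pending, gc->ngpio)
		generic_handle_irq(irq_find_mapping(gc->irqdomain, offset));
	chained_irq_exit(irqchip, desc);
}

The handler is registered unchanged via irq_set_chained_handler_and_data(), which is why every call site in the hunks above only needed its prototype trimmed.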
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index 27348e7cb705..1d1a5865ede9 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c @@ -514,7 +514,7 @@ static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio, * application for that pin. * Note: A bug is reported if no handler is set for the gpio pin. */ -static void zynq_gpio_irqhandler(unsigned int irq, struct irq_desc *desc) +static void zynq_gpio_irqhandler(struct irq_desc *desc) { u32 int_sts, int_enb; unsigned int bank_num; diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 980c1f87866a..5db3445552b1 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1174,15 +1174,16 @@ EXPORT_SYMBOL_GPL(gpiod_is_active_low); * that the GPIO was actually requested. */ -static bool _gpiod_get_raw_value(const struct gpio_desc *desc) +static int _gpiod_get_raw_value(const struct gpio_desc *desc) { struct gpio_chip *chip; - bool value; int offset; + int value; chip = desc->chip; offset = gpio_chip_hwgpio(desc); - value = chip->get ? chip->get(chip, offset) : false; + value = chip->get ? chip->get(chip, offset) : -EIO; + value = value < 0 ? value : !!value; trace_gpio_value(desc_to_gpio(desc), 1, value); return value; } @@ -1192,7 +1193,7 @@ static bool _gpiod_get_raw_value(const struct gpio_desc *desc) * @desc: gpio whose value will be returned * * Return the GPIO's raw value, i.e. the value of the physical line disregarding - * its ACTIVE_LOW status. + * its ACTIVE_LOW status, or negative errno on failure. * * This function should be called from contexts where we cannot sleep, and will * complain if the GPIO chip functions potentially sleep. @@ -1212,7 +1213,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_value); * @desc: gpio whose value will be returned * * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into - * account. + * account, or negative errno on failure. * * This function should be called from contexts where we cannot sleep, and will * complain if the GPIO chip functions potentially sleep. @@ -1226,6 +1227,9 @@ int gpiod_get_value(const struct gpio_desc *desc) WARN_ON(desc->chip->can_sleep); value = _gpiod_get_raw_value(desc); + if (value < 0) + return value; + if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) value = !value; @@ -1548,7 +1552,7 @@ EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq); * @desc: gpio whose value will be returned * * Return the GPIO's raw value, i.e. the value of the physical line disregarding - * its ACTIVE_LOW status. + * its ACTIVE_LOW status, or negative errno on failure. * * This function is to be called from contexts that can sleep. */ @@ -1566,7 +1570,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_value_cansleep); * @desc: gpio whose value will be returned * * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into - * account. + * account, or negative errno on failure. * * This function is to be called from contexts that can sleep. 
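The gpiolib hunk above turns _gpiod_get_raw_value() from bool into int so a missing .get callback (or a failing controller) surfaces as a negative errno instead of silently reading as 0, and gpiod_get_value() / gpiod_get_value_cansleep() now pass that error through before applying the active-low inversion. A small consumer-side sketch of how a caller is expected to treat the return value under this change; "ready" is a made-up descriptor name (for example obtained with devm_gpiod_get()), not part of the patch.

#include <linux/errno.h>
#include <linux/gpio/consumer.h>

static int example_check_ready(struct gpio_desc *ready)
{
	int value = gpiod_get_value(ready);

	if (value < 0)
		return value;		/* controller failure, e.g. -EIO */

	return value ? 0 : -EBUSY;	/* logical level, active-low already applied */
}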
*/ @@ -1579,6 +1583,9 @@ int gpiod_get_value_cansleep(const struct gpio_desc *desc) return 0; value = _gpiod_get_raw_value(desc); + if (value < 0) + return value; + if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) value = !value; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 668939a14206..6647fb26ef25 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -82,6 +82,7 @@ extern int amdgpu_vm_block_size; extern int amdgpu_enable_scheduler; extern int amdgpu_sched_jobs; extern int amdgpu_sched_hw_submission; +extern int amdgpu_enable_semaphores; #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ @@ -432,7 +433,7 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev); void amdgpu_fence_driver_fini(struct amdgpu_device *adev); void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev); -void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); +int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, struct amdgpu_irq_src *irq_src, unsigned irq_type); @@ -890,7 +891,7 @@ struct amdgpu_ring { struct amdgpu_device *adev; const struct amdgpu_ring_funcs *funcs; struct amdgpu_fence_driver fence_drv; - struct amd_gpu_scheduler *scheduler; + struct amd_gpu_scheduler sched; spinlock_t fence_lock; struct mutex *ring_lock; @@ -1201,8 +1202,6 @@ struct amdgpu_gfx { struct amdgpu_irq_src priv_inst_irq; /* gfx status */ uint32_t gfx_current_status; - /* sync signal for const engine */ - unsigned ce_sync_offs; /* ce ram size*/ unsigned ce_ram_size; }; @@ -1274,8 +1273,10 @@ struct amdgpu_job { uint32_t num_ibs; struct mutex job_lock; struct amdgpu_user_fence uf; - int (*free_job)(struct amdgpu_job *sched_job); + int (*free_job)(struct amdgpu_job *job); }; +#define to_amdgpu_job(sched_job) \ + container_of((sched_job), struct amdgpu_job, base) static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 496ed2192eba..84d68d658f8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -183,7 +183,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, return -ENOMEM; r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, - AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &(*mem)->bo); + AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo); if (r) { dev_err(rdev->dev, "failed to allocate BO for amdkfd (%d)\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 77f1d7c6ea3a..9416e0f5c1db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -672,8 +672,12 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) /* disp clock */ adev->clock.default_dispclk = le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq); - if (adev->clock.default_dispclk == 0) - adev->clock.default_dispclk = 54000; /* 540 Mhz */ + /* set a reasonable default for DP */ + if (adev->clock.default_dispclk < 53900) { + DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", + adev->clock.default_dispclk / 100); + adev->clock.default_dispclk = 60000; + } adev->clock.dp_extclk = le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); adev->clock.current_dispclk = 
adev->clock.default_dispclk; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index 98d59ee640ce..cd639c362df3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c @@ -79,7 +79,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size, int time; n = AMDGPU_BENCHMARK_ITERATIONS; - r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj); + r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, + NULL, &sobj); if (r) { goto out_cleanup; } @@ -91,7 +92,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size, if (r) { goto out_cleanup; } - r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj); + r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, + NULL, &dobj); if (r) { goto out_cleanup; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 6b1243f9f86d..8e995148f56e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -86,7 +86,7 @@ static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem, struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages); ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false, - AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo); + AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo); if (ret) return ret; ret = amdgpu_bo_reserve(bo, false); @@ -197,7 +197,8 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device, ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE, true, domain, flags, - NULL, &placement, &obj); + NULL, &placement, NULL, + &obj); if (ret) { DRM_ERROR("(%d) bo create failed\n", ret); return ret; @@ -207,44 +208,6 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device, return ret; } -static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd, - cgs_handle_t *handle) -{ - CGS_FUNC_ADEV; - int r; - uint32_t dma_handle; - struct drm_gem_object *obj; - struct amdgpu_bo *bo; - struct drm_device *dev = adev->ddev; - struct drm_file *file_priv = NULL, *priv; - - mutex_lock(&dev->struct_mutex); - list_for_each_entry(priv, &dev->filelist, lhead) { - rcu_read_lock(); - if (priv->pid == get_pid(task_pid(current))) - file_priv = priv; - rcu_read_unlock(); - if (file_priv) - break; - } - mutex_unlock(&dev->struct_mutex); - r = dev->driver->prime_fd_to_handle(dev, - file_priv, dmabuf_fd, - &dma_handle); - spin_lock(&file_priv->table_lock); - - /* Check if we currently have a reference on the object */ - obj = idr_find(&file_priv->object_idr, dma_handle); - if (obj == NULL) { - spin_unlock(&file_priv->table_lock); - return -EINVAL; - } - spin_unlock(&file_priv->table_lock); - bo = gem_to_amdgpu_bo(obj); - *handle = (cgs_handle_t)bo; - return 0; -} - static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle) { struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; @@ -809,7 +772,6 @@ static const struct cgs_ops amdgpu_cgs_ops = { }; static const struct cgs_os_ops amdgpu_cgs_os_ops = { - amdgpu_cgs_import_gpu_mem, amdgpu_cgs_add_irq_source, amdgpu_cgs_irq_get, amdgpu_cgs_irq_put diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 3b355aeb62fd..fd16652aa277 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -154,42 +154,42 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) { union drm_amdgpu_cs *cs = data; uint64_t 
*chunk_array_user; - uint64_t *chunk_array = NULL; + uint64_t *chunk_array; struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - unsigned size, i; - int r = 0; + unsigned size; + int i; + int ret; - if (!cs->in.num_chunks) - goto out; + if (cs->in.num_chunks == 0) + return 0; + + chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL); + if (!chunk_array) + return -ENOMEM; p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id); if (!p->ctx) { - r = -EINVAL; - goto out; + ret = -EINVAL; + goto free_chunk; } + p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle); /* get chunks */ INIT_LIST_HEAD(&p->validated); - chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL); - if (chunk_array == NULL) { - r = -ENOMEM; - goto out; - } - - chunk_array_user = (uint64_t __user *)(cs->in.chunks); + chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks); if (copy_from_user(chunk_array, chunk_array_user, sizeof(uint64_t)*cs->in.num_chunks)) { - r = -EFAULT; - goto out; + ret = -EFAULT; + goto put_bo_list; } p->nchunks = cs->in.num_chunks; p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), GFP_KERNEL); - if (p->chunks == NULL) { - r = -ENOMEM; - goto out; + if (!p->chunks) { + ret = -ENOMEM; + goto put_bo_list; } for (i = 0; i < p->nchunks; i++) { @@ -197,28 +197,30 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) struct drm_amdgpu_cs_chunk user_chunk; uint32_t __user *cdata; - chunk_ptr = (void __user *)chunk_array[i]; + chunk_ptr = (void __user *)(unsigned long)chunk_array[i]; if (copy_from_user(&user_chunk, chunk_ptr, sizeof(struct drm_amdgpu_cs_chunk))) { - r = -EFAULT; - goto out; + ret = -EFAULT; + i--; + goto free_partial_kdata; } p->chunks[i].chunk_id = user_chunk.chunk_id; p->chunks[i].length_dw = user_chunk.length_dw; size = p->chunks[i].length_dw; - cdata = (void __user *)user_chunk.chunk_data; + cdata = (void __user *)(unsigned long)user_chunk.chunk_data; p->chunks[i].user_ptr = cdata; p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t)); if (p->chunks[i].kdata == NULL) { - r = -ENOMEM; - goto out; + ret = -ENOMEM; + i--; + goto free_partial_kdata; } size *= sizeof(uint32_t); if (copy_from_user(p->chunks[i].kdata, cdata, size)) { - r = -EFAULT; - goto out; + ret = -EFAULT; + goto free_partial_kdata; } switch (p->chunks[i].chunk_id) { @@ -238,15 +240,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) gobj = drm_gem_object_lookup(p->adev->ddev, p->filp, handle); if (gobj == NULL) { - r = -EINVAL; - goto out; + ret = -EINVAL; + goto free_partial_kdata; } p->uf.bo = gem_to_amdgpu_bo(gobj); p->uf.offset = fence_data->offset; } else { - r = -EINVAL; - goto out; + ret = -EINVAL; + goto free_partial_kdata; } break; @@ -254,19 +256,35 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) break; default: - r = -EINVAL; - goto out; + ret = -EINVAL; + goto free_partial_kdata; } } p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL); - if (!p->ibs) - r = -ENOMEM; + if (!p->ibs) { + ret = -ENOMEM; + goto free_all_kdata; + } -out: kfree(chunk_array); - return r; + return 0; + +free_all_kdata: + i = p->nchunks - 1; +free_partial_kdata: + for (; i >= 0; i--) + drm_free_large(p->chunks[i].kdata); + kfree(p->chunks); +put_bo_list: + if (p->bo_list) + amdgpu_bo_list_put(p->bo_list); + amdgpu_ctx_put(p->ctx); +free_chunk: + kfree(chunk_array); + + return ret; } /* Returns how many bytes TTM can move per IB. 
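The amdgpu_cs_parser_init() rework above replaces the single "out:" label with a reverse-order unwind (free_all_kdata, free_partial_kdata, put_bo_list, free_chunk), so a failure part-way through chunk parsing releases exactly what has been allocated so far, and it casts the 64-bit user handles through (unsigned long) before forming __user pointers. Below is a compressed sketch of that goto-unwind idiom only, with invented names (example_ctx, example_setup) and a fixed 4 KiB payload; it illustrates the ordering, not the parser itself.

#include <linux/errno.h>
#include <linux/slab.h>

struct example_entry {
	void *data;
};

struct example_ctx {
	struct example_entry *table;
};

static int example_setup(struct example_ctx *ctx, unsigned int count)
{
	unsigned int i;
	int ret;

	ctx->table = kmalloc_array(count, sizeof(*ctx->table), GFP_KERNEL);
	if (!ctx->table)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ctx->table[i].data = kzalloc(4096, GFP_KERNEL);
		if (!ctx->table[i].data) {
			ret = -ENOMEM;
			goto free_partial;	/* entries 0..i-1 are valid */
		}
	}
	return 0;

free_partial:
	while (i--)
		kfree(ctx->table[i].data);
	kfree(ctx->table);
	return ret;
}

The labels unwind in strict reverse order of acquisition, which is the same property the new free_all_kdata/free_partial_kdata/put_bo_list/free_chunk chain gives the parser.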
@@ -321,25 +339,17 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) return max(bytes_moved_threshold, 1024*1024ull); } -int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p) +int amdgpu_cs_list_validate(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + struct list_head *validated) { - struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - struct amdgpu_vm *vm = &fpriv->vm; - struct amdgpu_device *adev = p->adev; struct amdgpu_bo_list_entry *lobj; - struct list_head duplicates; struct amdgpu_bo *bo; u64 bytes_moved = 0, initial_bytes_moved; u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev); int r; - INIT_LIST_HEAD(&duplicates); - r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates); - if (unlikely(r != 0)) { - return r; - } - - list_for_each_entry(lobj, &p->validated, tv.head) { + list_for_each_entry(lobj, validated, tv.head) { bo = lobj->robj; if (!bo->pin_count) { u32 domain = lobj->prefered_domains; @@ -373,7 +383,6 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p) domain = lobj->allowed_domains; goto retry; } - ttm_eu_backoff_reservation(&p->ticket, &p->validated); return r; } } @@ -386,6 +395,7 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) { struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct amdgpu_cs_buckets buckets; + struct list_head duplicates; bool need_mmap_lock = false; int i, r; @@ -405,8 +415,22 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) if (need_mmap_lock) down_read(¤t->mm->mmap_sem); - r = amdgpu_cs_list_validate(p); + INIT_LIST_HEAD(&duplicates); + r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates); + if (unlikely(r != 0)) + goto error_reserve; + + r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated); + if (r) + goto error_validate; + + r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates); + +error_validate: + if (r) + ttm_eu_backoff_reservation(&p->ticket, &p->validated); +error_reserve: if (need_mmap_lock) up_read(¤t->mm->mmap_sem); @@ -772,15 +796,15 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, return 0; } -static int amdgpu_cs_free_job(struct amdgpu_job *sched_job) +static int amdgpu_cs_free_job(struct amdgpu_job *job) { int i; - if (sched_job->ibs) - for (i = 0; i < sched_job->num_ibs; i++) - amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]); - kfree(sched_job->ibs); - if (sched_job->uf.bo) - drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base); + if (job->ibs) + for (i = 0; i < job->num_ibs; i++) + amdgpu_ib_free(job->adev, &job->ibs[i]); + kfree(job->ibs); + if (job->uf.bo) + drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base); return 0; } @@ -804,7 +828,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) r = amdgpu_cs_parser_init(parser, data); if (r) { DRM_ERROR("Failed to initialize parser !\n"); - amdgpu_cs_parser_fini(parser, r, false); + kfree(parser); up_read(&adev->exclusive_lock); r = amdgpu_cs_handle_lockup(adev, r); return r; @@ -842,7 +866,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); if (!job) return -ENOMEM; - job->base.sched = ring->scheduler; + job->base.sched = &ring->sched; job->base.s_entity = &parser->ctx->rings[ring->idx].entity; job->adev = parser->adev; job->ibs = parser->ibs; @@ -857,7 +881,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) job->free_job = amdgpu_cs_free_job; 
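Several of the hunks above and below follow from two structural changes: struct amdgpu_ring now embeds the scheduler ("struct amd_gpu_scheduler sched" replaces the old pointer), and amdgpu.h gains the to_amdgpu_job() helper. Code that used to chase scheduler->ring_id or cast amd_sched_job pointers now recovers the containing object with container_of(). A sketch of the two lookups those call sites rely on; the member names ("base", "sched") match the structure hunks shown above, but the "example_" helpers themselves are illustrative rather than the driver's literal code.

#include <linux/kernel.h>	/* container_of() */
#include "amdgpu.h"		/* driver-local header declaring the structures used below */

/* job->base is the embedded struct amd_sched_job, as in the amdgpu.h hunk */
static inline struct amdgpu_job *example_to_amdgpu_job(struct amd_sched_job *sched_job)
{
	return container_of(sched_job, struct amdgpu_job, base);
}

/* ring->sched is the embedded scheduler, so the ring (and ring->idx) is one
 * container_of() away from any amd_gpu_scheduler or amd_sched_fence->sched */
static inline struct amdgpu_ring *example_sched_to_ring(struct amd_gpu_scheduler *sched)
{
	return container_of(sched, struct amdgpu_ring, sched);
}

This is why amdgpu_sa_get_ring_from_fence(), amdgpu_sync_same_dev() and the scheduler callbacks in the later hunks no longer need a ring_id or priv field on the scheduler.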
mutex_lock(&job->job_lock); - r = amd_sched_entity_push_job((struct amd_sched_job *)job); + r = amd_sched_entity_push_job(&job->base); if (r) { mutex_unlock(&job->job_lock); amdgpu_cs_free_job(job); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 20cbc4eb5a6f..e0b80ccdfe8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -43,10 +43,10 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel, for (i = 0; i < adev->num_rings; i++) { struct amd_sched_rq *rq; if (kernel) - rq = &adev->rings[i]->scheduler->kernel_rq; + rq = &adev->rings[i]->sched.kernel_rq; else - rq = &adev->rings[i]->scheduler->sched_rq; - r = amd_sched_entity_init(adev->rings[i]->scheduler, + rq = &adev->rings[i]->sched.sched_rq; + r = amd_sched_entity_init(&adev->rings[i]->sched, &ctx->rings[i].entity, rq, amdgpu_sched_jobs); if (r) @@ -55,7 +55,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel, if (i < adev->num_rings) { for (j = 0; j < i; j++) - amd_sched_entity_fini(adev->rings[j]->scheduler, + amd_sched_entity_fini(&adev->rings[j]->sched, &ctx->rings[j].entity); kfree(ctx); return r; @@ -75,7 +75,7 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) if (amdgpu_enable_scheduler) { for (i = 0; i < adev->num_rings; i++) - amd_sched_entity_fini(adev->rings[i]->scheduler, + amd_sched_entity_fini(&adev->rings[i]->sched, &ctx->rings[i].entity); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6ff6ae945794..6068d8207d10 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -246,7 +246,7 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, &adev->vram_scratch.robj); + NULL, NULL, &adev->vram_scratch.robj); if (r) { return r; } @@ -449,7 +449,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) if (adev->wb.wb_obj == NULL) { r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, &adev->wb.wb_obj); + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, + &adev->wb.wb_obj); if (r) { dev_warn(adev->dev, "(%d) create WB bo failed\n", r); return r; @@ -1650,9 +1651,11 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) drm_kms_helper_poll_disable(dev); /* turn off display hw */ + drm_modeset_lock_all(dev); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); } + drm_modeset_unlock_all(dev); /* unpin the front buffers */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { @@ -1747,9 +1750,11 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) if (fbcon) { drm_helper_resume_force_mode(dev); /* turn on display hw */ + drm_modeset_lock_all(dev); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); } + drm_modeset_unlock_all(dev); } drm_kms_helper_poll_enable(dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index e3d70772b531..dc29ed8145c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -85,8 +85,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work) /* We borrow the event spin lock for 
protecting flip_status */ spin_lock_irqsave(&crtc->dev->event_lock, flags); - /* set the proper interrupt */ - amdgpu_irq_get(adev, &adev->pageflip_irq, work->crtc_id); /* do the flip (mmio) */ adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); /* set the flip status */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 0fcc0bd1622c..b190c2a83680 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -79,6 +79,7 @@ int amdgpu_exp_hw_support = 0; int amdgpu_enable_scheduler = 0; int amdgpu_sched_jobs = 16; int amdgpu_sched_hw_submission = 2; +int amdgpu_enable_semaphores = 1; MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); @@ -152,6 +153,9 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444); MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)"); module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); +MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable (default), 0 = disable)"); +module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644); + static struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_CIK /* Kaveri */ @@ -238,11 +242,11 @@ static struct pci_device_id pciidlist[] = { {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, #endif /* topaz */ - {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, - {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, - {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, - {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, - {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, + {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, /* tonga */ {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 8a122b1b7786..96290d9cddca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -402,3 +402,19 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj) return true; return false; } + +void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev) +{ + struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev; + struct drm_fb_helper *fb_helper; + int ret; + + if (!afbdev) + return; + + fb_helper = &afbdev->helper; + + ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); + if (ret) + DRM_DEBUG("failed to restore crtc mode\n"); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 1be2bd6d07ea..b3fc26c59787 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -609,9 +609,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, * Init the fence driver for the requested ring (all asics). * Helper function for amdgpu_fence_driver_init(). 
*/ -void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) +int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) { - int i; + int i, r; ring->fence_drv.cpu_addr = NULL; ring->fence_drv.gpu_addr = 0; @@ -625,15 +625,19 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) amdgpu_fence_check_lockup); ring->fence_drv.ring = ring; + init_waitqueue_head(&ring->fence_drv.fence_queue); + if (amdgpu_enable_scheduler) { - ring->scheduler = amd_sched_create(&amdgpu_sched_ops, - ring->idx, - amdgpu_sched_hw_submission, - (void *)ring->adev); - if (!ring->scheduler) - DRM_ERROR("Failed to create scheduler on ring %d.\n", - ring->idx); + r = amd_sched_init(&ring->sched, &amdgpu_sched_ops, + amdgpu_sched_hw_submission, ring->name); + if (r) { + DRM_ERROR("Failed to create scheduler on ring %s.\n", + ring->name); + return r; + } } + + return 0; } /** @@ -681,8 +685,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) wake_up_all(&ring->fence_drv.fence_queue); amdgpu_irq_put(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); - if (ring->scheduler) - amd_sched_destroy(ring->scheduler); + amd_sched_fini(&ring->sched); ring->fence_drv.initialized = false; } mutex_unlock(&adev->ring_lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index cbd3a486c5c2..7312d729d300 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -127,7 +127,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, adev->gart.table_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, &adev->gart.robj); + NULL, NULL, &adev->gart.robj); if (r) { return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 5839fab374bf..7297ca3a0ba7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -69,7 +69,8 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, } } retry: - r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, flags, NULL, &robj); + r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, + flags, NULL, NULL, &robj); if (r) { if (r != -ERESTARTSYS) { if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { @@ -426,6 +427,10 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, &args->data.data_size_bytes, &args->data.flags); } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) { + if (args->data.data_size_bytes > sizeof(args->data.data)) { + r = -EINVAL; + goto unreserve; + } r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info); if (!r) r = amdgpu_bo_set_metadata(robj, args->data.data, @@ -433,6 +438,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, args->data.flags); } +unreserve: amdgpu_bo_unreserve(robj); out: drm_gem_object_unreference_unlocked(gobj); @@ -454,11 +460,12 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, struct ttm_validate_buffer tv, *entry; struct amdgpu_bo_list_entry *vm_bos; struct ww_acquire_ctx ticket; - struct list_head list; + struct list_head list, duplicates; unsigned domain; int r; INIT_LIST_HEAD(&list); + INIT_LIST_HEAD(&duplicates); tv.bo = &bo_va->bo->tbo; tv.shared = true; @@ -468,7 +475,8 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, if (!vm_bos) return; - r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); + /* Provide duplicates to avoid -EALREADY */ + 
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); if (r) goto error_free; @@ -651,7 +659,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, int r; args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8); - args->size = args->pitch * args->height; + args->size = (u64)args->pitch * args->height; args->size = ALIGN(args->size, PAGE_SIZE); r = amdgpu_gem_object_create(adev, args->size, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index 5c8a803acedc..534fc04e80fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -43,7 +43,7 @@ static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, adev->irq.ih.ring_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, 0, - NULL, &adev->irq.ih.ring_obj); + NULL, NULL, &adev->irq.ih.ring_obj); if (r) { DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 0aba8e9bc8a0..7c42ff670080 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -140,7 +140,7 @@ void amdgpu_irq_preinstall(struct drm_device *dev) */ int amdgpu_irq_postinstall(struct drm_device *dev) { - dev->max_vblank_count = 0x001fffff; + dev->max_vblank_count = 0x00ffffff; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 22367939ebf1..5d11e798230c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -390,7 +390,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0; } case AMDGPU_INFO_READ_MMR_REG: { - unsigned n, alloc_size = info->read_mmr_reg.count * 4; + unsigned n, alloc_size; uint32_t *regs; unsigned se_num = (info->read_mmr_reg.instance >> AMDGPU_INFO_MMR_SE_INDEX_SHIFT) & @@ -406,9 +406,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) sh_num = 0xffffffff; - regs = kmalloc(alloc_size, GFP_KERNEL); + regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL); if (!regs) return -ENOMEM; + alloc_size = info->read_mmr_reg.count * sizeof(*regs); for (i = 0; i < info->read_mmr_reg.count; i++) if (amdgpu_asic_read_register(adev, se_num, sh_num, @@ -484,7 +485,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file * Outdated mess for old drm with Xorg being in charge (void function now). 
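Two of the fixes above address the same integer-overflow hazard: amdgpu_info_ioctl() now sizes its register buffer with kmalloc_array() instead of multiplying a user-supplied count by 4 in an unsigned int, and amdgpu_mode_dumb_create() widens the pitch-times-height product to u64 before aligning it. A short sketch of the safer sizing pattern; the struct and field names here are invented for illustration and do not appear in the patch.

#include <linux/slab.h>
#include <linux/types.h>

struct example_req {
	u32 count;	/* user-controlled element count */
	u32 pitch;
	u32 height;
};

static u32 *example_alloc_regs(const struct example_req *req, u64 *image_size)
{
	/* kmalloc_array() fails instead of wrapping when count * sizeof(u32)
	 * overflows, unlike an open-coded "count * 4" held in a 32-bit variable */
	u32 *regs = kmalloc_array(req->count, sizeof(*regs), GFP_KERNEL);

	/* widen before multiplying so a large pitch * height cannot wrap */
	*image_size = (u64)req->pitch * req->height;

	return regs;
}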
*/ /** - * amdgpu_driver_firstopen_kms - drm callback for last close + * amdgpu_driver_lastclose_kms - drm callback for last close * * @dev: drm dev pointer * @@ -492,6 +493,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file */ void amdgpu_driver_lastclose_kms(struct drm_device *dev) { + struct amdgpu_device *adev = dev->dev_private; + + amdgpu_fbdev_restore_mode(adev); vga_switcheroo_process_delayed_switch(); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 64efe5b52e65..7bd470d9ac30 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -567,6 +567,7 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev); void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state); int amdgpu_fbdev_total_size(struct amdgpu_device *adev); bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj); +void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev); void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 08b09d55b96f..1a7708f365f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -215,6 +215,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, bool kernel, u32 domain, u64 flags, struct sg_table *sg, struct ttm_placement *placement, + struct reservation_object *resv, struct amdgpu_bo **bo_ptr) { struct amdgpu_bo *bo; @@ -261,7 +262,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, /* Kernel allocation are uninterruptible */ r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, !kernel, NULL, - acc_size, sg, NULL, &amdgpu_ttm_bo_destroy); + acc_size, sg, resv, &amdgpu_ttm_bo_destroy); if (unlikely(r != 0)) { return r; } @@ -275,7 +276,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags, - struct sg_table *sg, struct amdgpu_bo **bo_ptr) + struct sg_table *sg, + struct reservation_object *resv, + struct amdgpu_bo **bo_ptr) { struct ttm_placement placement = {0}; struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; @@ -286,11 +289,9 @@ int amdgpu_bo_create(struct amdgpu_device *adev, amdgpu_ttm_placement_init(adev, &placement, placements, domain, flags); - return amdgpu_bo_create_restricted(adev, size, byte_align, - kernel, domain, flags, - sg, - &placement, - bo_ptr); + return amdgpu_bo_create_restricted(adev, size, byte_align, kernel, + domain, flags, sg, &placement, + resv, bo_ptr); } int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) @@ -535,12 +536,10 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata, if (metadata == NULL) return -EINVAL; - buffer = kzalloc(metadata_size, GFP_KERNEL); + buffer = kmemdup(metadata, metadata_size, GFP_KERNEL); if (buffer == NULL) return -ENOMEM; - memcpy(buffer, metadata, metadata_size); - kfree(bo->metadata); bo->metadata_flags = flags; bo->metadata = buffer; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 6ea18dcec561..3c2ff4567798 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -129,12 +129,14 @@ int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size, int byte_align, bool kernel, u32 
domain, u64 flags, struct sg_table *sg, + struct reservation_object *resv, struct amdgpu_bo **bo_ptr); int amdgpu_bo_create_restricted(struct amdgpu_device *adev, unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags, struct sg_table *sg, struct ttm_placement *placement, + struct reservation_object *resv, struct amdgpu_bo **bo_ptr); int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); void amdgpu_bo_kunmap(struct amdgpu_bo *bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index efed11509f4a..ed2bbe5b10af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -294,10 +294,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, struct amdgpu_device *adev = dev_get_drvdata(dev); umode_t effective_mode = attr->mode; - /* Skip limit attributes if DPM is not enabled */ + /* Skip attributes if DPM is not enabled */ if (!adev->pm.dpm_enabled && (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || - attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) + attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || + attr == &sensor_dev_attr_pwm1.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) return 0; /* Skip fan attributes if fan is not present */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index d9652fe32d6a..59f735a933a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -61,12 +61,15 @@ struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg) { + struct reservation_object *resv = attach->dmabuf->resv; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_bo *bo; int ret; + ww_mutex_lock(&resv->lock, NULL); ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false, - AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo); + AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo); + ww_mutex_unlock(&resv->lock); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 9bec91484c24..30dce235ddeb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -357,11 +357,11 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, ring->adev = adev; ring->idx = adev->num_rings++; adev->rings[ring->idx] = ring; - amdgpu_fence_driver_init_ring(ring); + r = amdgpu_fence_driver_init_ring(ring); + if (r) + return r; } - init_waitqueue_head(&ring->fence_drv.fence_queue); - r = amdgpu_wb_get(adev, &ring->rptr_offs); if (r) { dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); @@ -407,7 +407,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, if (ring->ring_obj == NULL) { r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, 0, - NULL, &ring->ring_obj); + NULL, NULL, &ring->ring_obj); if (r) { dev_err(adev->dev, "(%d) ring create failed\n", r); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 74dad270362c..e90712443fe9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -64,8 +64,8 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev, 
INIT_LIST_HEAD(&sa_manager->flist[i]); } - r = amdgpu_bo_create(adev, size, align, true, - domain, 0, NULL, &sa_manager->bo); + r = amdgpu_bo_create(adev, size, align, true, domain, + 0, NULL, NULL, &sa_manager->bo); if (r) { dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r); return r; @@ -145,8 +145,13 @@ static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f) struct amd_sched_fence *s_fence; s_fence = to_amd_sched_fence(f); - if (s_fence) - return s_fence->scheduler->ring_id; + if (s_fence) { + struct amdgpu_ring *ring; + + ring = container_of(s_fence->sched, struct amdgpu_ring, sched); + return ring->idx; + } + a_fence = to_amdgpu_fence(f); if (a_fence) return a_fence->ring->idx; @@ -412,6 +417,26 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, } #if defined(CONFIG_DEBUG_FS) + +static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m) +{ + struct amdgpu_fence *a_fence = to_amdgpu_fence(fence); + struct amd_sched_fence *s_fence = to_amd_sched_fence(fence); + + if (a_fence) + seq_printf(m, " protected by 0x%016llx on ring %d", + a_fence->seq, a_fence->ring->idx); + + if (s_fence) { + struct amdgpu_ring *ring; + + + ring = container_of(s_fence->sched, struct amdgpu_ring, sched); + seq_printf(m, " protected by 0x%016x on ring %d", + s_fence->base.seqno, ring->idx); + } +} + void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, struct seq_file *m) { @@ -428,18 +453,8 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, } seq_printf(m, "[0x%010llx 0x%010llx] size %8lld", soffset, eoffset, eoffset - soffset); - if (i->fence) { - struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence); - struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence); - if (a_fence) - seq_printf(m, " protected by 0x%016llx on ring %d", - a_fence->seq, a_fence->ring->idx); - if (s_fence) - seq_printf(m, " protected by 0x%016x on ring %d", - s_fence->base.seqno, - s_fence->scheduler->ring_id); - - } + if (i->fence) + amdgpu_sa_bo_dump_fence(i->fence, m); seq_printf(m, "\n"); } spin_unlock(&sa_manager->wq.lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index de98fbd2971e..2e946b2cad88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -27,63 +27,48 @@ #include <drm/drmP.h> #include "amdgpu.h" -static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job) +static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) { - struct amdgpu_job *sched_job = (struct amdgpu_job *)job; - return amdgpu_sync_get_fence(&sched_job->ibs->sync); + struct amdgpu_job *job = to_amdgpu_job(sched_job); + return amdgpu_sync_get_fence(&job->ibs->sync); } -static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job) +static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job) { - struct amdgpu_job *sched_job; - struct amdgpu_fence *fence; + struct amdgpu_fence *fence = NULL; + struct amdgpu_job *job; int r; - if (!job) { + if (!sched_job) { DRM_ERROR("job is null\n"); return NULL; } - sched_job = (struct amdgpu_job *)job; - mutex_lock(&sched_job->job_lock); - r = amdgpu_ib_schedule(sched_job->adev, - sched_job->num_ibs, - sched_job->ibs, - sched_job->base.owner); - if (r) + job = to_amdgpu_job(sched_job); + mutex_lock(&job->job_lock); + r = amdgpu_ib_schedule(job->adev, + job->num_ibs, + job->ibs, + job->base.owner); + if (r) { + DRM_ERROR("Error scheduling IBs 
(%d)\n", r); goto err; - fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence); - - if (sched_job->free_job) - sched_job->free_job(sched_job); + } - mutex_unlock(&sched_job->job_lock); - return &fence->base; + fence = amdgpu_fence_ref(job->ibs[job->num_ibs - 1].fence); err: - DRM_ERROR("Run job error\n"); - mutex_unlock(&sched_job->job_lock); - job->sched->ops->process_job(job); - return NULL; -} + if (job->free_job) + job->free_job(job); -static void amdgpu_sched_process_job(struct amd_sched_job *job) -{ - struct amdgpu_job *sched_job; - - if (!job) { - DRM_ERROR("job is null\n"); - return; - } - sched_job = (struct amdgpu_job *)job; - /* after processing job, free memory */ - fence_put(&sched_job->base.s_fence->base); - kfree(sched_job); + mutex_unlock(&job->job_lock); + fence_put(&job->base.s_fence->base); + kfree(job); + return fence ? &fence->base : NULL; } struct amd_sched_backend_ops amdgpu_sched_ops = { .dependency = amdgpu_sched_dependency, .run_job = amdgpu_sched_run_job, - .process_job = amdgpu_sched_process_job }; int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, @@ -100,7 +85,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); if (!job) return -ENOMEM; - job->base.sched = ring->scheduler; + job->base.sched = &ring->sched; job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; job->adev = adev; job->ibs = ibs; @@ -109,7 +94,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, mutex_init(&job->job_lock); job->free_job = free_job; mutex_lock(&job->job_lock); - r = amd_sched_entity_push_job((struct amd_sched_job *)job); + r = amd_sched_entity_push_job(&job->base); if (r) { mutex_unlock(&job->job_lock); kfree(job); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 068aeaff7183..4921de15b451 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -65,8 +65,14 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f) if (a_fence) return a_fence->ring->adev == adev; - if (s_fence) - return (struct amdgpu_device *)s_fence->scheduler->priv == adev; + + if (s_fence) { + struct amdgpu_ring *ring; + + ring = container_of(s_fence->sched, struct amdgpu_ring, sched); + return ring->adev == adev; + } + return false; } @@ -251,6 +257,20 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync) fence_put(e->fence); kfree(e); } + + if (amdgpu_enable_semaphores) + return 0; + + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { + struct amdgpu_fence *fence = sync->sync_to[i]; + if (!fence) + continue; + + r = fence_wait(&fence->base, false); + if (r) + return r; + } + return 0; } @@ -285,7 +305,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync, return -EINVAL; } - if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) { + if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores || + (count >= AMDGPU_NUM_SYNCS)) { /* not enough room, wait manually */ r = fence_wait(&fence->base, false); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index f80b1a43be8a..4865615e9c06 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -59,8 +59,9 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) goto out_cleanup; } - r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0, - NULL, &vram_obj); + r = amdgpu_bo_create(adev, size, 
PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_VRAM, 0, + NULL, NULL, &vram_obj); if (r) { DRM_ERROR("Failed to create VRAM object\n"); goto out_cleanup; @@ -80,7 +81,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) struct fence *fence = NULL; r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i); + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, + NULL, gtt_obj + i); if (r) { DRM_ERROR("Failed to create GTT object %d\n", i); goto out_lclean; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index b5abd5cde413..364cbe975332 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -861,7 +861,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, &adev->stollen_vga_memory); + NULL, NULL, &adev->stollen_vga_memory); if (r) { return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 482e66797ae6..5cc95f1a7dab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -247,7 +247,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) const struct common_firmware_header *header = NULL; err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, bo); + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo); if (err) { dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); err = -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 2cf6c6b06e3b..d0312364d950 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -156,7 +156,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, &adev->uvd.vcpu_bo); + NULL, NULL, &adev->uvd.vcpu_bo); if (r) { dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); return r; @@ -543,46 +543,60 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, return -EINVAL; } - if (msg_type == 1) { + switch (msg_type) { + case 0: + /* it's a create msg, calc image size (width * height) */ + amdgpu_bo_kunmap(bo); + + /* try to alloc a new handle */ + for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { + if (atomic_read(&adev->uvd.handles[i]) == handle) { + DRM_ERROR("Handle 0x%x already in use!\n", handle); + return -EINVAL; + } + + if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { + adev->uvd.filp[i] = ctx->parser->filp; + return 0; + } + } + + DRM_ERROR("No more free UVD handles!\n"); + return -EINVAL; + + case 1: /* it's a decode msg, calc buffer sizes */ r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes); amdgpu_bo_kunmap(bo); if (r) return r; - } else if (msg_type == 2) { + /* validate the handle */ + for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { + if (atomic_read(&adev->uvd.handles[i]) == handle) { + if (adev->uvd.filp[i] != ctx->parser->filp) { + DRM_ERROR("UVD handle collision detected!\n"); + return -EINVAL; + } + return 0; + } + } + + DRM_ERROR("Invalid UVD handle 0x%x!\n", handle); + return -ENOENT; + + case 2: /* it's a destroy msg, free the handle */ for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); amdgpu_bo_kunmap(bo); return 0; - } else { - /* it's a create msg */ - amdgpu_bo_kunmap(bo); 
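The UVD hunk above restructures amdgpu_uvd_cs_msg() as a switch on the message type: a create message claims a free handle slot, a decode message verifies the handle belongs to the submitting file, and a destroy message releases it. The slot claim uses atomic_cmpxchg() so two parsers cannot grab the same entry. A stripped-down sketch of that allocation pattern follows; the array size, names, and the -ENOSPC return are illustrative choices, not the driver's exact values.

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_MAX_HANDLES 16

static atomic_t example_handles[EXAMPLE_MAX_HANDLES];

/* claim a slot for @handle: -EINVAL if already in use, -ENOSPC if the table is full */
static int example_claim_handle(u32 handle)
{
	int i;

	for (i = 0; i < EXAMPLE_MAX_HANDLES; i++) {
		if (atomic_read(&example_handles[i]) == handle)
			return -EINVAL;		/* duplicate create message */
		if (!atomic_cmpxchg(&example_handles[i], 0, handle))
			return 0;		/* empty slot claimed atomically */
	}
	return -ENOSPC;
}

Because the claim is a compare-and-swap against 0, release is simply atomic_cmpxchg(&slot, handle, 0), which is what the destroy branch in the hunk above does per slot.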
- - if (msg_type != 0) { - DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); - return -EINVAL; - } - - /* it's a create msg, no special handling needed */ - } - - /* create or decode, validate the handle */ - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { - if (atomic_read(&adev->uvd.handles[i]) == handle) - return 0; - } - /* handle not found try to alloc a new one */ - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { - if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { - adev->uvd.filp[i] = ctx->parser->filp; - return 0; - } + default: + DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); + return -EINVAL; } - - DRM_ERROR("No more free UVD handles!\n"); + BUG(); return -EINVAL; } @@ -805,10 +819,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) } static int amdgpu_uvd_free_job( - struct amdgpu_job *sched_job) + struct amdgpu_job *job) { - amdgpu_ib_free(sched_job->adev, sched_job->ibs); - kfree(sched_job->ibs); + amdgpu_ib_free(job->adev, job->ibs); + kfree(job->ibs); return 0; } @@ -905,7 +919,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, &bo); + NULL, NULL, &bo); if (r) return r; @@ -954,7 +968,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, &bo); + NULL, NULL, &bo); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 3cab96c42aa8..74f2038ac747 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -143,7 +143,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, &adev->vce.vcpu_bo); + NULL, NULL, &adev->vce.vcpu_bo); if (r) { dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r); return r; @@ -342,10 +342,10 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) } static int amdgpu_vce_free_job( - struct amdgpu_job *sched_job) + struct amdgpu_job *job) { - amdgpu_ib_free(sched_job->adev, sched_job->ibs); - kfree(sched_job->ibs); + amdgpu_ib_free(job->adev, job->ibs); + kfree(job->ibs); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index f68b7cdc370a..53d551f2d839 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -316,12 +316,12 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev, } } -int amdgpu_vm_free_job(struct amdgpu_job *sched_job) +int amdgpu_vm_free_job(struct amdgpu_job *job) { int i; - for (i = 0; i < sched_job->num_ibs; i++) - amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]); - kfree(sched_job->ibs); + for (i = 0; i < job->num_ibs; i++) + amdgpu_ib_free(job->adev, &job->ibs[i]); + kfree(job->ibs); return 0; } @@ -455,8 +455,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, return -ENOMEM; r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); - if (r) + if (r) { + kfree(ib); return r; + } ib->length_dw = 0; /* walk over the address space and update the page directory */ @@ -686,31 +688,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev, } /** - * amdgpu_vm_fence_pts - fence page tables after an 
update - * - * @vm: requested vm - * @start: start of GPU address range - * @end: end of GPU address range - * @fence: fence to use - * - * Fence the page tables in the range @start - @end (cayman+). - * - * Global and local mutex must be locked! - */ -static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm, - uint64_t start, uint64_t end, - struct fence *fence) -{ - unsigned i; - - start >>= amdgpu_vm_block_size; - end >>= amdgpu_vm_block_size; - - for (i = start; i <= end; ++i) - amdgpu_bo_fence(vm->page_tables[i].bo, fence, true); -} - -/** * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table * * @adev: amdgpu_device pointer @@ -813,8 +790,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - amdgpu_vm_fence_pts(vm, mapping->it.start, - mapping->it.last + 1, f); + amdgpu_bo_fence(vm->page_directory, f, true); if (fence) { fence_put(*fence); *fence = fence_get(f); @@ -855,7 +831,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, int r; if (mem) { - addr = mem->start << PAGE_SHIFT; + addr = (u64)mem->start << PAGE_SHIFT; if (mem->mem_type != TTM_PL_TT) addr += adev->vm_manager.vram_base_offset; } else { @@ -1089,6 +1065,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, /* walk over the address space and allocate the page tables */ for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { + struct reservation_object *resv = vm->page_directory->tbo.resv; struct amdgpu_bo *pt; if (vm->page_tables[pt_idx].bo) @@ -1097,11 +1074,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, /* drop mutex to allocate and clear page table */ mutex_unlock(&vm->mutex); + ww_mutex_lock(&resv->lock, NULL); r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, AMDGPU_GPU_PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_NO_CPU_ACCESS, - NULL, &pt); + NULL, resv, &pt); + ww_mutex_unlock(&resv->lock); if (r) goto error_free; @@ -1303,7 +1282,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) r = amdgpu_bo_create(adev, pd_size, align, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_NO_CPU_ACCESS, - NULL, &vm->page_directory); + NULL, NULL, &vm->page_directory); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index cd6edc40c9cd..1e0bba29e167 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -1279,8 +1279,7 @@ amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action) amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); } if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - amdgpu_atombios_encoder_setup_dig_transmitter(encoder, - ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); + amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level); if (ext_encoder) amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 82e8d0730517..a1a35a5df8e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -6185,6 +6185,11 @@ static int ci_dpm_late_init(void *handle) if (!amdgpu_dpm) return 0; + /* init the sysfs and debugfs files late */ + ret = amdgpu_pm_sysfs_init(adev); + if (ret) + return ret; + ret = ci_set_temperature_range(adev); if (ret) return ret; @@ -6232,9 +6237,6 @@ static int ci_dpm_sw_init(void *handle) adev->pm.dpm.current_ps = 
adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; if (amdgpu_dpm == 1) amdgpu_pm_print_power_states(adev); - ret = amdgpu_pm_sysfs_init(adev); - if (ret) - goto dpm_failed; mutex_unlock(&adev->pm.mutex); DRM_INFO("amdgpu: dpm initialized\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 4b6ce74753cd..484710cfdf82 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1567,6 +1567,9 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) int ret, i; u16 tmp16; + if (pci_is_root_bus(adev->pdev->bus)) + return; + if (amdgpu_pcie_gen2 == 0) return; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 44fa96ad4709..2e3373ed4c94 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -596,6 +596,12 @@ static int cz_dpm_late_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (amdgpu_dpm) { + int ret; + /* init the sysfs and debugfs files late */ + ret = amdgpu_pm_sysfs_init(adev); + if (ret) + return ret; + /* powerdown unused blocks for now */ cz_dpm_powergate_uvd(adev, true); cz_dpm_powergate_vce(adev, true); @@ -632,10 +638,6 @@ static int cz_dpm_sw_init(void *handle) if (amdgpu_dpm == 1) amdgpu_pm_print_power_states(adev); - ret = amdgpu_pm_sysfs_init(adev); - if (ret) - goto dpm_init_failed; - mutex_unlock(&adev->pm.mutex); DRM_INFO("amdgpu: dpm initialized\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c index a72ffc7d6c26..e33180d3314a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c @@ -814,7 +814,8 @@ int cz_smu_init(struct amdgpu_device *adev) * 3. map kernel virtual address */ ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, toc_buf); + true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, + toc_buf); if (ret) { dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret); @@ -822,7 +823,8 @@ int cz_smu_init(struct amdgpu_device *adev) } ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, smu_buf); + true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, + smu_buf); if (ret) { dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index e4d101b1252a..d4c82b625727 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -255,6 +255,24 @@ static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); } +static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev) +{ + unsigned i; + + /* Enable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_get(adev, &adev->pageflip_irq, i); +} + +static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev) +{ + unsigned i; + + /* Disable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_put(adev, &adev->pageflip_irq, i); +} + /** * dce_v10_0_page_flip - pageflip callback. 
* @@ -2663,9 +2681,10 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) dce_v10_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); dce_v10_0_vga_enable(crtc, false); - /* Make sure VBLANK interrupt is still enabled */ + /* Make sure VBLANK and PFLIP interrupts are still enabled */ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); + amdgpu_irq_update(adev, &adev->pageflip_irq, type); drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); dce_v10_0_crtc_load_lut(crtc); break; @@ -3025,6 +3044,8 @@ static int dce_v10_0_hw_init(void *handle) dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v10_0_pageflip_interrupt_init(adev); + return 0; } @@ -3039,6 +3060,8 @@ static int dce_v10_0_hw_fini(void *handle) dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v10_0_pageflip_interrupt_fini(adev); + return 0; } @@ -3050,6 +3073,8 @@ static int dce_v10_0_suspend(void *handle) dce_v10_0_hpd_fini(adev); + dce_v10_0_pageflip_interrupt_fini(adev); + return 0; } @@ -3075,6 +3100,8 @@ static int dce_v10_0_resume(void *handle) /* initialize hpd */ dce_v10_0_hpd_init(adev); + dce_v10_0_pageflip_interrupt_init(adev); + return 0; } @@ -3369,7 +3396,6 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); - amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id); queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 6411e8244671..7e1cf5e4eebf 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -233,6 +233,24 @@ static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); } +static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev) +{ + unsigned i; + + /* Enable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_get(adev, &adev->pageflip_irq, i); +} + +static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev) +{ + unsigned i; + + /* Disable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_put(adev, &adev->pageflip_irq, i); +} + /** * dce_v11_0_page_flip - pageflip callback. 
* @@ -2640,9 +2658,10 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) dce_v11_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); dce_v11_0_vga_enable(crtc, false); - /* Make sure VBLANK interrupt is still enabled */ + /* Make sure VBLANK and PFLIP interrupts are still enabled */ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); + amdgpu_irq_update(adev, &adev->pageflip_irq, type); drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); dce_v11_0_crtc_load_lut(crtc); break; @@ -2888,7 +2907,7 @@ static int dce_v11_0_early_init(void *handle) switch (adev->asic_type) { case CHIP_CARRIZO: - adev->mode_info.num_crtc = 4; + adev->mode_info.num_crtc = 3; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 9; break; @@ -3000,6 +3019,8 @@ static int dce_v11_0_hw_init(void *handle) dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v11_0_pageflip_interrupt_init(adev); + return 0; } @@ -3014,6 +3035,8 @@ static int dce_v11_0_hw_fini(void *handle) dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v11_0_pageflip_interrupt_fini(adev); + return 0; } @@ -3025,6 +3048,8 @@ static int dce_v11_0_suspend(void *handle) dce_v11_0_hpd_fini(adev); + dce_v11_0_pageflip_interrupt_fini(adev); + return 0; } @@ -3051,6 +3076,8 @@ static int dce_v11_0_resume(void *handle) /* initialize hpd */ dce_v11_0_hpd_init(adev); + dce_v11_0_pageflip_interrupt_init(adev); + return 0; } @@ -3345,7 +3372,6 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); - amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id); queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index c86911c2ea2a..34b9c2a9d8d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -204,6 +204,24 @@ static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); } +static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev) +{ + unsigned i; + + /* Enable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_get(adev, &adev->pageflip_irq, i); +} + +static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev) +{ + unsigned i; + + /* Disable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_put(adev, &adev->pageflip_irq, i); +} + /** * dce_v8_0_page_flip - pageflip callback. 
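Across dce_v10_0, dce_v11_0 and dce_v8_0 these hunks stop toggling the pageflip interrupt around each individual flip and instead hold one amdgpu_irq_get() reference per CRTC for as long as the display block is up: hw_init and resume take the references, hw_fini and suspend drop them, and the per-flip amdgpu_irq_put() in the pageflip IRQ handlers goes away. A self-contained sketch of the reference-counted enable/disable idea (irq_src, irq_get and irq_put are illustrative stand-ins, not the driver's types) might look like this:

#include <stdio.h>

/*
 * Minimal model of refcounted interrupt enabling: the source stays
 * enabled in hardware as long as at least one reference is held.
 */
struct irq_src {
	int refcount;
};

static void irq_get(struct irq_src *src, const char *name)
{
	if (src->refcount++ == 0)
		printf("%s: enabled in hardware\n", name);
}

static void irq_put(struct irq_src *src, const char *name)
{
	if (--src->refcount == 0)
		printf("%s: disabled in hardware\n", name);
}

int main(void)
{
	struct irq_src pflip = { 0 };
	int crtc, num_crtc = 3;

	/* hw_init/resume: one reference per CRTC, pflip IRQs stay on */
	for (crtc = 0; crtc < num_crtc; crtc++)
		irq_get(&pflip, "pageflip");

	/* hw_fini/suspend: drop them again */
	for (crtc = 0; crtc < num_crtc; crtc++)
		irq_put(&pflip, "pageflip");

	return 0;
}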
* @@ -2575,9 +2593,10 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) dce_v8_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); dce_v8_0_vga_enable(crtc, false); - /* Make sure VBLANK interrupt is still enabled */ + /* Make sure VBLANK and PFLIP interrupts are still enabled */ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); + amdgpu_irq_update(adev, &adev->pageflip_irq, type); drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); dce_v8_0_crtc_load_lut(crtc); break; @@ -2933,6 +2952,8 @@ static int dce_v8_0_hw_init(void *handle) dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v8_0_pageflip_interrupt_init(adev); + return 0; } @@ -2947,6 +2968,8 @@ static int dce_v8_0_hw_fini(void *handle) dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v8_0_pageflip_interrupt_fini(adev); + return 0; } @@ -2958,6 +2981,8 @@ static int dce_v8_0_suspend(void *handle) dce_v8_0_hpd_fini(adev); + dce_v8_0_pageflip_interrupt_fini(adev); + return 0; } @@ -2981,6 +3006,8 @@ static int dce_v8_0_resume(void *handle) /* initialize hpd */ dce_v8_0_hpd_init(adev); + dce_v8_0_pageflip_interrupt_init(adev); + return 0; } @@ -3376,7 +3403,6 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); - amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id); queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c index 322edea65857..bda1249eb871 100644 --- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c @@ -764,7 +764,7 @@ int fiji_smu_init(struct amdgpu_device *adev) ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, toc_buf); + NULL, NULL, toc_buf); if (ret) { DRM_ERROR("Failed to allocate memory for TOC buffer\n"); return -ENOMEM; @@ -774,7 +774,7 @@ int fiji_smu_init(struct amdgpu_device *adev) ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, smu_buf); + NULL, NULL, smu_buf); if (ret) { DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 4bd1e5cf65ca..e992bf2ff66c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -3206,7 +3206,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, &adev->gfx.mec.hpd_eop_obj); if (r) { dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); @@ -3373,7 +3373,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, sizeof(struct bonaire_mqd), PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, &ring->mqd_obj); if (r) { dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); @@ -3610,41 +3610,6 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev) return 0; } -static void gfx_v7_0_ce_sync_me(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - u64 gpu_addr = 
adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4; - - /* instruct DE to set a magic number */ - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | - WRITE_DATA_DST_SEL(5))); - amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 1); - - /* let CE wait till condition satisfied */ - amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); - amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ - WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ - WAIT_REG_MEM_FUNCTION(3) | /* == */ - WAIT_REG_MEM_ENGINE(2))); /* ce */ - amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 1); - amdgpu_ring_write(ring, 0xffffffff); - amdgpu_ring_write(ring, 4); /* poll interval */ - - /* instruct CE to reset wb of ce_sync to zero */ - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | - WRITE_DATA_DST_SEL(5) | - WR_CONFIRM)); - amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 0); -} - /* * vm * VMID 0 is the physical GPU addresses as used by the kernel. @@ -3663,6 +3628,13 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vm_id, uint64_t pd_addr) { int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); + if (usepfp) { + /* synce CE with ME to prevent CE fetch CEIB before context switch done */ + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + } amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | @@ -3703,7 +3675,10 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_ring_write(ring, 0x0); /* synce CE with ME to prevent CE fetch CEIB before context switch done */ - gfx_v7_0_ce_sync_me(ring); + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); } } @@ -3788,7 +3763,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, &adev->gfx.rlc.save_restore_obj); + NULL, NULL, + &adev->gfx.rlc.save_restore_obj); if (r) { dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); return r; @@ -3831,7 +3807,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, &adev->gfx.rlc.clear_state_obj); + NULL, NULL, + &adev->gfx.rlc.clear_state_obj); if (r) { dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); gfx_v7_0_rlc_fini(adev); @@ -3870,7 +3847,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, &adev->gfx.rlc.cp_table_obj); + NULL, NULL, + &adev->gfx.rlc.cp_table_obj); if (r) { dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); gfx_v7_0_rlc_fini(adev); @@ -4802,12 +4780,6 @@ static int gfx_v7_0_sw_init(void *handle) return r; } - r = amdgpu_wb_get(adev, 
&adev->gfx.ce_sync_offs); - if (r) { - DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r); - return r; - } - for (i = 0; i < adev->gfx.num_gfx_rings; i++) { ring = &adev->gfx.gfx_ring[i]; ring->ring_obj = NULL; @@ -4851,21 +4823,21 @@ static int gfx_v7_0_sw_init(void *handle) r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GDS, 0, - NULL, &adev->gds.gds_gfx_bo); + NULL, NULL, &adev->gds.gds_gfx_bo); if (r) return r; r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GWS, 0, - NULL, &adev->gds.gws_gfx_bo); + NULL, NULL, &adev->gds.gws_gfx_bo); if (r) return r; r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_OA, 0, - NULL, &adev->gds.oa_gfx_bo); + NULL, NULL, &adev->gds.oa_gfx_bo); if (r) return r; @@ -4886,8 +4858,6 @@ static int gfx_v7_0_sw_fini(void *handle) for (i = 0; i < adev->gfx.num_compute_rings; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); - amdgpu_wb_free(adev, adev->gfx.ce_sync_offs); - gfx_v7_0_cp_compute_fini(adev); gfx_v7_0_rlc_fini(adev); gfx_v7_0_mec_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 53f07439a512..cb4f68f53f24 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -868,7 +868,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, &adev->gfx.mec.hpd_eop_obj); if (r) { dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); @@ -940,12 +940,6 @@ static int gfx_v8_0_sw_init(void *handle) return r; } - r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs); - if (r) { - DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r); - return r; - } - /* set up the gfx ring */ for (i = 0; i < adev->gfx.num_gfx_rings; i++) { ring = &adev->gfx.gfx_ring[i]; @@ -995,21 +989,21 @@ static int gfx_v8_0_sw_init(void *handle) /* reserve GDS, GWS and OA resource for gfx */ r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GDS, 0, + AMDGPU_GEM_DOMAIN_GDS, 0, NULL, NULL, &adev->gds.gds_gfx_bo); if (r) return r; r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GWS, 0, + AMDGPU_GEM_DOMAIN_GWS, 0, NULL, NULL, &adev->gds.gws_gfx_bo); if (r) return r; r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_OA, 0, + AMDGPU_GEM_DOMAIN_OA, 0, NULL, NULL, &adev->gds.oa_gfx_bo); if (r) return r; @@ -1033,8 +1027,6 @@ static int gfx_v8_0_sw_fini(void *handle) for (i = 0; i < adev->gfx.num_compute_rings; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); - amdgpu_wb_free(adev, adev->gfx.ce_sync_offs); - gfx_v8_0_mec_fini(adev); return 0; @@ -3106,7 +3098,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) sizeof(struct vi_mqd), PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, - &ring->mqd_obj); + NULL, &ring->mqd_obj); if (r) { dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); return r; @@ -3965,6 +3957,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 
2 : 0)); amdgpu_ring_write(ring, lower_32_bits(seq)); amdgpu_ring_write(ring, upper_32_bits(seq)); + } /** @@ -4005,49 +3998,34 @@ static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring, return true; } -static void gfx_v8_0_ce_sync_me(struct amdgpu_ring *ring) +static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vm_id, uint64_t pd_addr) { - struct amdgpu_device *adev = ring->adev; - u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4; - - /* instruct DE to set a magic number */ - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | - WRITE_DATA_DST_SEL(5))); - amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 1); + int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); + uint32_t seq = ring->fence_drv.sync_seq[ring->idx]; + uint64_t addr = ring->fence_drv.gpu_addr; - /* let CE wait till condition satisfied */ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); - amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ - WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ - WAIT_REG_MEM_FUNCTION(3) | /* == */ - WAIT_REG_MEM_ENGINE(2))); /* ce */ - amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 1); + amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ + WAIT_REG_MEM_FUNCTION(3))); /* equal */ + amdgpu_ring_write(ring, addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); + amdgpu_ring_write(ring, seq); amdgpu_ring_write(ring, 0xffffffff); amdgpu_ring_write(ring, 4); /* poll interval */ - /* instruct CE to reset wb of ce_sync to zero */ - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | - WRITE_DATA_DST_SEL(5) | - WR_CONFIRM)); - amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 0); -} - -static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) -{ - int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); + if (usepfp) { + /* synce CE with ME to prevent CE fetch CEIB before context switch done */ + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + } amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | - WRITE_DATA_DST_SEL(0))); + WRITE_DATA_DST_SEL(0)) | + WR_CONFIRM); if (vm_id < 8) { amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); @@ -4083,9 +4061,10 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, /* sync PFP to ME, otherwise we might get invalid PFP reads */ amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); amdgpu_ring_write(ring, 0x0); - - /* synce CE with ME to prevent CE fetch CEIB before context switch done */ - gfx_v8_0_ce_sync_me(ring); + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); } } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 774528ab8704..fab5471d25d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c 
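The next two hunks, for gmc_v7_0 and gmc_v8_0, move the WREG32_P() write that resets the VM_CONTEXT1 protection fault ADDR/STATUS latch to the top of the interrupt handler and return early when both latched values read back as zero, presumably so that interrupts arriving with an already-cleared latch are dropped instead of logged. A standalone sketch of that ack-then-filter shape, with hypothetical names only (fault_regs, handle_fault_irq), might look like this:

#include <stdio.h>

/*
 * Clear the fault latch before decoding it, and treat an interrupt
 * whose latched address and status are both zero as carrying no new
 * information.
 */
struct fault_regs {
	unsigned int addr;
	unsigned int status;
};

static int handle_fault_irq(struct fault_regs *regs)
{
	unsigned int addr = regs->addr;
	unsigned int status = regs->status;

	/* reset addr and status first, as the moved WREG32_P() does */
	regs->addr = 0;
	regs->status = 0;

	if (!addr && !status)
		return 0;	/* empty latch: nothing to report */

	printf("GPU fault: addr 0x%08x status 0x%08x\n", addr, status);
	return 0;
}

int main(void)
{
	struct fault_regs regs = { 0x1000, 0x3 };

	handle_fault_irq(&regs);	/* genuine fault: decoded and logged */
	handle_fault_irq(&regs);	/* repeat with cleared latch: ignored */
	return 0;
}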
@@ -1262,6 +1262,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); + /* reset addr and status */ + WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); + + if (!addr && !status) + return 0; + dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", entry->src_id, entry->src_data); dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", @@ -1269,8 +1275,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client); - /* reset addr and status */ - WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 9a07742620d0..7bc9e9fcf3d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1262,6 +1262,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); + /* reset addr and status */ + WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); + + if (!addr && !status) + return 0; + dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", entry->src_id, entry->src_data); dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", @@ -1269,8 +1275,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client); - /* reset addr and status */ - WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c index c900aa942ade..966d4b2ed9da 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c @@ -625,7 +625,7 @@ int iceland_smu_init(struct amdgpu_device *adev) ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, toc_buf); + NULL, NULL, toc_buf); if (ret) { DRM_ERROR("Failed to allocate memory for TOC buffer\n"); return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 94ec04a9c4d5..7e9154c7f1db 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -2995,6 +2995,15 @@ static int kv_dpm_late_init(void *handle) { /* powerdown unused blocks for now */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + if (!amdgpu_dpm) + return 0; + + /* init the sysfs and debugfs files late */ + ret = amdgpu_pm_sysfs_init(adev); + if (ret) + return ret; kv_dpm_powergate_acp(adev, true); kv_dpm_powergate_samu(adev, true); @@ -3038,9 +3047,6 @@ static int kv_dpm_sw_init(void *handle) adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; if (amdgpu_dpm == 1) amdgpu_pm_print_power_states(adev); - ret = amdgpu_pm_sysfs_init(adev); - if (ret) - goto dpm_failed; mutex_unlock(&adev->pm.mutex); DRM_INFO("amdgpu: dpm initialized\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c index 1f5ac941a610..5421309c1862 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c +++ 
b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c @@ -763,7 +763,7 @@ int tonga_smu_init(struct amdgpu_device *adev) ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, toc_buf); + NULL, NULL, toc_buf); if (ret) { DRM_ERROR("Failed to allocate memory for TOC buffer\n"); return -ENOMEM; @@ -773,7 +773,7 @@ int tonga_smu_init(struct amdgpu_device *adev) ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, smu_buf); + NULL, NULL, smu_buf); if (ret) { DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 5fac5da694f0..ed50dd725788 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = uvd_v4_2_hw_fini(adev); + r = amdgpu_uvd_suspend(adev); if (r) return r; - r = amdgpu_uvd_suspend(adev); + r = uvd_v4_2_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 2d5c59c318af..9ad8b9906c0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = uvd_v5_0_hw_fini(adev); + r = amdgpu_uvd_suspend(adev); if (r) return r; - r = amdgpu_uvd_suspend(adev); + r = uvd_v5_0_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index d9f553fce531..7e9934fa4193 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -214,14 +214,16 @@ static int uvd_v6_0_suspend(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* Skip this for APU for now */ + if (!(adev->flags & AMD_IS_APU)) { + r = amdgpu_uvd_suspend(adev); + if (r) + return r; + } r = uvd_v6_0_hw_fini(adev); if (r) return r; - r = amdgpu_uvd_suspend(adev); - if (r) - return r; - return r; } @@ -230,10 +232,12 @@ static int uvd_v6_0_resume(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_resume(adev); - if (r) - return r; - + /* Skip this for APU for now */ + if (!(adev->flags & AMD_IS_APU)) { + r = amdgpu_uvd_resume(adev); + if (r) + return r; + } r = uvd_v6_0_hw_init(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 552d9e75ad1b..0bac8702e934 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1005,6 +1005,9 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev) u32 mask; int ret; + if (pci_is_root_bus(adev->pdev->bus)) + return; + if (amdgpu_pcie_gen2 == 0) return; @@ -1400,7 +1403,8 @@ static int vi_common_early_init(void *handle) case CHIP_CARRIZO: adev->has_uvd = true; adev->cg_flags = 0; - adev->pg_flags = AMDGPU_PG_SUPPORT_UVD | AMDGPU_PG_SUPPORT_VCE; + /* Disable UVD pg */ + adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE; adev->external_rev_id = adev->rev_id + 0x1; if (amdgpu_smc_load_fw && smc_enabled) adev->firmware.smu_load = true; diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h index 
488642f08267..3b47ae313e36 100644 --- a/drivers/gpu/drm/amd/include/cgs_linux.h +++ b/drivers/gpu/drm/amd/include/cgs_linux.h @@ -27,19 +27,6 @@ #include "cgs_common.h" /** - * cgs_import_gpu_mem() - Import dmabuf handle - * @cgs_device: opaque device handle - * @dmabuf_fd: DMABuf file descriptor - * @handle: memory handle (output) - * - * Must be called in the process context that dmabuf_fd belongs to. - * - * Return: 0 on success, -errno otherwise - */ -typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd, - cgs_handle_t *handle); - -/** * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources * @private_data: private data provided to cgs_add_irq_source * @src_id: interrupt source ID @@ -114,16 +101,12 @@ typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type); typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type); struct cgs_os_ops { - cgs_import_gpu_mem_t import_gpu_mem; - /* IRQ handling */ cgs_add_irq_source_t add_irq_source; cgs_irq_get_t irq_get; cgs_irq_put_t irq_put; }; -#define cgs_import_gpu_mem(dev,dmabuf_fd,handle) \ - CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle) #define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \ CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \ private_data) diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h new file mode 100644 index 000000000000..144f50acc971 --- /dev/null +++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h @@ -0,0 +1,41 @@ +#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _GPU_SCHED_TRACE_H_ + +#include <linux/stringify.h> +#include <linux/types.h> +#include <linux/tracepoint.h> + +#include <drm/drmP.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM gpu_sched +#define TRACE_INCLUDE_FILE gpu_sched_trace + +TRACE_EVENT(amd_sched_job, + TP_PROTO(struct amd_sched_job *sched_job), + TP_ARGS(sched_job), + TP_STRUCT__entry( + __field(struct amd_sched_entity *, entity) + __field(const char *, name) + __field(u32, job_count) + __field(int, hw_job_count) + ), + + TP_fast_assign( + __entry->entity = sched_job->s_entity; + __entry->name = sched_job->sched->name; + __entry->job_count = kfifo_len( + &sched_job->s_entity->job_queue) / sizeof(sched_job); + __entry->hw_job_count = atomic_read( + &sched_job->sched->hw_rq_count); + ), + TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d", + __entry->entity, __entry->name, __entry->job_count, + __entry->hw_job_count) +); +#endif + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#include <trace/define_trace.h> diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 9259f1b6664c..3697eeeecf82 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -27,6 +27,9 @@ #include <drm/drmP.h> #include "gpu_scheduler.h" +#define CREATE_TRACE_POINTS +#include "gpu_sched_trace.h" + static struct amd_sched_job * amd_sched_entity_pop_job(struct amd_sched_entity *entity); static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); @@ -65,29 +68,29 @@ static struct amd_sched_job * amd_sched_rq_select_job(struct amd_sched_rq *rq) { struct amd_sched_entity *entity; - struct amd_sched_job *job; + struct amd_sched_job *sched_job; spin_lock(&rq->lock); entity = rq->current_entity; if (entity) { list_for_each_entry_continue(entity, &rq->entities, list) { - job = amd_sched_entity_pop_job(entity); - if (job) { + sched_job = amd_sched_entity_pop_job(entity); + if (sched_job) { rq->current_entity = entity; spin_unlock(&rq->lock); - return job; + return sched_job; } } } list_for_each_entry(entity, &rq->entities, list) { - job = amd_sched_entity_pop_job(entity); - if (job) { + sched_job = amd_sched_entity_pop_job(entity); + if (sched_job) { rq->current_entity = entity; spin_unlock(&rq->lock); - return job; + return sched_job; } if (entity == rq->current_entity) @@ -115,23 +118,27 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched, struct amd_sched_rq *rq, uint32_t jobs) { + int r; + if (!(sched && entity && rq)) return -EINVAL; memset(entity, 0, sizeof(struct amd_sched_entity)); - entity->belongto_rq = rq; - entity->scheduler = sched; - entity->fence_context = fence_context_alloc(1); - if(kfifo_alloc(&entity->job_queue, - jobs * sizeof(void *), - GFP_KERNEL)) - return -EINVAL; + INIT_LIST_HEAD(&entity->list); + entity->rq = rq; + entity->sched = sched; spin_lock_init(&entity->queue_lock); + r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL); + if (r) + return r; + atomic_set(&entity->fence_seq, 0); + entity->fence_context = fence_context_alloc(1); /* Add the entity to the run queue */ amd_sched_rq_add_entity(rq, entity); + return 0; } @@ -146,8 +153,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched, static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity) { - return entity->scheduler == sched && - entity->belongto_rq != NULL; + return entity->sched == sched && + entity->rq != NULL; } /** @@ -177,7 +184,7 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity) void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity) { - struct amd_sched_rq *rq = entity->belongto_rq; + struct amd_sched_rq *rq = entity->rq; if (!amd_sched_entity_is_initialized(sched, entity)) return; @@ -198,22 +205,22 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb) container_of(cb, struct amd_sched_entity, cb); entity->dependency = NULL; fence_put(f); - amd_sched_wakeup(entity->scheduler); + amd_sched_wakeup(entity->sched); } static struct amd_sched_job * amd_sched_entity_pop_job(struct amd_sched_entity *entity) { - struct amd_gpu_scheduler *sched = entity->scheduler; - struct amd_sched_job *job; + struct amd_gpu_scheduler *sched = entity->sched; + struct amd_sched_job *sched_job; if (ACCESS_ONCE(entity->dependency)) return NULL; - if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job))) + if (!kfifo_out_peek(&entity->job_queue, 
&sched_job, sizeof(sched_job))) return NULL; - while ((entity->dependency = sched->ops->dependency(job))) { + while ((entity->dependency = sched->ops->dependency(sched_job))) { if (fence_add_callback(entity->dependency, &entity->cb, amd_sched_entity_wakeup)) @@ -222,32 +229,33 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity) return NULL; } - return job; + return sched_job; } /** * Helper to submit a job to the job queue * - * @job The pointer to job required to submit + * @sched_job The pointer to job required to submit * * Returns true if we could submit the job. */ -static bool amd_sched_entity_in(struct amd_sched_job *job) +static bool amd_sched_entity_in(struct amd_sched_job *sched_job) { - struct amd_sched_entity *entity = job->s_entity; + struct amd_sched_entity *entity = sched_job->s_entity; bool added, first = false; spin_lock(&entity->queue_lock); - added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job); + added = kfifo_in(&entity->job_queue, &sched_job, + sizeof(sched_job)) == sizeof(sched_job); - if (added && kfifo_len(&entity->job_queue) == sizeof(job)) + if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job)) first = true; spin_unlock(&entity->queue_lock); /* first job wakes up scheduler */ if (first) - amd_sched_wakeup(job->sched); + amd_sched_wakeup(sched_job->sched); return added; } @@ -255,7 +263,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *job) /** * Submit a job to the job queue * - * @job The pointer to job required to submit + * @sched_job The pointer to job required to submit * * Returns 0 for success, negative error code otherwise. */ @@ -271,9 +279,9 @@ int amd_sched_entity_push_job(struct amd_sched_job *sched_job) fence_get(&fence->base); sched_job->s_fence = fence; - wait_event(entity->scheduler->job_scheduled, + wait_event(entity->sched->job_scheduled, amd_sched_entity_in(sched_job)); - + trace_amd_sched_job(sched_job); return 0; } @@ -301,30 +309,28 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched) static struct amd_sched_job * amd_sched_select_job(struct amd_gpu_scheduler *sched) { - struct amd_sched_job *job; + struct amd_sched_job *sched_job; if (!amd_sched_ready(sched)) return NULL; /* Kernel run queue has higher priority than normal run queue*/ - job = amd_sched_rq_select_job(&sched->kernel_rq); - if (job == NULL) - job = amd_sched_rq_select_job(&sched->sched_rq); + sched_job = amd_sched_rq_select_job(&sched->kernel_rq); + if (sched_job == NULL) + sched_job = amd_sched_rq_select_job(&sched->sched_rq); - return job; + return sched_job; } static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) { - struct amd_sched_job *sched_job = - container_of(cb, struct amd_sched_job, cb); - struct amd_gpu_scheduler *sched; + struct amd_sched_fence *s_fence = + container_of(cb, struct amd_sched_fence, cb); + struct amd_gpu_scheduler *sched = s_fence->sched; - sched = sched_job->sched; - amd_sched_fence_signal(sched_job->s_fence); atomic_dec(&sched->hw_rq_count); - fence_put(&sched_job->s_fence->base); - sched->ops->process_job(sched_job); + amd_sched_fence_signal(s_fence); + fence_put(&s_fence->base); wake_up_interruptible(&sched->wake_up_worker); } @@ -338,87 +344,82 @@ static int amd_sched_main(void *param) while (!kthread_should_stop()) { struct amd_sched_entity *entity; - struct amd_sched_job *job; + struct amd_sched_fence *s_fence; + struct amd_sched_job *sched_job; struct fence *fence; wait_event_interruptible(sched->wake_up_worker, kthread_should_stop() || - (job = 
amd_sched_select_job(sched))); + (sched_job = amd_sched_select_job(sched))); - if (!job) + if (!sched_job) continue; - entity = job->s_entity; + entity = sched_job->s_entity; + s_fence = sched_job->s_fence; atomic_inc(&sched->hw_rq_count); - fence = sched->ops->run_job(job); + fence = sched->ops->run_job(sched_job); if (fence) { - r = fence_add_callback(fence, &job->cb, + r = fence_add_callback(fence, &s_fence->cb, amd_sched_process_job); if (r == -ENOENT) - amd_sched_process_job(fence, &job->cb); + amd_sched_process_job(fence, &s_fence->cb); else if (r) DRM_ERROR("fence add callback failed (%d)\n", r); fence_put(fence); + } else { + DRM_ERROR("Failed to run job!\n"); + amd_sched_process_job(NULL, &s_fence->cb); } - count = kfifo_out(&entity->job_queue, &job, sizeof(job)); - WARN_ON(count != sizeof(job)); + count = kfifo_out(&entity->job_queue, &sched_job, + sizeof(sched_job)); + WARN_ON(count != sizeof(sched_job)); wake_up(&sched->job_scheduled); } return 0; } /** - * Create a gpu scheduler + * Init a gpu scheduler instance * + * @sched The pointer to the scheduler * @ops The backend operations for this scheduler. - * @ring The the ring id for the scheduler. * @hw_submissions Number of hw submissions to do. + * @name Name used for debugging * - * Return the pointer to scheduler for success, otherwise return NULL + * Return 0 on success, otherwise error code. */ -struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops, - unsigned ring, unsigned hw_submission, - void *priv) +int amd_sched_init(struct amd_gpu_scheduler *sched, + struct amd_sched_backend_ops *ops, + unsigned hw_submission, const char *name) { - struct amd_gpu_scheduler *sched; - - sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL); - if (!sched) - return NULL; - sched->ops = ops; - sched->ring_id = ring; sched->hw_submission_limit = hw_submission; - sched->priv = priv; - snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring); + sched->name = name; amd_sched_rq_init(&sched->sched_rq); amd_sched_rq_init(&sched->kernel_rq); init_waitqueue_head(&sched->wake_up_worker); init_waitqueue_head(&sched->job_scheduled); atomic_set(&sched->hw_rq_count, 0); + /* Each scheduler will run on a seperate kernel thread */ sched->thread = kthread_run(amd_sched_main, sched, sched->name); if (IS_ERR(sched->thread)) { - DRM_ERROR("Failed to create scheduler for id %d.\n", ring); - kfree(sched); - return NULL; + DRM_ERROR("Failed to create scheduler for %s.\n", name); + return PTR_ERR(sched->thread); } - return sched; + return 0; } /** * Destroy a gpu scheduler * * @sched The pointer to the scheduler - * - * return 0 if succeed. -1 if failed. 
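The scheduler hunks here and in gpu_scheduler.h below replace amd_sched_create()/amd_sched_destroy(), which allocated and freed the amd_gpu_scheduler internally, with amd_sched_init()/amd_sched_fini(), which initialize and tear down caller-provided storage and report errors by return code. A small standalone sketch of that API shape, with illustrative stand-in names (struct sched, sched_init, struct ring), might look like this:

#include <stdio.h>
#include <string.h>

/*
 * The scheduler is no longer allocated by the scheduler code itself;
 * it lives in storage owned by the caller, which can embed it.
 */
struct sched {
	const char *name;
	unsigned int hw_submission_limit;
};

static int sched_init(struct sched *s, unsigned int hw_submission,
		      const char *name)
{
	memset(s, 0, sizeof(*s));
	s->name = name;
	s->hw_submission_limit = hw_submission;
	return 0;	/* error code instead of a pointer-or-NULL */
}

static void sched_fini(struct sched *s)
{
	/* nothing to free: the storage belongs to the embedding object */
	(void)s;
}

struct ring {
	struct sched sched;	/* embedded, no separate allocation */
};

int main(void)
{
	struct ring r;

	if (sched_init(&r.sched, 2, "gfx"))
		return 1;
	printf("%s: hw submission limit %u\n",
	       r.sched.name, r.sched.hw_submission_limit);
	sched_fini(&r.sched);
	return 0;
}

Teardown correspondingly no longer frees anything; in the patch, amd_sched_fini() just stops the worker thread, since the storage belongs to whoever embeds the structure.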
*/ -int amd_sched_destroy(struct amd_gpu_scheduler *sched) +void amd_sched_fini(struct amd_gpu_scheduler *sched) { kthread_stop(sched->thread); - kfree(sched); - return 0; } diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 2af0e4d4d817..80b64dc22214 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -38,13 +38,15 @@ struct amd_sched_rq; */ struct amd_sched_entity { struct list_head list; - struct amd_sched_rq *belongto_rq; - atomic_t fence_seq; - /* the job_queue maintains the jobs submitted by clients */ - struct kfifo job_queue; + struct amd_sched_rq *rq; + struct amd_gpu_scheduler *sched; + spinlock_t queue_lock; - struct amd_gpu_scheduler *scheduler; + struct kfifo job_queue; + + atomic_t fence_seq; uint64_t fence_context; + struct fence *dependency; struct fence_cb cb; }; @@ -62,13 +64,13 @@ struct amd_sched_rq { struct amd_sched_fence { struct fence base; - struct amd_gpu_scheduler *scheduler; + struct fence_cb cb; + struct amd_gpu_scheduler *sched; spinlock_t lock; void *owner; }; struct amd_sched_job { - struct fence_cb cb; struct amd_gpu_scheduler *sched; struct amd_sched_entity *s_entity; struct amd_sched_fence *s_fence; @@ -91,32 +93,29 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f) * these functions should be implemented in driver side */ struct amd_sched_backend_ops { - struct fence *(*dependency)(struct amd_sched_job *job); - struct fence *(*run_job)(struct amd_sched_job *job); - void (*process_job)(struct amd_sched_job *job); + struct fence *(*dependency)(struct amd_sched_job *sched_job); + struct fence *(*run_job)(struct amd_sched_job *sched_job); }; /** * One scheduler is implemented for each hardware ring */ struct amd_gpu_scheduler { - struct task_struct *thread; + struct amd_sched_backend_ops *ops; + uint32_t hw_submission_limit; + const char *name; struct amd_sched_rq sched_rq; struct amd_sched_rq kernel_rq; - atomic_t hw_rq_count; - struct amd_sched_backend_ops *ops; - uint32_t ring_id; wait_queue_head_t wake_up_worker; wait_queue_head_t job_scheduled; - uint32_t hw_submission_limit; - char name[20]; - void *priv; + atomic_t hw_rq_count; + struct task_struct *thread; }; -struct amd_gpu_scheduler * -amd_sched_create(struct amd_sched_backend_ops *ops, - uint32_t ring, uint32_t hw_submission, void *priv); -int amd_sched_destroy(struct amd_gpu_scheduler *sched); +int amd_sched_init(struct amd_gpu_scheduler *sched, + struct amd_sched_backend_ops *ops, + uint32_t hw_submission, const char *name); +void amd_sched_fini(struct amd_gpu_scheduler *sched); int amd_sched_entity_init(struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity, diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index e62c37920e11..d802638094f4 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c @@ -36,7 +36,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity if (fence == NULL) return NULL; fence->owner = owner; - fence->scheduler = s_entity->scheduler; + fence->sched = s_entity->sched; spin_lock_init(&fence->lock); seq = atomic_inc_return(&s_entity->fence_seq); @@ -63,7 +63,7 @@ static const char *amd_sched_fence_get_driver_name(struct fence *fence) static const char *amd_sched_fence_get_timeline_name(struct fence *f) { struct amd_sched_fence *fence = to_amd_sched_fence(f); - return (const char 
*)fence->scheduler->name; + return (const char *)fence->sched->name; } static bool amd_sched_fence_enable_signaling(struct fence *f) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index e23df5fd3836..809959d56d78 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -53,8 +53,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int offset, int size, u8 *bytes); -static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_branch *mstb); +static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb); static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *port); @@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) struct drm_dp_mst_port *port, *tmp; bool wake_tx = false; - cancel_work_sync(&mstb->mgr->work); - /* * destroy all ports - don't need lock * as there are no more references to the mst branch @@ -863,29 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref) { struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); struct drm_dp_mst_topology_mgr *mgr = port->mgr; + if (!port->input) { port->vcpi.num_slots = 0; kfree(port->cached_edid); - /* we can't destroy the connector here, as - we might be holding the mode_config.mutex - from an EDID retrieval */ + /* + * The only time we don't have a connector + * on an output port is if the connector init + * fails. + */ if (port->connector) { + /* we can't destroy the connector here, as + * we might be holding the mode_config.mutex + * from an EDID retrieval */ + mutex_lock(&mgr->destroy_connector_lock); list_add(&port->next, &mgr->destroy_connector_list); mutex_unlock(&mgr->destroy_connector_lock); schedule_work(&mgr->destroy_connector_work); return; } + /* no need to clean up vcpi + * as if we have no connector we never setup a vcpi */ drm_dp_port_teardown_pdt(port, port->pdt); - - if (!port->input && port->vcpi.vcpi > 0) - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); } kfree(port); - - (*mgr->cbs->hotplug)(mgr); } static void drm_dp_put_port(struct drm_dp_mst_port *port) @@ -1027,8 +1029,8 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, } } -static void build_mst_prop_path(struct drm_dp_mst_port *port, - struct drm_dp_mst_branch *mstb, +static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, + int pnum, char *proppath, size_t proppath_size) { @@ -1041,7 +1043,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port, snprintf(temp, sizeof(temp), "-%d", port_num); strlcat(proppath, temp, proppath_size); } - snprintf(temp, sizeof(temp), "-%d", port->port_num); + snprintf(temp, sizeof(temp), "-%d", pnum); strlcat(proppath, temp, proppath_size); } @@ -1105,22 +1107,32 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, drm_dp_port_teardown_pdt(port, old_pdt); ret = drm_dp_port_setup_pdt(port); - if (ret == true) { + if (ret == true) drm_dp_send_link_address(mstb->mgr, port->mstb); - port->mstb->link_address_sent = true; - } } if (created && !port->input) { char proppath[255]; - build_mst_prop_path(port, mstb, proppath, sizeof(proppath)); - port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); - if (port->port_num >= 8) { + build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); + 
port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); + if (!port->connector) { + /* remove it from the port list */ + mutex_lock(&mstb->mgr->lock); + list_del(&port->next); + mutex_unlock(&mstb->mgr->lock); + /* drop port list reference */ + drm_dp_put_port(port); + goto out; + } + if (port->port_num >= DP_MST_LOGICAL_PORT_0) { port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); + drm_mode_connector_set_tile_property(port->connector); } + (*mstb->mgr->cbs->register_connector)(port->connector); } +out: /* put reference to this port */ drm_dp_put_port(port); } @@ -1182,17 +1194,18 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_ list_for_each_entry(port, &mstb->ports, next) { if (port->port_num == port_num) { - if (!port->mstb) { + mstb = port->mstb; + if (!mstb) { DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]); - return NULL; + goto out; } - mstb = port->mstb; break; } } } kref_get(&mstb->kref); +out: mutex_unlock(&mgr->lock); return mstb; } @@ -1202,10 +1215,9 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m { struct drm_dp_mst_port *port; struct drm_dp_mst_branch *mstb_child; - if (!mstb->link_address_sent) { + if (!mstb->link_address_sent) drm_dp_send_link_address(mgr, mstb); - mstb->link_address_sent = true; - } + list_for_each_entry(port, &mstb->ports, next) { if (port->input) continue; @@ -1458,8 +1470,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, mutex_unlock(&mgr->qlock); } -static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_branch *mstb) +static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb) { int len; struct drm_dp_sideband_msg_tx *txmsg; @@ -1467,11 +1479,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); if (!txmsg) - return -ENOMEM; + return; txmsg->dst = mstb; len = build_link_address(txmsg); + mstb->link_address_sent = true; drm_dp_queue_down_tx(mgr, txmsg); ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); @@ -1499,11 +1512,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, } (*mgr->cbs->hotplug)(mgr); } - } else + } else { + mstb->link_address_sent = false; DRM_DEBUG_KMS("link address failed %d\n", ret); + } kfree(txmsg); - return 0; } static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, @@ -1978,6 +1992,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, DP_MST_EN | DP_UPSTREAM_IS_SRC); mutex_unlock(&mgr->lock); + flush_work(&mgr->work); + flush_work(&mgr->destroy_connector_work); } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); @@ -2263,10 +2279,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ if (port->cached_edid) edid = drm_edid_duplicate(port->cached_edid); - else + else { edid = drm_get_edid(connector, &port->aux.ddc); - - drm_mode_connector_set_tile_property(connector); + drm_mode_connector_set_tile_property(connector); + } drm_dp_put_port(port); return edid; } @@ -2671,7 +2687,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) { struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); struct drm_dp_mst_port *port; - + bool send_hotplug = false; /* * Not a regular list traverse as we have to drop the 
destroy * connector lock before destroying the connector, to avoid AB->BA @@ -2694,7 +2710,10 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) if (!port->input && port->vcpi.vcpi > 0) drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); kfree(port); + send_hotplug = true; } + if (send_hotplug) + (*mgr->cbs->hotplug)(mgr); } /** @@ -2747,6 +2766,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); */ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) { + flush_work(&mgr->work); flush_work(&mgr->destroy_connector_work); mutex_lock(&mgr->payload_lock); kfree(mgr->payloads); @@ -2782,12 +2802,13 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs if (msgs[num - 1].flags & I2C_M_RD) reading = true; - if (!reading) { + if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) { DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n"); ret = -EIO; goto out; } + memset(&msg, 0, sizeof(msg)); msg.req_type = DP_REMOTE_I2C_READ; msg.u.i2c_read.num_transactions = num - 1; msg.u.i2c_read.port_number = port->port_num; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 418d299f3b12..ca08c472311b 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -345,7 +345,11 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper) struct drm_crtc *crtc = mode_set->crtc; int ret; - if (crtc->funcs->cursor_set) { + if (crtc->funcs->cursor_set2) { + ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); + if (ret) + error = true; + } else if (crtc->funcs->cursor_set) { ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0); if (ret) error = true; diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 9a860ca1e9d7..d93e7378c077 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -520,7 +520,8 @@ EXPORT_SYMBOL(drm_ioctl_permit); /** Ioctl table */ static const struct drm_ioctl_desc drm_ioctls[] = { - DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, + DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index d734780b31c0..a18164f2f6d2 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -94,7 +94,18 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector) } #define DRM_OUTPUT_POLL_PERIOD (10*HZ) -static void __drm_kms_helper_poll_enable(struct drm_device *dev) +/** + * drm_kms_helper_poll_enable_locked - re-enable output polling. + * @dev: drm_device + * + * This function re-enables the output polling work without + * locking the mode_config mutex. + * + * This is like drm_kms_helper_poll_enable() however it is to be + * called from a context where the mode_config mutex is locked + * already. 
+ */ +void drm_kms_helper_poll_enable_locked(struct drm_device *dev) { bool poll = false; struct drm_connector *connector; @@ -113,6 +124,8 @@ static void __drm_kms_helper_poll_enable(struct drm_device *dev) if (poll) schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); } +EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked); + static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector, uint32_t maxX, uint32_t maxY, bool merge_type_bits) @@ -174,7 +187,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect /* Re-enable polling in case the global poll config changed. */ if (drm_kms_helper_poll != dev->mode_config.poll_running) - __drm_kms_helper_poll_enable(dev); + drm_kms_helper_poll_enable_locked(dev); dev->mode_config.poll_running = drm_kms_helper_poll; @@ -428,7 +441,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable); void drm_kms_helper_poll_enable(struct drm_device *dev) { mutex_lock(&dev->mode_config.mutex); - __drm_kms_helper_poll_enable(dev); + drm_kms_helper_poll_enable_locked(dev); mutex_unlock(&dev->mode_config.mutex); } EXPORT_SYMBOL(drm_kms_helper_poll_enable); diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 0f6cd33b531f..684bd4a13843 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -235,18 +235,12 @@ static ssize_t dpms_show(struct device *device, char *buf) { struct drm_connector *connector = to_drm_connector(device); - struct drm_device *dev = connector->dev; - uint64_t dpms_status; - int ret; + int dpms; - ret = drm_object_property_get_value(&connector->base, - dev->mode_config.dpms_property, - &dpms_status); - if (ret) - return 0; + dpms = READ_ONCE(connector->dpms); return snprintf(buf, PAGE_SIZE, "%s\n", - drm_get_dpms_name((int)dpms_status)); + drm_get_dpms_name(dpms)); } static ssize_t enabled_show(struct device *device, diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index cbdb78ef3bac..e6cbaca821a4 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -37,7 +37,6 @@ * DECON stands for Display and Enhancement controller. */ -#define DECON_DEFAULT_FRAMERATE 60 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 #define WINDOWS_NR 2 @@ -165,16 +164,6 @@ static u32 decon_calc_clkdiv(struct decon_context *ctx, return (clkdiv < 0x100) ? 
clkdiv : 0xff; } -static bool decon_mode_fixup(struct exynos_drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - if (adjusted_mode->vrefresh == 0) - adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE; - - return true; -} - static void decon_commit(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; @@ -637,7 +626,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc) static const struct exynos_drm_crtc_ops decon_crtc_ops = { .enable = decon_enable, .disable = decon_disable, - .mode_fixup = decon_mode_fixup, .commit = decon_commit, .enable_vblank = decon_enable_vblank, .disable_vblank = decon_disable_vblank, diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c index d66ade0efac8..124fb9a56f02 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c @@ -1383,28 +1383,6 @@ static int exynos_dp_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int exynos_dp_suspend(struct device *dev) -{ - struct exynos_dp_device *dp = dev_get_drvdata(dev); - - exynos_dp_disable(&dp->encoder); - return 0; -} - -static int exynos_dp_resume(struct device *dev) -{ - struct exynos_dp_device *dp = dev_get_drvdata(dev); - - exynos_dp_enable(&dp->encoder); - return 0; -} -#endif - -static const struct dev_pm_ops exynos_dp_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume) -}; - static const struct of_device_id exynos_dp_match[] = { { .compatible = "samsung,exynos5-dp" }, {}, @@ -1417,7 +1395,6 @@ struct platform_driver dp_driver = { .driver = { .name = "exynos-dp", .owner = THIS_MODULE, - .pm = &exynos_dp_pm_ops, .of_match_table = exynos_dp_match, }, }; diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c index c68a6a2a9b57..7f55ba6771c6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_core.c +++ b/drivers/gpu/drm/exynos/exynos_drm_core.c @@ -28,7 +28,6 @@ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register); int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) { @@ -39,7 +38,6 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister); int exynos_drm_device_subdrv_probe(struct drm_device *dev) { @@ -69,7 +67,6 @@ int exynos_drm_device_subdrv_probe(struct drm_device *dev) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe); int exynos_drm_device_subdrv_remove(struct drm_device *dev) { @@ -87,7 +84,6 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove); int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file) { @@ -111,7 +107,6 @@ err: } return ret; } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open); void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) { @@ -122,4 +117,3 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) subdrv->close(dev, subdrv->dev, file); } } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close); diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 0872aa2f450f..ed28823d3b35 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -41,20 +41,6 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc) exynos_crtc->ops->disable(exynos_crtc); } -static bool 
-exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - - if (exynos_crtc->ops->mode_fixup) - return exynos_crtc->ops->mode_fixup(exynos_crtc, mode, - adjusted_mode); - - return true; -} - static void exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) { @@ -99,7 +85,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { .enable = exynos_drm_crtc_enable, .disable = exynos_drm_crtc_disable, - .mode_fixup = exynos_drm_crtc_mode_fixup, .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, .atomic_begin = exynos_crtc_atomic_begin, .atomic_flush = exynos_crtc_atomic_flush, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 831d2e4cacf9..ae9e6b2d3758 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -304,6 +304,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, return 0; } +#ifdef CONFIG_PM_SLEEP static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state) { struct drm_connector *connector; @@ -340,6 +341,7 @@ static int exynos_drm_resume(struct drm_device *dev) return 0; } +#endif static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index b7ba21dfb696..6c717ba672db 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -82,7 +82,6 @@ struct exynos_drm_plane { * * @enable: enable the device * @disable: disable the device - * @mode_fixup: fix mode data before applying it * @commit: set current hw specific display mode to hw. * @enable_vblank: specific driver callback for enabling vblank interrupt. * @disable_vblank: specific driver callback for disabling vblank interrupt. 
@@ -103,9 +102,6 @@ struct exynos_drm_crtc; struct exynos_drm_crtc_ops { void (*enable)(struct exynos_drm_crtc *crtc); void (*disable)(struct exynos_drm_crtc *crtc); - bool (*mode_fixup)(struct exynos_drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); void (*commit)(struct exynos_drm_crtc *crtc); int (*enable_vblank)(struct exynos_drm_crtc *crtc); void (*disable_vblank)(struct exynos_drm_crtc *crtc); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 2a652359af64..dd3a5e6d58c8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -1206,23 +1206,6 @@ static struct exynos_drm_ipp_ops fimc_dst_ops = { .set_addr = fimc_dst_set_addr, }; -static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) -{ - DRM_DEBUG_KMS("enable[%d]\n", enable); - - if (enable) { - clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); - clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); - ctx->suspended = false; - } else { - clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); - clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); - ctx->suspended = true; - } - - return 0; -} - static irqreturn_t fimc_irq_handler(int irq, void *dev_id) { struct fimc_context *ctx = dev_id; @@ -1780,6 +1763,24 @@ static int fimc_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM +static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) +{ + DRM_DEBUG_KMS("enable[%d]\n", enable); + + if (enable) { + clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); + clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); + ctx->suspended = false; + } else { + clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); + clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); + ctx->suspended = true; + } + + return 0; +} + #ifdef CONFIG_PM_SLEEP static int fimc_suspend(struct device *dev) { @@ -1806,7 +1807,6 @@ static int fimc_resume(struct device *dev) } #endif -#ifdef CONFIG_PM static int fimc_runtime_suspend(struct device *dev) { struct fimc_context *ctx = get_fimc_context(dev); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 750a9e6b9e8d..3d1aba67758b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -41,7 +41,6 @@ * CPU Interface. */ -#define FIMD_DEFAULT_FRAMERATE 60 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 /* position control register for hardware window 0, 2 ~ 4.*/ @@ -377,16 +376,6 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx, return (clkdiv < 0x100) ? clkdiv : 0xff; } -static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - if (adjusted_mode->vrefresh == 0) - adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE; - - return true; -} - static void fimd_commit(struct exynos_drm_crtc *crtc) { struct fimd_context *ctx = crtc->ctx; @@ -882,13 +871,12 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable) return; val = enable ? 
DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; - writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON); + writel(val, ctx->regs + DP_MIE_CLKCON); } static const struct exynos_drm_crtc_ops fimd_crtc_ops = { .enable = fimd_enable, .disable = fimd_disable, - .mode_fixup = fimd_mode_fixup, .commit = fimd_commit, .enable_vblank = fimd_enable_vblank, .disable_vblank = fimd_disable_vblank, diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 3734c34aed16..c17efdb238a6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1059,7 +1059,6 @@ int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data, return 0; } -EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl); int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, struct drm_file *file) @@ -1230,7 +1229,6 @@ err: g2d_put_cmdlist(g2d, node); return ret; } -EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl); int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, struct drm_file *file) @@ -1293,7 +1291,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, out: return 0; } -EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl); static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index f12fbc36b120..407afedb6003 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -56,39 +56,35 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj) nr_pages = obj->size >> PAGE_SHIFT; if (!is_drm_iommu_supported(dev)) { - dma_addr_t start_addr; - unsigned int i = 0; - obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); if (!obj->pages) { DRM_ERROR("failed to allocate pages.\n"); return -ENOMEM; } + } - obj->cookie = dma_alloc_attrs(dev->dev, - obj->size, - &obj->dma_addr, GFP_KERNEL, - &obj->dma_attrs); - if (!obj->cookie) { - DRM_ERROR("failed to allocate buffer.\n"); + obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr, + GFP_KERNEL, &obj->dma_attrs); + if (!obj->cookie) { + DRM_ERROR("failed to allocate buffer.\n"); + if (obj->pages) drm_free_large(obj->pages); - return -ENOMEM; - } + return -ENOMEM; + } + + if (obj->pages) { + dma_addr_t start_addr; + unsigned int i = 0; start_addr = obj->dma_addr; while (i < nr_pages) { - obj->pages[i] = phys_to_page(start_addr); + obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev, + start_addr)); start_addr += PAGE_SIZE; i++; } } else { - obj->pages = dma_alloc_attrs(dev->dev, obj->size, - &obj->dma_addr, GFP_KERNEL, - &obj->dma_attrs); - if (!obj->pages) { - DRM_ERROR("failed to allocate buffer.\n"); - return -ENOMEM; - } + obj->pages = obj->cookie; } DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", @@ -110,15 +106,11 @@ static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj) DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", (unsigned long)obj->dma_addr, obj->size); - if (!is_drm_iommu_supported(dev)) { - dma_free_attrs(dev->dev, obj->size, obj->cookie, - (dma_addr_t)obj->dma_addr, &obj->dma_attrs); - drm_free_large(obj->pages); - } else - dma_free_attrs(dev->dev, obj->size, obj->pages, - (dma_addr_t)obj->dma_addr, &obj->dma_attrs); + dma_free_attrs(dev->dev, obj->size, obj->cookie, + (dma_addr_t)obj->dma_addr, &obj->dma_attrs); - obj->dma_addr = (dma_addr_t)NULL; + if (!is_drm_iommu_supported(dev)) + drm_free_large(obj->pages); } static int exynos_drm_gem_handle_create(struct drm_gem_object 
*obj, @@ -156,18 +148,14 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) * once dmabuf's refcount becomes 0. */ if (obj->import_attach) - goto out; - - exynos_drm_free_buf(exynos_gem_obj); - -out: - drm_gem_free_mmap_offset(obj); + drm_prime_gem_destroy(obj, exynos_gem_obj->sgt); + else + exynos_drm_free_buf(exynos_gem_obj); /* release file pointer to gem object. */ drm_gem_object_release(obj); kfree(exynos_gem_obj); - exynos_gem_obj = NULL; } unsigned long exynos_drm_gem_get_size(struct drm_device *dev, @@ -190,8 +178,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev, return exynos_gem_obj->size; } - -struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, +static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, unsigned long size) { struct exynos_drm_gem_obj *exynos_gem_obj; @@ -212,6 +199,13 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, return ERR_PTR(ret); } + ret = drm_gem_create_mmap_offset(obj); + if (ret < 0) { + drm_gem_object_release(obj); + kfree(exynos_gem_obj); + return ERR_PTR(ret); + } + DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); return exynos_gem_obj; @@ -313,7 +307,7 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev, drm_gem_object_unreference_unlocked(obj); } -int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, +static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, struct vm_area_struct *vma) { struct drm_device *drm_dev = exynos_gem_obj->base.dev; @@ -342,7 +336,8 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ struct exynos_drm_gem_obj *exynos_gem_obj; +{ + struct exynos_drm_gem_obj *exynos_gem_obj; struct drm_exynos_gem_info *args = data; struct drm_gem_object *obj; @@ -402,6 +397,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args) { struct exynos_drm_gem_obj *exynos_gem_obj; + unsigned int flags; int ret; /* @@ -413,16 +409,12 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, args->pitch = args->width * ((args->bpp + 7) / 8); args->size = args->pitch * args->height; - if (is_drm_iommu_supported(dev)) { - exynos_gem_obj = exynos_drm_gem_create(dev, - EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC, - args->size); - } else { - exynos_gem_obj = exynos_drm_gem_create(dev, - EXYNOS_BO_CONTIG | EXYNOS_BO_WC, - args->size); - } + if (is_drm_iommu_supported(dev)) + flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC; + else + flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC; + exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size); if (IS_ERR(exynos_gem_obj)) { dev_warn(dev->dev, "FB allocation failed.\n"); return PTR_ERR(exynos_gem_obj); @@ -460,14 +452,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, goto unlock; } - ret = drm_gem_create_mmap_offset(obj); - if (ret) - goto out; - *offset = drm_vma_node_offset_addr(&obj->vma_node); DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); -out: drm_gem_object_unreference(obj); unlock: mutex_unlock(&dev->struct_mutex); @@ -543,7 +530,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) err_close_vm: drm_gem_vm_close(vma); - drm_gem_free_mmap_offset(obj); return ret; } @@ -588,6 +574,8 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, if (ret < 0) goto err_free_large; + exynos_gem_obj->sgt = sgt; + if (sgt->nents == 1) { 
/* always physically continuous memory if sgt->nents is 1. */ exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index cd62f8410d1e..b62d1007c0e0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -39,6 +39,7 @@ * - this address could be physical address without IOMMU and * device address with IOMMU. * @pages: Array of backing pages. + * @sgt: Imported sg_table. * * P.S. this object would be transferred to user as kms_bo.handle so * user can access the buffer through kms_bo.handle. @@ -52,6 +53,7 @@ struct exynos_drm_gem_obj { dma_addr_t dma_addr; struct dma_attrs dma_attrs; struct page **pages; + struct sg_table *sgt; }; struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); @@ -59,10 +61,6 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); /* destroy a buffer with gem object */ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj); -/* create a private gem object and initialize it. */ -struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, - unsigned long size); - /* create a new buffer with gem object */ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, unsigned int flags, diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 425e70625388..2f5c118f4c8e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -786,6 +786,7 @@ static int rotator_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM static int rotator_clk_crtl(struct rot_context *rot, bool enable) { if (enable) { @@ -822,7 +823,6 @@ static int rotator_resume(struct device *dev) } #endif -#ifdef CONFIG_PM static int rotator_runtime_suspend(struct device *dev) { struct rot_context *rot = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c index 82be6b86a168..d1e300dcd544 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c @@ -58,7 +58,8 @@ static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane, struct drm_plane_state *old_state) { struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private; - unsigned int index, value, ret; + unsigned int value; + int index, ret; index = fsl_dcu_drm_plane_index(plane); if (index < 0) diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index f6ecbda2c604..674341708033 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -143,7 +143,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, } /** - * i915_gem_shrink - Shrink buffer object caches completely + * i915_gem_shrink_all - Shrink buffer object caches completely * @dev_priv: i915 device * * This is a simple wraper around i915_gem_shrink() to aggressively shrink all diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 8fd431bcdfd3..a96b9006a51e 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -804,7 +804,10 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { * Also note, that the object created here is not currently a "first class" * object, in that several ioctls are banned. 
These are the CPU access * ioctls: mmap(), pwrite and pread. In practice, you are expected to use - * direct access via your pointer rather than use those ioctls. + * direct access via your pointer rather than use those ioctls. Another + * restriction is that we do not allow userptr surfaces to be pinned to the + * hardware and so we reject any attempt to create a framebuffer out of a + * userptr. * * If you think this is a good interface to use to pass GPU memory between * drivers, please use dma-buf instead. In fact, wherever possible use diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 5a244ab9395b..39d73dbc1c47 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -640,6 +640,32 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; /* + * On HSW, the DSL reg (0x70000) appears to return 0 if we + * read it just before the start of vblank. So try it again + * so we don't accidentally end up spanning a vblank frame + * increment, causing the pipe_update_end() code to squak at us. + * + * The nature of this problem means we can't simply check the ISR + * bit and return the vblank start value; nor can we use the scanline + * debug register in the transcoder as it appears to have the same + * problem. We may need to extend this to include other platforms, + * but so far testing only shows the problem on HSW. + */ + if (IS_HASWELL(dev) && !position) { + int i, temp; + + for (i = 0; i < 100; i++) { + udelay(1); + temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & + DSL_LINEMASK_GEN3; + if (temp != position) { + position = temp; + break; + } + } + } + + /* * See update_scanline_offset() for the details on the * scanline_offset adjustment. */ diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 89c1a8ce1f98..2a5c76faf9f8 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -430,7 +430,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder) /** * intel_audio_codec_disable - Disable the audio codec for HD audio - * @encoder: encoder on which to disable audio + * @intel_encoder: encoder on which to disable audio * * The disable sequences must be performed before disabling the transcoder or * port. diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index b3e437b3bb54..c19e669ffe50 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -42,7 +42,7 @@ find_section(const void *_bdb, int section_id) const struct bdb_header *bdb = _bdb; const u8 *base = _bdb; int index = 0; - u16 total, current_size; + u32 total, current_size; u8 current_id; /* skip to first section */ @@ -57,6 +57,10 @@ find_section(const void *_bdb, int section_id) current_size = *((const u16 *)(base + index)); index += 2; + /* The MIPI Sequence Block v3+ has a separate size field. */ + if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3) + current_size = *((const u32 *)(base + index + 1)); + if (index + current_size > total) return NULL; @@ -799,6 +803,12 @@ parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) return; } + /* Fail gracefully for forward incompatible sequence block. 
*/ + if (sequence->version >= 3) { + DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n"); + return; + } + DRM_DEBUG_DRIVER("Found MIPI sequence block\n"); block_size = get_blocksize(sequence); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 8cc9264f7809..b2270d576979 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1724,6 +1724,15 @@ static void i9xx_enable_pll(struct intel_crtc *crtc) I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE); } + /* + * Apparently we need to have VGA mode enabled prior to changing + * the P1/P2 dividers. Otherwise the DPLL will keep using the old + * dividers, even though the register value does change. + */ + I915_WRITE(reg, 0); + + I915_WRITE(reg, dpll); + /* Wait for the clocks to stabilize. */ POSTING_READ(reg); udelay(150); @@ -14107,6 +14116,11 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct drm_i915_gem_object *obj = intel_fb->obj; + if (obj->userptr.mm) { + DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n"); + return -EINVAL; + } + return drm_gem_handle_create(file, &obj->base, handle); } @@ -14897,9 +14911,19 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) /* restore vblank interrupts to correct state */ drm_crtc_vblank_reset(&crtc->base); if (crtc->active) { + struct intel_plane *plane; + drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode); update_scanline_offset(crtc); drm_crtc_vblank_on(&crtc->base); + + /* Disable everything but the primary plane */ + for_each_intel_plane_on_crtc(dev, crtc, plane) { + if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) + continue; + + plane->disable_plane(&plane->base, &crtc->base); + } } /* We need to sanitize the plane -> pipe mapping first because this will @@ -15067,35 +15091,25 @@ void i915_redisable_vga(struct drm_device *dev) i915_redisable_vga_power_on(dev); } -static bool primary_get_hw_state(struct intel_crtc *crtc) +static bool primary_get_hw_state(struct intel_plane *plane) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - return !!(I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE); + return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE; } -static void readout_plane_state(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) +/* FIXME read out full plane state for all planes */ +static void readout_plane_state(struct intel_crtc *crtc) { - struct intel_plane *p; - struct intel_plane_state *plane_state; - bool active = crtc_state->base.active; - - for_each_intel_plane(crtc->base.dev, p) { - if (crtc->pipe != p->pipe) - continue; - - plane_state = to_intel_plane_state(p->base.state); + struct drm_plane *primary = crtc->base.primary; + struct intel_plane_state *plane_state = + to_intel_plane_state(primary->state); - if (p->base.type == DRM_PLANE_TYPE_PRIMARY) - plane_state->visible = primary_get_hw_state(crtc); - else { - if (active) - p->disable_plane(&p->base, &crtc->base); + plane_state->visible = + primary_get_hw_state(to_intel_plane(primary)); - plane_state->visible = false; - } - } + if (plane_state->visible) + crtc->base.state->plane_mask |= 1 << drm_plane_index(primary); } static void intel_modeset_readout_hw_state(struct drm_device *dev) @@ -15118,34 +15132,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) crtc->base.state->active = 
crtc->active; crtc->base.enabled = crtc->active; - memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); - if (crtc->base.state->active) { - intel_mode_from_pipe_config(&crtc->base.mode, crtc->config); - intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config); - WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode)); - - /* - * The initial mode needs to be set in order to keep - * the atomic core happy. It wants a valid mode if the - * crtc's enabled, so we do the above call. - * - * At this point some state updated by the connectors - * in their ->detect() callback has not run yet, so - * no recalculation can be done yet. - * - * Even if we could do a recalculation and modeset - * right now it would cause a double modeset if - * fbdev or userspace chooses a different initial mode. - * - * If that happens, someone indicated they wanted a - * mode change, which means it's safe to do a full - * recalculation. - */ - crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED; - } - - crtc->base.hwmode = crtc->config->base.adjusted_mode; - readout_plane_state(crtc, to_intel_crtc_state(crtc->base.state)); + readout_plane_state(crtc); DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", crtc->base.base.id, @@ -15204,6 +15191,36 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) connector->base.name, connector->base.encoder ? "enabled" : "disabled"); } + + for_each_intel_crtc(dev, crtc) { + crtc->base.hwmode = crtc->config->base.adjusted_mode; + + memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); + if (crtc->base.state->active) { + intel_mode_from_pipe_config(&crtc->base.mode, crtc->config); + intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config); + WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode)); + + /* + * The initial mode needs to be set in order to keep + * the atomic core happy. It wants a valid mode if the + * crtc's enabled, so we do the above call. + * + * At this point some state updated by the connectors + * in their ->detect() callback has not run yet, so + * no recalculation can be done yet. + * + * Even if we could do a recalculation and modeset + * right now it would cause a double modeset if + * fbdev or userspace chooses a different initial mode. + * + * If that happens, someone indicated they wanted a + * mode change, which means it's safe to do a full + * recalculation. 
+ */ + crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED; + } + } } /* Scan out the current hw modeset state, diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 3e4be5a3becd..6ade06888432 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -462,11 +462,17 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); drm_mode_connector_set_path_property(connector, pathprop); + return connector; +} + +static void intel_dp_register_mst_connector(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_device *dev = connector->dev; drm_modeset_lock_all(dev); intel_connector_add_to_fbdev(intel_connector); drm_modeset_unlock_all(dev); drm_connector_register(&intel_connector->base); - return connector; } static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, @@ -512,6 +518,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) static struct drm_dp_mst_topology_cbs mst_cbs = { .add_connector = intel_dp_add_mst_connector, + .register_connector = intel_dp_register_mst_connector, .destroy_connector = intel_dp_destroy_mst_connector, .hotplug = intel_dp_mst_hotplug, }; diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 53c0173a39fe..b17785719598 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) /* Enable polling and queue hotplug re-enabling. */ if (hpd_disabled) { - drm_kms_helper_poll_enable(dev); + drm_kms_helper_poll_enable_locked(dev); mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 72e0edd7bbde..29dd4488dc49 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -484,18 +484,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); read_pointer = ring->next_context_status_buffer; - write_pointer = status_pointer & 0x07; + write_pointer = status_pointer & GEN8_CSB_PTR_MASK; if (read_pointer > write_pointer) - write_pointer += 6; + write_pointer += GEN8_CSB_ENTRIES; spin_lock(&ring->execlist_lock); while (read_pointer < write_pointer) { read_pointer++; status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + - (read_pointer % 6) * 8); + (read_pointer % GEN8_CSB_ENTRIES) * 8); status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + - (read_pointer % 6) * 8 + 4); + (read_pointer % GEN8_CSB_ENTRIES) * 8 + 4); if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) continue; @@ -521,10 +521,12 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) spin_unlock(&ring->execlist_lock); WARN(submit_contexts > 2, "More than two context complete events?\n"); - ring->next_context_status_buffer = write_pointer % 6; + ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES; I915_WRITE(RING_CONTEXT_STATUS_PTR(ring), - _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8)); + _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8, + ((u32)ring->next_context_status_buffer & + GEN8_CSB_PTR_MASK) << 8)); } static int execlists_context_queue(struct drm_i915_gem_request *request) @@ -1422,6 
+1424,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; + u8 next_context_status_buffer_hw; I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); @@ -1436,7 +1439,29 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); POSTING_READ(RING_MODE_GEN7(ring)); - ring->next_context_status_buffer = 0; + + /* + * Instead of resetting the Context Status Buffer (CSB) read pointer to + * zero, we need to read the write pointer from hardware and use its + * value because "this register is power context save restored". + * Effectively, these states have been observed: + * + * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) | + * BDW | CSB regs not reset | CSB regs reset | + * CHT | CSB regs not reset | CSB regs not reset | + */ + next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring)) + & GEN8_CSB_PTR_MASK); + + /* + * When the CSB registers are reset (also after power-up / gpu reset), + * CSB write pointer is set to all 1's, which is not valid, use '5' in + * this special case, so the first element read is CSB[0]. + */ + if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK) + next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1); + + ring->next_context_status_buffer = next_context_status_buffer_hw; DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name); memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); @@ -1634,6 +1659,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, if (flush_domains) { flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= PIPE_CONTROL_FLUSH_ENABLE; } if (invalidate_domains) { diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 64f89f9982a2..3c63bb32ad81 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -25,6 +25,8 @@ #define _INTEL_LRC_H_ #define GEN8_LR_CONTEXT_ALIGN 4096 +#define GEN8_CSB_ENTRIES 6 +#define GEN8_CSB_PTR_MASK 0x07 /* Execlists regs */ #define RING_ELSP(ring) ((ring)->mmio_base+0x230) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 6e6b8db996ef..61b451fbd09e 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -347,6 +347,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, if (flush_domains) { flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= PIPE_CONTROL_FLUSH_ENABLE; } if (invalidate_domains) { flags |= PIPE_CONTROL_TLB_INVALIDATE; @@ -418,6 +419,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, if (flush_domains) { flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= PIPE_CONTROL_FLUSH_ENABLE; } if (invalidate_domains) { flags |= PIPE_CONTROL_TLB_INVALIDATE; diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index af7fdb3bd663..7401cf90b0db 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -246,7 +246,8 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv, } if (power_well->data == SKL_DISP_PW_1) { - intel_prepare_ddi(dev); + if (!dev_priv->power_domains.initializing) + 
intel_prepare_ddi(dev); gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A); } } diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c index 87de15ea1f93..b35b5b2db4ec 100644 --- a/drivers/gpu/drm/mgag200/mgag200_fb.c +++ b/drivers/gpu/drm/mgag200/mgag200_fb.c @@ -186,17 +186,19 @@ static int mgag200fb_create(struct drm_fb_helper *helper, sysram = vmalloc(size); if (!sysram) - return -ENOMEM; + goto err_sysram; info = drm_fb_helper_alloc_fbi(helper); - if (IS_ERR(info)) - return PTR_ERR(info); + if (IS_ERR(info)) { + ret = PTR_ERR(info); + goto err_alloc_fbi; + } info->par = mfbdev; ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj); if (ret) - return ret; + goto err_framebuffer_init; mfbdev->sysram = sysram; mfbdev->size = size; @@ -225,7 +227,17 @@ static int mgag200fb_create(struct drm_fb_helper *helper, DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height); + return 0; + +err_framebuffer_init: + drm_fb_helper_release_fbi(helper); +err_alloc_fbi: + vfree(sysram); +err_sysram: + drm_gem_object_unreference_unlocked(gobj); + + return ret; } static int mga_fbdev_destroy(struct drm_device *dev, @@ -276,23 +288,26 @@ int mgag200_fbdev_init(struct mga_device *mdev) ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper, mdev->num_crtc, MGAG200FB_CONN_LIMIT); if (ret) - return ret; + goto err_fb_helper; ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper); if (ret) - goto fini; + goto err_fb_setup; /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(mdev->dev); ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel); if (ret) - goto fini; + goto err_fb_setup; return 0; -fini: +err_fb_setup: drm_fb_helper_fini(&mfbdev->helper); +err_fb_helper: + mdev->mfbdev = NULL; + return ret; } diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index de06388069e7..b1a0f5656175 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c @@ -220,7 +220,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags) } r = mgag200_mm_init(mdev); if (r) - goto out; + goto err_mm; drm_mode_config_init(dev); dev->mode_config.funcs = (void *)&mga_mode_funcs; @@ -233,7 +233,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags) r = mgag200_modeset_init(mdev); if (r) { dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r); - goto out; + goto err_modeset; } /* Make small buffers to store a hardware cursor (double buffered icon updates) */ @@ -241,20 +241,24 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags) &mdev->cursor.pixels_1); mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0, &mdev->cursor.pixels_2); - if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1) - goto cursor_nospace; - mdev->cursor.pixels_current = mdev->cursor.pixels_1; - mdev->cursor.pixels_prev = mdev->cursor.pixels_2; - goto cursor_done; - cursor_nospace: - mdev->cursor.pixels_1 = NULL; - mdev->cursor.pixels_2 = NULL; - dev_warn(&dev->pdev->dev, "Could not allocate space for cursors. Not doing hardware cursors.\n"); - cursor_done: - -out: - if (r) - mgag200_driver_unload(dev); + if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1) { + mdev->cursor.pixels_1 = NULL; + mdev->cursor.pixels_2 = NULL; + dev_warn(&dev->pdev->dev, + "Could not allocate space for cursors. 
Not doing hardware cursors.\n"); + } else { + mdev->cursor.pixels_current = mdev->cursor.pixels_1; + mdev->cursor.pixels_prev = mdev->cursor.pixels_2; + } + + return 0; + +err_modeset: + drm_mode_config_cleanup(dev); + mgag200_mm_fini(mdev); +err_mm: + dev->dev_private = NULL; + return r; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c index b1f73bee1368..b0d4b53b97f4 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c @@ -178,7 +178,6 @@ static int mdp5_hw_irqdomain_map(struct irq_domain *d, irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq); irq_set_chip_data(irq, mdp5_kms); - set_irq_flags(irq, IRQF_VALID); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index cc6c228e11c8..e905c00acf1a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -469,9 +469,13 @@ nouveau_display_create(struct drm_device *dev) if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { dev->mode_config.max_width = 4096; dev->mode_config.max_height = 4096; - } else { + } else + if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) { dev->mode_config.max_width = 8192; dev->mode_config.max_height = 8192; + } else { + dev->mode_config.max_width = 16384; + dev->mode_config.max_height = 16384; } dev->mode_config.preferred_depth = 24; diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 2791701685dc..59f27e774acb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -178,8 +178,30 @@ nouveau_fbcon_sync(struct fb_info *info) return 0; } +static int +nouveau_fbcon_open(struct fb_info *info, int user) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->dev); + int ret = pm_runtime_get_sync(drm->dev->dev); + if (ret < 0 && ret != -EACCES) + return ret; + return 0; +} + +static int +nouveau_fbcon_release(struct fb_info *info, int user) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->dev); + pm_runtime_put(drm->dev->dev); + return 0; +} + static struct fb_ops nouveau_fbcon_ops = { .owner = THIS_MODULE, + .fb_open = nouveau_fbcon_open, + .fb_release = nouveau_fbcon_release, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = nouveau_fbcon_fillrect, @@ -195,6 +217,8 @@ static struct fb_ops nouveau_fbcon_ops = { static struct fb_ops nouveau_fbcon_sw_ops = { .owner = THIS_MODULE, + .fb_open = nouveau_fbcon_open, + .fb_release = nouveau_fbcon_release, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = drm_fb_helper_cfb_fillrect, diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 2c9981512d27..41be584147b9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -227,11 +227,12 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem, struct nouveau_bo *nvbo = nouveau_gem_object(gem); struct nvkm_vma *vma; - if (nvbo->bo.mem.mem_type == TTM_PL_TT) + if (is_power_of_2(nvbo->valid_domains)) + rep->domain = nvbo->valid_domains; + else if (nvbo->bo.mem.mem_type == TTM_PL_TT) rep->domain = NOUVEAU_GEM_DOMAIN_GART; else rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; - rep->offset = nvbo->bo.offset; if (cli->vm) { vma = nouveau_bo_vma_find(nvbo, cli->vm); diff 
--git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c index 65af31441e9c..a7d69ce7abc1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c @@ -267,6 +267,12 @@ init_i2c(struct nvbios_init *init, int index) index = NVKM_I2C_BUS_PRI; if (init->outp && init->outp->i2c_upper_default) index = NVKM_I2C_BUS_SEC; + } else + if (index == 0x80) { + index = NVKM_I2C_BUS_PRI; + } else + if (index == 0x81) { + index = NVKM_I2C_BUS_SEC; } bus = nvkm_i2c_bus_find(i2c, index); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h index e0ec2a6b7b79..212800ecdce9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h @@ -8,7 +8,10 @@ struct nvbios_source { void *(*init)(struct nvkm_bios *, const char *); void (*fini)(void *); u32 (*read)(void *, u32 offset, u32 length, struct nvkm_bios *); + u32 (*size)(void *); bool rw; + bool ignore_checksum; + bool no_pcir; }; int nvbios_extend(struct nvkm_bios *, u32 length); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c index 792f017525f6..b2557e87afdd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c @@ -45,7 +45,7 @@ shadow_fetch(struct nvkm_bios *bios, struct shadow *mthd, u32 upto) u32 read = mthd->func->read(data, start, limit - start, bios); bios->size = start + read; } - return bios->size >= limit; + return bios->size >= upto; } static int @@ -55,14 +55,22 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd) struct nvbios_image image; int score = 1; - if (!shadow_fetch(bios, mthd, offset + 0x1000)) { - nvkm_debug(subdev, "%08x: header fetch failed\n", offset); - return 0; - } + if (mthd->func->no_pcir) { + image.base = 0; + image.type = 0; + image.size = mthd->func->size(mthd->data); + image.last = 1; + } else { + if (!shadow_fetch(bios, mthd, offset + 0x1000)) { + nvkm_debug(subdev, "%08x: header fetch failed\n", + offset); + return 0; + } - if (!nvbios_image(bios, idx, &image)) { - nvkm_debug(subdev, "image %d invalid\n", idx); - return 0; + if (!nvbios_image(bios, idx, &image)) { + nvkm_debug(subdev, "image %d invalid\n", idx); + return 0; + } } nvkm_debug(subdev, "%08x: type %02x, %d bytes\n", image.base, image.type, image.size); @@ -74,7 +82,8 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd) switch (image.type) { case 0x00: - if (nvbios_checksum(&bios->data[image.base], image.size)) { + if (!mthd->func->ignore_checksum && + nvbios_checksum(&bios->data[image.base], image.size)) { nvkm_debug(subdev, "%08x: checksum failed\n", image.base); if (mthd->func->rw) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c index bd60d7dd09f5..4bf486b57101 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c @@ -21,6 +21,7 @@ * */ #include "priv.h" + #include <core/pci.h> #if defined(__powerpc__) @@ -33,17 +34,26 @@ static u32 of_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios) { struct priv *priv = data; - if (offset + length <= priv->size) { + if (offset < priv->size) { + length = min_t(u32, length, priv->size - offset); memcpy_fromio(bios->data + offset, priv->data + offset, length); return 
length; } return 0; } +static u32 +of_size(void *data) +{ + struct priv *priv = data; + return priv->size; +} + static void * of_init(struct nvkm_bios *bios, const char *name) { - struct pci_dev *pdev = bios->subdev.device->func->pci(bios->subdev.device)->pdev; + struct nvkm_device *device = bios->subdev.device; + struct pci_dev *pdev = device->func->pci(device)->pdev; struct device_node *dn; struct priv *priv; if (!(dn = pci_device_to_OF_node(pdev))) @@ -62,7 +72,10 @@ nvbios_of = { .init = of_init, .fini = (void(*)(void *))kfree, .read = of_read, + .size = of_size, .rw = false, + .ignore_checksum = true, + .no_pcir = true, }; #else const struct nvbios_source diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c index 814cb51cc873..385a90f91ed6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c @@ -35,6 +35,8 @@ static const struct nvkm_device_agp_quirk nvkm_device_agp_quirks[] = { /* VIA Apollo PRO133x / GeForce FX 5600 Ultra - fdo#20341 */ { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 }, + /* SiS 761 does not support AGP cards, use PCI mode */ + { PCI_VENDOR_ID_SI, 0x0761, PCI_ANY_ID, PCI_ANY_ID, 0 }, {}, }; @@ -137,8 +139,10 @@ nvkm_agp_ctor(struct nvkm_pci *pci) while (quirk->hostbridge_vendor) { if (info.device->vendor == quirk->hostbridge_vendor && info.device->device == quirk->hostbridge_device && - pci->pdev->vendor == quirk->chip_vendor && - pci->pdev->device == quirk->chip_device) { + (quirk->chip_vendor == (u16)PCI_ANY_ID || + pci->pdev->vendor == quirk->chip_vendor) && + (quirk->chip_device == (u16)PCI_ANY_ID || + pci->pdev->device == quirk->chip_device)) { nvkm_info(subdev, "forcing default agp mode to %dX, " "use NvAGP=<mode> to override\n", quirk->mode); diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 7c6225c84ba6..183aea1abebc 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -244,6 +244,10 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc, ret = qxl_bo_reserve(bo, false); if (ret) return ret; + ret = qxl_bo_pin(bo, bo->type, NULL); + qxl_bo_unreserve(bo); + if (ret) + return ret; qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0, &norect, one_clip_rect, inc); @@ -257,7 +261,11 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc, } drm_vblank_put(dev, qcrtc->index); - qxl_bo_unreserve(bo); + ret = qxl_bo_reserve(bo, false); + if (!ret) { + qxl_bo_unpin(bo); + qxl_bo_unreserve(bo); + } return 0; } @@ -618,7 +626,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc, adjusted_mode->hdisplay, adjusted_mode->vdisplay); - if (qcrtc->index == 0) + if (bo->is_primary == false) recreate_primary = true; if (bo->surf.stride * bo->surf.height > qdev->vram_size) { @@ -886,13 +894,15 @@ static enum drm_connector_status qxl_conn_detect( drm_connector_to_qxl_output(connector); struct drm_device *ddev = connector->dev; struct qxl_device *qdev = ddev->dev_private; - int connected; + bool connected = false; /* The first monitor is always connected */ - connected = (output->index == 0) || - (qdev->client_monitors_config && - qdev->client_monitors_config->count > output->index && - qxl_head_enabled(&qdev->client_monitors_config->heads[output->index])); + if (!qdev->client_monitors_config) { + if (output->index == 0) + connected = true; + } else + connected = qdev->client_monitors_config->count > output->index && + qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]); 
DRM_DEBUG("#%d connected: %d\n", output->index, connected); if (!connected) diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index 41c422fee31a..c4a552637c93 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -144,14 +144,17 @@ static void qxl_dirty_update(struct qxl_fbdev *qfbdev, spin_lock_irqsave(&qfbdev->dirty.lock, flags); - if (qfbdev->dirty.y1 < y) - y = qfbdev->dirty.y1; - if (qfbdev->dirty.y2 > y2) - y2 = qfbdev->dirty.y2; - if (qfbdev->dirty.x1 < x) - x = qfbdev->dirty.x1; - if (qfbdev->dirty.x2 > x2) - x2 = qfbdev->dirty.x2; + if ((qfbdev->dirty.y2 - qfbdev->dirty.y1) && + (qfbdev->dirty.x2 - qfbdev->dirty.x1)) { + if (qfbdev->dirty.y1 < y) + y = qfbdev->dirty.y1; + if (qfbdev->dirty.y2 > y2) + y2 = qfbdev->dirty.y2; + if (qfbdev->dirty.x1 < x) + x = qfbdev->dirty.x1; + if (qfbdev->dirty.x2 > x2) + x2 = qfbdev->dirty.x2; + } qfbdev->dirty.x1 = x; qfbdev->dirty.x2 = x2; diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index b66ec331c17c..4efa8e261baf 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -307,7 +307,7 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); if (idr_ret < 0) return idr_ret; - bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo)); + bo = to_qxl_bo(entry->tv.bo); (*release)->release_offset = create_rel->release_offset + 64; @@ -316,8 +316,6 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, info = qxl_release_map(qdev, *release); info->id = idr_ret; qxl_release_unmap(qdev, *release, info); - - qxl_bo_unref(&bo); return 0; } diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index c3872598b85a..65adb9c72377 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -1624,8 +1624,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode) } else atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - args.ucAction = ATOM_LCD_BLON; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + + atombios_set_backlight_level(radeon_encoder, dig->backlight_level); } break; case DRM_MODE_DPMS_STANDBY: @@ -1706,8 +1707,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); } if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - atombios_dig_transmitter_setup(encoder, - ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); + atombios_set_backlight_level(radeon_encoder, dig->backlight_level); if (ext_encoder) atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); break; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index d8319dae8358..f3f562f6d848 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1573,10 +1573,12 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) drm_kms_helper_poll_disable(dev); + drm_modeset_lock_all(dev); /* turn off display hw */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); } + drm_modeset_unlock_all(dev); /* unpin the front buffers and cursors */ 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { @@ -1734,9 +1736,11 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) if (fbcon) { drm_helper_resume_force_mode(dev); /* turn on display hw */ + drm_modeset_lock_all(dev); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); } + drm_modeset_unlock_all(dev); } drm_kms_helper_poll_enable(dev); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index d2e9e9efc159..6743174acdbc 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1633,18 +1633,8 @@ int radeon_modeset_init(struct radeon_device *rdev) radeon_fbdev_init(rdev); drm_kms_helper_poll_init(rdev->ddev); - if (rdev->pm.dpm_enabled) { - /* do dpm late init */ - ret = radeon_pm_late_init(rdev); - if (ret) { - rdev->pm.dpm_enabled = false; - DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); - } - /* set the dpm state for PX since there won't be - * a modeset to call this. - */ - radeon_pm_compute_clocks(rdev); - } + /* do pm late init */ + ret = radeon_pm_late_init(rdev); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 5e09c061847f..744f5c49c664 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -265,7 +265,6 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol { struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr); struct drm_device *dev = master->base.dev; - struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector; struct drm_connector *connector; @@ -284,14 +283,22 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol radeon_connector->mst_encoder = radeon_dp_create_fake_mst_encoder(master); drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); + drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); drm_mode_connector_set_path_property(connector, pathprop); + return connector; +} + +static void radeon_dp_register_mst_connector(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; + drm_modeset_lock_all(dev); radeon_fb_add_connector(rdev, connector); drm_modeset_unlock_all(dev); drm_connector_register(connector); - return connector; } static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, @@ -324,6 +331,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) struct drm_dp_mst_topology_cbs mst_cbs = { .add_connector = radeon_dp_add_mst_connector, + .register_connector = radeon_dp_register_mst_connector, .destroy_connector = radeon_dp_destroy_mst_connector, .hotplug = radeon_dp_mst_hotplug, }; diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 7214858ffcea..26da2f4d7b4f 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -48,40 +48,10 @@ struct radeon_fbdev { struct radeon_device *rdev; }; -/** - * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev. - * - * @info: fbdev info - * - * This function hides the cursor on all CRTCs used by fbdev. 
- */ -static int radeon_fb_helper_set_par(struct fb_info *info) -{ - int ret; - - ret = drm_fb_helper_set_par(info); - - /* XXX: with universal plane support fbdev will automatically disable - * all non-primary planes (including the cursor) - */ - if (ret == 0) { - struct drm_fb_helper *fb_helper = info->par; - int i; - - for (i = 0; i < fb_helper->crtc_count; i++) { - struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc; - - radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); - } - } - - return ret; -} - static struct fb_ops radeonfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, - .fb_set_par = radeon_fb_helper_set_par, + .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = drm_fb_helper_cfb_fillrect, .fb_copyarea = drm_fb_helper_cfb_copyarea, .fb_imageblit = drm_fb_helper_cfb_imageblit, @@ -427,3 +397,19 @@ void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector { drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector); } + +void radeon_fbdev_restore_mode(struct radeon_device *rdev) +{ + struct radeon_fbdev *rfbdev = rdev->mode_info.rfbdev; + struct drm_fb_helper *fb_helper; + int ret; + + if (!rfbdev) + return; + + fb_helper = &rfbdev->helper; + + ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); + if (ret) + DRM_DEBUG("failed to restore crtc mode\n"); +} diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 4a119c255ba9..0e932bf932c1 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -598,7 +598,7 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file * Outdated mess for old drm with Xorg being in charge (void function now). */ /** - * radeon_driver_firstopen_kms - drm callback for last close + * radeon_driver_lastclose_kms - drm callback for last close * * @dev: drm dev pointer * @@ -606,6 +606,9 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file */ void radeon_driver_lastclose_kms(struct drm_device *dev) { + struct radeon_device *rdev = dev->dev_private; + + radeon_fbdev_restore_mode(rdev); vga_switcheroo_process_delayed_switch(); } diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index aecc3e3dec0c..457b026a0972 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -980,6 +980,7 @@ int radeon_fbdev_init(struct radeon_device *rdev); void radeon_fbdev_fini(struct radeon_device *rdev); void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state); bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); +void radeon_fbdev_restore_mode(struct radeon_device *rdev); void radeon_fb_output_poll_changed(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 05751f3f8444..6a0a176e26ec 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -717,10 +717,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, struct radeon_device *rdev = dev_get_drvdata(dev); umode_t effective_mode = attr->mode; - /* Skip limit attributes if DPM is not enabled */ + /* Skip attributes if DPM is not enabled */ if (rdev->pm.pm_method != PM_METHOD_DPM && (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || - attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) + attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || + attr == 
&sensor_dev_attr_pwm1.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) return 0; /* Skip fan attributes if fan is not present */ @@ -1326,14 +1330,6 @@ static int radeon_pm_init_old(struct radeon_device *rdev) INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); if (rdev->pm.num_power_states > 1) { - /* where's the best place to put these? */ - ret = device_create_file(rdev->dev, &dev_attr_power_profile); - if (ret) - DRM_ERROR("failed to create device file for power profile\n"); - ret = device_create_file(rdev->dev, &dev_attr_power_method); - if (ret) - DRM_ERROR("failed to create device file for power method\n"); - if (radeon_debugfs_pm_init(rdev)) { DRM_ERROR("Failed to register debugfs file for PM!\n"); } @@ -1391,20 +1387,6 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev) goto dpm_failed; rdev->pm.dpm_enabled = true; - ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); - if (ret) - DRM_ERROR("failed to create device file for dpm state\n"); - ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); - if (ret) - DRM_ERROR("failed to create device file for dpm state\n"); - /* XXX: these are noops for dpm but are here for backwards compat */ - ret = device_create_file(rdev->dev, &dev_attr_power_profile); - if (ret) - DRM_ERROR("failed to create device file for power profile\n"); - ret = device_create_file(rdev->dev, &dev_attr_power_method); - if (ret) - DRM_ERROR("failed to create device file for power method\n"); - if (radeon_debugfs_pm_init(rdev)) { DRM_ERROR("Failed to register debugfs file for dpm!\n"); } @@ -1545,9 +1527,44 @@ int radeon_pm_late_init(struct radeon_device *rdev) int ret = 0; if (rdev->pm.pm_method == PM_METHOD_DPM) { - mutex_lock(&rdev->pm.mutex); - ret = radeon_dpm_late_enable(rdev); - mutex_unlock(&rdev->pm.mutex); + if (rdev->pm.dpm_enabled) { + ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); + if (ret) + DRM_ERROR("failed to create device file for dpm state\n"); + ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); + if (ret) + DRM_ERROR("failed to create device file for dpm state\n"); + /* XXX: these are noops for dpm but are here for backwards compat */ + ret = device_create_file(rdev->dev, &dev_attr_power_profile); + if (ret) + DRM_ERROR("failed to create device file for power profile\n"); + ret = device_create_file(rdev->dev, &dev_attr_power_method); + if (ret) + DRM_ERROR("failed to create device file for power method\n"); + + mutex_lock(&rdev->pm.mutex); + ret = radeon_dpm_late_enable(rdev); + mutex_unlock(&rdev->pm.mutex); + if (ret) { + rdev->pm.dpm_enabled = false; + DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); + } else { + /* set the dpm state for PX since there won't be + * a modeset to call this. + */ + radeon_pm_compute_clocks(rdev); + } + } + } else { + if (rdev->pm.num_power_states > 1) { + /* where's the best place to put these? 
*/ + ret = device_create_file(rdev->dev, &dev_attr_power_profile); + if (ret) + DRM_ERROR("failed to create device file for power profile\n"); + ret = device_create_file(rdev->dev, &dev_attr_power_method); + if (ret) + DRM_ERROR("failed to create device file for power method\n"); + } } return ret; } diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 787cd8fd897f..e72bf46042e0 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2927,6 +2927,8 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = { { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, + { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 }, + { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, { 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 8d9b7de25613..745e996d2dbc 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -882,6 +882,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (ret) return ret; man = &bdev->man[mem_type]; + if (!man->has_type || !man->use_type) + continue; type_ok = ttm_bo_mt_compatible(man, mem_type, place, &cur_flags); @@ -889,6 +891,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (!type_ok) continue; + type_found = true; cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags); /* @@ -901,12 +904,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (mem_type == TTM_PL_SYSTEM) break; - if (man->has_type && man->use_type) { - type_found = true; - ret = (*man->func->get_node)(man, bo, place, mem); - if (unlikely(ret)) - return ret; - } + ret = (*man->func->get_node)(man, bo, place, mem); + if (unlikely(ret)) + return ret; + if (mem->mm_node) break; } @@ -917,9 +918,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, return 0; } - if (!type_found) - return -EINVAL; - for (i = 0; i < placement->num_busy_placement; ++i) { const struct ttm_place *place = &placement->busy_placement[i]; @@ -927,11 +925,12 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (ret) return ret; man = &bdev->man[mem_type]; - if (!man->has_type) + if (!man->has_type || !man->use_type) continue; if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags)) continue; + type_found = true; cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags); /* @@ -957,8 +956,13 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (ret == -ERESTARTSYS) has_erestartsys = true; } - ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM; - return ret; + + if (!type_found) { + printk(KERN_ERR TTM_PFX "No compatible memory type found.\n"); + return -EINVAL; + } + + return (has_erestartsys) ? 
-ERESTARTSYS : -ENOMEM; } EXPORT_SYMBOL(ttm_bo_mem_space); diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c index db8b49101a8b..512263919282 100644 --- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c +++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c @@ -34,8 +34,8 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct virtio_gpu_device *vgdev = node->minor->dev->dev_private; - seq_printf(m, "fence %ld %lld\n", - atomic64_read(&vgdev->fence_drv.last_seq), + seq_printf(m, "fence %llu %lld\n", + (u64)atomic64_read(&vgdev->fence_drv.last_seq), vgdev->fence_drv.sync_seq); return 0; } diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c index 1da632631dac..67097c9ce9c1 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fence.c +++ b/drivers/gpu/drm/virtio/virtgpu_fence.c @@ -61,7 +61,7 @@ static void virtio_timeline_value_str(struct fence *f, char *str, int size) { struct virtio_gpu_fence *fence = to_virtio_fence(f); - snprintf(str, size, "%lu", atomic64_read(&fence->drv->last_seq)); + snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq)); } static const struct fence_ops virtio_fence_ops = { diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig index 67720f70fe29..b49445df8a7e 100644 --- a/drivers/gpu/drm/vmwgfx/Kconfig +++ b/drivers/gpu/drm/vmwgfx/Kconfig @@ -1,6 +1,6 @@ config DRM_VMWGFX tristate "DRM driver for VMware Virtual GPU" - depends on DRM && PCI + depends on DRM && PCI && X86 select FB_DEFERRED_IO select FB_CFB_FILLRECT select FB_CFB_COPYAREA diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 5ae8f921da2a..8a76821177a6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c @@ -681,6 +681,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man, 0, 0, DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT); + if (ret) { + (void) vmw_cmdbuf_man_process(man); + ret = drm_mm_insert_node_generic(&man->mm, info->node, + info->page_size, 0, 0, + DRM_MM_SEARCH_DEFAULT, + DRM_MM_CREATE_DEFAULT); + } + spin_unlock_bh(&man->lock); info->done = !ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index ce659a125f2b..092ea81eeff7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -311,7 +311,6 @@ static int vmw_cotable_unbind(struct vmw_resource *res, struct vmw_private *dev_priv = res->dev_priv; struct ttm_buffer_object *bo = val_buf->bo; struct vmw_fence_obj *fence; - int ret; if (list_empty(&res->mob_head)) return 0; @@ -328,7 +327,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res, if (likely(fence != NULL)) vmw_fence_obj_unreference(&fence); - return ret; + return 0; } /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index e13b20bd9908..2c7a25c71af2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -752,12 +752,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); dev_priv->active_master = &dev_priv->fbdev_master; - - dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, - dev_priv->mmio_size); - - dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start, - dev_priv->mmio_size); + dev_priv->mmio_virt = 
ioremap_cache(dev_priv->mmio_start, + dev_priv->mmio_size); if (unlikely(dev_priv->mmio_virt == NULL)) { ret = -ENOMEM; @@ -913,7 +909,6 @@ out_no_device: out_err4: iounmap(dev_priv->mmio_virt); out_err3: - arch_phys_wc_del(dev_priv->mmio_mtrr); vmw_ttm_global_release(dev_priv); out_err0: for (i = vmw_res_context; i < vmw_res_max; ++i) @@ -964,7 +959,6 @@ static int vmw_driver_unload(struct drm_device *dev) ttm_object_device_release(&dev_priv->tdev); iounmap(dev_priv->mmio_virt); - arch_phys_wc_del(dev_priv->mmio_mtrr); if (dev_priv->ctx.staged_bindings) vmw_binding_state_free(dev_priv->ctx.staged_bindings); vmw_ttm_global_release(dev_priv); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 6d02de6dc36c..f19fd39b43e1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -376,7 +376,6 @@ struct vmw_private { uint32_t initial_width; uint32_t initial_height; u32 __iomem *mmio_virt; - int mmio_mtrr; uint32_t capabilities; uint32_t max_gmr_ids; uint32_t max_gmr_pages; @@ -631,7 +630,8 @@ extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, uint32_t size, bool shareable, uint32_t *handle, - struct vmw_dma_buffer **p_dma_buf); + struct vmw_dma_buffer **p_dma_buf, + struct ttm_base_object **p_base); extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, struct vmw_dma_buffer *dma_buf, uint32_t *handle); @@ -645,7 +645,8 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, uint32_t cur_validate_node); extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, - uint32_t id, struct vmw_dma_buffer **out); + uint32_t id, struct vmw_dma_buffer **out, + struct ttm_base_object **base); extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index b56565457c96..5da5de0cb522 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1236,7 +1236,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, struct vmw_relocation *reloc; int ret; - ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); + ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo, + NULL); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use MOB buffer.\n"); ret = -EINVAL; @@ -1296,7 +1297,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, struct vmw_relocation *reloc; int ret; - ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); + ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo, + NULL); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use GMR region.\n"); ret = -EINVAL; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 61fb7f3de311..15a6c01cd016 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1685,7 +1685,6 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv, struct drm_crtc *crtc; u32 num_units = 0; u32 i, k; - int ret; dirty->dev_priv = dev_priv; @@ -1711,7 +1710,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv, if (!dirty->cmd) { DRM_ERROR("Couldn't reserve fifo space " "for dirty blits.\n"); - return ret; + return -ENOMEM; } memset(dirty->cmd, 0, dirty->fifo_reserve_size); 
} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index 76069f093ccf..222c9c2123a1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c @@ -484,7 +484,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data, goto out_unlock; } - ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf); + ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL); if (ret) goto out_unlock; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index c1912f852b42..e57667ca7557 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -354,7 +354,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv, } *out_surf = NULL; - ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf); + ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL); return ret; } @@ -481,7 +481,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, uint32_t size, bool shareable, uint32_t *handle, - struct vmw_dma_buffer **p_dma_buf) + struct vmw_dma_buffer **p_dma_buf, + struct ttm_base_object **p_base) { struct vmw_user_dma_buffer *user_bo; struct ttm_buffer_object *tmp; @@ -515,6 +516,10 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, } *p_dma_buf = &user_bo->dma; + if (p_base) { + *p_base = &user_bo->prime.base; + kref_get(&(*p_base)->refcount); + } *handle = user_bo->prime.base.hash.key; out_no_base_object: @@ -631,6 +636,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, struct vmw_dma_buffer *dma_buf; struct vmw_user_dma_buffer *user_bo; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + struct ttm_base_object *buffer_base; int ret; if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 @@ -643,7 +649,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, switch (arg->op) { case drm_vmw_synccpu_grab: - ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf); + ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf, + &buffer_base); if (unlikely(ret != 0)) return ret; @@ -651,6 +658,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, dma); ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags); vmw_dmabuf_unreference(&dma_buf); + ttm_base_object_unref(&buffer_base); if (unlikely(ret != 0 && ret != -ERESTARTSYS && ret != -EBUSY)) { DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", @@ -692,7 +700,8 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, return ret; ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, - req->size, false, &handle, &dma_buf); + req->size, false, &handle, &dma_buf, + NULL); if (unlikely(ret != 0)) goto out_no_dmabuf; @@ -721,7 +730,8 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, } int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, - uint32_t handle, struct vmw_dma_buffer **out) + uint32_t handle, struct vmw_dma_buffer **out, + struct ttm_base_object **p_base) { struct vmw_user_dma_buffer *vmw_user_bo; struct ttm_base_object *base; @@ -743,7 +753,10 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base); (void)ttm_bo_reference(&vmw_user_bo->dma.base); - ttm_base_object_unref(&base); + if (p_base) + *p_base = base; + else + ttm_base_object_unref(&base); *out = &vmw_user_bo->dma; return 0; @@ -1004,7 +1017,7 @@ int vmw_dumb_create(struct drm_file *file_priv, ret = 
vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, args->size, false, &args->handle, - &dma_buf); + &dma_buf, NULL); if (unlikely(ret != 0)) goto out_no_dmabuf; @@ -1032,7 +1045,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv, struct vmw_dma_buffer *out_buf; int ret; - ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf); + ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL); if (ret != 0) return -EINVAL; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index bba1ee395478..fd47547b0234 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -855,7 +855,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, if (buffer_handle != SVGA3D_INVALID_ID) { ret = vmw_user_dmabuf_lookup(tfile, buffer_handle, - &buffer); + &buffer, NULL); if (unlikely(ret != 0)) { DRM_ERROR("Could not find buffer for shader " "creation.\n"); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 3361769842f4..03f63c749c02 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -46,6 +46,7 @@ struct vmw_user_surface { struct vmw_surface srf; uint32_t size; struct drm_master *master; + struct ttm_base_object *backup_base; }; /** @@ -656,6 +657,8 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) struct vmw_resource *res = &user_srf->srf.res; *p_base = NULL; + if (user_srf->backup_base) + ttm_base_object_unref(&user_srf->backup_base); vmw_resource_unreference(&res); } @@ -851,7 +854,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, res->backup_size, true, &backup_handle, - &res->backup); + &res->backup, + &user_srf->backup_base); if (unlikely(ret != 0)) { vmw_resource_unreference(&res); goto out_unlock; @@ -1321,7 +1325,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, if (req->buffer_handle != SVGA3D_INVALID_ID) { ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, - &res->backup); + &res->backup, + &user_srf->backup_base); if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < res->backup_size) { DRM_ERROR("Surface backup buffer is too small.\n"); @@ -1335,7 +1340,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, req->drm_surface_flags & drm_vmw_surface_flag_shareable, &backup_handle, - &res->backup); + &res->backup, + &user_srf->backup_base); if (unlikely(ret != 0)) { vmw_resource_unreference(&res); diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 243f99a80253..e5a38d202a21 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -912,7 +912,7 @@ static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs) } } -static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc) +static void ipu_irq_handler(struct irq_desc *desc) { struct ipu_soc *ipu = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); @@ -925,7 +925,7 @@ static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc) chained_irq_exit(chip, desc); } -static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc) +static void ipu_err_irq_handler(struct irq_desc *desc) { struct ipu_soc *ipu = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); @@ -1099,8 +1099,7 @@ static int ipu_irq_init(struct ipu_soc *ipu) } ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 
1, "IPU", - handle_level_irq, 0, - IRQF_VALID, 0); + handle_level_irq, 0, 0, 0); if (ret < 0) { dev_err(ipu->dev, "failed to alloc generic irq chips\n"); irq_domain_remove(ipu->domain); diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 2f9aead4ecfc..652afd11a9ef 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -204,6 +204,8 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) spin_lock_irqsave(&vmbus_connection.channel_lock, flags); list_del(&channel->listentry); spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); + + primary_channel = channel; } else { primary_channel = channel->primary_channel; spin_lock_irqsave(&primary_channel->lock, flags); @@ -211,6 +213,14 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) primary_channel->num_sc--; spin_unlock_irqrestore(&primary_channel->lock, flags); } + + /* + * We need to free the bit for init_vp_index() to work in the case + * of sub-channel, when we reload drivers like hv_netvsc. + */ + cpumask_clear_cpu(channel->target_cpu, + &primary_channel->alloced_cpus_in_node); + free_channel(channel); } @@ -458,6 +468,13 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui continue; } + /* + * NOTE: in the case of sub-channel, we clear the sub-channel + * related bit(s) in primary->alloced_cpus_in_node in + * hv_process_channel_removal(), so when we reload drivers + * like hv_netvsc in SMP guest, here we're able to re-allocate + * bit from primary->alloced_cpus_in_node. + */ if (!cpumask_test_cpu(cur_cpu, &primary->alloced_cpus_in_node)) { cpumask_set_cpu(cur_cpu, diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 500b262b89bb..e13c902e8966 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -1140,8 +1140,8 @@ config SENSORS_NCT6775 help If you say yes here you get support for the hardware monitoring functionality of the Nuvoton NCT6106D, NCT6775F, NCT6776F, NCT6779D, - NCT6791D, NCT6792D and compatible Super-I/O chips. This driver - replaces the w83627ehf driver for NCT6775F and NCT6776F. + NCT6791D, NCT6792D, NCT6793D, and compatible Super-I/O chips. This + driver replaces the w83627ehf driver for NCT6775F and NCT6776F. This driver can also be built as a module. If so, the module will be called nct6775. 
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c index 6cb89c0ebab6..1fd46859ed29 100644 --- a/drivers/hwmon/abx500.c +++ b/drivers/hwmon/abx500.c @@ -470,6 +470,7 @@ static const struct of_device_id abx500_temp_match[] = { { .compatible = "stericsson,abx500-temp" }, {}, }; +MODULE_DEVICE_TABLE(of, abx500_temp_match); #endif static struct platform_driver abx500_temp_driver = { diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c index a3dae6d0082a..82de3deeb18a 100644 --- a/drivers/hwmon/gpio-fan.c +++ b/drivers/hwmon/gpio-fan.c @@ -539,6 +539,7 @@ static const struct of_device_id of_gpio_fan_match[] = { { .compatible = "gpio-fan", }, {}, }; +MODULE_DEVICE_TABLE(of, of_gpio_fan_match); #endif /* CONFIG_OF_GPIO */ static int gpio_fan_probe(struct platform_device *pdev) diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index bd1c99deac71..8b4fa55e46c6 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -39,6 +39,7 @@ * nct6779d 15 5 5 2+6 0xc560 0xc1 0x5ca3 * nct6791d 15 6 6 2+6 0xc800 0xc1 0x5ca3 * nct6792d 15 6 6 2+6 0xc910 0xc1 0x5ca3 + * nct6793d 15 6 6 2+6 0xd120 0xc1 0x5ca3 * * #temp lists the number of monitored temperature sources (first value) plus * the number of directly connectable temperature sensors (second value). @@ -63,7 +64,7 @@ #define USE_ALTERNATE -enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791, nct6792 }; +enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791, nct6792, nct6793 }; /* used to set data->name = nct6775_device_names[data->sio_kind] */ static const char * const nct6775_device_names[] = { @@ -73,6 +74,17 @@ static const char * const nct6775_device_names[] = { "nct6779", "nct6791", "nct6792", + "nct6793", +}; + +static const char * const nct6775_sio_names[] __initconst = { + "NCT6106D", + "NCT6775F", + "NCT6776D/F", + "NCT6779D", + "NCT6791D", + "NCT6792D", + "NCT6793D", }; static unsigned short force_id; @@ -104,6 +116,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal"); #define SIO_NCT6779_ID 0xc560 #define SIO_NCT6791_ID 0xc800 #define SIO_NCT6792_ID 0xc910 +#define SIO_NCT6793_ID 0xd120 #define SIO_ID_MASK 0xFFF0 enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 }; @@ -354,6 +367,10 @@ static const u16 NCT6775_REG_TEMP_CRIT[ARRAY_SIZE(nct6775_temp_label) - 1] /* NCT6776 specific data */ +/* STEP_UP_TIME and STEP_DOWN_TIME regs are swapped for all chips but NCT6775 */ +#define NCT6776_REG_FAN_STEP_UP_TIME NCT6775_REG_FAN_STEP_DOWN_TIME +#define NCT6776_REG_FAN_STEP_DOWN_TIME NCT6775_REG_FAN_STEP_UP_TIME + static const s8 NCT6776_ALARM_BITS[] = { 0, 1, 2, 3, 8, 21, 20, 16, /* in0.. 
in7 */ 17, -1, -1, -1, -1, -1, -1, /* in8..in14 */ @@ -533,7 +550,7 @@ static const s8 NCT6791_ALARM_BITS[] = { 4, 5, 13, -1, -1, -1, /* temp1..temp6 */ 12, 9 }; /* intrusion0, intrusion1 */ -/* NCT6792 specific data */ +/* NCT6792/NCT6793 specific data */ static const u16 NCT6792_REG_TEMP_MON[] = { 0x73, 0x75, 0x77, 0x79, 0x7b, 0x7d }; @@ -1056,6 +1073,7 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg) case nct6779: case nct6791: case nct6792: + case nct6793: return reg == 0x150 || reg == 0x153 || reg == 0x155 || ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) || reg == 0x402 || @@ -1407,6 +1425,7 @@ static void nct6775_update_pwm_limits(struct device *dev) case nct6779: case nct6791: case nct6792: + case nct6793: reg = nct6775_read_value(data, data->REG_CRITICAL_PWM_ENABLE[i]); if (reg & data->CRITICAL_PWM_ENABLE_MASK) @@ -2822,6 +2841,7 @@ store_auto_pwm(struct device *dev, struct device_attribute *attr, case nct6779: case nct6791: case nct6792: + case nct6793: nct6775_write_value(data, data->REG_CRITICAL_PWM[nr], val); reg = nct6775_read_value(data, @@ -3256,7 +3276,7 @@ nct6775_check_fan_inputs(struct nct6775_data *data) pwm4pin = false; pwm5pin = false; pwm6pin = false; - } else { /* NCT6779D, NCT6791D, or NCT6792D */ + } else { /* NCT6779D, NCT6791D, NCT6792D, or NCT6793D */ regval = superio_inb(sioreg, 0x1c); fan3pin = !(regval & (1 << 5)); @@ -3269,7 +3289,8 @@ nct6775_check_fan_inputs(struct nct6775_data *data) fan4min = fan4pin; - if (data->kind == nct6791 || data->kind == nct6792) { + if (data->kind == nct6791 || data->kind == nct6792 || + data->kind == nct6793) { regval = superio_inb(sioreg, 0x2d); fan6pin = (regval & (1 << 1)); pwm6pin = (regval & (1 << 0)); @@ -3528,8 +3549,8 @@ static int nct6775_probe(struct platform_device *pdev) data->REG_FAN_PULSES = NCT6776_REG_FAN_PULSES; data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT; data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME; - data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME; - data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME; + data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME; + data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME; data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H; data->REG_PWM[0] = NCT6775_REG_PWM; data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT; @@ -3600,8 +3621,8 @@ static int nct6775_probe(struct platform_device *pdev) data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES; data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT; data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME; - data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME; - data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME; + data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME; + data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME; data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H; data->REG_PWM[0] = NCT6775_REG_PWM; data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT; @@ -3643,6 +3664,7 @@ static int nct6775_probe(struct platform_device *pdev) break; case nct6791: case nct6792: + case nct6793: data->in_num = 15; data->pwm_num = 6; data->auto_pwm_num = 4; @@ -3677,8 +3699,8 @@ static int nct6775_probe(struct platform_device *pdev) data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES; data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT; data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME; - data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME; - data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME; + data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME; + data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME; 
data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H; data->REG_PWM[0] = NCT6775_REG_PWM; data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT; @@ -3918,6 +3940,7 @@ static int nct6775_probe(struct platform_device *pdev) case nct6779: case nct6791: case nct6792: + case nct6793: break; } @@ -3950,6 +3973,7 @@ static int nct6775_probe(struct platform_device *pdev) break; case nct6791: case nct6792: + case nct6793: tmp |= 0x7e; break; } @@ -4047,7 +4071,8 @@ static int __maybe_unused nct6775_resume(struct device *dev) if (reg != data->sio_reg_enable) superio_outb(sioreg, SIO_REG_ENABLE, data->sio_reg_enable); - if (data->kind == nct6791 || data->kind == nct6792) + if (data->kind == nct6791 || data->kind == nct6792 || + data->kind == nct6793) nct6791_enable_io_mapping(sioreg); superio_exit(sioreg); @@ -4106,15 +4131,6 @@ static struct platform_driver nct6775_driver = { .probe = nct6775_probe, }; -static const char * const nct6775_sio_names[] __initconst = { - "NCT6106D", - "NCT6775F", - "NCT6776D/F", - "NCT6779D", - "NCT6791D", - "NCT6792D", -}; - /* nct6775_find() looks for a '627 in the Super-I/O config space */ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data) { @@ -4150,6 +4166,9 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data) case SIO_NCT6792_ID: sio_data->kind = nct6792; break; + case SIO_NCT6793_ID: + sio_data->kind = nct6793; + break; default: if (val != 0xffff) pr_debug("unsupported chip ID: 0x%04x\n", val); @@ -4175,7 +4194,8 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data) superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01); } - if (sio_data->kind == nct6791 || sio_data->kind == nct6792) + if (sio_data->kind == nct6791 || sio_data->kind == nct6792 || + sio_data->kind == nct6793) nct6791_enable_io_mapping(sioaddr); superio_exit(sioaddr); @@ -4285,7 +4305,7 @@ static void __exit sensors_nct6775_exit(void) } MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>"); -MODULE_DESCRIPTION("NCT6106D/NCT6775F/NCT6776F/NCT6779D/NCT6791D/NCT6792D driver"); +MODULE_DESCRIPTION("Driver for NCT6775F and compatible chips"); MODULE_LICENSE("GPL"); module_init(sensors_nct6775_init); diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index 2d9a712699ff..3e23003f78b0 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c @@ -323,6 +323,7 @@ static const struct of_device_id of_pwm_fan_match[] = { { .compatible = "pwm-fan", }, {}, }; +MODULE_DEVICE_TABLE(of, of_pwm_fan_match); static struct platform_driver pwm_fan_driver = { .probe = pwm_fan_probe, diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 3dd2de31a2f8..472b88285c75 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -24,6 +24,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> +#include <linux/dmi.h> #include <linux/i2c.h> #include <linux/clk.h> #include <linux/clk-provider.h> @@ -51,6 +52,22 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev) } #ifdef CONFIG_ACPI +/* + * The HCNT/LCNT information coming from ACPI should be the most accurate + * for given platform. However, some systems get it wrong. On such systems + * we get better results by calculating those based on the input clock. 
+ */ +static const struct dmi_system_id dw_i2c_no_acpi_params[] = { + { + .ident = "Dell Inspiron 7348", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7348"), + }, + }, + { } +}; + static void dw_i2c_acpi_params(struct platform_device *pdev, char method[], u16 *hcnt, u16 *lcnt, u32 *sda_hold) { @@ -58,6 +75,9 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[], acpi_handle handle = ACPI_HANDLE(&pdev->dev); union acpi_object *obj; + if (dmi_check_system(dw_i2c_no_acpi_params)) + return; + if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf))) return; @@ -253,12 +273,6 @@ static int dw_i2c_probe(struct platform_device *pdev) adap->dev.parent = &pdev->dev; adap->dev.of_node = pdev->dev.of_node; - r = i2c_add_numbered_adapter(adap); - if (r) { - dev_err(&pdev->dev, "failure adding adapter\n"); - return r; - } - if (dev->pm_runtime_disabled) { pm_runtime_forbid(&pdev->dev); } else { @@ -268,6 +282,13 @@ static int dw_i2c_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); } + r = i2c_add_numbered_adapter(adap); + if (r) { + dev_err(&pdev->dev, "failure adding adapter\n"); + pm_runtime_disable(&pdev->dev); + return r; + } + return 0; } diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index d8361dada584..d8b5a8fee1e6 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -690,15 +690,16 @@ static int rcar_i2c_probe(struct platform_device *pdev) return ret; } + pm_runtime_enable(dev); + platform_set_drvdata(pdev, priv); + ret = i2c_add_numbered_adapter(adap); if (ret < 0) { dev_err(dev, "reg adap failed: %d\n", ret); + pm_runtime_disable(dev); return ret; } - pm_runtime_enable(dev); - platform_set_drvdata(pdev, priv); - dev_info(dev, "probed\n"); return 0; diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 50bfd8cef5f2..5df819610d52 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -1243,17 +1243,19 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) i2c->adap.nr = i2c->pdata->bus_num; i2c->adap.dev.of_node = pdev->dev.of_node; + platform_set_drvdata(pdev, i2c); + + pm_runtime_enable(&pdev->dev); + ret = i2c_add_numbered_adapter(&i2c->adap); if (ret < 0) { dev_err(&pdev->dev, "failed to add bus to i2c core\n"); + pm_runtime_disable(&pdev->dev); s3c24xx_i2c_deregister_cpufreq(i2c); clk_unprepare(i2c->clk); return ret; } - platform_set_drvdata(pdev, i2c); - - pm_runtime_enable(&pdev->dev); pm_runtime_enable(&i2c->adap.dev); dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev)); diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 5f89f1e3c2f2..a59c3111f7fb 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -694,12 +694,12 @@ static int i2c_device_probe(struct device *dev) goto err_clear_wakeup_irq; status = dev_pm_domain_attach(&client->dev, true); - if (status != -EPROBE_DEFER) { - status = driver->probe(client, i2c_match_id(driver->id_table, - client)); - if (status) - goto err_detach_pm_domain; - } + if (status == -EPROBE_DEFER) + goto err_clear_wakeup_irq; + + status = driver->probe(client, i2c_match_id(driver->id_table, client)); + if (status) + goto err_detach_pm_domain; return 0; diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 3a3738fe016b..cd4510a63375 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -620,7 +620,7 @@ static struct cpuidle_state 
skl_cstates[] = { .name = "C6-SKL", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 75, + .exit_latency = 85, .target_residency = 200, .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, @@ -636,11 +636,19 @@ static struct cpuidle_state skl_cstates[] = { .name = "C8-SKL", .desc = "MWAIT 0x40", .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 174, + .exit_latency = 200, .target_residency = 800, .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { + .name = "C9-SKL", + .desc = "MWAIT 0x50", + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 480, + .target_residency = 5000, + .enter = &intel_idle, + .enter_freeze = intel_idle_freeze, }, + { .name = "C10-SKL", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index ff30f8806880..fb9311110424 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -149,8 +149,6 @@ #define ST_ACCEL_4_BDU_MASK 0x40 #define ST_ACCEL_4_DRDY_IRQ_ADDR 0x21 #define ST_ACCEL_4_DRDY_IRQ_INT1_MASK 0x04 -#define ST_ACCEL_4_IG1_EN_ADDR 0x21 -#define ST_ACCEL_4_IG1_EN_MASK 0x08 #define ST_ACCEL_4_MULTIREAD_BIT true /* CUSTOM VALUES FOR SENSOR 5 */ @@ -489,10 +487,6 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .drdy_irq = { .addr = ST_ACCEL_4_DRDY_IRQ_ADDR, .mask_int1 = ST_ACCEL_4_DRDY_IRQ_INT1_MASK, - .ig1 = { - .en_addr = ST_ACCEL_4_IG1_EN_ADDR, - .en_mask = ST_ACCEL_4_IG1_EN_MASK, - }, }, .multi_read_bit = ST_ACCEL_4_MULTIREAD_BIT, .bootime = 2, /* guess */ diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c index ebe415f10640..0c74869a540a 100644 --- a/drivers/iio/adc/twl4030-madc.c +++ b/drivers/iio/adc/twl4030-madc.c @@ -45,13 +45,18 @@ #include <linux/types.h> #include <linux/gfp.h> #include <linux/err.h> +#include <linux/regulator/consumer.h> #include <linux/iio/iio.h> +#define TWL4030_USB_SEL_MADC_MCPC (1<<3) +#define TWL4030_USB_CARKIT_ANA_CTRL 0xBB + /** * struct twl4030_madc_data - a container for madc info * @dev: Pointer to device structure for madc * @lock: Mutex protecting this data structure + * @regulator: Pointer to bias regulator for madc * @requests: Array of request struct corresponding to SW1, SW2 and RT * @use_second_irq: IRQ selection (main or co-processor) * @imr: Interrupt mask register of MADC @@ -60,6 +65,7 @@ struct twl4030_madc_data { struct device *dev; struct mutex lock; /* mutex protecting this data structure */ + struct regulator *usb3v1; struct twl4030_madc_request requests[TWL4030_MADC_NUM_METHODS]; bool use_second_irq; u8 imr; @@ -841,6 +847,32 @@ static int twl4030_madc_probe(struct platform_device *pdev) } twl4030_madc = madc; + /* Configure MADC[3:6] */ + ret = twl_i2c_read_u8(TWL_MODULE_USB, ®val, + TWL4030_USB_CARKIT_ANA_CTRL); + if (ret) { + dev_err(&pdev->dev, "unable to read reg CARKIT_ANA_CTRL 0x%X\n", + TWL4030_USB_CARKIT_ANA_CTRL); + goto err_i2c; + } + regval |= TWL4030_USB_SEL_MADC_MCPC; + ret = twl_i2c_write_u8(TWL_MODULE_USB, regval, + TWL4030_USB_CARKIT_ANA_CTRL); + if (ret) { + dev_err(&pdev->dev, "unable to write reg CARKIT_ANA_CTRL 0x%X\n", + TWL4030_USB_CARKIT_ANA_CTRL); + goto err_i2c; + } + + /* Enable 3v1 bias regulator for MADC[3:6] */ + madc->usb3v1 = devm_regulator_get(madc->dev, "vusb3v1"); + if (IS_ERR(madc->usb3v1)) + return -ENODEV; + + ret = regulator_enable(madc->usb3v1); + if (ret) + dev_err(madc->dev, "could 
not enable 3v1 bias regulator\n"); + ret = iio_device_register(iio_dev); if (ret) { dev_err(&pdev->dev, "could not register iio device\n"); @@ -866,6 +898,8 @@ static int twl4030_madc_remove(struct platform_device *pdev) twl4030_madc_set_current_generator(madc, 0, 0); twl4030_madc_set_power(madc, 0); + regulator_disable(madc->usb3v1); + return 0; } diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index da4c6979fbb8..aa26f3c3416b 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -56,7 +56,6 @@ config INFINIBAND_ADDR_TRANS source "drivers/infiniband/hw/mthca/Kconfig" source "drivers/infiniband/hw/qib/Kconfig" -source "drivers/infiniband/hw/ehca/Kconfig" source "drivers/infiniband/hw/cxgb3/Kconfig" source "drivers/infiniband/hw/cxgb4/Kconfig" source "drivers/infiniband/hw/mlx4/Kconfig" diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 8f66c67ff0df..87471ef37198 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -508,12 +508,12 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, memset(&gid_attr, 0, sizeof(gid_attr)); gid_attr.ndev = ndev; + mutex_lock(&table->lock); ix = find_gid(table, NULL, NULL, true, GID_ATTR_FIND_MASK_DEFAULT); /* Coudn't find default GID location */ WARN_ON(ix < 0); - mutex_lock(&table->lock); if (!__ib_cache_gid_get(ib_dev, port, ix, ¤t_gid, ¤t_gid_attr) && mode == IB_CACHE_GID_DEFAULT_MODE_SET && diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index ea4db9c1d44f..4f918b929eca 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -835,6 +835,11 @@ retest: case IB_CM_SIDR_REQ_RCVD: spin_unlock_irq(&cm_id_priv->lock); cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT); + spin_lock_irq(&cm.lock); + if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) + rb_erase(&cm_id_priv->sidr_id_node, + &cm.remote_sidr_table); + spin_unlock_irq(&cm.lock); break; case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: @@ -3172,7 +3177,10 @@ int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, spin_unlock_irqrestore(&cm_id_priv->lock, flags); spin_lock_irqsave(&cm.lock, flags); - rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); + if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) { + rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); + RB_CLEAR_NODE(&cm_id_priv->sidr_id_node); + } spin_unlock_irqrestore(&cm.lock, flags); return 0; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index b1ab13f3e182..36b12d560e17 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1067,14 +1067,14 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event, sizeof(req->local_gid)); req->has_gid = true; req->service_id = req_param->primary_path->service_id; - req->pkey = req_param->bth_pkey; + req->pkey = be16_to_cpu(req_param->primary_path->pkey); break; case IB_CM_SIDR_REQ_RECEIVED: req->device = sidr_param->listen_id->device; req->port = sidr_param->port; req->has_gid = false; req->service_id = sidr_param->service_id; - req->pkey = sidr_param->bth_pkey; + req->pkey = sidr_param->pkey; break; default: return -EINVAL; @@ -1232,14 +1232,32 @@ static bool cma_match_private_data(struct rdma_id_private *id_priv, return true; } +static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num) +{ + enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num); + enum rdma_transport_type transport = + rdma_node_get_transport(device->node_type); + + return 
ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB; +} + +static bool cma_protocol_roce(const struct rdma_cm_id *id) +{ + struct ib_device *device = id->device; + const int port_num = id->port_num ?: rdma_start_port(device); + + return cma_protocol_roce_dev_port(device, port_num); +} + static bool cma_match_net_dev(const struct rdma_id_private *id_priv, const struct net_device *net_dev) { const struct rdma_addr *addr = &id_priv->id.route.addr; if (!net_dev) - /* This request is an AF_IB request */ - return addr->src_addr.ss_family == AF_IB; + /* This request is an AF_IB request or a RoCE request */ + return addr->src_addr.ss_family == AF_IB || + cma_protocol_roce(&id_priv->id); return !addr->dev_addr.bound_dev_if || (net_eq(dev_net(net_dev), &init_net) && @@ -1294,6 +1312,10 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) { /* Assuming the protocol is AF_IB */ *net_dev = NULL; + } else if (cma_protocol_roce_dev_port(req.device, req.port)) { + /* TODO find the net dev matching the request parameters + * through the RoCE GID table */ + *net_dev = NULL; } else { return ERR_CAST(*net_dev); } @@ -1302,7 +1324,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, bind_list = cma_ps_find(rdma_ps_from_service_id(req.service_id), cma_port_from_service_id(req.service_id)); id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev); - if (IS_ERR(id_priv)) { + if (IS_ERR(id_priv) && *net_dev) { dev_put(*net_dev); *net_dev = NULL; } @@ -1593,11 +1615,16 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id, if (ret) goto err; } else { - /* An AF_IB connection */ - WARN_ON_ONCE(ss_family != AF_IB); - - cma_translate_ib((struct sockaddr_ib *)cma_src_addr(id_priv), - &rt->addr.dev_addr); + if (!cma_protocol_roce(listen_id) && + cma_any_addr(cma_src_addr(id_priv))) { + rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND; + rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); + ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); + } else if (!cma_any_addr(cma_src_addr(id_priv))) { + ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); + if (ret) + goto err; + } } rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); @@ -1635,13 +1662,12 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id, if (ret) goto err; } else { - /* An AF_IB connection */ - WARN_ON_ONCE(ss_family != AF_IB); - - if (!cma_any_addr(cma_src_addr(id_priv))) - cma_translate_ib((struct sockaddr_ib *) - cma_src_addr(id_priv), - &id->route.addr.dev_addr); + if (!cma_any_addr(cma_src_addr(id_priv))) { + ret = cma_translate_addr(cma_src_addr(id_priv), + &id->route.addr.dev_addr); + if (ret) + goto err; + } } id_priv->state = RDMA_CM_CONNECT; diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c index 6b24cba1e474..178f98482e13 100644 --- a/drivers/infiniband/core/roce_gid_mgmt.c +++ b/drivers/infiniband/core/roce_gid_mgmt.c @@ -250,25 +250,44 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev, u8 port, struct net_device *ndev) { struct in_device *in_dev; + struct sin_list { + struct list_head list; + struct sockaddr_in ip; + }; + struct sin_list *sin_iter; + struct sin_list *sin_temp; + LIST_HEAD(sin_list); if (ndev->reg_state >= NETREG_UNREGISTERING) return; - in_dev = in_dev_get(ndev); - if (!in_dev) + rcu_read_lock(); + in_dev = __in_dev_get_rcu(ndev); + if (!in_dev) { + 
rcu_read_unlock(); return; + } for_ifa(in_dev) { - struct sockaddr_in ip; + struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC); - ip.sin_family = AF_INET; - ip.sin_addr.s_addr = ifa->ifa_address; - update_gid_ip(GID_ADD, ib_dev, port, ndev, - (struct sockaddr *)&ip); + if (!entry) { + pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv4 update\n"); + continue; + } + entry->ip.sin_family = AF_INET; + entry->ip.sin_addr.s_addr = ifa->ifa_address; + list_add_tail(&entry->list, &sin_list); } endfor_ifa(in_dev); + rcu_read_unlock(); - in_dev_put(in_dev); + list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) { + update_gid_ip(GID_ADD, ib_dev, port, ndev, + (struct sockaddr *)&sin_iter->ip); + list_del(&sin_iter->list); + kfree(sin_iter); + } } static void enum_netdev_ipv6_ips(struct ib_device *ib_dev, diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index a53fc9b01c69..30467d10df91 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -1624,11 +1624,16 @@ static int ucma_open(struct inode *inode, struct file *filp) if (!file) return -ENOMEM; + file->close_wq = create_singlethread_workqueue("ucma_close_id"); + if (!file->close_wq) { + kfree(file); + return -ENOMEM; + } + INIT_LIST_HEAD(&file->event_list); INIT_LIST_HEAD(&file->ctx_list); init_waitqueue_head(&file->poll_wait); mutex_init(&file->mut); - file->close_wq = create_singlethread_workqueue("ucma_close_id"); filp->private_data = file; file->filp = filp; diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile index 1bdb9996d371..aded2a5cc2d5 100644 --- a/drivers/infiniband/hw/Makefile +++ b/drivers/infiniband/hw/Makefile @@ -1,6 +1,5 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/ obj-$(CONFIG_INFINIBAND_QIB) += qib/ -obj-$(CONFIG_INFINIBAND_EHCA) += ehca/ obj-$(CONFIG_INFINIBAND_CXGB3) += cxgb3/ obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/ obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/ diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 41d6911e244e..f1ccd40beae9 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -245,7 +245,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; if (MLX5_CAP_GEN(mdev, apm)) props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; - props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; if (MLX5_CAP_GEN(mdev, xrc)) props->device_cap_flags |= IB_DEVICE_XRC; props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; @@ -795,53 +794,6 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm return 0; } -static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn) -{ - struct mlx5_create_mkey_mbox_in *in; - struct mlx5_mkey_seg *seg; - struct mlx5_core_mr mr; - int err; - - in = kzalloc(sizeof(*in), GFP_KERNEL); - if (!in) - return -ENOMEM; - - seg = &in->seg; - seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA; - seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); - seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - seg->start_addr = 0; - - err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in), - NULL, NULL, NULL); - if (err) { - mlx5_ib_warn(dev, "failed to create mkey, %d\n", err); - goto err_in; - } - - kfree(in); - *key = mr.key; - - return 0; - -err_in: - kfree(in); - - return err; -} - -static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key) -{ - struct mlx5_core_mr mr; - int err; - - memset(&mr, 0, sizeof(mr)); - mr.key = key; - err = 
mlx5_core_destroy_mkey(dev->mdev, &mr); - if (err) - mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key); -} - static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata) @@ -867,13 +819,6 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, kfree(pd); return ERR_PTR(-EFAULT); } - } else { - err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn); - if (err) { - mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn); - kfree(pd); - return ERR_PTR(err); - } } return &pd->ibpd; @@ -884,9 +829,6 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd) struct mlx5_ib_dev *mdev = to_mdev(pd->device); struct mlx5_ib_pd *mpd = to_mpd(pd); - if (!pd->uobject) - free_pa_mkey(mdev, mpd->pa_lkey); - mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn); kfree(mpd); @@ -1245,18 +1187,10 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) struct ib_srq_init_attr attr; struct mlx5_ib_dev *dev; struct ib_cq_init_attr cq_attr = {.cqe = 1}; - u32 rsvd_lkey; int ret = 0; dev = container_of(devr, struct mlx5_ib_dev, devr); - ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey); - if (ret) { - pr_err("Failed to query special context %d\n", ret); - return ret; - } - dev->ib_dev.local_dma_lkey = rsvd_lkey; - devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); if (IS_ERR(devr->p0)) { ret = PTR_ERR(devr->p0); @@ -1418,6 +1352,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); dev->ib_dev.owner = THIS_MODULE; dev->ib_dev.node_type = RDMA_NODE_IB_CA; + dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; dev->num_ports = MLX5_CAP_GEN(mdev, num_ports); dev->ib_dev.phys_port_cnt = dev->num_ports; dev->ib_dev.num_comp_vectors = diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index bb8cda79e881..22123b79d550 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -103,7 +103,6 @@ static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibuconte struct mlx5_ib_pd { struct ib_pd ibpd; u32 pdn; - u32 pa_lkey; }; /* Use macros here so that don't have to duplicate @@ -213,7 +212,6 @@ struct mlx5_ib_qp { int uuarn; int create_type; - u32 pa_lkey; /* Store signature errors */ bool signature_en; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index c745c6c5e10d..6f521a3418e8 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -925,8 +925,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); if (err) mlx5_ib_dbg(dev, "err %d\n", err); - else - qp->pa_lkey = to_mpd(pd)->pa_lkey; } if (err) @@ -2045,7 +2043,7 @@ static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg, mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm); dseg->addr = cpu_to_be64(mfrpl->map); dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64)); - dseg->lkey = cpu_to_be32(pd->pa_lkey); + dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey); } static __be32 send_ieth(struct ib_send_wr *wr) diff --git a/drivers/infiniband/hw/usnic/usnic.h b/drivers/infiniband/hw/usnic/usnic.h index 5be13d8991bc..f903502d3883 100644 --- a/drivers/infiniband/hw/usnic/usnic.h +++ b/drivers/infiniband/hw/usnic/usnic.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. 
* - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_abi.h b/drivers/infiniband/hw/usnic/usnic_abi.h index 04a66229584e..7fe9502ce8d3 100644 --- a/drivers/infiniband/hw/usnic/usnic_abi.h +++ b/drivers/infiniband/hw/usnic/usnic_abi.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h b/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h index 393567266142..596e0ed49a8e 100644 --- a/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h +++ b/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_common_util.h b/drivers/infiniband/hw/usnic/usnic_common_util.h index 9d737ed5e55d..b54986de5f0c 100644 --- a/drivers/infiniband/hw/usnic/usnic_common_util.h +++ b/drivers/infiniband/hw/usnic/usnic_common_util.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.c b/drivers/infiniband/hw/usnic/usnic_debugfs.c index 5d13860161a4..5e55b8bc6fe4 100644 --- a/drivers/infiniband/hw/usnic/usnic_debugfs.c +++ b/drivers/infiniband/hw/usnic/usnic_debugfs.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.h b/drivers/infiniband/hw/usnic/usnic_debugfs.h index 4087d24a88f6..98453e91daa6 100644 --- a/drivers/infiniband/hw/usnic/usnic_debugfs.h +++ b/drivers/infiniband/hw/usnic/usnic_debugfs.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.c b/drivers/infiniband/hw/usnic/usnic_fwd.c index e3c9bd9d3ba3..3c37dd59c04e 100644 --- a/drivers/infiniband/hw/usnic/usnic_fwd.c +++ b/drivers/infiniband/hw/usnic/usnic_fwd.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.h b/drivers/infiniband/hw/usnic/usnic_fwd.h index 93713a2230b3..3a8add9ddf46 100644 --- a/drivers/infiniband/hw/usnic/usnic_fwd.h +++ b/drivers/infiniband/hw/usnic/usnic_fwd.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib.h b/drivers/infiniband/hw/usnic/usnic_ib.h index e5a9297dd1bd..525bf272671e 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib.h +++ b/drivers/infiniband/hw/usnic/usnic_ib.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index 34c49b8105fe..0c15bd885035 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. 
* - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c index db3588df3546..85dc3f989ff7 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h index b0aafe8db0c3..b1458be1d402 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c index 27dc67c1689f..3412ea06116e 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h index 0d09b493cd02..3d98e16cfeaf 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 7df43827cb29..f8e3211689a3 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h index 0bd04efa16f3..414eaa566bd9 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_log.h b/drivers/infiniband/hw/usnic/usnic_log.h index 75777a66c684..183fcb6a952f 100644 --- a/drivers/infiniband/hw/usnic/usnic_log.h +++ b/drivers/infiniband/hw/usnic/usnic_log.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_transport.c b/drivers/infiniband/hw/usnic/usnic_transport.c index ddef6f77a78c..de318389a301 100644 --- a/drivers/infiniband/hw/usnic/usnic_transport.c +++ b/drivers/infiniband/hw/usnic/usnic_transport.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_transport.h b/drivers/infiniband/hw/usnic/usnic_transport.h index 7e5dc6d9f462..9a7a2d9755c0 100644 --- a/drivers/infiniband/hw/usnic/usnic_transport.h +++ b/drivers/infiniband/hw/usnic/usnic_transport.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. 
* - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index cb2337f0532b..645a5f6e6c88 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c @@ -7,7 +7,7 @@ * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: + * BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h index 70440996e8f2..45ca7c1613a7 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.h +++ b/drivers/infiniband/hw/usnic/usnic_uiom.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c index 3a4288e0fbac..42b4b4c4e452 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2014, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h index d4f752e258fd..c0b0b876ab90 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h +++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.c b/drivers/infiniband/hw/usnic/usnic_vnic.c index 656b88c39eda..66de93fb8ea9 100644 --- a/drivers/infiniband/hw/usnic/usnic_vnic.c +++ b/drivers/infiniband/hw/usnic/usnic_vnic.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. 
* - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.h b/drivers/infiniband/hw/usnic/usnic_vnic.h index 14d931a8829d..a08423e478af 100644 --- a/drivers/infiniband/hw/usnic/usnic_vnic.h +++ b/drivers/infiniband/hw/usnic/usnic_vnic.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index ca2873698d75..edc5b8565d6d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -80,7 +80,7 @@ enum { IPOIB_NUM_WC = 4, IPOIB_MAX_PATH_REC_QUEUE = 3, - IPOIB_MAX_MCAST_QUEUE = 3, + IPOIB_MAX_MCAST_QUEUE = 64, IPOIB_FLAG_OPER_UP = 0, IPOIB_FLAG_INITIALIZED = 1, @@ -495,6 +495,7 @@ void ipoib_dev_cleanup(struct net_device *dev); void ipoib_mcast_join_task(struct work_struct *work); void ipoib_mcast_carrier_on_task(struct work_struct *work); void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb); +void ipoib_mcast_free(struct ipoib_mcast *mc); void ipoib_mcast_restart_task(struct work_struct *work); int ipoib_mcast_start_thread(struct net_device *dev); @@ -548,6 +549,8 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter, int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid, int set_qkey); +int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast); +struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid); int ipoib_init_qp(struct net_device *dev); int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 36536ce5a3e2..babba05d7a0e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1149,6 +1149,9 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) unsigned long dt; unsigned long flags; int i; + LIST_HEAD(remove_list); + struct ipoib_mcast *mcast, *tmcast; + struct net_device *dev = priv->dev; if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) return; @@ -1176,6 +1179,19 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) lockdep_is_held(&priv->lock))) != NULL) { /* was the neigh idle for two GC periods */ if (time_after(neigh_obsolete, neigh->alive)) { + u8 *mgid = neigh->daddr + 4; + + /* Is this multicast ? 
*/ + if (*mgid == 0xff) { + mcast = __ipoib_mcast_find(dev, mgid); + + if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { + list_del(&mcast->list); + rb_erase(&mcast->rb_node, &priv->multicast_tree); + list_add_tail(&mcast->list, &remove_list); + } + } + rcu_assign_pointer(*np, rcu_dereference_protected(neigh->hnext, lockdep_is_held(&priv->lock))); @@ -1191,6 +1207,10 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) out_unlock: spin_unlock_irqrestore(&priv->lock, flags); + list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { + ipoib_mcast_leave(dev, mcast); + ipoib_mcast_free(mcast); + } } static void ipoib_reap_neigh(struct work_struct *work) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 09a1748f9d13..d750a86042f3 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -106,7 +106,7 @@ static void __ipoib_mcast_schedule_join_thread(struct ipoib_dev_priv *priv, queue_delayed_work(priv->wq, &priv->mcast_task, 0); } -static void ipoib_mcast_free(struct ipoib_mcast *mcast) +void ipoib_mcast_free(struct ipoib_mcast *mcast) { struct net_device *dev = mcast->dev; int tx_dropped = 0; @@ -153,7 +153,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev, return mcast; } -static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) +struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct rb_node *n = priv->multicast_tree.rb_node; @@ -508,17 +508,19 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) rec.hop_limit = priv->broadcast->mcmember.hop_limit; /* - * Historically Linux IPoIB has never properly supported SEND - * ONLY join. It emulated it by not providing all the required - * attributes, which is enough to prevent group creation and - * detect if there are full members or not. A major problem - * with supporting SEND ONLY is detecting when the group is - * auto-destroyed as IPoIB will cache the MLID.. + * Send-only IB Multicast joins do not work at the core + * IB layer yet, so we can't use them here. However, + * we are emulating an Ethernet multicast send, which + * does not require a multicast subscription and will + * still send properly. The most appropriate thing to + * do is to create the group if it doesn't exist as that + * most closely emulates the behavior, from a user space + * application perspecitive, of Ethernet multicast + * operation. For now, we do a full join, maybe later + * when the core IB layers support send only joins we + * will use them. 
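The __ipoib_reap_neigh() hunk above decides from the neighbour's hardware address whether a stale entry belongs to a send-only multicast group, then unlinks matching groups under priv->lock into a local remove_list and only calls ipoib_mcast_leave()/ipoib_mcast_free() after the spinlock is released. The address test relies on the IPoIB link-layer address layout: 20 bytes, a 4-byte flags/QPN prefix followed by the 16-byte GID, where a GID whose first byte is 0xff is a multicast GID. A small sketch of that check, assuming kernel headers; the helper name is made up and the real hunk open-codes the test:

#include <linux/if_infiniband.h>	/* INFINIBAND_ALEN == 20 */
#include <linux/types.h>

static bool ipoib_daddr_is_mcast(const u8 daddr[INFINIBAND_ALEN])
{
	const u8 *gid = daddr + 4;	/* skip the flags/QPN prefix */

	return gid[0] == 0xff;		/* IB multicast GID prefix */
}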
*/ -#if 1 - if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) - comp_mask &= ~IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; -#else +#if 0 if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) rec.join_state = 4; #endif @@ -675,7 +677,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev) return 0; } -static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) +int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) { struct ipoib_dev_priv *priv = netdev_priv(dev); int ret = 0; diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 1ace5d83a4d7..f58ff96b6cbb 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -97,6 +97,11 @@ unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS; module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024"); +bool iser_always_reg = true; +module_param_named(always_register, iser_always_reg, bool, S_IRUGO); +MODULE_PARM_DESC(always_register, + "Always register memory, even for continuous memory regions (default:true)"); + bool iser_pi_enable = false; module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO); MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 86f6583485ef..a5edd6ede692 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -611,6 +611,7 @@ extern int iser_debug_level; extern bool iser_pi_enable; extern int iser_pi_guard; extern unsigned int iser_max_sectors; +extern bool iser_always_reg; int iser_assign_reg_ops(struct iser_device *device); diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 2493cc748db8..4c46d67d37a1 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -803,11 +803,12 @@ static int iser_reg_prot_sg(struct iscsi_iser_task *task, struct iser_data_buf *mem, struct iser_fr_desc *desc, + bool use_dma_key, struct iser_mem_reg *reg) { struct iser_device *device = task->iser_conn->ib_conn.device; - if (mem->dma_nents == 1) + if (use_dma_key) return iser_reg_dma(device, mem, reg); return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg); @@ -817,11 +818,12 @@ static int iser_reg_data_sg(struct iscsi_iser_task *task, struct iser_data_buf *mem, struct iser_fr_desc *desc, + bool use_dma_key, struct iser_mem_reg *reg) { struct iser_device *device = task->iser_conn->ib_conn.device; - if (mem->dma_nents == 1) + if (use_dma_key) return iser_reg_dma(device, mem, reg); return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg); @@ -836,14 +838,17 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, struct iser_mem_reg *reg = &task->rdma_reg[dir]; struct iser_mem_reg *data_reg; struct iser_fr_desc *desc = NULL; + bool use_dma_key; int err; err = iser_handle_unaligned_buf(task, mem, dir); if (unlikely(err)) return err; - if (mem->dma_nents != 1 || - scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) { + use_dma_key = (mem->dma_nents == 1 && !iser_always_reg && + scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL); + + if (!use_dma_key) { desc = device->reg_ops->reg_desc_get(ib_conn); reg->mem_h = desc; } @@ -853,7 +858,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, else data_reg = &task->desc.data_reg; - 
err = iser_reg_data_sg(task, mem, desc, data_reg); + err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg); if (unlikely(err)) goto err_reg; @@ -866,7 +871,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, if (unlikely(err)) goto err_reg; - err = iser_reg_prot_sg(task, mem, desc, prot_reg); + err = iser_reg_prot_sg(task, mem, desc, + use_dma_key, prot_reg); if (unlikely(err)) goto err_reg; } diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index ae70cc1463ac..85132d867bc8 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -133,11 +133,15 @@ static int iser_create_device_ib_res(struct iser_device *device) (unsigned long)comp); } - device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_WRITE | - IB_ACCESS_REMOTE_READ); - if (IS_ERR(device->mr)) - goto dma_mr_err; + if (!iser_always_reg) { + int access = IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_REMOTE_READ; + + device->mr = ib_get_dma_mr(device->pd, access); + if (IS_ERR(device->mr)) + goto dma_mr_err; + } INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, iser_event_handler); @@ -147,7 +151,8 @@ static int iser_create_device_ib_res(struct iser_device *device) return 0; handler_err: - ib_dereg_mr(device->mr); + if (device->mr) + ib_dereg_mr(device->mr); dma_mr_err: for (i = 0; i < device->comps_used; i++) tasklet_kill(&device->comps[i].tasklet); @@ -173,7 +178,6 @@ comps_err: static void iser_free_device_ib_res(struct iser_device *device) { int i; - BUG_ON(device->mr == NULL); for (i = 0; i < device->comps_used; i++) { struct iser_comp *comp = &device->comps[i]; @@ -184,7 +188,8 @@ static void iser_free_device_ib_res(struct iser_device *device) } (void)ib_unregister_event_handler(&device->event_handler); - (void)ib_dereg_mr(device->mr); + if (device->mr) + (void)ib_dereg_mr(device->mr); ib_dealloc_pd(device->pd); kfree(device->comps); diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 403bd29443b8..aa59037d7504 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -238,8 +238,6 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn) rx_sg->lkey = device->pd->local_dma_lkey; } - isert_conn->rx_desc_head = 0; - return 0; dma_map_fail: @@ -634,7 +632,7 @@ static void isert_init_conn(struct isert_conn *isert_conn) { isert_conn->state = ISER_CONN_INIT; - INIT_LIST_HEAD(&isert_conn->accept_node); + INIT_LIST_HEAD(&isert_conn->node); init_completion(&isert_conn->login_comp); init_completion(&isert_conn->login_req_comp); init_completion(&isert_conn->wait); @@ -762,28 +760,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) ret = isert_rdma_post_recvl(isert_conn); if (ret) goto out_conn_dev; - /* - * Obtain the second reference now before isert_rdma_accept() to - * ensure that any initiator generated REJECT CM event that occurs - * asynchronously won't drop the last reference until the error path - * in iscsi_target_login_sess_out() does it's ->iscsit_free_conn() -> - * isert_free_conn() -> isert_put_conn() -> kref_put(). 
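The iser hunks just above gate the global DMA MR behind the new always_register module parameter: the MR is only allocated when iser_always_reg is false, teardown checks for NULL before deregistering, and iser_reg_rdma_mem() folds the whole decision into a single use_dma_key flag. A rough sketch of that allocate-if-needed / NULL-guarded-teardown shape, assuming the 4.x-era ib_get_dma_mr()/ib_dereg_mr() verbs; the function names here are illustrative only:

#include <linux/err.h>
#include <rdma/ib_verbs.h>

static bool always_reg = true;	/* mirrors the iser_always_reg parameter */

static int setup_dma_mr(struct ib_pd *pd, struct ib_mr **mr_out)
{
	struct ib_mr *mr;

	*mr_out = NULL;
	if (always_reg)
		return 0;	/* every buffer gets an explicit registration */

	mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE |
			       IB_ACCESS_REMOTE_WRITE |
			       IB_ACCESS_REMOTE_READ);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	*mr_out = mr;
	return 0;
}

static void teardown_dma_mr(struct ib_mr *mr)
{
	if (mr)			/* may never have been allocated */
		ib_dereg_mr(mr);
}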
- */ - if (!kref_get_unless_zero(&isert_conn->kref)) { - isert_warn("conn %p connect_release is running\n", isert_conn); - goto out_conn_dev; - } ret = isert_rdma_accept(isert_conn); if (ret) goto out_conn_dev; - mutex_lock(&isert_np->np_accept_mutex); - list_add_tail(&isert_conn->accept_node, &isert_np->np_accept_list); - mutex_unlock(&isert_np->np_accept_mutex); + mutex_lock(&isert_np->mutex); + list_add_tail(&isert_conn->node, &isert_np->accepted); + mutex_unlock(&isert_np->mutex); - isert_info("np %p: Allow accept_np to continue\n", np); - up(&isert_np->np_sem); return 0; out_conn_dev: @@ -831,13 +816,21 @@ static void isert_connected_handler(struct rdma_cm_id *cma_id) { struct isert_conn *isert_conn = cma_id->qp->qp_context; + struct isert_np *isert_np = cma_id->context; isert_info("conn %p\n", isert_conn); mutex_lock(&isert_conn->mutex); - if (isert_conn->state != ISER_CONN_FULL_FEATURE) - isert_conn->state = ISER_CONN_UP; + isert_conn->state = ISER_CONN_UP; + kref_get(&isert_conn->kref); mutex_unlock(&isert_conn->mutex); + + mutex_lock(&isert_np->mutex); + list_move_tail(&isert_conn->node, &isert_np->pending); + mutex_unlock(&isert_np->mutex); + + isert_info("np %p: Allow accept_np to continue\n", isert_np); + up(&isert_np->sem); } static void @@ -903,14 +896,14 @@ isert_np_cma_handler(struct isert_np *isert_np, switch (event) { case RDMA_CM_EVENT_DEVICE_REMOVAL: - isert_np->np_cm_id = NULL; + isert_np->cm_id = NULL; break; case RDMA_CM_EVENT_ADDR_CHANGE: - isert_np->np_cm_id = isert_setup_id(isert_np); - if (IS_ERR(isert_np->np_cm_id)) { + isert_np->cm_id = isert_setup_id(isert_np); + if (IS_ERR(isert_np->cm_id)) { isert_err("isert np %p setup id failed: %ld\n", - isert_np, PTR_ERR(isert_np->np_cm_id)); - isert_np->np_cm_id = NULL; + isert_np, PTR_ERR(isert_np->cm_id)); + isert_np->cm_id = NULL; } break; default: @@ -929,7 +922,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id, struct isert_conn *isert_conn; bool terminating = false; - if (isert_np->np_cm_id == cma_id) + if (isert_np->cm_id == cma_id) return isert_np_cma_handler(cma_id->context, event); isert_conn = cma_id->qp->qp_context; @@ -945,13 +938,13 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id, if (terminating) goto out; - mutex_lock(&isert_np->np_accept_mutex); - if (!list_empty(&isert_conn->accept_node)) { - list_del_init(&isert_conn->accept_node); + mutex_lock(&isert_np->mutex); + if (!list_empty(&isert_conn->node)) { + list_del_init(&isert_conn->node); isert_put_conn(isert_conn); queue_work(isert_release_wq, &isert_conn->release_work); } - mutex_unlock(&isert_np->np_accept_mutex); + mutex_unlock(&isert_np->mutex); out: return 0; @@ -962,6 +955,7 @@ isert_connect_error(struct rdma_cm_id *cma_id) { struct isert_conn *isert_conn = cma_id->qp->qp_context; + list_del_init(&isert_conn->node); isert_conn->cm_id = NULL; isert_put_conn(isert_conn); @@ -1006,35 +1000,51 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) } static int -isert_post_recv(struct isert_conn *isert_conn, u32 count) +isert_post_recvm(struct isert_conn *isert_conn, u32 count) { struct ib_recv_wr *rx_wr, *rx_wr_failed; int i, ret; - unsigned int rx_head = isert_conn->rx_desc_head; struct iser_rx_desc *rx_desc; for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { - rx_desc = &isert_conn->rx_descs[rx_head]; - rx_wr->wr_id = (uintptr_t)rx_desc; - rx_wr->sg_list = &rx_desc->rx_sg; - rx_wr->num_sge = 1; - rx_wr->next = rx_wr + 1; - rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1); + rx_desc = 
&isert_conn->rx_descs[i]; + rx_wr->wr_id = (uintptr_t)rx_desc; + rx_wr->sg_list = &rx_desc->rx_sg; + rx_wr->num_sge = 1; + rx_wr->next = rx_wr + 1; } - rx_wr--; rx_wr->next = NULL; /* mark end of work requests list */ isert_conn->post_recv_buf_count += count; ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, - &rx_wr_failed); + &rx_wr_failed); if (ret) { isert_err("ib_post_recv() failed with ret: %d\n", ret); isert_conn->post_recv_buf_count -= count; - } else { - isert_dbg("Posted %d RX buffers\n", count); - isert_conn->rx_desc_head = rx_head; } + + return ret; +} + +static int +isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc) +{ + struct ib_recv_wr *rx_wr_failed, rx_wr; + int ret; + + rx_wr.wr_id = (uintptr_t)rx_desc; + rx_wr.sg_list = &rx_desc->rx_sg; + rx_wr.num_sge = 1; + rx_wr.next = NULL; + + isert_conn->post_recv_buf_count++; + ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed); + if (ret) { + isert_err("ib_post_recv() failed with ret: %d\n", ret); + isert_conn->post_recv_buf_count--; + } + return ret; } @@ -1205,7 +1215,8 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, if (ret) return ret; - ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX); + ret = isert_post_recvm(isert_conn, + ISERT_QP_MAX_RECV_DTOS); if (ret) return ret; @@ -1278,7 +1289,7 @@ isert_rx_login_req(struct isert_conn *isert_conn) } static struct iscsi_cmd -*isert_allocate_cmd(struct iscsi_conn *conn) +*isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc) { struct isert_conn *isert_conn = conn->context; struct isert_cmd *isert_cmd; @@ -1292,6 +1303,7 @@ static struct iscsi_cmd isert_cmd = iscsit_priv_cmd(cmd); isert_cmd->conn = isert_conn; isert_cmd->iscsi_cmd = cmd; + isert_cmd->rx_desc = rx_desc; return cmd; } @@ -1303,9 +1315,9 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn, { struct iscsi_conn *conn = isert_conn->conn; struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; - struct scatterlist *sg; int imm_data, imm_data_len, unsol_data, sg_nents, rc; bool dump_payload = false; + unsigned int data_len; rc = iscsit_setup_scsi_cmd(conn, cmd, buf); if (rc < 0) @@ -1314,7 +1326,10 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn, imm_data = cmd->immediate_data; imm_data_len = cmd->first_burst_len; unsol_data = cmd->unsolicited_data; + data_len = cmd->se_cmd.data_length; + if (imm_data && imm_data_len == data_len) + cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; rc = iscsit_process_scsi_cmd(conn, cmd, hdr); if (rc < 0) { return 0; @@ -1326,13 +1341,20 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn, if (!imm_data) return 0; - sg = &cmd->se_cmd.t_data_sg[0]; - sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); - - isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n", - sg, sg_nents, &rx_desc->data[0], imm_data_len); - - sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len); + if (imm_data_len != data_len) { + sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); + sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents, + &rx_desc->data[0], imm_data_len); + isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n", + sg_nents, imm_data_len); + } else { + sg_init_table(&isert_cmd->sg, 1); + cmd->se_cmd.t_data_sg = &isert_cmd->sg; + cmd->se_cmd.t_data_nents = 1; + sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len); + isert_dbg("Transfer Immediate imm_data_len: %d\n", + imm_data_len); + } cmd->write_data_done += imm_data_len; @@ 
-1407,6 +1429,15 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn, if (rc < 0) return rc; + /* + * multiple data-outs on the same command can arrive - + * so post the buffer before hand + */ + rc = isert_post_recv(isert_conn, rx_desc); + if (rc) { + isert_err("ib_post_recv failed with %d\n", rc); + return rc; + } return 0; } @@ -1479,7 +1510,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, switch (opcode) { case ISCSI_OP_SCSI_CMD: - cmd = isert_allocate_cmd(conn); + cmd = isert_allocate_cmd(conn, rx_desc); if (!cmd) break; @@ -1493,7 +1524,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, rx_desc, (unsigned char *)hdr); break; case ISCSI_OP_NOOP_OUT: - cmd = isert_allocate_cmd(conn); + cmd = isert_allocate_cmd(conn, rx_desc); if (!cmd) break; @@ -1506,7 +1537,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, (unsigned char *)hdr); break; case ISCSI_OP_SCSI_TMFUNC: - cmd = isert_allocate_cmd(conn); + cmd = isert_allocate_cmd(conn, rx_desc); if (!cmd) break; @@ -1514,22 +1545,20 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, (unsigned char *)hdr); break; case ISCSI_OP_LOGOUT: - cmd = isert_allocate_cmd(conn); + cmd = isert_allocate_cmd(conn, rx_desc); if (!cmd) break; ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); break; case ISCSI_OP_TEXT: - if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) { + if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); - if (!cmd) - break; - } else { - cmd = isert_allocate_cmd(conn); - if (!cmd) - break; - } + else + cmd = isert_allocate_cmd(conn, rx_desc); + + if (!cmd) + break; isert_cmd = iscsit_priv_cmd(cmd); ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd, @@ -1589,7 +1618,7 @@ isert_rcv_completion(struct iser_rx_desc *desc, struct ib_device *ib_dev = isert_conn->cm_id->device; struct iscsi_hdr *hdr; u64 rx_dma; - int rx_buflen, outstanding; + int rx_buflen; if ((char *)desc == isert_conn->login_req_buf) { rx_dma = isert_conn->login_req_dma; @@ -1629,22 +1658,6 @@ isert_rcv_completion(struct iser_rx_desc *desc, DMA_FROM_DEVICE); isert_conn->post_recv_buf_count--; - isert_dbg("Decremented post_recv_buf_count: %d\n", - isert_conn->post_recv_buf_count); - - if ((char *)desc == isert_conn->login_req_buf) - return; - - outstanding = isert_conn->post_recv_buf_count; - if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) { - int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding, - ISERT_MIN_POSTED_RX); - err = isert_post_recv(isert_conn, count); - if (err) { - isert_err("isert_post_recv() count: %d failed, %d\n", - count, err); - } - } } static int @@ -2156,6 +2169,12 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd) struct ib_send_wr *wr_failed; int ret; + ret = isert_post_recv(isert_conn, isert_cmd->rx_desc); + if (ret) { + isert_err("ib_post_recv failed with %d\n", ret); + return ret; + } + ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, &wr_failed); if (ret) { @@ -2950,6 +2969,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) &isert_cmd->tx_desc.send_wr); isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr; wr->send_wr_num += 1; + + rc = isert_post_recv(isert_conn, isert_cmd->rx_desc); + if (rc) { + isert_err("ib_post_recv failed with %d\n", rc); + return rc; + } } rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed); @@ -2999,9 +3024,16 @@ isert_get_dataout(struct 
iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) static int isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) { - int ret; + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); + int ret = 0; switch (state) { + case ISTATE_REMOVE: + spin_lock_bh(&conn->cmd_lock); + list_del_init(&cmd->i_conn_node); + spin_unlock_bh(&conn->cmd_lock); + isert_put_cmd(isert_cmd, true); + break; case ISTATE_SEND_NOPIN_WANT_RESPONSE: ret = isert_put_nopin(cmd, conn, false); break; @@ -3106,10 +3138,10 @@ isert_setup_np(struct iscsi_np *np, isert_err("Unable to allocate struct isert_np\n"); return -ENOMEM; } - sema_init(&isert_np->np_sem, 0); - mutex_init(&isert_np->np_accept_mutex); - INIT_LIST_HEAD(&isert_np->np_accept_list); - init_completion(&isert_np->np_login_comp); + sema_init(&isert_np->sem, 0); + mutex_init(&isert_np->mutex); + INIT_LIST_HEAD(&isert_np->accepted); + INIT_LIST_HEAD(&isert_np->pending); isert_np->np = np; /* @@ -3125,7 +3157,7 @@ isert_setup_np(struct iscsi_np *np, goto out; } - isert_np->np_cm_id = isert_lid; + isert_np->cm_id = isert_lid; np->np_context = isert_np; return 0; @@ -3214,7 +3246,7 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) int ret; accept_wait: - ret = down_interruptible(&isert_np->np_sem); + ret = down_interruptible(&isert_np->sem); if (ret) return -ENODEV; @@ -3231,15 +3263,15 @@ accept_wait: } spin_unlock_bh(&np->np_thread_lock); - mutex_lock(&isert_np->np_accept_mutex); - if (list_empty(&isert_np->np_accept_list)) { - mutex_unlock(&isert_np->np_accept_mutex); + mutex_lock(&isert_np->mutex); + if (list_empty(&isert_np->pending)) { + mutex_unlock(&isert_np->mutex); goto accept_wait; } - isert_conn = list_first_entry(&isert_np->np_accept_list, - struct isert_conn, accept_node); - list_del_init(&isert_conn->accept_node); - mutex_unlock(&isert_np->np_accept_mutex); + isert_conn = list_first_entry(&isert_np->pending, + struct isert_conn, node); + list_del_init(&isert_conn->node); + mutex_unlock(&isert_np->mutex); conn->context = isert_conn; isert_conn->conn = conn; @@ -3257,28 +3289,39 @@ isert_free_np(struct iscsi_np *np) struct isert_np *isert_np = np->np_context; struct isert_conn *isert_conn, *n; - if (isert_np->np_cm_id) - rdma_destroy_id(isert_np->np_cm_id); + if (isert_np->cm_id) + rdma_destroy_id(isert_np->cm_id); /* * FIXME: At this point we don't have a good way to insure * that at this point we don't have hanging connections that * completed RDMA establishment but didn't start iscsi login * process. So work-around this by cleaning up what ever piled - * up in np_accept_list. + * up in accepted and pending lists. 
*/ - mutex_lock(&isert_np->np_accept_mutex); - if (!list_empty(&isert_np->np_accept_list)) { - isert_info("Still have isert connections, cleaning up...\n"); + mutex_lock(&isert_np->mutex); + if (!list_empty(&isert_np->pending)) { + isert_info("Still have isert pending connections\n"); + list_for_each_entry_safe(isert_conn, n, + &isert_np->pending, + node) { + isert_info("cleaning isert_conn %p state (%d)\n", + isert_conn, isert_conn->state); + isert_connect_release(isert_conn); + } + } + + if (!list_empty(&isert_np->accepted)) { + isert_info("Still have isert accepted connections\n"); list_for_each_entry_safe(isert_conn, n, - &isert_np->np_accept_list, - accept_node) { + &isert_np->accepted, + node) { isert_info("cleaning isert_conn %p state (%d)\n", isert_conn, isert_conn->state); isert_connect_release(isert_conn); } } - mutex_unlock(&isert_np->np_accept_mutex); + mutex_unlock(&isert_np->mutex); np->np_context = NULL; kfree(isert_np); @@ -3345,6 +3388,41 @@ isert_wait4flush(struct isert_conn *isert_conn) wait_for_completion(&isert_conn->wait_comp_err); } +/** + * isert_put_unsol_pending_cmds() - Drop commands waiting for + * unsolicitate dataout + * @conn: iscsi connection + * + * We might still have commands that are waiting for unsolicited + * dataouts messages. We must put the extra reference on those + * before blocking on the target_wait_for_session_cmds + */ +static void +isert_put_unsol_pending_cmds(struct iscsi_conn *conn) +{ + struct iscsi_cmd *cmd, *tmp; + static LIST_HEAD(drop_cmd_list); + + spin_lock_bh(&conn->cmd_lock); + list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) { + if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) && + (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) && + (cmd->write_data_done < cmd->se_cmd.data_length)) + list_move_tail(&cmd->i_conn_node, &drop_cmd_list); + } + spin_unlock_bh(&conn->cmd_lock); + + list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) { + list_del_init(&cmd->i_conn_node); + if (cmd->i_state != ISTATE_REMOVE) { + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); + + isert_info("conn %p dropping cmd %p\n", conn, cmd); + isert_put_cmd(isert_cmd, true); + } + } +} + static void isert_wait_conn(struct iscsi_conn *conn) { struct isert_conn *isert_conn = conn->context; @@ -3363,8 +3441,9 @@ static void isert_wait_conn(struct iscsi_conn *conn) isert_conn_terminate(isert_conn); mutex_unlock(&isert_conn->mutex); - isert_wait4cmds(conn); isert_wait4flush(isert_conn); + isert_put_unsol_pending_cmds(conn); + isert_wait4cmds(conn); isert_wait4logout(isert_conn); queue_work(isert_release_wq, &isert_conn->release_work); diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 6a04ba3c0f72..c5b99bcecbcf 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -113,7 +113,6 @@ enum { }; struct isert_rdma_wr { - struct list_head wr_list; struct isert_cmd *isert_cmd; enum iser_ib_op_code iser_ib_op; struct ib_sge *ib_sge; @@ -134,14 +133,13 @@ struct isert_cmd { uint64_t write_va; u64 pdu_buf_dma; u32 pdu_buf_len; - u32 read_va_off; - u32 write_va_off; - u32 rdma_wr_num; struct isert_conn *conn; struct iscsi_cmd *iscsi_cmd; struct iser_tx_desc tx_desc; + struct iser_rx_desc *rx_desc; struct isert_rdma_wr rdma_wr; struct work_struct comp_work; + struct scatterlist sg; }; struct isert_device; @@ -159,11 +157,10 @@ struct isert_conn { u64 login_req_dma; int login_req_len; u64 login_rsp_dma; - unsigned int 
rx_desc_head; struct iser_rx_desc *rx_descs; - struct ib_recv_wr rx_wr[ISERT_MIN_POSTED_RX]; + struct ib_recv_wr rx_wr[ISERT_QP_MAX_RECV_DTOS]; struct iscsi_conn *conn; - struct list_head accept_node; + struct list_head node; struct completion login_comp; struct completion login_req_comp; struct iser_tx_desc login_tx_desc; @@ -222,9 +219,9 @@ struct isert_device { struct isert_np { struct iscsi_np *np; - struct semaphore np_sem; - struct rdma_cm_id *np_cm_id; - struct mutex np_accept_mutex; - struct list_head np_accept_list; - struct completion np_login_comp; + struct semaphore sem; + struct rdma_cm_id *cm_id; + struct mutex mutex; + struct list_head accepted; + struct list_head pending; }; diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig index 56eb471b5576..4215b5382092 100644 --- a/drivers/input/joystick/Kconfig +++ b/drivers/input/joystick/Kconfig @@ -196,6 +196,7 @@ config JOYSTICK_TWIDJOY config JOYSTICK_ZHENHUA tristate "5-byte Zhenhua RC transmitter" select SERIO + select BITREVERSE help Say Y here if you have a Zhen Hua PPM-4CH transmitter which is supplied with a ready to fly micro electric indoor helicopters diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c index b76ac580703c..a8bc2fe170dd 100644 --- a/drivers/input/joystick/walkera0701.c +++ b/drivers/input/joystick/walkera0701.c @@ -150,7 +150,7 @@ static void walkera0701_irq_handler(void *handler_data) if (w->counter == 24) { /* full frame */ walkera0701_parse_frame(w); w->counter = NO_SYNC; - if (abs(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */ + if (abs64(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */ w->counter = 0; } else { if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE) @@ -161,7 +161,7 @@ static void walkera0701_irq_handler(void *handler_data) } else w->counter = NO_SYNC; } - } else if (abs(pulse_time - SYNC_PULSE - BIN0_PULSE) < + } else if (abs64(pulse_time - SYNC_PULSE - BIN0_PULSE) < RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. 
*/ w->counter = 0; diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c index b052afec9a11..6639b2b8528a 100644 --- a/drivers/input/keyboard/omap4-keypad.c +++ b/drivers/input/keyboard/omap4-keypad.c @@ -266,7 +266,7 @@ static int omap4_keypad_probe(struct platform_device *pdev) error = omap4_keypad_parse_dt(&pdev->dev, keypad_data); if (error) - return error; + goto err_free_keypad; res = request_mem_region(res->start, resource_size(res), pdev->name); if (!res) { diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c index 867db8a91372..e317b75357a0 100644 --- a/drivers/input/misc/pm8941-pwrkey.c +++ b/drivers/input/misc/pm8941-pwrkey.c @@ -93,7 +93,7 @@ static int pm8941_reboot_notify(struct notifier_block *nb, default: reset_type = PON_PS_HOLD_TYPE_HARD_RESET; break; - }; + } error = regmap_update_bits(pwrkey->regmap, pwrkey->baseaddr + PON_PS_HOLD_RST_CTL, diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 345df9b03aed..5adbcedcb81c 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -414,7 +414,7 @@ static int uinput_setup_device(struct uinput_device *udev, dev->id.product = user_dev->id.product; dev->id.version = user_dev->id.version; - for_each_set_bit(i, dev->absbit, ABS_CNT) { + for (i = 0; i < ABS_CNT; i++) { input_abs_set_max(dev, i, user_dev->absmax[i]); input_abs_set_min(dev, i, user_dev->absmin[i]); input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]); diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c index 5f191071d44a..e4eb048d1bf6 100644 --- a/drivers/input/mouse/cyapa_gen6.c +++ b/drivers/input/mouse/cyapa_gen6.c @@ -241,14 +241,10 @@ static int cyapa_gen6_read_sys_info(struct cyapa *cyapa) memcpy(&cyapa->product_id[13], &resp_data[62], 2); cyapa->product_id[15] = '\0'; + /* Get the number of Rx electrodes. */ rotat_align = resp_data[68]; - if (rotat_align) { - cyapa->electrodes_rx = cyapa->electrodes_y; - cyapa->electrodes_rx = cyapa->electrodes_y; - } else { - cyapa->electrodes_rx = cyapa->electrodes_x; - cyapa->electrodes_rx = cyapa->electrodes_y; - } + cyapa->electrodes_rx = + rotat_align ? 
cyapa->electrodes_y : cyapa->electrodes_x; cyapa->aligned_electrodes_rx = (cyapa->electrodes_rx + 3) & ~3u; if (!cyapa->electrodes_x || !cyapa->electrodes_y || diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h index 73670f2aebfd..c0ec26118732 100644 --- a/drivers/input/mouse/elan_i2c.h +++ b/drivers/input/mouse/elan_i2c.h @@ -60,7 +60,7 @@ struct elan_transport_ops { int (*get_sm_version)(struct i2c_client *client, u8* ic_type, u8 *version); int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum); - int (*get_product_id)(struct i2c_client *client, u8 *id); + int (*get_product_id)(struct i2c_client *client, u16 *id); int (*get_max)(struct i2c_client *client, unsigned int *max_x, unsigned int *max_y); diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index fa945304b9a5..5e1665bbaa0b 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -40,7 +40,7 @@ #include "elan_i2c.h" #define DRIVER_NAME "elan_i2c" -#define ELAN_DRIVER_VERSION "1.6.0" +#define ELAN_DRIVER_VERSION "1.6.1" #define ETP_MAX_PRESSURE 255 #define ETP_FWIDTH_REDUCE 90 #define ETP_FINGER_WIDTH 15 @@ -76,7 +76,7 @@ struct elan_tp_data { unsigned int x_res; unsigned int y_res; - u8 product_id; + u16 product_id; u8 fw_version; u8 sm_version; u8 iap_version; @@ -98,15 +98,25 @@ static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count, u16 *signature_address) { switch (iap_version) { + case 0x00: + case 0x06: case 0x08: *validpage_count = 512; break; + case 0x03: + case 0x07: case 0x09: + case 0x0A: + case 0x0B: + case 0x0C: *validpage_count = 768; break; case 0x0D: *validpage_count = 896; break; + case 0x0E: + *validpage_count = 640; + break; default: /* unknown ic type clear value */ *validpage_count = 0; @@ -266,11 +276,10 @@ static int elan_query_device_info(struct elan_tp_data *data) error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count, &data->fw_signature_address); - if (error) { - dev_err(&data->client->dev, - "unknown iap version %d\n", data->iap_version); - return error; - } + if (error) + dev_warn(&data->client->dev, + "unexpected iap version %#04x (ic type: %#04x), firmware update will not work\n", + data->iap_version, data->ic_type); return 0; } @@ -486,6 +495,9 @@ static ssize_t elan_sysfs_update_fw(struct device *dev, const u8 *fw_signature; static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF}; + if (data->fw_validpage_count == 0) + return -EINVAL; + /* Look for a firmware with the product id appended. 
*/ fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id); if (!fw_name) { diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index 683c840c9dd7..a679e56c44cd 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c @@ -276,7 +276,7 @@ static int elan_i2c_get_sm_version(struct i2c_client *client, return 0; } -static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id) +static int elan_i2c_get_product_id(struct i2c_client *client, u16 *id) { int error; u8 val[3]; @@ -287,7 +287,7 @@ static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id) return error; } - *id = val[0]; + *id = le16_to_cpup((__le16 *)val); return 0; } diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c index ff36a366b2aa..cb6aecbc1dc2 100644 --- a/drivers/input/mouse/elan_i2c_smbus.c +++ b/drivers/input/mouse/elan_i2c_smbus.c @@ -183,7 +183,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client, return 0; } -static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id) +static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id) { int error; u8 val[3]; @@ -195,7 +195,7 @@ static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id) return error; } - *id = val[1]; + *id = be16_to_cpup((__be16 *)val); return 0; } diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 994ae7886156..6025eb430c0a 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -519,18 +519,14 @@ static int synaptics_set_mode(struct psmouse *psmouse) struct synaptics_data *priv = psmouse->private; priv->mode = 0; - - if (priv->absolute_mode) { + if (priv->absolute_mode) priv->mode |= SYN_BIT_ABSOLUTE_MODE; - if (SYN_CAP_EXTENDED(priv->capabilities)) - priv->mode |= SYN_BIT_W_MODE; - } - - if (!SYN_MODE_WMODE(priv->mode) && priv->disable_gesture) + if (priv->disable_gesture) priv->mode |= SYN_BIT_DISABLE_GESTURE; - if (psmouse->rate >= 80) priv->mode |= SYN_BIT_HIGH_RATE; + if (SYN_CAP_EXTENDED(priv->capabilities)) + priv->mode |= SYN_BIT_W_MODE; if (synaptics_mode_cmd(psmouse, priv->mode)) return -1; diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c index 75516996db20..316f2c897101 100644 --- a/drivers/input/serio/libps2.c +++ b/drivers/input/serio/libps2.c @@ -212,12 +212,17 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command) * time before the ACK arrives. */ if (ps2_sendbyte(ps2dev, command & 0xff, - command == PS2_CMD_RESET_BAT ? 1000 : 200)) - goto out; + command == PS2_CMD_RESET_BAT ? 1000 : 200)) { + serio_pause_rx(ps2dev->serio); + goto out_reset_flags; + } - for (i = 0; i < send; i++) - if (ps2_sendbyte(ps2dev, param[i], 200)) - goto out; + for (i = 0; i < send; i++) { + if (ps2_sendbyte(ps2dev, param[i], 200)) { + serio_pause_rx(ps2dev->serio); + goto out_reset_flags; + } + } /* * The reset command takes a long time to execute. 
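For reference, the elan_i2c_i2c.c / elan_i2c_smbus.c product-id hunks above widen the id from u8 to u16 and decode the two raw bytes with le16_to_cpup() on the I2C path and be16_to_cpup() on the SMBus path. A minimal sketch of the equivalent byte assembly; the helper names product_id_from_i2c()/product_id_from_smbus() are invented here purely for illustration and are not part of the driver:

#include <stdint.h>

/* Illustrative only: how the two raw bytes returned by the touchpad are
 * combined into a 16-bit product id on each transport. le16_to_cpup()
 * treats val[0] as the low byte, be16_to_cpup() treats val[0] as the
 * high byte; both return the value in CPU byte order.
 */
static uint16_t product_id_from_i2c(const uint8_t val[2])
{
	return (uint16_t)val[0] | ((uint16_t)val[1] << 8);	/* little-endian */
}

static uint16_t product_id_from_smbus(const uint8_t val[2])
{
	return ((uint16_t)val[0] << 8) | (uint16_t)val[1];	/* big-endian */
}

Either form yields the same host-order value on any CPU, which is why the drivers cast the raw buffer and use the *_to_cpup() helpers instead of indexing a single byte.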
@@ -234,17 +239,18 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command) !(ps2dev->flags & PS2_FLAG_CMD), timeout); } + serio_pause_rx(ps2dev->serio); + if (param) for (i = 0; i < receive; i++) param[i] = ps2dev->cmdbuf[(receive - 1) - i]; if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1)) - goto out; + goto out_reset_flags; rc = 0; - out: - serio_pause_rx(ps2dev->serio); + out_reset_flags: ps2dev->flags = 0; serio_continue_rx(ps2dev->serio); diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c index 26b45936f9fd..1e8cd6f1fe9e 100644 --- a/drivers/input/serio/parkbd.c +++ b/drivers/input/serio/parkbd.c @@ -194,6 +194,7 @@ static int __init parkbd_init(void) parkbd_port = parkbd_allocate_serio(); if (!parkbd_port) { parport_release(parkbd_dev); + parport_unregister_device(parkbd_dev); return -ENOMEM; } diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 0f5f968592bd..04edc8f7122f 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -668,18 +668,22 @@ static int ads7846_no_filter(void *ads, int data_idx, int *val) static int ads7846_get_value(struct ads7846 *ts, struct spi_message *m) { + int value; struct spi_transfer *t = list_entry(m->transfers.prev, struct spi_transfer, transfer_list); if (ts->model == 7845) { - return be16_to_cpup((__be16 *)&(((char*)t->rx_buf)[1])) >> 3; + value = be16_to_cpup((__be16 *)&(((char *)t->rx_buf)[1])); } else { /* * adjust: on-wire is a must-ignore bit, a BE12 value, then * padding; built from two 8 bit values written msb-first. */ - return be16_to_cpup((__be16 *)t->rx_buf) >> 3; + value = be16_to_cpup((__be16 *)t->rx_buf); } + + /* enforce ADC output is 12 bits width */ + return (value >> 3) & 0xfff; } static void ads7846_update_value(struct spi_message *m, int val) diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c index ff0b75813daa..8275267eac25 100644 --- a/drivers/input/touchscreen/imx6ul_tsc.c +++ b/drivers/input/touchscreen/imx6ul_tsc.c @@ -94,7 +94,7 @@ struct imx6ul_tsc { * TSC module need ADC to get the measure value. So * before config TSC, we should initialize ADC module. 
*/ -static void imx6ul_adc_init(struct imx6ul_tsc *tsc) +static int imx6ul_adc_init(struct imx6ul_tsc *tsc) { int adc_hc = 0; int adc_gc; @@ -122,17 +122,23 @@ static void imx6ul_adc_init(struct imx6ul_tsc *tsc) timeout = wait_for_completion_timeout (&tsc->completion, ADC_TIMEOUT); - if (timeout == 0) + if (timeout == 0) { dev_err(tsc->dev, "Timeout for adc calibration\n"); + return -ETIMEDOUT; + } adc_gs = readl(tsc->adc_regs + REG_ADC_GS); - if (adc_gs & ADC_CALF) + if (adc_gs & ADC_CALF) { dev_err(tsc->dev, "ADC calibration failed\n"); + return -EINVAL; + } /* TSC need the ADC work in hardware trigger */ adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG); adc_cfg |= ADC_HARDWARE_TRIGGER; writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG); + + return 0; } /* @@ -188,11 +194,17 @@ static void imx6ul_tsc_set(struct imx6ul_tsc *tsc) writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL); } -static void imx6ul_tsc_init(struct imx6ul_tsc *tsc) +static int imx6ul_tsc_init(struct imx6ul_tsc *tsc) { - imx6ul_adc_init(tsc); + int err; + + err = imx6ul_adc_init(tsc); + if (err) + return err; imx6ul_tsc_channel_config(tsc); imx6ul_tsc_set(tsc); + + return 0; } static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc) @@ -311,9 +323,7 @@ static int imx6ul_tsc_open(struct input_dev *input_dev) return err; } - imx6ul_tsc_init(tsc); - - return 0; + return imx6ul_tsc_init(tsc); } static void imx6ul_tsc_close(struct input_dev *input_dev) @@ -337,7 +347,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) int tsc_irq; int adc_irq; - tsc = devm_kzalloc(&pdev->dev, sizeof(struct imx6ul_tsc), GFP_KERNEL); + tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL); if (!tsc) return -ENOMEM; @@ -345,7 +355,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) if (!input_dev) return -ENOMEM; - input_dev->name = "iMX6UL TouchScreen Controller"; + input_dev->name = "iMX6UL Touchscreen Controller"; input_dev->id.bustype = BUS_HOST; input_dev->open = imx6ul_tsc_open; @@ -406,7 +416,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) } adc_irq = platform_get_irq(pdev, 1); - if (adc_irq <= 0) { + if (adc_irq < 0) { dev_err(&pdev->dev, "no adc irq resource?\n"); return adc_irq; } @@ -491,7 +501,7 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev) goto out; } - imx6ul_tsc_init(tsc); + retval = imx6ul_tsc_init(tsc); } out: diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c index 7cce87650fc8..1fafc9f57af6 100644 --- a/drivers/input/touchscreen/mms114.c +++ b/drivers/input/touchscreen/mms114.c @@ -394,12 +394,12 @@ static struct mms114_platform_data *mms114_parse_dt(struct device *dev) if (of_property_read_u32(np, "x-size", &pdata->x_size)) { dev_err(dev, "failed to get x-size property\n"); return NULL; - }; + } if (of_property_read_u32(np, "y-size", &pdata->y_size)) { dev_err(dev, "failed to get y-size property\n"); return NULL; - }; + } of_property_read_u32(np, "contact-threshold", &pdata->contact_threshold); diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 4664c2a96c67..cbe6a890a93a 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -23,8 +23,7 @@ config IOMMU_IO_PGTABLE config IOMMU_IO_PGTABLE_LPAE bool "ARMv7/v8 Long Descriptor Format" select IOMMU_IO_PGTABLE - # SWIOTLB guarantees a dma_to_phys() implementation - depends on ARM || ARM64 || (COMPILE_TEST && SWIOTLB) + depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST) help Enable support for the ARM long descriptor pagetable format. 
This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page @@ -43,7 +42,7 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST endmenu config IOMMU_IOVA - bool + tristate config OF_IOMMU def_bool y diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index f82060e778a2..08d2775887f7 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2006,6 +2006,15 @@ static void do_detach(struct iommu_dev_data *dev_data) { struct amd_iommu *iommu; + /* + * First check if the device is still attached. It might already + * be detached from its domain because the generic + * iommu_detach_group code detached it and we try again here in + * our alias handling. + */ + if (!dev_data->domain) + return; + iommu = amd_iommu_rlookup_table[dev_data->devid]; /* decrease reference counters */ diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 5ef347a13cb5..1b066e7d144d 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -1256,6 +1256,9 @@ static int iommu_init_pci(struct amd_iommu *iommu) if (!iommu->dev) return -ENODEV; + /* Prevent binding other PCI device drivers to IOMMU devices */ + iommu->dev->match_driver = false; + pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, &iommu->cap); pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET, diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index dafaf59dc3b8..286e890e7d64 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -56,6 +56,7 @@ #define IDR0_TTF_SHIFT 2 #define IDR0_TTF_MASK 0x3 #define IDR0_TTF_AARCH64 (2 << IDR0_TTF_SHIFT) +#define IDR0_TTF_AARCH32_64 (3 << IDR0_TTF_SHIFT) #define IDR0_S1P (1 << 1) #define IDR0_S2P (1 << 0) @@ -342,7 +343,8 @@ #define CMDQ_TLBI_0_VMID_SHIFT 32 #define CMDQ_TLBI_0_ASID_SHIFT 48 #define CMDQ_TLBI_1_LEAF (1UL << 0) -#define CMDQ_TLBI_1_ADDR_MASK ~0xfffUL +#define CMDQ_TLBI_1_VA_MASK ~0xfffUL +#define CMDQ_TLBI_1_IPA_MASK 0xfffffffff000UL #define CMDQ_PRI_0_SSID_SHIFT 12 #define CMDQ_PRI_0_SSID_MASK 0xfffffUL @@ -770,11 +772,13 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) break; case CMDQ_OP_TLBI_NH_VA: cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT; - /* Fallthrough */ + cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0; + cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK; + break; case CMDQ_OP_TLBI_S2_IPA: cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT; cmd[1] |= ent->tlbi.leaf ? 
CMDQ_TLBI_1_LEAF : 0; - cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_ADDR_MASK; + cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK; break; case CMDQ_OP_TLBI_NH_ASID: cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT; @@ -2460,7 +2464,13 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu) } /* We only support the AArch64 table format at present */ - if ((reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) < IDR0_TTF_AARCH64) { + switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) { + case IDR0_TTF_AARCH32_64: + smmu->ias = 40; + /* Fallthrough */ + case IDR0_TTF_AARCH64: + break; + default: dev_err(smmu->dev, "AArch64 table format not supported!\n"); return -ENXIO; } @@ -2541,8 +2551,7 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu) dev_warn(smmu->dev, "failed to set DMA mask for table walker\n"); - if (!smmu->ias) - smmu->ias = smmu->oas; + smmu->ias = max(smmu->ias, smmu->oas); dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n", smmu->ias, smmu->oas, smmu->features); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 2d7349a3ee14..d65cf42399e8 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -2115,15 +2115,19 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, return -ENOMEM; /* It is large page*/ if (largepage_lvl > 1) { + unsigned long nr_superpages, end_pfn; + pteval |= DMA_PTE_LARGE_PAGE; lvl_pages = lvl_to_nr_pages(largepage_lvl); + + nr_superpages = sg_res / lvl_pages; + end_pfn = iov_pfn + nr_superpages * lvl_pages - 1; + /* * Ensure that old small page tables are - * removed to make room for superpage, - * if they exist. + * removed to make room for superpage(s). */ - dma_pte_free_pagetable(domain, iov_pfn, - iov_pfn + lvl_pages - 1); + dma_pte_free_pagetable(domain, iov_pfn, end_pfn); } else { pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE; } @@ -2301,6 +2305,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, if (ret) { spin_unlock_irqrestore(&device_domain_lock, flags); + free_devinfo_mem(info); return NULL; } @@ -3215,6 +3220,8 @@ static struct iova *intel_alloc_iova(struct device *dev, /* Restrict dma_mask to the width that the iommu can handle */ dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); + /* Ensure we reserve the whole size-aligned region */ + nrpages = __roundup_pow_of_two(nrpages); if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) { /* @@ -3711,7 +3718,7 @@ static inline int iommu_devinfo_cache_init(void) static int __init iommu_init_mempool(void) { int ret; - ret = iommu_iova_cache_init(); + ret = iova_cache_get(); if (ret) return ret; @@ -3725,7 +3732,7 @@ static int __init iommu_init_mempool(void) kmem_cache_destroy(iommu_domain_cache); domain_error: - iommu_iova_cache_destroy(); + iova_cache_put(); return -ENOMEM; } @@ -3734,7 +3741,7 @@ static void __init iommu_exit_mempool(void) { kmem_cache_destroy(iommu_devinfo_cache); kmem_cache_destroy(iommu_domain_cache); - iommu_iova_cache_destroy(); + iova_cache_put(); } static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 73c07482f487..7df97777662d 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -202,9 +202,9 @@ typedef u64 arm_lpae_iopte; static bool selftest_running = false; -static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages) +static dma_addr_t __arm_lpae_dma_addr(void *pages) { - return phys_to_dma(dev, 
virt_to_phys(pages)); + return (dma_addr_t)virt_to_phys(pages); } static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, @@ -223,10 +223,10 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, goto out_free; /* * We depend on the IOMMU being able to work with any physical - * address directly, so if the DMA layer suggests it can't by - * giving us back some translation, that bodes very badly... + * address directly, so if the DMA layer suggests otherwise by + * translating or truncating them, that bodes very badly... */ - if (dma != __arm_lpae_dma_addr(dev, pages)) + if (dma != virt_to_phys(pages)) goto out_unmap; } @@ -243,10 +243,8 @@ out_free: static void __arm_lpae_free_pages(void *pages, size_t size, struct io_pgtable_cfg *cfg) { - struct device *dev = cfg->iommu_dev; - if (!selftest_running) - dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages), + dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages), size, DMA_TO_DEVICE); free_pages_exact(pages, size); } @@ -254,12 +252,11 @@ static void __arm_lpae_free_pages(void *pages, size_t size, static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, struct io_pgtable_cfg *cfg) { - struct device *dev = cfg->iommu_dev; - *ptep = pte; if (!selftest_running) - dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep), + dma_sync_single_for_device(cfg->iommu_dev, + __arm_lpae_dma_addr(ptep), sizeof(pte), DMA_TO_DEVICE); } @@ -629,6 +626,11 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS) return NULL; + if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) { + dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n"); + return NULL; + } + data = kmalloc(sizeof(*data), GFP_KERNEL); if (!data) return NULL; diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index b7c3d923f3e1..fa0adef32bd6 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -18,42 +18,9 @@ */ #include <linux/iova.h> +#include <linux/module.h> #include <linux/slab.h> -static struct kmem_cache *iommu_iova_cache; - -int iommu_iova_cache_init(void) -{ - int ret = 0; - - iommu_iova_cache = kmem_cache_create("iommu_iova", - sizeof(struct iova), - 0, - SLAB_HWCACHE_ALIGN, - NULL); - if (!iommu_iova_cache) { - pr_err("Couldn't create iova cache\n"); - ret = -ENOMEM; - } - - return ret; -} - -void iommu_iova_cache_destroy(void) -{ - kmem_cache_destroy(iommu_iova_cache); -} - -struct iova *alloc_iova_mem(void) -{ - return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC); -} - -void free_iova_mem(struct iova *iova) -{ - kmem_cache_free(iommu_iova_cache, iova); -} - void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn, unsigned long pfn_32bit) @@ -72,6 +39,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule, iovad->start_pfn = start_pfn; iovad->dma_32bit_pfn = pfn_32bit; } +EXPORT_SYMBOL_GPL(init_iova_domain); static struct rb_node * __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) @@ -120,19 +88,14 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) } } -/* Computes the padding size required, to make the - * the start address naturally aligned on its size +/* + * Computes the padding size required, to make the start address + * naturally aligned on the power-of-two order of its size */ -static int -iova_get_pad_size(int size, unsigned int limit_pfn) +static unsigned int +iova_get_pad_size(unsigned int size, unsigned int limit_pfn) { - unsigned int pad_size 
= 0; - unsigned int order = ilog2(size); - - if (order) - pad_size = (limit_pfn + 1) % (1 << order); - - return pad_size; + return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1); } static int __alloc_and_insert_iova_range(struct iova_domain *iovad, @@ -242,6 +205,57 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova) rb_insert_color(&iova->node, root); } +static struct kmem_cache *iova_cache; +static unsigned int iova_cache_users; +static DEFINE_MUTEX(iova_cache_mutex); + +struct iova *alloc_iova_mem(void) +{ + return kmem_cache_alloc(iova_cache, GFP_ATOMIC); +} +EXPORT_SYMBOL(alloc_iova_mem); + +void free_iova_mem(struct iova *iova) +{ + kmem_cache_free(iova_cache, iova); +} +EXPORT_SYMBOL(free_iova_mem); + +int iova_cache_get(void) +{ + mutex_lock(&iova_cache_mutex); + if (!iova_cache_users) { + iova_cache = kmem_cache_create( + "iommu_iova", sizeof(struct iova), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!iova_cache) { + mutex_unlock(&iova_cache_mutex); + printk(KERN_ERR "Couldn't create iova cache\n"); + return -ENOMEM; + } + } + + iova_cache_users++; + mutex_unlock(&iova_cache_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(iova_cache_get); + +void iova_cache_put(void) +{ + mutex_lock(&iova_cache_mutex); + if (WARN_ON(!iova_cache_users)) { + mutex_unlock(&iova_cache_mutex); + return; + } + iova_cache_users--; + if (!iova_cache_users) + kmem_cache_destroy(iova_cache); + mutex_unlock(&iova_cache_mutex); +} +EXPORT_SYMBOL_GPL(iova_cache_put); + /** * alloc_iova - allocates an iova * @iovad: - iova domain in question @@ -265,12 +279,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, if (!new_iova) return NULL; - /* If size aligned is set then round the size to - * to next power of two. - */ - if (size_aligned) - size = __roundup_pow_of_two(size); - ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, new_iova, size_aligned); @@ -281,6 +289,7 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, return new_iova; } +EXPORT_SYMBOL_GPL(alloc_iova); /** * find_iova - find's an iova for a given pfn @@ -321,6 +330,7 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); return NULL; } +EXPORT_SYMBOL_GPL(find_iova); /** * __free_iova - frees the given iova @@ -339,6 +349,7 @@ __free_iova(struct iova_domain *iovad, struct iova *iova) spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); free_iova_mem(iova); } +EXPORT_SYMBOL_GPL(__free_iova); /** * free_iova - finds and frees the iova for a given pfn @@ -356,6 +367,7 @@ free_iova(struct iova_domain *iovad, unsigned long pfn) __free_iova(iovad, iova); } +EXPORT_SYMBOL_GPL(free_iova); /** * put_iova_domain - destroys the iova doamin @@ -378,6 +390,7 @@ void put_iova_domain(struct iova_domain *iovad) } spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); } +EXPORT_SYMBOL_GPL(put_iova_domain); static int __is_range_overlap(struct rb_node *node, @@ -467,6 +480,7 @@ finish: spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); return iova; } +EXPORT_SYMBOL_GPL(reserve_iova); /** * copy_reserved_iova - copies the reserved between domains @@ -493,6 +507,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) } spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); } +EXPORT_SYMBOL_GPL(copy_reserved_iova); struct iova * split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, @@ -534,3 +549,6 @@ error: free_iova_mem(prev); return NULL; } + +MODULE_AUTHOR("Anil S Keshavamurthy 
<anil.s.keshavamurthy@intel.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c index e9c6f2a5b52d..cd7d3bc78e34 100644 --- a/drivers/irqchip/exynos-combiner.c +++ b/drivers/irqchip/exynos-combiner.c @@ -65,12 +65,10 @@ static void combiner_unmask_irq(struct irq_data *data) __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET); } -static void combiner_handle_cascade_irq(unsigned int __irq, - struct irq_desc *desc) +static void combiner_handle_cascade_irq(struct irq_desc *desc) { struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); - unsigned int irq = irq_desc_get_irq(desc); unsigned int cascade_irq, combiner_irq; unsigned long status; @@ -88,7 +86,7 @@ static void combiner_handle_cascade_irq(unsigned int __irq, cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq); if (unlikely(!cascade_irq)) - handle_bad_irq(irq, desc); + handle_bad_irq(desc); else generic_handle_irq(cascade_irq); @@ -165,7 +163,7 @@ static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq); irq_set_chip_data(irq, &combiner_data[hw >> 3]); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_set_probe(irq); return 0; } diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 39b72da0c143..655cb967a1f2 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -200,7 +200,6 @@ static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq, { irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip, handle_simple_irq); - set_irq_flags(virq, IRQF_VALID); return 0; } @@ -317,7 +316,7 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h, irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, handle_level_irq); } - set_irq_flags(virq, IRQF_VALID | IRQF_PROBE); + irq_set_probe(virq); return 0; } @@ -447,8 +446,7 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained) static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {} #endif -static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq, - struct irq_desc *desc) +static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned long irqmap, irqn, irqsrc, cpuid; diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c index 9da9942ac83c..f6d680485bee 100644 --- a/drivers/irqchip/irq-atmel-aic5.c +++ b/drivers/irqchip/irq-atmel-aic5.c @@ -88,28 +88,36 @@ static void aic5_mask(struct irq_data *d) { struct irq_domain *domain = d->domain; struct irq_domain_chip_generic *dgc = domain->gc; - struct irq_chip_generic *gc = dgc->gc[0]; + struct irq_chip_generic *bgc = dgc->gc[0]; + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - /* Disable interrupt on AIC5 */ - irq_gc_lock(gc); + /* + * Disable interrupt on AIC5. We always take the lock of the + * first irq chip as all chips share the same registers. 
+ */ + irq_gc_lock(bgc); irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); irq_reg_writel(gc, 1, AT91_AIC5_IDCR); gc->mask_cache &= ~d->mask; - irq_gc_unlock(gc); + irq_gc_unlock(bgc); } static void aic5_unmask(struct irq_data *d) { struct irq_domain *domain = d->domain; struct irq_domain_chip_generic *dgc = domain->gc; - struct irq_chip_generic *gc = dgc->gc[0]; + struct irq_chip_generic *bgc = dgc->gc[0]; + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - /* Enable interrupt on AIC5 */ - irq_gc_lock(gc); + /* + * Enable interrupt on AIC5. We always take the lock of the + * first irq chip as all chips share the same registers. + */ + irq_gc_lock(bgc); irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); irq_reg_writel(gc, 1, AT91_AIC5_IECR); gc->mask_cache |= d->mask; - irq_gc_unlock(gc); + irq_gc_unlock(bgc); } static int aic5_retrigger(struct irq_data *d) diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c index ed4ca9deca70..bf9cc5f2e839 100644 --- a/drivers/irqchip/irq-bcm2835.c +++ b/drivers/irqchip/irq-bcm2835.c @@ -96,7 +96,7 @@ struct armctrl_ic { static struct armctrl_ic intc __read_mostly; static void __exception_irq_entry bcm2835_handle_irq( struct pt_regs *regs); -static void bcm2836_chained_handle_irq(unsigned int irq, struct irq_desc *desc); +static void bcm2836_chained_handle_irq(struct irq_desc *desc); static void armctrl_mask_irq(struct irq_data *d) { @@ -166,7 +166,7 @@ static int __init armctrl_of_init(struct device_node *node, BUG_ON(irq <= 0); irq_set_chip_and_handler(irq, &armctrl_chip, handle_level_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_set_probe(irq); } } @@ -245,7 +245,7 @@ static void __exception_irq_entry bcm2835_handle_irq( handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs); } -static void bcm2836_chained_handle_irq(unsigned int irq, struct irq_desc *desc) +static void bcm2836_chained_handle_irq(struct irq_desc *desc) { u32 hwirq; diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c index 409bdc6366c2..0fea985ef1dc 100644 --- a/drivers/irqchip/irq-bcm7038-l1.c +++ b/drivers/irqchip/irq-bcm7038-l1.c @@ -115,7 +115,7 @@ static inline void l1_writel(u32 val, void __iomem *reg) writel(val, reg); } -static void bcm7038_l1_irq_handle(unsigned int irq, struct irq_desc *desc) +static void bcm7038_l1_irq_handle(struct irq_desc *desc) { struct bcm7038_l1_chip *intc = irq_desc_get_handler_data(desc); struct bcm7038_l1_cpu *cpu; diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c index d3f976913a6f..61b18ab33ad9 100644 --- a/drivers/irqchip/irq-bcm7120-l2.c +++ b/drivers/irqchip/irq-bcm7120-l2.c @@ -56,7 +56,7 @@ struct bcm7120_l2_intc_data { const __be32 *map_mask_prop; }; -static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc) +static void bcm7120_l2_intc_irq_handle(struct irq_desc *desc) { struct bcm7120_l1_intc_data *data = irq_desc_get_handler_data(desc); struct bcm7120_l2_intc_data *b = data->b; diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index aedda06191eb..65cd341f331a 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c @@ -49,13 +49,12 @@ struct brcmstb_l2_intc_data { u32 saved_mask; /* for suspend/resume */ }; -static void brcmstb_l2_intc_irq_handle(unsigned int __irq, - struct irq_desc *desc) +static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc) { struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc); struct irq_chip_generic *gc = 
irq_get_domain_generic_chip(b->domain, 0); struct irq_chip *chip = irq_desc_get_chip(desc); - unsigned int irq = irq_desc_get_irq(desc); + unsigned int irq; u32 status; chained_irq_enter(chip, desc); @@ -65,7 +64,7 @@ static void brcmstb_l2_intc_irq_handle(unsigned int __irq, if (status == 0) { raw_spin_lock(&desc->lock); - handle_bad_irq(irq, desc); + handle_bad_irq(desc); raw_spin_unlock(&desc->lock); goto out; } diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c index 2dd929eed9e0..eb5eb0cd414d 100644 --- a/drivers/irqchip/irq-clps711x.c +++ b/drivers/irqchip/irq-clps711x.c @@ -132,14 +132,14 @@ static int __init clps711x_intc_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { irq_flow_handler_t handler = handle_level_irq; - unsigned int flags = IRQF_VALID | IRQF_PROBE; + unsigned int flags = 0; if (!clps711x_irqs[hw].flags) return 0; if (clps711x_irqs[hw].flags & CLPS711X_FLAG_FIQ) { handler = handle_bad_irq; - flags |= IRQF_NOAUTOEN; + flags |= IRQ_NOAUTOEN; } else if (clps711x_irqs[hw].eoi) { handler = handle_fasteoi_irq; } @@ -149,7 +149,7 @@ static int __init clps711x_intc_irq_map(struct irq_domain *h, unsigned int virq, writel_relaxed(0, clps711x_intc->base + clps711x_irqs[hw].eoi); irq_set_chip_and_handler(virq, &clps711x_intc_chip, handler); - set_irq_flags(virq, flags); + irq_modify_status(virq, IRQ_NOPROBE, flags); return 0; } diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c index efd95d9955e7..052f266364c0 100644 --- a/drivers/irqchip/irq-dw-apb-ictl.c +++ b/drivers/irqchip/irq-dw-apb-ictl.c @@ -26,7 +26,7 @@ #define APB_INT_FINALSTATUS_H 0x34 #define APB_INT_BASE_OFFSET 0x04 -static void dw_apb_ictl_handler(unsigned int irq, struct irq_desc *desc) +static void dw_apb_ictl_handler(struct irq_desc *desc) { struct irq_domain *d = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index db04fc1f56b2..12985daa66ab 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c @@ -95,8 +95,8 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) struct v2m_data *v2m = irq_data_get_irq_chip_data(data); phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS; - msg->address_hi = (u32) (addr >> 32); - msg->address_lo = (u32) (addr); + msg->address_hi = upper_32_bits(addr); + msg->address_lo = lower_32_bits(addr); msg->data = data->hwirq; } diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c index cf351c637464..a7c8c9ffbafd 100644 --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c @@ -62,7 +62,7 @@ static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) dev_alias->dev_id = alias; if (pdev != dev_alias->pdev) - dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); + dev_alias->count += its_pci_msi_vec_count(pdev); return 0; } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 26b55c53755f..25ceae9f7348 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -719,6 +719,9 @@ static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids) out: spin_unlock(&lpi_lock); + if (!bitmap) + *base = *nr_ids = 0; + return bitmap; } @@ -898,8 +901,10 @@ retry_baser: * non-cacheable as well. 
*/ shr = tmp & GITS_BASER_SHAREABILITY_MASK; - if (!shr) + if (!shr) { cache = GITS_BASER_nC; + __flush_dcache_area(base, alloc_size); + } goto retry_baser; } @@ -1140,6 +1145,8 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, return NULL; } + __flush_dcache_area(itt, sz); + dev->its = its; dev->itt = itt; dev->nr_ites = nr_ites; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 7deed6ef54c2..36ecfc870e5a 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -70,11 +70,6 @@ static inline int gic_irq_in_rdist(struct irq_data *d) return gic_irq(d) < 32; } -static inline bool forwarded_irq(struct irq_data *d) -{ - return d->handler_data != NULL; -} - static inline void __iomem *gic_dist_base(struct irq_data *d) { if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */ @@ -249,7 +244,7 @@ static void gic_eoimode1_mask_irq(struct irq_data *d) * disabled/masked will not get "stuck", because there is * noone to deactivate it (guest is being terminated). */ - if (forwarded_irq(d)) + if (irqd_is_forwarded_to_vcpu(d)) gic_poke_irq(d, GICD_ICACTIVER); } @@ -324,7 +319,7 @@ static void gic_eoimode1_eoi_irq(struct irq_data *d) * No need to deactivate an LPI, or an interrupt that * is is getting forwarded to a vcpu. */ - if (gic_irq(d) >= 8192 || forwarded_irq(d)) + if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d)) return; gic_write_dir(gic_irq(d)); } @@ -357,7 +352,10 @@ static int gic_set_type(struct irq_data *d, unsigned int type) static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) { - d->handler_data = vcpu; + if (vcpu) + irqd_set_forwarded_to_vcpu(d); + else + irqd_clr_forwarded_to_vcpu(d); return 0; } @@ -754,13 +752,13 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_set_percpu_devid(irq); irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_percpu_devid_irq, NULL, NULL); - set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); + irq_set_status_flags(irq, IRQ_NOAUTOEN); } /* SPIs */ if (hw >= 32 && hw < gic_data.irq_nr) { irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_fasteoi_irq, NULL, NULL); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_set_probe(irq); } /* LPIs */ if (hw >= 8192 && hw < GIC_ID_NR) { @@ -768,7 +766,6 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, return -EPERM; irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_fasteoi_irq, NULL, NULL); - set_irq_flags(irq, IRQF_VALID); } return 0; diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index e6b7ed537952..982c09c2d791 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -145,29 +145,10 @@ static inline bool cascading_gic_irq(struct irq_data *d) void *data = irq_data_get_irq_handler_data(d); /* - * If handler_data pointing to one of the secondary GICs, then - * this is a cascading interrupt, and it cannot possibly be - * forwarded. + * If handler_data is set, this is a cascading interrupt, and + * it cannot possibly be forwarded. 
*/ - if (data >= (void *)(gic_data + 1) && - data < (void *)(gic_data + MAX_GIC_NR)) - return true; - - return false; -} - -static inline bool forwarded_irq(struct irq_data *d) -{ - /* - * A forwarded interrupt: - * - is on the primary GIC - * - has its handler_data set to a value - * - that isn't a secondary GIC - */ - if (d->handler_data && !cascading_gic_irq(d)) - return true; - - return false; + return data != NULL; } /* @@ -201,7 +182,7 @@ static void gic_eoimode1_mask_irq(struct irq_data *d) * disabled/masked will not get "stuck", because there is * noone to deactivate it (guest is being terminated). */ - if (forwarded_irq(d)) + if (irqd_is_forwarded_to_vcpu(d)) gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR); } @@ -218,7 +199,7 @@ static void gic_eoi_irq(struct irq_data *d) static void gic_eoimode1_eoi_irq(struct irq_data *d) { /* Do not deactivate an IRQ forwarded to a vcpu. */ - if (forwarded_irq(d)) + if (irqd_is_forwarded_to_vcpu(d)) return; writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE); @@ -296,7 +277,10 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) if (cascading_gic_irq(d)) return -EINVAL; - d->handler_data = vcpu; + if (vcpu) + irqd_set_forwarded_to_vcpu(d); + else + irqd_clr_forwarded_to_vcpu(d); return 0; } @@ -357,7 +341,7 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) } while (1); } -static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) +static void gic_handle_cascade_irq(struct irq_desc *desc) { struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); @@ -376,7 +360,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) cascade_irq = irq_find_mapping(chip_data->domain, gic_irq); if (unlikely(gic_irq < 32 || gic_irq > 1020)) - handle_bad_irq(cascade_irq, desc); + handle_bad_irq(desc); else generic_handle_irq(cascade_irq); @@ -906,11 +890,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_set_percpu_devid(irq); irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_percpu_devid_irq, NULL, NULL); - set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); + irq_set_status_flags(irq, IRQ_NOAUTOEN); } else { irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_fasteoi_irq, NULL, NULL); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_set_probe(irq); } return 0; } @@ -1119,12 +1103,49 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, #ifdef CONFIG_OF static int gic_cnt __initdata; +static bool gic_check_eoimode(struct device_node *node, void __iomem **base) +{ + struct resource cpuif_res; + + of_address_to_resource(node, 1, &cpuif_res); + + if (!is_hyp_mode_available()) + return false; + if (resource_size(&cpuif_res) < SZ_8K) + return false; + if (resource_size(&cpuif_res) == SZ_128K) { + u32 val_low, val_high; + + /* + * Verify that we have the first 4kB of a GIC400 + * aliased over the first 64kB by checking the + * GICC_IIDR register on both ends. + */ + val_low = readl_relaxed(*base + GIC_CPU_IDENT); + val_high = readl_relaxed(*base + GIC_CPU_IDENT + 0xf000); + if ((val_low & 0xffff0fff) != 0x0202043B || + val_low != val_high) + return false; + + /* + * Move the base up by 60kB, so that we have a 8kB + * contiguous region, which allows us to use GICC_DIR + * at its normal offset. Please pass me that bucket. 
+ */ + *base += 0xf000; + cpuif_res.start += 0xf000; + pr_warn("GIC: Adjusting CPU interface base to %pa", + &cpuif_res.start); + } + + return true; +} + static int __init gic_of_init(struct device_node *node, struct device_node *parent) { void __iomem *cpu_base; void __iomem *dist_base; - struct resource cpu_res; u32 percpu_offset; int irq; @@ -1137,14 +1158,11 @@ gic_of_init(struct device_node *node, struct device_node *parent) cpu_base = of_iomap(node, 1); WARN(!cpu_base, "unable to map gic cpu registers\n"); - of_address_to_resource(node, 1, &cpu_res); - /* * Disable split EOI/Deactivate if either HYP is not available * or the CPU interface is too small. */ - if (gic_cnt == 0 && (!is_hyp_mode_available() || - resource_size(&cpu_res) < SZ_8K)) + if (gic_cnt == 0 && !gic_check_eoimode(node, &cpu_base)) static_key_slow_dec(&supports_deactivate); if (of_property_read_u32(node, "cpu-offset", &percpu_offset)) diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c index a0128c7c98dd..8f3ca8f3a62b 100644 --- a/drivers/irqchip/irq-hip04.c +++ b/drivers/irqchip/irq-hip04.c @@ -307,11 +307,11 @@ static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_set_percpu_devid(irq); irq_set_chip_and_handler(irq, &hip04_irq_chip, handle_percpu_devid_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); + irq_set_status_flags(irq, IRQ_NOAUTOEN); } else { irq_set_chip_and_handler(irq, &hip04_irq_chip, handle_fasteoi_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_set_probe(irq); } irq_set_chip_data(irq, d->host_data); return 0; diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c index 4836102ba312..e484fd255321 100644 --- a/drivers/irqchip/irq-i8259.c +++ b/drivers/irqchip/irq-i8259.c @@ -352,7 +352,7 @@ void __init init_i8259_irqs(void) __init_i8259_irqs(NULL); } -static void i8259_irq_dispatch(unsigned int __irq, struct irq_desc *desc) +static void i8259_irq_dispatch(struct irq_desc *desc) { struct irq_domain *domain = irq_desc_get_handler_data(desc); int hwirq = i8259_irq(); diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c index 841604b81004..c02d29c9dc05 100644 --- a/drivers/irqchip/irq-imgpdc.c +++ b/drivers/irqchip/irq-imgpdc.c @@ -218,7 +218,7 @@ static int pdc_irq_set_wake(struct irq_data *data, unsigned int on) return 0; } -static void pdc_intc_perip_isr(unsigned int __irq, struct irq_desc *desc) +static void pdc_intc_perip_isr(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct pdc_intc_priv *priv; @@ -240,7 +240,7 @@ found: generic_handle_irq(irq_no); } -static void pdc_intc_syswake_isr(unsigned int irq, struct irq_desc *desc) +static void pdc_intc_syswake_isr(struct irq_desc *desc) { struct pdc_intc_priv *priv; unsigned int syswake, irq_no; diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c index c1517267b5db..deb89d63a728 100644 --- a/drivers/irqchip/irq-keystone.c +++ b/drivers/irqchip/irq-keystone.c @@ -83,7 +83,7 @@ static void keystone_irq_ack(struct irq_data *d) /* nothing to do here */ } -static void keystone_irq_handler(unsigned __irq, struct irq_desc *desc) +static void keystone_irq_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc); @@ -127,7 +127,7 @@ static int keystone_irq_map(struct irq_domain *h, unsigned int virq, irq_set_chip_data(virq, kirq); irq_set_chip_and_handler(virq, &kirq->chip, handle_level_irq); - set_irq_flags(virq, IRQF_VALID | IRQF_PROBE); 
+ irq_set_probe(virq); return 0; } diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c index 5f4c52928d16..8c38b3d92e1c 100644 --- a/drivers/irqchip/irq-metag-ext.c +++ b/drivers/irqchip/irq-metag-ext.c @@ -446,7 +446,7 @@ static int meta_intc_irq_set_type(struct irq_data *data, unsigned int flow_type) * Whilst using TR2 to detect external interrupts is a software convention it is * (hopefully) unlikely to change. */ -static void meta_intc_irq_demux(unsigned int irq, struct irq_desc *desc) +static void meta_intc_irq_demux(struct irq_desc *desc) { struct meta_intc_priv *priv = &meta_intc_priv; irq_hw_number_t hw; diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c index 3d23ce3edb5c..a5f053bd2f44 100644 --- a/drivers/irqchip/irq-metag.c +++ b/drivers/irqchip/irq-metag.c @@ -220,7 +220,7 @@ static int metag_internal_irq_set_affinity(struct irq_data *data, * occurred. It is this function's job to demux this irq and * figure out exactly which trigger needs servicing. */ -static void metag_internal_irq_demux(unsigned int irq, struct irq_desc *desc) +static void metag_internal_irq_demux(struct irq_desc *desc) { struct metag_internal_irq_priv *priv = irq_desc_get_handler_data(desc); irq_hw_number_t hw; diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 1764bcf8ee6b..aeaa061f0dbf 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -320,6 +320,14 @@ static void gic_handle_shared_int(bool chained) intrmask[i] = gic_read(intrmask_reg); pending_reg += gic_reg_step; intrmask_reg += gic_reg_step; + + if (!config_enabled(CONFIG_64BIT) || mips_cm_is64) + continue; + + pending[i] |= (u64)gic_read(pending_reg) << 32; + intrmask[i] |= (u64)gic_read(intrmask_reg) << 32; + pending_reg += gic_reg_step; + intrmask_reg += gic_reg_step; } bitmap_and(pending, pending, intrmask, gic_shared_intrs); @@ -426,7 +434,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, spin_lock_irqsave(&gic_lock, flags); /* Re-route this IRQ */ - gic_map_to_vpe(irq, cpumask_first(&tmp)); + gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp))); /* Update the pcpu_masks */ for (i = 0; i < NR_CPUS; i++) @@ -546,7 +554,7 @@ static void __gic_irq_dispatch(void) gic_handle_shared_int(false); } -static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc) +static void gic_irq_dispatch(struct irq_desc *desc) { gic_handle_local_int(true); gic_handle_shared_int(true); @@ -599,7 +607,7 @@ static __init void gic_ipi_init_one(unsigned int intr, int cpu, GIC_SHARED_TO_HWIRQ(intr)); int i; - gic_map_to_vpe(intr, cpu); + gic_map_to_vpe(intr, mips_cm_vp_id(cpu)); for (i = 0; i < NR_CPUS; i++) clear_bit(intr, pcpu_masks[i].pcpu_mask); set_bit(intr, pcpu_masks[cpu].pcpu_mask); diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c index 781ed6e71dbb..013fc9659a84 100644 --- a/drivers/irqchip/irq-mmp.c +++ b/drivers/irqchip/irq-mmp.c @@ -129,7 +129,7 @@ struct irq_chip icu_irq_chip = { .irq_unmask = icu_unmask_irq, }; -static void icu_mux_irq_demux(unsigned int __irq, struct irq_desc *desc) +static void icu_mux_irq_demux(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct irq_domain *domain; @@ -164,7 +164,6 @@ static int mmp_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq); - set_irq_flags(irq, IRQF_VALID); return 0; } @@ -234,7 +233,6 @@ void __init icu_init_irq(void) for (irq = 0; 
irq < 64; irq++) { icu_mask_irq(irq_get_irq_data(irq)); irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq); - set_irq_flags(irq, IRQF_VALID); } irq_set_default_host(icu_data[0].domain); set_handle_irq(mmp_handle_irq); @@ -337,7 +335,6 @@ void __init mmp2_init_icu(void) irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq); } - set_irq_flags(irq, IRQF_VALID); } irq_set_default_host(icu_data[0].domain); set_handle_irq(mmp2_handle_irq); diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c index 1faf812f3dc8..604df63e2edf 100644 --- a/drivers/irqchip/irq-mxs.c +++ b/drivers/irqchip/irq-mxs.c @@ -84,7 +84,6 @@ static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) { irq_set_chip_and_handler(virq, &mxs_icoll_chip, handle_level_irq); - set_irq_flags(virq, IRQF_VALID); return 0; } diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c index 5ea999a724b5..be4c5a8c9659 100644 --- a/drivers/irqchip/irq-orion.c +++ b/drivers/irqchip/irq-orion.c @@ -106,7 +106,7 @@ IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init); #define ORION_BRIDGE_IRQ_CAUSE 0x00 #define ORION_BRIDGE_IRQ_MASK 0x04 -static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc) +static void orion_bridge_irq_handler(struct irq_desc *desc) { struct irq_domain *d = irq_desc_get_handler_data(desc); diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index 0670ab4e3897..9525335723f6 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c @@ -283,6 +283,9 @@ static int intc_irqpin_irq_set_type(struct irq_data *d, unsigned int type) static int intc_irqpin_irq_set_wake(struct irq_data *d, unsigned int on) { struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d); + int hw_irq = irqd_to_hwirq(d); + + irq_set_irq_wake(p->irq[hw_irq].requested_irq, on); if (!p->clk) return 0; @@ -332,6 +335,12 @@ static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id) return status; } +/* + * This lock class tells lockdep that INTC External IRQ Pin irqs are in a + * different category than their parents, so it won't report false recursion. + */ +static struct lock_class_key intc_irqpin_irq_lock_class; + static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { @@ -342,8 +351,8 @@ static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq, intc_irqpin_dbg(&p->irq[hw], "map"); irq_set_chip_data(virq, h->host_data); + irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class); irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq); - set_irq_flags(virq, IRQF_VALID); /* kill me now */ return 0; } diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c index 2aa3add711a6..35bf97ba4a3d 100644 --- a/drivers/irqchip/irq-renesas-irqc.c +++ b/drivers/irqchip/irq-renesas-irqc.c @@ -121,6 +121,9 @@ static int irqc_irq_set_type(struct irq_data *d, unsigned int type) static int irqc_irq_set_wake(struct irq_data *d, unsigned int on) { struct irqc_priv *p = irq_data_get_irq_chip_data(d); + int hw_irq = irqd_to_hwirq(d); + + irq_set_irq_wake(p->irq[hw_irq].requested_irq, on); if (!p->clk) return 0; @@ -150,6 +153,12 @@ static irqreturn_t irqc_irq_handler(int irq, void *dev_id) return IRQ_NONE; } +/* + * This lock class tells lockdep that IRQC irqs are in a different + * category than their parents, so it won't report false recursion. 
+ */ +static struct lock_class_key irqc_irq_lock_class; + static int irqc_irq_domain_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { @@ -157,6 +166,7 @@ static int irqc_irq_domain_map(struct irq_domain *h, unsigned int virq, irqc_dbg(&p->irq[hw], "map"); irq_set_chip_data(virq, h->host_data); + irq_set_lockdep_class(virq, &irqc_irq_lock_class); irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq); return 0; } diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c index 506d9f20ca51..7154b011ddd2 100644 --- a/drivers/irqchip/irq-s3c24xx.c +++ b/drivers/irqchip/irq-s3c24xx.c @@ -298,7 +298,7 @@ static struct irq_chip s3c_irq_eint0t4 = { .irq_set_type = s3c_irqext0_type, }; -static void s3c_irq_demux(unsigned int __irq, struct irq_desc *desc) +static void s3c_irq_demux(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc); @@ -466,13 +466,11 @@ static int s3c24xx_irq_map(struct irq_domain *h, unsigned int virq, irq_set_chip_data(virq, irq_data); - set_irq_flags(virq, IRQF_VALID); - if (parent_intc && irq_data->type != S3C_IRQTYPE_NONE) { if (irq_data->parent_irq > 31) { pr_err("irq-s3c24xx: parent irq %lu is out of range\n", irq_data->parent_irq); - goto err; + return -EINVAL; } parent_irq_data = &parent_intc->irqs[irq_data->parent_irq]; @@ -485,18 +483,12 @@ static int s3c24xx_irq_map(struct irq_domain *h, unsigned int virq, if (!irqno) { pr_err("irq-s3c24xx: could not find mapping for parent irq %lu\n", irq_data->parent_irq); - goto err; + return -EINVAL; } irq_set_chained_handler(irqno, s3c_irq_demux); } return 0; - -err: - set_irq_flags(virq, 0); - - /* the only error can result from bad mapping data*/ - return -EINVAL; } static const struct irq_domain_ops s3c24xx_irq_ops = { @@ -1174,8 +1166,6 @@ static int s3c24xx_irq_map_of(struct irq_domain *h, unsigned int virq, irq_set_chip_data(virq, irq_data); - set_irq_flags(virq, IRQF_VALID); - return 0; } diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c index 4ad3e7c69aa7..0704362f4c82 100644 --- a/drivers/irqchip/irq-sun4i.c +++ b/drivers/irqchip/irq-sun4i.c @@ -83,7 +83,7 @@ static int sun4i_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) { irq_set_chip_and_handler(virq, &sun4i_irq_chip, handle_fasteoi_irq); - set_irq_flags(virq, IRQF_VALID | IRQF_PROBE); + irq_set_probe(virq); return 0; } diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c index 772a82cacbf7..c143dd58410c 100644 --- a/drivers/irqchip/irq-sunxi-nmi.c +++ b/drivers/irqchip/irq-sunxi-nmi.c @@ -58,7 +58,7 @@ static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off) return irq_reg_readl(gc, off); } -static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc) +static void sunxi_sc_nmi_handle_irq(struct irq_desc *desc) { struct irq_domain *domain = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c index 331829661366..848d782a2a3b 100644 --- a/drivers/irqchip/irq-tb10x.c +++ b/drivers/irqchip/irq-tb10x.c @@ -97,7 +97,7 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type) return IRQ_SET_MASK_OK; } -static void tb10x_irq_cascade(unsigned int __irq, struct irq_desc *desc) +static void tb10x_irq_cascade(struct irq_desc *desc) { struct irq_domain *domain = irq_desc_get_handler_data(desc); unsigned int irq = irq_desc_get_irq(desc); 
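Both Renesas hunks pair the new `irq_set_irq_wake()` forwarding with a per-driver `struct lock_class_key`, assigned in the domain's `.map` op so lockdep puts the muxed interrupts in their own class instead of flagging false recursion against the parent. A minimal sketch of that idiom (the `foo_` names are hypothetical):

```c
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/lockdep.h>

static struct irq_chip foo_irq_chip = {
	.name = "foo",
};

/* One key per driver is enough: lockdep distinguishes classes by the
 * address of the key object, not by its contents. */
static struct lock_class_key foo_irq_lock_class;

static int foo_irq_domain_map(struct irq_domain *h, unsigned int virq,
			      irq_hw_number_t hw)
{
	irq_set_chip_data(virq, h->host_data);
	irq_set_lockdep_class(virq, &foo_irq_lock_class);
	irq_set_chip_and_handler(virq, &foo_irq_chip, handle_level_irq);
	return 0;
}
```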
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c index 16123f688768..598ab3f0e0ac 100644 --- a/drivers/irqchip/irq-versatile-fpga.c +++ b/drivers/irqchip/irq-versatile-fpga.c @@ -65,19 +65,19 @@ static void fpga_irq_unmask(struct irq_data *d) writel(mask, f->base + IRQ_ENABLE_SET); } -static void fpga_irq_handle(unsigned int __irq, struct irq_desc *desc) +static void fpga_irq_handle(struct irq_desc *desc) { struct fpga_irq_data *f = irq_desc_get_handler_data(desc); - unsigned int irq = irq_desc_get_irq(desc); u32 status = readl(f->base + IRQ_STATUS); if (status == 0) { - do_bad_IRQ(irq, desc); + do_bad_IRQ(desc); return; } do { - irq = ffs(status) - 1; + unsigned int irq = ffs(status) - 1; + status &= ~(1 << irq); generic_handle_irq(irq_find_mapping(f->domain, irq)); } while (status); @@ -128,7 +128,7 @@ static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq, irq_set_chip_data(irq, f); irq_set_chip_and_handler(irq, &f->chip, handle_level_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_set_probe(irq); return 0; } diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c index 03846dff4212..b956dfffe78c 100644 --- a/drivers/irqchip/irq-vic.c +++ b/drivers/irqchip/irq-vic.c @@ -201,7 +201,7 @@ static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq, return -EPERM; irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq); irq_set_chip_data(irq, v->base); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_set_probe(irq); return 0; } @@ -225,7 +225,7 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs) return handled; } -static void vic_handle_irq_cascaded(unsigned int irq, struct irq_desc *desc) +static void vic_handle_irq_cascaded(struct irq_desc *desc) { u32 stat, hwirq; struct irq_chip *host_chip = irq_desc_get_chip(desc); diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c index 8371d9978d31..f9af0af21751 100644 --- a/drivers/irqchip/irq-vt8500.c +++ b/drivers/irqchip/irq-vt8500.c @@ -167,7 +167,6 @@ static int vt8500_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { irq_set_chip_and_handler(virq, &vt8500_irq_chip, handle_level_irq); - set_irq_flags(virq, IRQF_VALID); return 0; } diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c index 4cbd9c5dc1e6..1ccd2abed65f 100644 --- a/drivers/irqchip/spear-shirq.c +++ b/drivers/irqchip/spear-shirq.c @@ -182,7 +182,7 @@ static struct spear_shirq *spear320_shirq_blocks[] = { &spear320_shirq_intrcomm_ras, }; -static void shirq_handler(unsigned __irq, struct irq_desc *desc) +static void shirq_handler(struct irq_desc *desc) { struct spear_shirq *shirq = irq_desc_get_handler_data(desc); u32 pend; @@ -211,7 +211,6 @@ static void __init spear_shirq_register(struct spear_shirq *shirq, for (i = 0; i < shirq->nr_irqs; i++) { irq_set_chip_and_handler(shirq->virq_base + i, shirq->irq_chip, handle_simple_irq); - set_irq_flags(shirq->virq_base + i, IRQF_VALID); irq_set_chip_data(shirq->virq_base + i, shirq); } } diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 70f4255ff291..42990f2d0317 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -170,6 +170,7 @@ config LEDS_SUNFIRE config LEDS_IPAQ_MICRO tristate "LED Support for the Compaq iPAQ h3xxx" + depends on LEDS_CLASS depends on MFD_IPAQ_MICRO help Choose this option if you want to use the notification LED on @@ -229,7 +230,7 @@ config LEDS_LP55XX_COMMON tristate "Common Driver for TI/National 
LP5521/5523/55231/5562/8501" depends on LEDS_LP5521 || LEDS_LP5523 || LEDS_LP5562 || LEDS_LP8501 select FW_LOADER - select FW_LOADER_USER_HELPER_FALLBACK + select FW_LOADER_USER_HELPER help This option supports common operations for LP5521/5523/55231/5562/8501 devices. diff --git a/drivers/leds/leds-aat1290.c b/drivers/leds/leds-aat1290.c index fd7c25fd29c1..ac77d36b630c 100644 --- a/drivers/leds/leds-aat1290.c +++ b/drivers/leds/leds-aat1290.c @@ -331,7 +331,7 @@ static void aat1290_led_validate_mm_current(struct aat1290_led *led, cfg->max_brightness = b + 1; } -int init_mm_current_scale(struct aat1290_led *led, +static int init_mm_current_scale(struct aat1290_led *led, struct aat1290_led_config_data *cfg) { int max_mm_current_percent[] = { 20, 22, 25, 28, 32, 36, 40, 45, 50, 56, @@ -559,6 +559,7 @@ static const struct of_device_id aat1290_led_dt_match[] = { { .compatible = "skyworks,aat1290" }, {}, }; +MODULE_DEVICE_TABLE(of, aat1290_led_dt_match); static struct platform_driver aat1290_led_driver = { .probe = aat1290_led_probe, diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c index 986fe1e28f84..1793727bc9ae 100644 --- a/drivers/leds/leds-bcm6328.c +++ b/drivers/leds/leds-bcm6328.c @@ -395,6 +395,7 @@ static const struct of_device_id bcm6328_leds_of_match[] = { { .compatible = "brcm,bcm6328-leds", }, { }, }; +MODULE_DEVICE_TABLE(of, bcm6328_leds_of_match); static struct platform_driver bcm6328_leds_driver = { .probe = bcm6328_leds_probe, diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c index 21f96930b3be..7ea3526702e0 100644 --- a/drivers/leds/leds-bcm6358.c +++ b/drivers/leds/leds-bcm6358.c @@ -226,6 +226,7 @@ static const struct of_device_id bcm6358_leds_of_match[] = { { .compatible = "brcm,bcm6358-leds", }, { }, }; +MODULE_DEVICE_TABLE(of, bcm6358_leds_of_match); static struct platform_driver bcm6358_leds_driver = { .probe = bcm6358_leds_probe, diff --git a/drivers/leds/leds-ktd2692.c b/drivers/leds/leds-ktd2692.c index 2ae8c4d17ff8..feca07be85f5 100644 --- a/drivers/leds/leds-ktd2692.c +++ b/drivers/leds/leds-ktd2692.c @@ -426,6 +426,7 @@ static const struct of_device_id ktd2692_match[] = { { .compatible = "kinetic,ktd2692", }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, ktd2692_match); static struct platform_driver ktd2692_driver = { .driver = { diff --git a/drivers/leds/leds-max77693.c b/drivers/leds/leds-max77693.c index df348a06d8c7..afbb1409b2e2 100644 --- a/drivers/leds/leds-max77693.c +++ b/drivers/leds/leds-max77693.c @@ -1080,6 +1080,7 @@ static const struct of_device_id max77693_led_dt_match[] = { { .compatible = "maxim,max77693-led" }, {}, }; +MODULE_DEVICE_TABLE(of, max77693_led_dt_match); static struct platform_driver max77693_led_driver = { .probe = max77693_led_probe, diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c index b33514d9f427..a95a61220169 100644 --- a/drivers/leds/leds-ns2.c +++ b/drivers/leds/leds-ns2.c @@ -337,6 +337,7 @@ static const struct of_device_id of_ns2_leds_match[] = { { .compatible = "lacie,ns2-leds", }, {}, }; +MODULE_DEVICE_TABLE(of, of_ns2_leds_match); #endif /* CONFIG_OF_GPIO */ struct ns2_led_priv { diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c index de36237d7c6b..051645498b53 100644 --- a/drivers/mcb/mcb-pci.c +++ b/drivers/mcb/mcb-pci.c @@ -74,7 +74,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) ret = -ENOTSUPP; dev_err(&pdev->dev, "IO mapped PCI devices are not supported\n"); - goto out_release; + goto out_iounmap; } 
pci_set_drvdata(pdev, priv); @@ -89,7 +89,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base); if (ret < 0) - goto out_iounmap; + goto out_mcb_bus; num_cells = ret; dev_dbg(&pdev->dev, "Found %d cells\n", num_cells); @@ -98,6 +98,8 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; +out_mcb_bus: + mcb_release_bus(priv->bus); out_iounmap: iounmap(priv->base); out_release: diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index e51de52eeb94..48b5890c28e3 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1997,7 +1997,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file) ret = bitmap_storage_alloc(&store, chunks, !bitmap->mddev->bitmap_info.external, - bitmap->cluster_slot); + mddev_is_clustered(bitmap->mddev) + ? bitmap->cluster_slot : 0); if (ret) goto err; diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 20cc36b01b77..0a17d1b91a81 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -634,10 +634,10 @@ static int __commit_transaction(struct dm_cache_metadata *cmd, disk_super = dm_block_data(sblock); + disk_super->flags = cpu_to_le32(cmd->flags); if (mutator) update_flags(disk_super, mutator); - disk_super->flags = cpu_to_le32(cmd->flags); disk_super->mapping_root = cpu_to_le64(cmd->root); disk_super->hint_root = cpu_to_le64(cmd->hint_root); disk_super->discard_root = cpu_to_le64(cmd->discard_root); diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c index 240c9f0e85e7..8a096456579b 100644 --- a/drivers/md/dm-cache-policy-cleaner.c +++ b/drivers/md/dm-cache-policy-cleaner.c @@ -436,7 +436,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size, static struct dm_cache_policy_type wb_policy_type = { .name = "cleaner", .version = {1, 0, 0}, - .hint_size = 0, + .hint_size = 4, .owner = THIS_MODULE, .create = wb_create }; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index d60c88df5234..4b3b6f8aff0c 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -968,7 +968,8 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone); /* * Generate a new unfragmented bio with the given size - * This should never violate the device limitations + * This should never violate the device limitations (but only because + * max_segment_size is being constrained to PAGE_SIZE). * * This function may be called concurrently. If we allocate from the mempool * concurrently, there is a possibility of deadlock. For example, if we have @@ -2045,9 +2046,20 @@ static int crypt_iterate_devices(struct dm_target *ti, return fn(ti, cc->dev, cc->start, ti->len, data); } +static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) +{ + /* + * Unfortunate constraint that is required to avoid the potential + * for exceeding underlying device's max_segments limits -- due to + * crypt_alloc_buffer() possibly allocating pages for the encryption + * bio that are not as physically contiguous as the original bio. 
+ */ + limits->max_segment_size = PAGE_SIZE; +} + static struct target_type crypt_target = { .name = "crypt", - .version = {1, 14, 0}, + .version = {1, 14, 1}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, @@ -2058,6 +2070,7 @@ static struct target_type crypt_target = { .resume = crypt_resume, .message = crypt_message, .iterate_devices = crypt_iterate_devices, + .io_hints = crypt_io_hints, }; static int __init dm_crypt_init(void) diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index ebaa4f803eec..192bb8beeb6b 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c @@ -203,7 +203,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, return -EINVAL; } - tmp_store = kmalloc(sizeof(*tmp_store), GFP_KERNEL); + tmp_store = kzalloc(sizeof(*tmp_store), GFP_KERNEL); if (!tmp_store) { ti->error = "Exception store allocation failed"; return -ENOMEM; @@ -215,7 +215,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, else if (persistent == 'N') type = get_type("N"); else { - ti->error = "Persistent flag is not P or N"; + ti->error = "Exception store type is not P or N"; r = -EINVAL; goto bad_type; } @@ -233,7 +233,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, if (r) goto bad; - r = type->ctr(tmp_store, 0, NULL); + r = type->ctr(tmp_store, (strlen(argv[0]) > 1 ? &argv[0][1] : NULL)); if (r) { ti->error = "Exception store type constructor failed"; goto bad; diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h index 0b2536247cf5..fae34e7a0b1e 100644 --- a/drivers/md/dm-exception-store.h +++ b/drivers/md/dm-exception-store.h @@ -42,8 +42,7 @@ struct dm_exception_store_type { const char *name; struct module *module; - int (*ctr) (struct dm_exception_store *store, - unsigned argc, char **argv); + int (*ctr) (struct dm_exception_store *store, char *options); /* * Destroys this object when you've finished with it. 
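The dm-crypt change above adds an `.io_hints` hook so the target can clamp `max_segment_size` to `PAGE_SIZE` and keep its re-allocated encryption bios within the underlying device's segment limits. Reduced to its skeleton, the hook plugs into `struct target_type` like this (a sketch with placeholder names, not the full dm-crypt target):

```c
#include <linux/device-mapper.h>
#include <linux/module.h>

/*
 * Clamp the segment size so pages allocated later for a clone bio can
 * never be less physically contiguous than the limits advertised up
 * the stack.
 */
static void foo_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	limits->max_segment_size = PAGE_SIZE;
}

static struct target_type foo_target = {
	.name     = "foo",
	.version  = {1, 0, 0},
	.module   = THIS_MODULE,
	.io_hints = foo_io_hints,
	/* .ctr, .dtr, .map, ... elided in this sketch */
};
```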
@@ -123,6 +122,8 @@ struct dm_exception_store { unsigned chunk_shift; void *context; + + bool userspace_supports_overflow; }; /* diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 97e165183e79..a0901214aef5 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -329,8 +329,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size) */ if (min_region_size > (1 << 13)) { /* If not a power of 2, make it the next power of 2 */ - if (min_region_size & (min_region_size - 1)) - region_size = 1 << fls(region_size); + region_size = roundup_pow_of_two(min_region_size); DMINFO("Choosing default region size of %lu sectors", region_size); } else { diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index bf71583296f7..117a05e40090 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -7,6 +7,7 @@ #include "dm-exception-store.h" +#include <linux/ctype.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/vmalloc.h> @@ -843,10 +844,10 @@ static void persistent_drop_snapshot(struct dm_exception_store *store) DMWARN("write header failed"); } -static int persistent_ctr(struct dm_exception_store *store, - unsigned argc, char **argv) +static int persistent_ctr(struct dm_exception_store *store, char *options) { struct pstore *ps; + int r; /* allocate the pstore */ ps = kzalloc(sizeof(*ps), GFP_KERNEL); @@ -868,14 +869,32 @@ static int persistent_ctr(struct dm_exception_store *store, ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0); if (!ps->metadata_wq) { - kfree(ps); DMERR("couldn't start header metadata update thread"); - return -ENOMEM; + r = -ENOMEM; + goto err_workqueue; + } + + if (options) { + char overflow = toupper(options[0]); + if (overflow == 'O') + store->userspace_supports_overflow = true; + else { + DMERR("Unsupported persistent store option: %s", options); + r = -EINVAL; + goto err_options; + } } store->context = ps; return 0; + +err_options: + destroy_workqueue(ps->metadata_wq); +err_workqueue: + kfree(ps); + + return r; } static unsigned persistent_status(struct dm_exception_store *store, @@ -888,7 +907,8 @@ static unsigned persistent_status(struct dm_exception_store *store, case STATUSTYPE_INFO: break; case STATUSTYPE_TABLE: - DMEMIT(" P %llu", (unsigned long long)store->chunk_size); + DMEMIT(" %s %llu", store->userspace_supports_overflow ? 
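The exception-store constructor now takes a free-form options string instead of argc/argv, which is how a table argument such as "PO" reaches `persistent_ctr()`: the first character selects the store type and anything after it becomes `options`. A small sketch of that split, assuming the same single-argument convention as the hunk above:

```c
#include <linux/string.h>

/*
 * Illustration only: store_options("PO") returns "O", which the
 * persistent store's ->ctr() parses as its overflow flag, while
 * store_options("P") and store_options("N") return NULL.
 */
static const char *store_options(const char *arg)
{
	return strlen(arg) > 1 ? &arg[1] : NULL;
}
```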
"PO" : "P", + (unsigned long long)store->chunk_size); } return sz; diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c index 1ce9a2586e41..9b7c8c8049d6 100644 --- a/drivers/md/dm-snap-transient.c +++ b/drivers/md/dm-snap-transient.c @@ -70,8 +70,7 @@ static void transient_usage(struct dm_exception_store *store, *metadata_sectors = 0; } -static int transient_ctr(struct dm_exception_store *store, - unsigned argc, char **argv) +static int transient_ctr(struct dm_exception_store *store, char *options) { struct transient_c *tc; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index c0bcd6516dfe..c06b74e91cd6 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1098,7 +1098,7 @@ static void stop_merge(struct dm_snapshot *s) } /* - * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size> + * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size> */ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) { @@ -1302,6 +1302,7 @@ static void __handover_exceptions(struct dm_snapshot *snap_src, u.store_swap = snap_dest->store; snap_dest->store = snap_src->store; + snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow; snap_src->store = u.store_swap; snap_dest->store->snap = snap_dest; @@ -1739,8 +1740,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) pe = __find_pending_exception(s, pe, chunk); if (!pe) { - s->snapshot_overflowed = 1; - DMERR("Snapshot overflowed: Unable to allocate exception."); + if (s->store->userspace_supports_overflow) { + s->snapshot_overflowed = 1; + DMERR("Snapshot overflowed: Unable to allocate exception."); + } else + __invalidate_snapshot(s, -ENOMEM); r = -EIO; goto out_unlock; } @@ -2365,7 +2369,7 @@ static struct target_type origin_target = { static struct target_type snapshot_target = { .name = "snapshot", - .version = {1, 14, 0}, + .version = {1, 15, 0}, .module = THIS_MODULE, .ctr = snapshot_ctr, .dtr = snapshot_dtr, @@ -2379,7 +2383,7 @@ static struct target_type snapshot_target = { static struct target_type merge_target = { .name = dm_snapshot_merge_target_name, - .version = {1, 3, 0}, + .version = {1, 4, 0}, .module = THIS_MODULE, .ctr = snapshot_ctr, .dtr = snapshot_dtr, diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 6578b7bc1fbb..3897b90bd462 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -3201,7 +3201,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) metadata_low_callback, pool); if (r) - goto out_free_pt; + goto out_flags_changed; pt->callbacks.congested_fn = pool_is_congested; dm_table_add_target_callbacks(ti->table, &pt->callbacks); @@ -4249,6 +4249,10 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) { struct thin_c *tc = ti->private; struct pool *pool = tc->pool; + struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md); + + if (!pool_limits->discard_granularity) + return; /* pool's discard support is disabled */ limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */ diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6264781dc69a..1b5c6047e4f1 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1001,6 +1001,7 @@ static void end_clone_bio(struct bio *clone) struct dm_rq_target_io *tio = info->tio; struct bio *bio = info->orig; unsigned int nr_bytes = info->orig->bi_iter.bi_size; + int error = clone->bi_error; bio_put(clone); 
@@ -1011,13 +1012,13 @@ static void end_clone_bio(struct bio *clone) * the remainder. */ return; - else if (bio->bi_error) { + else if (error) { /* * Don't notice the error to the upper layer yet. * The error handling decision is made by the target driver, * when the request is completed. */ - tio->error = bio->bi_error; + tio->error = error; return; } @@ -2837,8 +2838,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait) might_sleep(); - map = dm_get_live_table(md, &srcu_idx); - spin_lock(&_minor_lock); idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); set_bit(DMF_FREEING, &md->flags); @@ -2852,14 +2851,14 @@ static void __dm_destroy(struct mapped_device *md, bool wait) * do not race with internal suspend. */ mutex_lock(&md->suspend_lock); + map = dm_get_live_table(md, &srcu_idx); if (!dm_suspended_md(md)) { dm_table_presuspend_targets(map); dm_table_postsuspend_targets(map); } - mutex_unlock(&md->suspend_lock); - /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ dm_put_live_table(md, srcu_idx); + mutex_unlock(&md->suspend_lock); /* * Rare, but there may be I/O requests still going to complete, diff --git a/drivers/md/md.c b/drivers/md/md.c index 4f5ecbe94ccb..c702de18207a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5409,9 +5409,13 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) * which will now never happen */ wake_up_process(mddev->sync_thread->tsk); + if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags)) + return -EBUSY; mddev_unlock(mddev); wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)); + wait_event(mddev->sb_wait, + !test_bit(MD_CHANGE_PENDING, &mddev->flags)); mddev_lock_nointr(mddev); mutex_lock(&mddev->open_mutex); @@ -8160,6 +8164,7 @@ void md_check_recovery(struct mddev *mddev) md_reap_sync_thread(mddev); clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + clear_bit(MD_CHANGE_PENDING, &mddev->flags); goto unlock; } diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index d222522c52e0..d132f06afdd1 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -470,8 +470,7 @@ static int multipath_run (struct mddev *mddev) return 0; out_free_conf: - if (conf->pool) - mempool_destroy(conf->pool); + mempool_destroy(conf->pool); kfree(conf->multipaths); kfree(conf); mddev->private = NULL; diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index 421a36c593e3..2e4c4cb79e4d 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c @@ -301,11 +301,16 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent, { int s; uint32_t max_entries = le32_to_cpu(left->header.max_entries); - unsigned target = (nr_left + nr_center + nr_right) / 3; - BUG_ON(target > max_entries); + unsigned total = nr_left + nr_center + nr_right; + unsigned target_right = total / 3; + unsigned remainder = (target_right * 3) != total; + unsigned target_left = target_right + remainder; + + BUG_ON(target_left > max_entries); + BUG_ON(target_right > max_entries); if (nr_left < nr_right) { - s = nr_left - target; + s = nr_left - target_left; if (s < 0 && nr_center < -s) { /* not enough in central node */ @@ -316,10 +321,10 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent, } else shift(left, center, s); - shift(center, right, target - nr_right); + 
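The `end_clone_bio()` hunk is a use-after-free style fix: `bio_put()` may free the clone, so the error code has to be copied out before the reference is dropped. The same pattern in a self-contained, hypothetical refcounted object (plain C, not the dm code itself):

```c
#include <stdlib.h>

struct obj {
	int refcount;
	int error;
};

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		free(o);	/* 'o' must not be dereferenced after this */
}

static int obj_finish(struct obj *o)
{
	int error = o->error;	/* copy out before dropping the reference */

	obj_put(o);
	return error;		/* safe even if obj_put() freed 'o' */
}
```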
shift(center, right, target_right - nr_right); } else { - s = target - nr_right; + s = target_right - nr_right; if (s > 0 && nr_center < s) { /* not enough in central node */ shift(center, right, nr_center); @@ -329,7 +334,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent, } else shift(center, right, s); - shift(left, center, nr_left - target); + shift(left, center, nr_left - target_left); } *key_ptr(parent, c->index) = center->keys[0]; diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index b6cec258cc21..0e09aef43998 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -523,7 +523,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key) r = new_block(s->info, &right); if (r < 0) { - /* FIXME: put left */ + unlock_block(s->info, left); return r; } diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 63e619b2f44e..f8e5db0cb5aa 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -376,12 +376,6 @@ static int raid0_run(struct mddev *mddev) struct md_rdev *rdev; bool discard_supported = false; - rdev_for_each(rdev, mddev) { - disk_stack_limits(mddev->gendisk, rdev->bdev, - rdev->data_offset << 9); - if (blk_queue_discard(bdev_get_queue(rdev->bdev))) - discard_supported = true; - } blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); @@ -390,6 +384,12 @@ static int raid0_run(struct mddev *mddev) blk_queue_io_opt(mddev->queue, (mddev->chunk_sectors << 9) * mddev->raid_disks); + rdev_for_each(rdev, mddev) { + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->data_offset << 9); + if (blk_queue_discard(bdev_get_queue(rdev->bdev))) + discard_supported = true; + } if (!discard_supported) queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); else diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 4517f06c41ba..ddd8a5f572aa 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -881,8 +881,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) } if (bio && bio_data_dir(bio) == WRITE) { - if (bio->bi_iter.bi_sector >= - conf->mddev->curr_resync_completed) { + if (bio->bi_iter.bi_sector >= conf->next_resync) { if (conf->start_next_window == MaxSector) conf->start_next_window = conf->next_resync + @@ -1516,7 +1515,7 @@ static void close_sync(struct r1conf *conf) conf->r1buf_pool = NULL; spin_lock_irq(&conf->resync_lock); - conf->next_resync = 0; + conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE; conf->start_next_window = MaxSector; conf->current_window_requests += conf->next_window_requests; @@ -2383,8 +2382,8 @@ static void raid1d(struct md_thread *thread) } spin_unlock_irqrestore(&conf->device_lock, flags); while (!list_empty(&tmp)) { - r1_bio = list_first_entry(&conf->bio_end_io_list, - struct r1bio, retry_list); + r1_bio = list_first_entry(&tmp, struct r1bio, + retry_list); list_del(&r1_bio->retry_list); raid_end_bio_io(r1_bio); } @@ -2843,8 +2842,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) abort: if (conf) { - if (conf->r1bio_pool) - mempool_destroy(conf->r1bio_pool); + mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); safe_put_page(conf->tmppage); kfree(conf->poolinfo); @@ -2946,8 +2944,7 @@ static void raid1_free(struct mddev *mddev, void *priv) { struct r1conf *conf = priv; - if (conf->r1bio_pool) - mempool_destroy(conf->r1bio_pool); 
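`redistribute3()` now computes separate left and right targets so the division remainder lands in the left node instead of being lost. A quick arithmetic check of the new formula for a total that is not a multiple of three (plain C, the counts are just example values):

```c
#include <stdio.h>

int main(void)
{
	unsigned nr_left = 4, nr_center = 2, nr_right = 1;
	unsigned total = nr_left + nr_center + nr_right;	/* 7 */
	unsigned target_right = total / 3;			/* 2 */
	unsigned remainder = (target_right * 3) != total;	/* 1 */
	unsigned target_left = target_right + remainder;	/* 3 */

	/* 7 entries redistribute as 3/2/2; a single target of total/3
	 * would only account for 6 of them. */
	printf("left=%u center=%u right=%u\n",
	       target_left, total - target_left - target_right, target_right);
	return 0;
}
```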
+ mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); safe_put_page(conf->tmppage); kfree(conf->poolinfo); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 0fc33eb88855..9f69dc526f8c 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2688,8 +2688,8 @@ static void raid10d(struct md_thread *thread) } spin_unlock_irqrestore(&conf->device_lock, flags); while (!list_empty(&tmp)) { - r10_bio = list_first_entry(&conf->bio_end_io_list, - struct r10bio, retry_list); + r10_bio = list_first_entry(&tmp, struct r10bio, + retry_list); list_del(&r10_bio->retry_list); raid_end_bio_io(r10_bio); } @@ -3486,8 +3486,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n", mdname(mddev)); if (conf) { - if (conf->r10bio_pool) - mempool_destroy(conf->r10bio_pool); + mempool_destroy(conf->r10bio_pool); kfree(conf->mirrors); safe_put_page(conf->tmppage); kfree(conf); @@ -3682,8 +3681,7 @@ static int run(struct mddev *mddev) out_free_conf: md_unregister_thread(&mddev->thread); - if (conf->r10bio_pool) - mempool_destroy(conf->r10bio_pool); + mempool_destroy(conf->r10bio_pool); safe_put_page(conf->tmppage); kfree(conf->mirrors); kfree(conf); @@ -3696,8 +3694,7 @@ static void raid10_free(struct mddev *mddev, void *priv) { struct r10conf *conf = priv; - if (conf->r10bio_pool) - mempool_destroy(conf->r10bio_pool); + mempool_destroy(conf->r10bio_pool); safe_put_page(conf->tmppage); kfree(conf->mirrors); kfree(conf->mirrors_old); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 15ef2c641b2b..49bb8d3ff9be 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2271,8 +2271,7 @@ static void shrink_stripes(struct r5conf *conf) drop_one_stripe(conf)) ; - if (conf->slab_cache) - kmem_cache_destroy(conf->slab_cache); + kmem_cache_destroy(conf->slab_cache); conf->slab_cache = NULL; } @@ -3150,6 +3149,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, spin_unlock_irq(&sh->stripe_lock); if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) wake_up(&conf->wait_for_overlap); + if (bi) + s->to_read--; while (bi && bi->bi_iter.bi_sector < sh->dev[i].sector + STRIPE_SECTORS) { struct bio *nextbi = @@ -3169,6 +3170,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, */ clear_bit(R5_LOCKED, &sh->dev[i].flags); } + s->to_write = 0; + s->written = 0; if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) if (atomic_dec_and_test(&conf->pending_full_writes)) @@ -3300,7 +3303,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, */ return 0; - for (i = 0; i < s->failed; i++) { + for (i = 0; i < s->failed && i < 2; i++) { if (fdev[i]->towrite && !test_bit(R5_UPTODATE, &fdev[i]->flags) && !test_bit(R5_OVERWRITE, &fdev[i]->flags)) @@ -3324,7 +3327,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, sh->sector < sh->raid_conf->mddev->recovery_cp) /* reconstruct-write isn't being forced */ return 0; - for (i = 0; i < s->failed; i++) { + for (i = 0; i < s->failed && i < 2; i++) { if (s->failed_num[i] != sh->pd_idx && s->failed_num[i] != sh->qd_idx && !test_bit(R5_UPTODATE, &fdev[i]->flags) && diff --git a/drivers/media/dvb-frontends/horus3a.h b/drivers/media/dvb-frontends/horus3a.h index b055319d532e..c1e2d1834b78 100644 --- a/drivers/media/dvb-frontends/horus3a.h +++ b/drivers/media/dvb-frontends/horus3a.h @@ -46,8 +46,8 @@ extern struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe, const struct horus3a_config *config, struct 
i2c_adapter *i2c); #else -static inline struct dvb_frontend *horus3a_attach( - const struct cxd2820r_config *config, +static inline struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe, + const struct horus3a_config *config, struct i2c_adapter *i2c) { printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); diff --git a/drivers/media/dvb-frontends/lnbh25.h b/drivers/media/dvb-frontends/lnbh25.h index 69f30e21f6b3..1f329ef05acc 100644 --- a/drivers/media/dvb-frontends/lnbh25.h +++ b/drivers/media/dvb-frontends/lnbh25.h @@ -43,7 +43,7 @@ struct dvb_frontend *lnbh25_attach( struct lnbh25_config *cfg, struct i2c_adapter *i2c); #else -static inline dvb_frontend *lnbh25_attach( +static inline struct dvb_frontend *lnbh25_attach( struct dvb_frontend *fe, struct lnbh25_config *cfg, struct i2c_adapter *i2c) diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c index ff31e7a01ca9..feeeb70d841e 100644 --- a/drivers/media/dvb-frontends/m88ds3103.c +++ b/drivers/media/dvb-frontends/m88ds3103.c @@ -18,6 +18,27 @@ static struct dvb_frontend_ops m88ds3103_ops; +/* write single register with mask */ +static int m88ds3103_update_bits(struct m88ds3103_dev *dev, + u8 reg, u8 mask, u8 val) +{ + int ret; + u8 tmp; + + /* no need for read if whole reg is written */ + if (mask != 0xff) { + ret = regmap_bulk_read(dev->regmap, reg, &tmp, 1); + if (ret) + return ret; + + val &= mask; + tmp &= ~mask; + val |= tmp; + } + + return regmap_bulk_write(dev->regmap, reg, &val, 1); +} + /* write reg val table using reg addr auto increment */ static int m88ds3103_wr_reg_val_tab(struct m88ds3103_dev *dev, const struct m88ds3103_reg_val *tab, int tab_len) @@ -394,10 +415,10 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) u8tmp2 = 0x00; /* 0b00 */ break; } - ret = regmap_update_bits(dev->regmap, 0x22, 0xc0, u8tmp1 << 6); + ret = m88ds3103_update_bits(dev, 0x22, 0xc0, u8tmp1 << 6); if (ret) goto err; - ret = regmap_update_bits(dev->regmap, 0x24, 0xc0, u8tmp2 << 6); + ret = m88ds3103_update_bits(dev, 0x24, 0xc0, u8tmp2 << 6); if (ret) goto err; } @@ -455,13 +476,13 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) if (ret) goto err; } - ret = regmap_update_bits(dev->regmap, 0x9d, 0x08, 0x08); + ret = m88ds3103_update_bits(dev, 0x9d, 0x08, 0x08); if (ret) goto err; ret = regmap_write(dev->regmap, 0xf1, 0x01); if (ret) goto err; - ret = regmap_update_bits(dev->regmap, 0x30, 0x80, 0x80); + ret = m88ds3103_update_bits(dev, 0x30, 0x80, 0x80); if (ret) goto err; } @@ -498,7 +519,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) switch (dev->cfg->ts_mode) { case M88DS3103_TS_SERIAL: case M88DS3103_TS_SERIAL_D7: - ret = regmap_update_bits(dev->regmap, 0x29, 0x20, u8tmp1); + ret = m88ds3103_update_bits(dev, 0x29, 0x20, u8tmp1); if (ret) goto err; u8tmp1 = 0; @@ -567,11 +588,11 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) if (ret) goto err; - ret = regmap_update_bits(dev->regmap, 0x4d, 0x02, dev->cfg->spec_inv << 1); + ret = m88ds3103_update_bits(dev, 0x4d, 0x02, dev->cfg->spec_inv << 1); if (ret) goto err; - ret = regmap_update_bits(dev->regmap, 0x30, 0x10, dev->cfg->agc_inv << 4); + ret = m88ds3103_update_bits(dev, 0x30, 0x10, dev->cfg->agc_inv << 4); if (ret) goto err; @@ -625,13 +646,13 @@ static int m88ds3103_init(struct dvb_frontend *fe) dev->warm = false; /* wake up device from sleep */ - ret = regmap_update_bits(dev->regmap, 0x08, 0x01, 0x01); + ret = m88ds3103_update_bits(dev, 0x08, 0x01, 0x01); if (ret) goto err; - ret 
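The horus3a.h and lnbh25.h fixes only bite when the drivers are compiled out: the `#else` inline stub has to mirror the real `*_attach()` prototype exactly (including the return type), or callers built with the driver disabled stop compiling. The usual shape of that idiom, with a hypothetical `foo_attach()` and config symbol:

```c
#include <linux/kconfig.h>
#include <linux/printk.h>

struct dvb_frontend;
struct foo_config;
struct i2c_adapter;

#if IS_REACHABLE(CONFIG_DVB_FOO)
extern struct dvb_frontend *foo_attach(struct dvb_frontend *fe,
				       const struct foo_config *cfg,
				       struct i2c_adapter *i2c);
#else
/* The stub must match the real declaration above line for line. */
static inline struct dvb_frontend *foo_attach(struct dvb_frontend *fe,
					      const struct foo_config *cfg,
					      struct i2c_adapter *i2c)
{
	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
	return NULL;
}
#endif
```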
= regmap_update_bits(dev->regmap, 0x04, 0x01, 0x00); + ret = m88ds3103_update_bits(dev, 0x04, 0x01, 0x00); if (ret) goto err; - ret = regmap_update_bits(dev->regmap, 0x23, 0x10, 0x00); + ret = m88ds3103_update_bits(dev, 0x23, 0x10, 0x00); if (ret) goto err; @@ -749,18 +770,18 @@ static int m88ds3103_sleep(struct dvb_frontend *fe) utmp = 0x29; else utmp = 0x27; - ret = regmap_update_bits(dev->regmap, utmp, 0x01, 0x00); + ret = m88ds3103_update_bits(dev, utmp, 0x01, 0x00); if (ret) goto err; /* sleep */ - ret = regmap_update_bits(dev->regmap, 0x08, 0x01, 0x00); + ret = m88ds3103_update_bits(dev, 0x08, 0x01, 0x00); if (ret) goto err; - ret = regmap_update_bits(dev->regmap, 0x04, 0x01, 0x01); + ret = m88ds3103_update_bits(dev, 0x04, 0x01, 0x01); if (ret) goto err; - ret = regmap_update_bits(dev->regmap, 0x23, 0x10, 0x10); + ret = m88ds3103_update_bits(dev, 0x23, 0x10, 0x10); if (ret) goto err; @@ -992,12 +1013,12 @@ static int m88ds3103_set_tone(struct dvb_frontend *fe, } utmp = tone << 7 | dev->cfg->envelope_mode << 5; - ret = regmap_update_bits(dev->regmap, 0xa2, 0xe0, utmp); + ret = m88ds3103_update_bits(dev, 0xa2, 0xe0, utmp); if (ret) goto err; utmp = 1 << 2; - ret = regmap_update_bits(dev->regmap, 0xa1, reg_a1_mask, utmp); + ret = m88ds3103_update_bits(dev, 0xa1, reg_a1_mask, utmp); if (ret) goto err; @@ -1047,7 +1068,7 @@ static int m88ds3103_set_voltage(struct dvb_frontend *fe, voltage_dis ^= dev->cfg->lnb_en_pol; utmp = voltage_dis << 1 | voltage_sel << 0; - ret = regmap_update_bits(dev->regmap, 0xa2, 0x03, utmp); + ret = m88ds3103_update_bits(dev, 0xa2, 0x03, utmp); if (ret) goto err; @@ -1080,7 +1101,7 @@ static int m88ds3103_diseqc_send_master_cmd(struct dvb_frontend *fe, } utmp = dev->cfg->envelope_mode << 5; - ret = regmap_update_bits(dev->regmap, 0xa2, 0xe0, utmp); + ret = m88ds3103_update_bits(dev, 0xa2, 0xe0, utmp); if (ret) goto err; @@ -1115,12 +1136,12 @@ static int m88ds3103_diseqc_send_master_cmd(struct dvb_frontend *fe, } else { dev_dbg(&client->dev, "diseqc tx timeout\n"); - ret = regmap_update_bits(dev->regmap, 0xa1, 0xc0, 0x40); + ret = m88ds3103_update_bits(dev, 0xa1, 0xc0, 0x40); if (ret) goto err; } - ret = regmap_update_bits(dev->regmap, 0xa2, 0xc0, 0x80); + ret = m88ds3103_update_bits(dev, 0xa2, 0xc0, 0x80); if (ret) goto err; @@ -1152,7 +1173,7 @@ static int m88ds3103_diseqc_send_burst(struct dvb_frontend *fe, } utmp = dev->cfg->envelope_mode << 5; - ret = regmap_update_bits(dev->regmap, 0xa2, 0xe0, utmp); + ret = m88ds3103_update_bits(dev, 0xa2, 0xe0, utmp); if (ret) goto err; @@ -1194,12 +1215,12 @@ static int m88ds3103_diseqc_send_burst(struct dvb_frontend *fe, } else { dev_dbg(&client->dev, "diseqc tx timeout\n"); - ret = regmap_update_bits(dev->regmap, 0xa1, 0xc0, 0x40); + ret = m88ds3103_update_bits(dev, 0xa1, 0xc0, 0x40); if (ret) goto err; } - ret = regmap_update_bits(dev->regmap, 0xa2, 0xc0, 0x80); + ret = m88ds3103_update_bits(dev, 0xa2, 0xc0, 0x80); if (ret) goto err; @@ -1435,13 +1456,13 @@ static int m88ds3103_probe(struct i2c_client *client, goto err_kfree; /* sleep */ - ret = regmap_update_bits(dev->regmap, 0x08, 0x01, 0x00); + ret = m88ds3103_update_bits(dev, 0x08, 0x01, 0x00); if (ret) goto err_kfree; - ret = regmap_update_bits(dev->regmap, 0x04, 0x01, 0x01); + ret = m88ds3103_update_bits(dev, 0x04, 0x01, 0x01); if (ret) goto err_kfree; - ret = regmap_update_bits(dev->regmap, 0x23, 0x10, 0x10); + ret = m88ds3103_update_bits(dev, 0x23, 0x10, 0x10); if (ret) goto err_kfree; diff --git a/drivers/media/dvb-frontends/si2168.c 
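The new `m88ds3103_update_bits()` helper used throughout these hunks is a plain read-modify-write: bits outside the mask keep the value read back from the register, bits inside the mask come from the caller, and the read is skipped entirely when the mask is 0xff. The combine step, checked with throwaway example values:

```c
#include <stdint.h>
#include <stdio.h>

/* Same combine as the helper: val &= mask; tmp &= ~mask; val |= tmp; */
static uint8_t update_bits(uint8_t current, uint8_t mask, uint8_t val)
{
	return (uint8_t)((val & mask) | (current & ~mask));
}

int main(void)
{
	/* e.g. mask 0xc0 with field value 0x40 on a register reading 0x2f */
	printf("0x%02x\n", update_bits(0x2f, 0xc0, 0x40));	/* -> 0x6f */
	return 0;
}
```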
b/drivers/media/dvb-frontends/si2168.c index 81788c5a44d8..821a8f481507 100644 --- a/drivers/media/dvb-frontends/si2168.c +++ b/drivers/media/dvb-frontends/si2168.c @@ -502,6 +502,10 @@ static int si2168_init(struct dvb_frontend *fe) /* firmware is in the new format */ for (remaining = fw->size; remaining > 0; remaining -= 17) { len = fw->data[fw->size - remaining]; + if (len > SI2168_ARGLEN) { + ret = -EINVAL; + break; + } memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len); cmd.wlen = len; cmd.rlen = 1; diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c index f55b3276f28d..56773f3893d4 100644 --- a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c +++ b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c @@ -80,11 +80,9 @@ irqreturn_t netup_spi_interrupt(struct netup_spi *spi) u16 reg; unsigned long flags; - if (!spi) { - dev_dbg(&spi->master->dev, - "%s(): SPI not initialized\n", __func__); + if (!spi) return IRQ_NONE; - } + spin_lock_irqsave(&spi->lock, flags); reg = readw(&spi->regs->control_stat); if (!(reg & NETUP_SPI_CTRL_IRQ)) { @@ -234,11 +232,9 @@ void netup_spi_release(struct netup_unidvb_dev *ndev) unsigned long flags; struct netup_spi *spi = ndev->spi; - if (!spi) { - dev_dbg(&spi->master->dev, - "%s(): SPI not initialized\n", __func__); + if (!spi) return; - } + spin_lock_irqsave(&spi->lock, flags); reg = readw(&spi->regs->control_stat); writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat); diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c index 486aef50d99b..f922f2e827bc 100644 --- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c +++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c @@ -1097,7 +1097,7 @@ static int load_slim_core_fw(const struct firmware *fw, void *context) Elf32_Ehdr *ehdr; Elf32_Phdr *phdr; u8 __iomem *dst; - int err, i; + int err = 0, i; if (!fw || !context) return -EINVAL; @@ -1106,7 +1106,7 @@ static int load_slim_core_fw(const struct firmware *fw, void *context) phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff); /* go through the available ELF segments */ - for (i = 0; i < ehdr->e_phnum && !err; i++, phdr++) { + for (i = 0; i < ehdr->e_phnum; i++, phdr++) { /* Only consider LOAD segments */ if (phdr->p_type != PT_LOAD) @@ -1192,7 +1192,6 @@ err: static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei) { - int ret; int err; dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA); @@ -1207,7 +1206,7 @@ static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei) if (err) { dev_err(fei->dev, "request_firmware_nowait err: %d.\n", err); complete_all(&fei->fw_ack); - return ret; + return err; } return 0; diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c index 1c087cb76815..d0549fba711c 100644 --- a/drivers/media/rc/ir-hix5hd2.c +++ b/drivers/media/rc/ir-hix5hd2.c @@ -257,7 +257,7 @@ static int hix5hd2_ir_probe(struct platform_device *pdev) goto clkerr; if (devm_request_irq(dev, priv->irq, hix5hd2_ir_rx_interrupt, - IRQF_NO_SUSPEND, pdev->name, priv) < 0) { + 0, pdev->name, priv) < 0) { dev_err(dev, "IRQ %d register failed\n", priv->irq); ret = -EINVAL; goto regerr; diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c index 507382160e5e..ce157edd45fa 100644 --- a/drivers/media/tuners/si2157.c +++ b/drivers/media/tuners/si2157.c @@ -166,6 +166,10 @@ static int si2157_init(struct dvb_frontend *fe) for (remaining = fw->size; remaining > 0; remaining 
-= 17) { len = fw->data[fw->size - remaining]; + if (len > SI2157_ARGLEN) { + dev_err(&client->dev, "Bad firmware length\n"); + goto err_release_firmware; + } memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len); cmd.wlen = len; cmd.rlen = 1; diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index c3cac4c12fb3..197a4f2e54d2 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c @@ -34,6 +34,14 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req) unsigned int pipe; u8 requesttype; + mutex_lock(&d->usb_mutex); + + if (req->size > sizeof(dev->buf)) { + dev_err(&d->intf->dev, "too large message %u\n", req->size); + ret = -EINVAL; + goto err_mutex_unlock; + } + if (req->index & CMD_WR_FLAG) { /* write */ memcpy(dev->buf, req->data, req->size); @@ -50,14 +58,17 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req) dvb_usb_dbg_usb_control_msg(d->udev, 0, requesttype, req->value, req->index, dev->buf, req->size); if (ret < 0) - goto err; + goto err_mutex_unlock; /* read request, copy returned data to return buf */ if (requesttype == (USB_TYPE_VENDOR | USB_DIR_IN)) memcpy(req->data, dev->buf, req->size); + mutex_unlock(&d->usb_mutex); + return 0; -err: +err_mutex_unlock: + mutex_unlock(&d->usb_mutex); dev_dbg(&d->intf->dev, "failed=%d\n", ret); return ret; } diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h index 9f6115a2ee01..138062960a73 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h @@ -71,7 +71,7 @@ struct rtl28xxu_dev { - u8 buf[28]; + u8 buf[128]; u8 chip_id; u8 tuner; char *tuner_name; diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig index 82876a67f144..9beece00869b 100644 --- a/drivers/media/v4l2-core/Kconfig +++ b/drivers/media/v4l2-core/Kconfig @@ -47,7 +47,7 @@ config V4L2_MEM2MEM_DEV # Used by LED subsystem flash drivers config V4L2_FLASH_LED_CLASS tristate "V4L2 flash API for LED flash class devices" - depends on VIDEO_V4L2_SUBDEV_API + depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API depends on LEDS_CLASS_FLASH ---help--- Say Y here to enable V4L2 flash API support for LED flash diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig index c6a644b22af4..6f3154613dc7 100644 --- a/drivers/memory/Kconfig +++ b/drivers/memory/Kconfig @@ -58,12 +58,18 @@ config OMAP_GPMC memory drives like NOR, NAND, OneNAND, SRAM. config OMAP_GPMC_DEBUG - bool + bool "Enable GPMC debug output and skip reset of GPMC during init" depends on OMAP_GPMC help Enables verbose debugging mostly to decode the bootloader provided - timings. Enable this during development to configure devices - connected to the GPMC bus. + timings. To preserve the bootloader provided timings, the reset + of GPMC is skipped during init. Enable this during development to + configure devices connected to the GPMC bus. + + NOTE: In addition to matching the register setup with the bootloader + you also need to match the GPMC FCLK frequency used by the + bootloader or else the GPMC timings won't be identical with the + bootloader timings. 
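si2168, si2157 and rtl28xxu all gain the same kind of guard above: a length that comes out of a firmware image or a request structure is validated against the fixed-size destination before the `memcpy()`. A sketch of that pattern for a firmware laid out as 17-byte records, one length byte plus 16 payload bytes (the `foo_` names and buffer size are illustrative, not from the drivers):

```c
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/string.h>
#include <linux/types.h>

#define FOO_ARGLEN 30	/* capacity of cmd.args in this sketch */

struct foo_cmd {
	u8 args[FOO_ARGLEN];
	unsigned int wlen;
};

static int foo_download_fw(const struct firmware *fw, struct foo_cmd *cmd)
{
	size_t remaining;

	if (fw->size % 17)
		return -EINVAL;	/* records are 1 length byte + 16 data bytes */

	for (remaining = fw->size; remaining > 0; remaining -= 17) {
		u8 len = fw->data[fw->size - remaining];

		/* Each record carries at most 16 payload bytes, and the
		 * destination buffer is the hard upper bound either way. */
		if (len > 16 || len > sizeof(cmd->args))
			return -EINVAL;

		memcpy(cmd->args, &fw->data[(fw->size - remaining) + 1], len);
		cmd->wlen = len;
		/* ...send the command to the device here... */
	}

	return 0;
}
```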
config MVEBU_DEVBUS bool "Marvell EBU Device Bus Controller" diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index 32ac049f2bc4..6515dfc2b805 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -696,7 +696,6 @@ int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t, int div; u32 l; - gpmc_cs_show_timings(cs, "before gpmc_cs_set_timings"); div = gpmc_calc_divider(t->sync_clk); if (div < 0) return div; @@ -1988,6 +1987,7 @@ static int gpmc_probe_generic_child(struct platform_device *pdev, if (ret < 0) goto err; + gpmc_cs_show_timings(cs, "before gpmc_cs_program_settings"); ret = gpmc_cs_program_settings(cs, &gpmc_s); if (ret < 0) goto err; diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index 4b54128bc78e..a726f01e3b02 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c @@ -138,7 +138,7 @@ static void asic3_irq_flip_edge(struct asic3 *asic, spin_unlock_irqrestore(&asic->lock, flags); } -static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) +static void asic3_irq_demux(struct irq_desc *desc) { struct asic3 *asic = irq_desc_get_handler_data(desc); struct irq_data *data = irq_desc_get_irq_data(desc); diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c index a76eb6ef47a0..b279205659a4 100644 --- a/drivers/mfd/ezx-pcap.c +++ b/drivers/mfd/ezx-pcap.c @@ -205,7 +205,7 @@ static void pcap_isr_work(struct work_struct *work) } while (gpio_get_value(pdata->gpio)); } -static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc) +static void pcap_irq_handler(struct irq_desc *desc) { struct pcap_chip *pcap = irq_desc_get_handler_data(desc); diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c index 9131cdcdc64a..6ccaf90d98fd 100644 --- a/drivers/mfd/htc-egpio.c +++ b/drivers/mfd/htc-egpio.c @@ -98,7 +98,7 @@ static struct irq_chip egpio_muxed_chip = { .irq_unmask = egpio_unmask, }; -static void egpio_handler(unsigned int irq, struct irq_desc *desc) +static void egpio_handler(struct irq_desc *desc) { struct egpio_info *ei = irq_desc_get_handler_data(desc); int irqpin; diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h index f28cb28a62f8..2c7f8d7c0595 100644 --- a/drivers/mfd/intel-lpss.h +++ b/drivers/mfd/intel-lpss.h @@ -42,6 +42,8 @@ int intel_lpss_resume(struct device *dev); .thaw = intel_lpss_resume, \ .poweroff = intel_lpss_suspend, \ .restore = intel_lpss_resume, +#else +#define INTEL_LPSS_SLEEP_PM_OPS #endif #define INTEL_LPSS_RUNTIME_PM_OPS \ diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c index 5bb49f08955d..798e44306382 100644 --- a/drivers/mfd/jz4740-adc.c +++ b/drivers/mfd/jz4740-adc.c @@ -65,7 +65,7 @@ struct jz4740_adc { spinlock_t lock; }; -static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc) +static void jz4740_adc_irq_demux(struct irq_desc *desc) { struct irq_chip_generic *gc = irq_desc_get_handler_data(desc); uint8_t status; diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c index c52162ea3d0a..586098f1b233 100644 --- a/drivers/mfd/max77843.c +++ b/drivers/mfd/max77843.c @@ -80,7 +80,7 @@ static int max77843_chg_init(struct max77693_dev *max77843) if (!max77843->i2c_chg) { dev_err(&max77843->i2c->dev, "Cannot allocate I2C device for Charger\n"); - return PTR_ERR(max77843->i2c_chg); + return -ENODEV; } i2c_set_clientdata(max77843->i2c_chg, max77843); diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c index 59502d02cd15..1b7ec0870c2a 100644 --- a/drivers/mfd/pm8921-core.c +++ b/drivers/mfd/pm8921-core.c @@ 
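The intel-lpss.h hunk above adds the missing `#else` branch so `INTEL_LPSS_SLEEP_PM_OPS` expands to nothing when sleep PM is disabled, instead of being undefined and breaking every `dev_pm_ops` initializer that references it. The generic form of that idiom, with hypothetical names:

```c
#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev) { return 0; }
static int __maybe_unused foo_resume(struct device *dev)  { return 0; }

#ifdef CONFIG_PM_SLEEP
#define FOO_SLEEP_PM_OPS \
	.suspend = foo_suspend, \
	.resume  = foo_resume,
#else
/* Still define the macro - to nothing - so users need no #ifdefs. */
#define FOO_SLEEP_PM_OPS
#endif

static const struct dev_pm_ops foo_pm_ops = {
	FOO_SLEEP_PM_OPS
	/* runtime PM callbacks would follow here */
};
```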
-156,7 +156,7 @@ static int pm8xxx_irq_master_handler(struct pm_irq_chip *chip, int master) return ret; } -static void pm8xxx_irq_handler(unsigned int irq, struct irq_desc *desc) +static void pm8xxx_irq_handler(struct irq_desc *desc) { struct pm_irq_chip *chip = irq_desc_get_handler_data(desc); struct irq_chip *irq_chip = irq_desc_get_chip(desc); diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c index 16fc1adc4fa3..94bd89cb1f06 100644 --- a/drivers/mfd/t7l66xb.c +++ b/drivers/mfd/t7l66xb.c @@ -185,7 +185,7 @@ static struct mfd_cell t7l66xb_cells[] = { /*--------------------------------------------------------------------------*/ /* Handle the T7L66XB interrupt mux */ -static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc) +static void t7l66xb_irq(struct irq_desc *desc) { struct t7l66xb *t7l66xb = irq_desc_get_handler_data(desc); unsigned int isr; diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c index 775b9aca871a..8c84a513016b 100644 --- a/drivers/mfd/tc6393xb.c +++ b/drivers/mfd/tc6393xb.c @@ -522,8 +522,7 @@ static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base) /*--------------------------------------------------------------------------*/ -static void -tc6393xb_irq(unsigned int irq, struct irq_desc *desc) +static void tc6393xb_irq(struct irq_desc *desc) { struct tc6393xb *tc6393xb = irq_desc_get_handler_data(desc); unsigned int isr; diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c index 9a2302129711..f691d7ecad52 100644 --- a/drivers/mfd/ucb1x00-core.c +++ b/drivers/mfd/ucb1x00-core.c @@ -282,7 +282,7 @@ void ucb1x00_adc_disable(struct ucb1x00 *ucb) * SIBCLK to talk to the chip. We leave the clock running until * we have finished processing all interrupts from the chip. */ -static void ucb1x00_irq(unsigned int __irq, struct irq_desc *desc) +static void ucb1x00_irq(struct irq_desc *desc) { struct ucb1x00 *ucb = irq_desc_get_handler_data(desc); unsigned int isr, i; diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile index 6f484dfe78f9..6982f603fadc 100644 --- a/drivers/misc/cxl/Makefile +++ b/drivers/misc/cxl/Makefile @@ -1,4 +1,4 @@ -ccflags-y := -Werror +ccflags-y := -Werror -Wno-unused-const-variable cxl-y += main.o file.o irq.o fault.o native.o cxl-y += context.o sysfs.o debugfs.o pci.o trace.o diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index 8af12c884b04..103baf0e0c5b 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c @@ -105,6 +105,7 @@ EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs); void cxl_free_afu_irqs(struct cxl_context *ctx) { + afu_irq_name_free(ctx); cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter); } EXPORT_SYMBOL_GPL(cxl_free_afu_irqs); diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index e762f85ee233..2faa1270d085 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c @@ -275,6 +275,9 @@ static void reclaim_ctx(struct rcu_head *rcu) if (ctx->kernelapi) kfree(ctx->mapping); + if (ctx->irq_bitmap) + kfree(ctx->irq_bitmap); + kfree(ctx); } diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 1c30ef77073d..0cfb9c129f27 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -677,6 +677,7 @@ int cxl_register_serr_irq(struct cxl_afu *afu); void cxl_release_serr_irq(struct cxl_afu *afu); int afu_register_irqs(struct cxl_context *ctx, u32 count); void afu_release_irqs(struct cxl_context *ctx, void *cookie); +void afu_irq_name_free(struct cxl_context *ctx); irqreturn_t cxl_slice_irq_err(int irq, void 
*data); int cxl_debugfs_init(void); diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index a30bf285b5bd..7ccd2998be92 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c @@ -120,9 +120,16 @@ int afu_release(struct inode *inode, struct file *file) __func__, ctx->pe); cxl_context_detach(ctx); - mutex_lock(&ctx->mapping_lock); - ctx->mapping = NULL; - mutex_unlock(&ctx->mapping_lock); + + /* + * Delete the context's mapping pointer, unless it's created by the + * kernel API, in which case leave it so it can be freed by reclaim_ctx() + */ + if (!ctx->kernelapi) { + mutex_lock(&ctx->mapping_lock); + ctx->mapping = NULL; + mutex_unlock(&ctx->mapping_lock); + } put_device(&ctx->afu->dev); diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c index 583b42afeda2..09a406058c46 100644 --- a/drivers/misc/cxl/irq.c +++ b/drivers/misc/cxl/irq.c @@ -414,7 +414,7 @@ void cxl_release_psl_irq(struct cxl_afu *afu) kfree(afu->psl_irq_name); } -static void afu_irq_name_free(struct cxl_context *ctx) +void afu_irq_name_free(struct cxl_context *ctx) { struct cxl_irq_name *irq_name, *tmp; @@ -524,7 +524,5 @@ void afu_release_irqs(struct cxl_context *ctx, void *cookie) afu_irq_name_free(ctx); cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter); - kfree(ctx->irq_bitmap); - ctx->irq_bitmap = NULL; ctx->irq_count = 0; } diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index b37f2e8004f5..d2e75c88f4d2 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -457,6 +457,7 @@ static int activate_afu_directed(struct cxl_afu *afu) dev_info(&afu->dev, "Activating AFU directed mode\n"); + afu->num_procs = afu->max_procs_virtualised; if (afu->spa == NULL) { if (cxl_alloc_spa(afu)) return -ENOMEM; @@ -468,7 +469,6 @@ static int activate_afu_directed(struct cxl_afu *afu) cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L); afu->current_mode = CXL_MODE_DIRECTED; - afu->num_procs = afu->max_procs_virtualised; if ((rc = cxl_chardev_m_afu_add(afu))) return rc; diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 02c85160bfe9..85761d7eb333 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1035,6 +1035,32 @@ static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev) return 0; } +/* + * Workaround a PCIe Host Bridge defect on some cards, that can cause + * malformed Transaction Layer Packet (TLP) errors to be erroneously + * reported. Mask this error in the Uncorrectable Error Mask Register. + * + * The upper nibble of the PSL revision is used to distinguish between + * different cards. The affected ones have it set to 0. 
+ */ +static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev) +{ + int aer; + u32 data; + + if (adapter->psl_rev & 0xf000) + return; + if (!(aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))) + return; + pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &data); + if (data & PCI_ERR_UNC_MALF_TLP) + if (data & PCI_ERR_UNC_INTN) + return; + data |= PCI_ERR_UNC_MALF_TLP; + data |= PCI_ERR_UNC_INTN; + pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data); +} + static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev) { if (adapter->vsec_status & CXL_STATUS_SECOND_PORT) @@ -1134,6 +1160,8 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev) if ((rc = cxl_vsec_looks_ok(adapter, dev))) return rc; + cxl_fixup_malformed_tlp(adapter, dev); + if ((rc = setup_cxl_bars(dev))) return rc; @@ -1249,8 +1277,6 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) int slice; int rc; - pci_dev_get(dev); - if (cxl_verbose) dump_cxl_config_space(dev); diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c index 25868c2ec03e..02006f7109a8 100644 --- a/drivers/misc/cxl/sysfs.c +++ b/drivers/misc/cxl/sysfs.c @@ -592,6 +592,8 @@ int cxl_sysfs_afu_add(struct cxl_afu *afu) /* conditionally create the add the binary file for error info buffer */ if (afu->eb_len) { + sysfs_attr_init(&afu->attr_eb.attr); + afu->attr_eb.attr.name = "afu_err_buff"; afu->attr_eb.attr.mode = S_IRUGO; afu->attr_eb.size = afu->eb_len; diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c index 6dd16a6d153f..94b520896b18 100644 --- a/drivers/misc/cxl/vphb.c +++ b/drivers/misc/cxl/vphb.c @@ -48,6 +48,12 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev) phb = pci_bus_to_host(dev->bus); afu = (struct cxl_afu *)phb->private_data; + + if (!cxl_adapter_link_ok(afu->adapter)) { + dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__); + return false; + } + set_dma_ops(&dev->dev, &dma_direct_ops); set_dma_offset(&dev->dev, PAGE_OFFSET); diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c index 4b469cf9e60f..8504dbeacd3b 100644 --- a/drivers/misc/mei/debugfs.c +++ b/drivers/misc/mei/debugfs.c @@ -204,6 +204,8 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name) if (!dir) return -ENOMEM; + dev->dbgfs_dir = dir; + f = debugfs_create_file("meclients", S_IRUSR, dir, dev, &mei_dbgfs_fops_meclients); if (!f) { @@ -228,7 +230,6 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name) dev_err(dev->dev, "allow_fixed_address: registration failed\n"); goto err; } - dev->dbgfs_dir = dir; return 0; err: mei_dbgfs_deregister(dev); diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 8eec887c8f70..6d7c188fb65c 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -1209,7 +1209,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) * after the host receives the enum_resp * message clients may be added or removed */ - if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS && + if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS || dev->hbm_state >= MEI_HBM_STOPPED) { dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n", dev->dev_state, dev->hbm_state); diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index b78cf5d403a3..7fc9174d4619 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c @@ -2263,15 +2263,12 @@ static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test) /* * 
eMMC hardware reset. */ -static int mmc_test_hw_reset(struct mmc_test_card *test) +static int mmc_test_reset(struct mmc_test_card *test) { struct mmc_card *card = test->card; struct mmc_host *host = card->host; int err; - if (!mmc_card_mmc(card) || !mmc_can_reset(card)) - return RESULT_UNSUP_CARD; - err = mmc_hw_reset(host); if (!err) return RESULT_OK; @@ -2605,8 +2602,8 @@ static const struct mmc_test_case mmc_test_cases[] = { }, { - .name = "eMMC hardware reset", - .run = mmc_test_hw_reset, + .name = "Reset test", + .run = mmc_test_reset, }, }; diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 0520064dc33b..a3eb20bdcd97 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -134,9 +134,11 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) int err = cmd->error; /* Flag re-tuning needed on CRC errors */ - if (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || + if ((cmd->opcode != MMC_SEND_TUNING_BLOCK && + cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) && + (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || (mrq->data && mrq->data->error == -EILSEQ) || - (mrq->stop && mrq->stop->error == -EILSEQ)) + (mrq->stop && mrq->stop->error == -EILSEQ))) mmc_retune_needed(host); if (err && cmd->retries && mmc_host_is_spi(host)) { diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index abd933b7029b..5466f25f0281 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -457,7 +457,7 @@ int mmc_of_parse(struct mmc_host *host) 0, &cd_gpio_invert); if (!ret) dev_info(host->parent, "Got CD GPIO\n"); - else if (ret != -ENOENT) + else if (ret != -ENOENT && ret != -ENOSYS) return ret; /* @@ -481,7 +481,7 @@ int mmc_of_parse(struct mmc_host *host) ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert); if (!ret) dev_info(host->parent, "Got WP GPIO\n"); - else if (ret != -ENOENT) + else if (ret != -ENOENT && ret != -ENOSYS) return ret; if (of_property_read_bool(np, "disable-wp")) diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index e726903170a8..f6cd995dbe92 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1924,7 +1924,6 @@ EXPORT_SYMBOL(mmc_can_reset); static int mmc_reset(struct mmc_host *host) { struct mmc_card *card = host->card; - u32 status; if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) return -EOPNOTSUPP; @@ -1937,12 +1936,6 @@ static int mmc_reset(struct mmc_host *host) host->ops->hw_reset(host); - /* If the reset has happened, then a status command will fail */ - if (!mmc_send_status(card, &status)) { - mmc_host_clk_release(host); - return -ENOSYS; - } - /* Set initial state and call mmc_set_ios */ mmc_set_initial_state(host); mmc_host_clk_release(host); diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 781e4db31767..7fb0753abe30 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -182,6 +182,7 @@ struct omap_hsmmc_host { struct clk *fclk; struct clk *dbclk; struct regulator *pbias; + bool pbias_enabled; void __iomem *base; int vqmmc_enabled; resource_size_t mapbase; @@ -328,20 +329,22 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on, return ret; } - if (!regulator_is_enabled(host->pbias)) { + if (host->pbias_enabled == 0) { ret = regulator_enable(host->pbias); if (ret) { dev_err(host->dev, "pbias reg enable fail\n"); return ret; } + host->pbias_enabled = 1; } } else { - if (regulator_is_enabled(host->pbias)) { + if (host->pbias_enabled == 1) { ret 
= regulator_disable(host->pbias); if (ret) { dev_err(host->dev, "pbias reg disable fail\n"); return ret; } + host->pbias_enabled = 0; } } @@ -475,7 +478,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc"); if (IS_ERR(mmc->supply.vmmc)) { ret = PTR_ERR(mmc->supply.vmmc); - if (ret != -ENODEV) + if ((ret != -ENODEV) && host->dev->of_node) return ret; dev_dbg(host->dev, "unable to get vmmc regulator %ld\n", PTR_ERR(mmc->supply.vmmc)); @@ -490,7 +493,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux"); if (IS_ERR(mmc->supply.vqmmc)) { ret = PTR_ERR(mmc->supply.vqmmc); - if (ret != -ENODEV) + if ((ret != -ENODEV) && host->dev->of_node) return ret; dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n", PTR_ERR(mmc->supply.vqmmc)); @@ -500,7 +503,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) host->pbias = devm_regulator_get_optional(host->dev, "pbias"); if (IS_ERR(host->pbias)) { ret = PTR_ERR(host->pbias); - if (ret != -ENODEV) + if ((ret != -ENODEV) && host->dev->of_node) return ret; dev_dbg(host->dev, "unable to get pbias regulator %ld\n", PTR_ERR(host->pbias)); @@ -2053,6 +2056,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev) host->base = base + pdata->reg_offset; host->power_mode = MMC_POWER_OFF; host->next_data.cookie = 1; + host->pbias_enabled = 0; host->vqmmc_enabled = 0; ret = omap_hsmmc_gpio_init(mmc, host, pdata); diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 1420f29628c7..8cadd74e8407 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -28,6 +28,7 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/mmc/host.h> +#include <linux/mmc/slot-gpio.h> #include <linux/io.h> #include <linux/regulator/consumer.h> #include <linux/gpio.h> @@ -454,12 +455,8 @@ static int pxamci_get_ro(struct mmc_host *mmc) { struct pxamci_host *host = mmc_priv(mmc); - if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) { - if (host->pdata->gpio_card_ro_invert) - return !gpio_get_value(host->pdata->gpio_card_ro); - else - return gpio_get_value(host->pdata->gpio_card_ro); - } + if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) + return mmc_gpio_get_ro(mmc); if (host->pdata && host->pdata->get_ro) return !!host->pdata->get_ro(mmc_dev(mmc)); /* @@ -551,6 +548,7 @@ static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable) static const struct mmc_host_ops pxamci_ops = { .request = pxamci_request, + .get_cd = mmc_gpio_get_cd, .get_ro = pxamci_get_ro, .set_ios = pxamci_set_ios, .enable_sdio_irq = pxamci_enable_sdio_irq, @@ -790,37 +788,31 @@ static int pxamci_probe(struct platform_device *pdev) gpio_power = host->pdata->gpio_power; } if (gpio_is_valid(gpio_power)) { - ret = gpio_request(gpio_power, "mmc card power"); + ret = devm_gpio_request(&pdev->dev, gpio_power, + "mmc card power"); if (ret) { - dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power); + dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", + gpio_power); goto out; } gpio_direction_output(gpio_power, host->pdata->gpio_power_invert); } - if (gpio_is_valid(gpio_ro)) { - ret = gpio_request(gpio_ro, "mmc card read only"); - if (ret) { - dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); - goto err_gpio_ro; - } - gpio_direction_input(gpio_ro); + if (gpio_is_valid(gpio_ro)) + ret = mmc_gpio_request_ro(mmc, gpio_ro); + if (ret) { + 
dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); + goto out; + } else { + mmc->caps |= host->pdata->gpio_card_ro_invert ? + MMC_CAP2_RO_ACTIVE_HIGH : 0; } - if (gpio_is_valid(gpio_cd)) { - ret = gpio_request(gpio_cd, "mmc card detect"); - if (ret) { - dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd); - goto err_gpio_cd; - } - gpio_direction_input(gpio_cd); - ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, - "mmc card detect", mmc); - if (ret) { - dev_err(&pdev->dev, "failed to request card detect IRQ\n"); - goto err_request_irq; - } + if (gpio_is_valid(gpio_cd)) + ret = mmc_gpio_request_cd(mmc, gpio_cd, 0); + if (ret) { + dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd); + goto out; } if (host->pdata && host->pdata->init) @@ -835,13 +827,7 @@ static int pxamci_probe(struct platform_device *pdev) return 0; -err_request_irq: - gpio_free(gpio_cd); -err_gpio_cd: - gpio_free(gpio_ro); -err_gpio_ro: - gpio_free(gpio_power); - out: +out: if (host) { if (host->dma_chan_rx) dma_release_channel(host->dma_chan_rx); @@ -873,14 +859,6 @@ static int pxamci_remove(struct platform_device *pdev) gpio_ro = host->pdata->gpio_card_ro; gpio_power = host->pdata->gpio_power; } - if (gpio_is_valid(gpio_cd)) { - free_irq(gpio_to_irq(gpio_cd), mmc); - gpio_free(gpio_cd); - } - if (gpio_is_valid(gpio_ro)) - gpio_free(gpio_ro); - if (gpio_is_valid(gpio_power)) - gpio_free(gpio_power); if (host->vcc) regulator_put(host->vcc); diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index d1556643a41d..a0f05de5409f 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -43,6 +43,7 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = { static const struct sdhci_pltfm_data soc_data_sama5d2 = { .ops = &sdhci_at91_sama5d2_ops, + .quirks2 = SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST, }; static const struct of_device_id sdhci_at91_dt_match[] = { diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 946d37f94a31..f5edf9d3a18a 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -135,6 +135,7 @@ static int armada_38x_quirks(struct platform_device *pdev, struct sdhci_pxa *pxa = pltfm_host->priv; struct resource *res; + host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN; host->quirks |= SDHCI_QUIRK_MISSING_CAPS; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "conf-sdio3"); @@ -290,6 +291,9 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) uhs == MMC_TIMING_UHS_DDR50) { reg_val &= ~SDIO3_CONF_CLK_INV; reg_val |= SDIO3_CONF_SD_FB_CLK; + } else if (uhs == MMC_TIMING_MMC_HS) { + reg_val &= ~SDIO3_CONF_CLK_INV; + reg_val &= ~SDIO3_CONF_SD_FB_CLK; } else { reg_val |= SDIO3_CONF_CLK_INV; reg_val &= ~SDIO3_CONF_SD_FB_CLK; @@ -398,7 +402,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { ret = armada_38x_quirks(pdev, host); if (ret < 0) - goto err_clk_get; + goto err_mbus_win; ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); if (ret < 0) goto err_mbus_win; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 64b7fdbd1a9c..fbc7efdddcb5 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1160,6 +1160,8 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) host->mmc->actual_clock = 0; sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + if (host->quirks2 & 
SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST) + mdelay(1); if (clock == 0) return; diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 7c02ff46c8ac..9d4aa31b683a 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -412,6 +412,11 @@ struct sdhci_host { #define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14) /* Broken Clock divider zero in controller */ #define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15) +/* + * When internal clock is disabled, a delay is needed before modifying the + * SD clock frequency or enabling back the internal clock. + */ +#define SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST (1<<16) int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index a7b7a6771598..b981b8552e43 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -210,6 +210,16 @@ #define SDXC_IDMAC_DES0_CES BIT(30) /* card error summary */ #define SDXC_IDMAC_DES0_OWN BIT(31) /* 1-idma owns it, 0-host owns it */ +#define SDXC_CLK_400K 0 +#define SDXC_CLK_25M 1 +#define SDXC_CLK_50M 2 +#define SDXC_CLK_50M_DDR 3 + +struct sunxi_mmc_clk_delay { + u32 output; + u32 sample; +}; + struct sunxi_idma_des { u32 config; u32 buf_size; @@ -229,6 +239,7 @@ struct sunxi_mmc_host { struct clk *clk_mmc; struct clk *clk_sample; struct clk *clk_output; + const struct sunxi_mmc_clk_delay *clk_delays; /* irq */ spinlock_t lock; @@ -654,25 +665,19 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, /* determine delays */ if (rate <= 400000) { - oclk_dly = 180; - sclk_dly = 42; + oclk_dly = host->clk_delays[SDXC_CLK_400K].output; + sclk_dly = host->clk_delays[SDXC_CLK_400K].sample; } else if (rate <= 25000000) { - oclk_dly = 180; - sclk_dly = 75; + oclk_dly = host->clk_delays[SDXC_CLK_25M].output; + sclk_dly = host->clk_delays[SDXC_CLK_25M].sample; } else if (rate <= 50000000) { if (ios->timing == MMC_TIMING_UHS_DDR50) { - oclk_dly = 60; - sclk_dly = 120; + oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output; + sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample; } else { - oclk_dly = 90; - sclk_dly = 150; + oclk_dly = host->clk_delays[SDXC_CLK_50M].output; + sclk_dly = host->clk_delays[SDXC_CLK_50M].sample; } - } else if (rate <= 100000000) { - oclk_dly = 6; - sclk_dly = 24; - } else if (rate <= 200000000) { - oclk_dly = 3; - sclk_dly = 12; } else { return -EINVAL; } @@ -871,6 +876,7 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) static const struct of_device_id sunxi_mmc_of_match[] = { { .compatible = "allwinner,sun4i-a10-mmc", }, { .compatible = "allwinner,sun5i-a13-mmc", }, + { .compatible = "allwinner,sun9i-a80-mmc", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match); @@ -884,6 +890,20 @@ static struct mmc_host_ops sunxi_mmc_ops = { .hw_reset = sunxi_mmc_hw_reset, }; +static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = { + [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, + [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, + [SDXC_CLK_50M] = { .output = 90, .sample = 120 }, + [SDXC_CLK_50M_DDR] = { .output = 60, .sample = 120 }, +}; + +static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = { + [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, + [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, + [SDXC_CLK_50M] = { .output = 150, .sample = 120 }, + [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 }, +}; + static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, struct platform_device *pdev) 
{ @@ -895,6 +915,11 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, else host->idma_des_size_bits = 16; + if (of_device_is_compatible(np, "allwinner,sun9i-a80-mmc")) + host->clk_delays = sun9i_mmc_clk_delays; + else + host->clk_delays = sunxi_mmc_clk_delays; + ret = mmc_regulator_get_supply(host->mmc); if (ret) { if (ret != -EPROBE_DEFER) diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 2426db88db36..f04445b992f5 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c @@ -879,7 +879,7 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom) oob_chunk_size); /* the last chunk */ - memcpy16_toio(&s[oob_chunk_size * sparebuf_size], + memcpy16_toio(&s[i * sparebuf_size], &d[i * oob_chunk_size], host->used_oobsize - i * oob_chunk_size); } diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index f97a58d6aae1..e7d333c162be 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c @@ -147,6 +147,10 @@ #define NFC_ECC_MODE GENMASK(15, 12) #define NFC_RANDOM_SEED GENMASK(30, 16) +/* NFC_USER_DATA helper macros */ +#define NFC_BUF_TO_USER_DATA(buf) ((buf)[0] | ((buf)[1] << 8) | \ + ((buf)[2] << 16) | ((buf)[3] << 24)) + #define NFC_DEFAULT_TIMEOUT_MS 1000 #define NFC_SRAM_SIZE 1024 @@ -646,15 +650,9 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd, offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize; /* Fill OOB data in */ - if (oob_required) { - tmp = 0xffffffff; - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp, - 4); - } else { - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, - chip->oob_poi + offset - mtd->writesize, - 4); - } + writel(NFC_BUF_TO_USER_DATA(chip->oob_poi + + layout->oobfree[i].offset), + nfc->regs + NFC_REG_USER_DATA_BASE); chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1); @@ -784,14 +782,8 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd, offset += ecc->size; /* Fill OOB data in */ - if (oob_required) { - tmp = 0xffffffff; - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp, - 4); - } else { - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, oob, - 4); - } + writel(NFC_BUF_TO_USER_DATA(oob), + nfc->regs + NFC_REG_USER_DATA_BASE); tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR | (1 << 30); @@ -1389,6 +1381,7 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc) node); nand_release(&chip->mtd); sunxi_nand_ecc_cleanup(&chip->nand.ecc); + list_del(&chip->node); } } diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index 5bbd1f094f4e..1fc23e48fe8e 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -926,6 +926,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi, goto bad; } + if (data_size > ubi->leb_size) { + ubi_err(ubi, "bad data_size"); + goto bad; + } + if (vol_type == UBI_VID_STATIC) { /* * Although from high-level point of view static volumes may diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 80bdd5b88bac..d85c19762160 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c @@ -649,6 +649,7 @@ static int init_volumes(struct ubi_device *ubi, if (ubi->corr_peb_count) ubi_err(ubi, "%d PEBs are corrupted and not used", ubi->corr_peb_count); + return -ENOSPC; } ubi->rsvd_pebs += reserved_pebs; ubi->avail_pebs -= reserved_pebs; diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 275d9fb6fe5c..eb4489f9082f 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -1601,6 +1601,7 @@ int ubi_wl_init(struct ubi_device *ubi, 
struct ubi_attach_info *ai) if (ubi->corr_peb_count) ubi_err(ubi, "%d PEBs are corrupted and not used", ubi->corr_peb_count); + err = -ENOSPC; goto out_free; } ubi->avail_pebs -= reserved_pebs; diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 10f71c732b59..816d0e94961c 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -326,7 +326,7 @@ static void arcdev_setup(struct net_device *dev) dev->type = ARPHRD_ARCNET; dev->netdev_ops = &arcnet_netdev_ops; dev->header_ops = &arcnet_header_ops; - dev->hard_header_len = sizeof(struct archdr); + dev->hard_header_len = sizeof(struct arc_hardware); dev->mtu = choose_mtu(); dev->addr_len = ARCNET_ALEN; diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index e5fac368068a..131026fbc2d7 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -87,6 +87,7 @@ static const struct pci_device_id peak_pci_tbl[] = { {PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,}, #ifdef CONFIG_CAN_PEAK_PCIEC {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 6f13f7206762..1f7dd927cc5e 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2000,6 +2000,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) */ reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL); if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { + reg &= ~PORT_PCS_CTRL_UNFORCED; reg |= PORT_PCS_CTRL_FORCE_LINK | PORT_PCS_CTRL_LINK_UP | PORT_PCS_CTRL_DUPLEX_FULL | @@ -2050,6 +2051,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA; else reg |= PORT_CONTROL_FRAME_MODE_DSA; + reg |= PORT_CONTROL_FORWARD_UNKNOWN | + PORT_CONTROL_FORWARD_UNKNOWN_MC; } if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index 2c063b60db4b..96f485ab612e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -327,9 +327,13 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata) pdata->debugfs_xpcs_reg = 0; buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name); + if (!buf) + return; + pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL); if (!pdata->xgbe_debugfs) { netdev_err(pdata->netdev, "debugfs_create_dir failed\n"); + kfree(buf); return; } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index cfa37041ab71..c4bb8027b3fb 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -689,16 +689,24 @@ static int xgene_enet_phy_connect(struct net_device *ndev) netdev_dbg(ndev, "No phy-handle found in DT\n"); return -ENODEV; } - pdata->phy_dev = of_phy_find_device(phy_np); - } - phy_dev = pdata->phy_dev; + phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link, + 0, pdata->phy_mode); + if (!phy_dev) { + netdev_err(ndev, "Could not connect to PHY\n"); + return -ENODEV; + } + + pdata->phy_dev = phy_dev; + } else { + 
phy_dev = pdata->phy_dev; - if (!phy_dev || - phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link, - pdata->phy_mode)) { - netdev_err(ndev, "Could not connect to PHY\n"); - return -ENODEV; + if (!phy_dev || + phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link, + pdata->phy_mode)) { + netdev_err(ndev, "Could not connect to PHY\n"); + return -ENODEV; + } } pdata->phy_speed = SPEED_UNKNOWN; diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c index f9cb99bfb511..ffd180570920 100644 --- a/drivers/net/ethernet/arc/emac_arc.c +++ b/drivers/net/ethernet/arc/emac_arc.c @@ -78,6 +78,7 @@ static const struct of_device_id emac_arc_dt_ids[] = { { .compatible = "snps,arc-emac" }, { /* Sentinel */ } }; +MODULE_DEVICE_TABLE(of, emac_arc_dt_ids); static struct platform_driver emac_arc_driver = { .probe = emac_arc_probe, diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index b9a5a97ed4dd..f1b5364f3521 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2079,6 +2079,7 @@ static const struct of_device_id bcm_sysport_of_match[] = { { .compatible = "brcm,systemport" }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, bcm_sysport_of_match); static struct platform_driver bcm_sysport_driver = { .probe = bcm_sysport_probe, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index ba936635322a..b5e64b02200c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1946,6 +1946,7 @@ struct bnx2x { u16 vlan_cnt; u16 vlan_credit; u16 vxlan_dst_port; + u8 vxlan_dst_port_count; bool accept_any_vlan; }; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index aeb7ce64452e..be628bd9fb18 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -3351,6 +3351,13 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) udp_rss_requested = 0; else return -EINVAL; + + if (CHIP_IS_E1x(bp) && udp_rss_requested) { + DP(BNX2X_MSG_ETHTOOL, + "57710, 57711 boards don't support RSS according to UDP 4-tuple\n"); + return -EINVAL; + } + if ((info->flow_type == UDP_V4_FLOW) && (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) { bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e3da2bddf143..f1d62d5dbaff 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -3705,16 +3705,14 @@ out: void bnx2x_update_mfw_dump(struct bnx2x *bp) { - struct timeval epoc; u32 drv_ver; u32 valid_dump; if (!SHMEM2_HAS(bp, drv_info)) return; - /* Update Driver load time */ - do_gettimeofday(&epoc); - SHMEM2_WR(bp, drv_info.epoc, epoc.tv_sec); + /* Update Driver load time, possibly broken in y2038 */ + SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds()); drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true); SHMEM2_WR(bp, drv_info.drv_ver, drv_ver); @@ -10110,12 +10108,18 @@ static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port) if (!netif_running(bp->dev)) return; - if (bp->vxlan_dst_port || !IS_PF(bp)) { + if (bp->vxlan_dst_port_count && bp->vxlan_dst_port == port) { + bp->vxlan_dst_port_count++; + return; + } + + if 
(bp->vxlan_dst_port_count || !IS_PF(bp)) { DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n"); return; } bp->vxlan_dst_port = port; + bp->vxlan_dst_port_count = 1; bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0); } @@ -10130,10 +10134,14 @@ static void bnx2x_add_vxlan_port(struct net_device *netdev, static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port) { - if (!bp->vxlan_dst_port || bp->vxlan_dst_port != port || !IS_PF(bp)) { + if (!bp->vxlan_dst_port_count || bp->vxlan_dst_port != port || + !IS_PF(bp)) { DP(BNX2X_MSG_SP, "Invalid vxlan port\n"); return; } + bp->vxlan_dst_port--; + if (bp->vxlan_dst_port) + return; if (netif_running(bp->dev)) { bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index c9bd7f16018e..ff702a707a91 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -4319,8 +4319,16 @@ static int bnx2x_setup_rss(struct bnx2x *bp, /* RSS keys */ if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { - memcpy(&data->rss_key[0], &p->rss_key[0], - sizeof(data->rss_key)); + u8 *dst = (u8 *)(data->rss_key) + sizeof(data->rss_key); + const u8 *src = (const u8 *)p->rss_key; + int i; + + /* Apparently, bnx2x reads this array in reverse order + * We need to byte swap rss_key to comply with Toeplitz specs. + */ + for (i = 0; i < sizeof(data->rss_key); i++) + *--dst = *src++; + caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; } diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index fadbd0088d3e..1805541b4240 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1683,6 +1683,24 @@ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); } +static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) +{ + u32 int0_enable = 0; + + /* Monitor cable plug/unplugged event for internal PHY, external PHY + * and MoCA PHY + */ + if (priv->internal_phy) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + } else if (priv->ext_phy) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + int0_enable |= UMAC_IRQ_LINK_EVENT; + } + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); +} + static int init_umac(struct bcmgenet_priv *priv) { struct device *kdev = &priv->pdev->dev; @@ -1723,15 +1741,8 @@ static int init_umac(struct bcmgenet_priv *priv) /* Enable Tx default queue 16 interrupts */ int0_enable |= UMAC_IRQ_TXDMA_DONE; - /* Monitor cable plug/unplugged event for internal PHY */ - if (priv->internal_phy) { - int0_enable |= UMAC_IRQ_LINK_EVENT; - } else if (priv->ext_phy) { - int0_enable |= UMAC_IRQ_LINK_EVENT; - } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { - if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) - int0_enable |= UMAC_IRQ_LINK_EVENT; - + /* Configure backpressure vectors for MoCA */ + if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { reg = bcmgenet_bp_mc_get(priv); reg |= BIT(priv->hw_params->bp_in_en_shift); @@ -2645,6 +2656,9 @@ static void bcmgenet_netif_start(struct net_device *dev) netif_tx_start_all_queues(dev); + /* Monitor link interrupts now */ + bcmgenet_link_intr_enable(priv); + phy_start(priv->phydev); } @@ -3155,6 +3169,7 @@ static const struct 
of_device_id bcmgenet_match[] = { { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 }, { }, }; +MODULE_DEVICE_TABLE(of, bcmgenet_match); static int bcmgenet_probe(struct platform_device *pdev) { diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index b7a0f7879de2..9e59663a6ead 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -1543,7 +1543,7 @@ bfa_flash_cmd_act_check(void __iomem *pci_bar) } /* Flush FLI data fifo. */ -static u32 +static int bfa_flash_fifo_flush(void __iomem *pci_bar) { u32 i; @@ -1573,11 +1573,11 @@ bfa_flash_fifo_flush(void __iomem *pci_bar) } /* Read flash status. */ -static u32 +static int bfa_flash_status_read(void __iomem *pci_bar) { union bfa_flash_dev_status_reg dev_status; - u32 status; + int status; u32 ret_status; int i; @@ -1611,11 +1611,11 @@ bfa_flash_status_read(void __iomem *pci_bar) } /* Start flash read operation. */ -static u32 +static int bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len, char *buf) { - u32 status; + int status; /* len must be mutiple of 4 and not exceeding fifo size */ if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0) @@ -1703,7 +1703,8 @@ static enum bfa_status bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf, u32 len) { - u32 n, status; + u32 n; + int status; u32 off, l, s, residue, fifo_sz; residue = len; diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index 5d0753cc7e73..04b0d16b210e 100644 --- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -2400,6 +2400,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, q0->rcb->id = 0; q0->rx_packets = q0->rx_bytes = 0; q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0; + q0->rxbuf_map_failed = 0; bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE, &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]); @@ -2428,6 +2429,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, : rx_cfg->q1_buf_size; q1->rx_packets = q1->rx_bytes = 0; q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0; + q1->rxbuf_map_failed = 0; bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE, &hqpt_mem[i], &hsqpt_mem[i], diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h index e0e797f2ea14..c438d032e8bf 100644 --- a/drivers/net/ethernet/brocade/bna/bna_types.h +++ b/drivers/net/ethernet/brocade/bna/bna_types.h @@ -587,6 +587,7 @@ struct bna_rxq { u64 rx_bytes; u64 rx_packets_with_error; u64 rxbuf_alloc_failed; + u64 rxbuf_map_failed; }; /* RxQ pair */ diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 506047c38607..21a0cfc3e7ec 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -399,7 +399,13 @@ bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) } dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset, - unmap_q->map_size, DMA_FROM_DEVICE); + unmap_q->map_size, DMA_FROM_DEVICE); + if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { + put_page(page); + BNAD_UPDATE_CTR(bnad, rxbuf_map_failed); + rcb->rxq->rxbuf_map_failed++; + goto finishing; + } unmap->page = page; unmap->page_offset = page_offset; @@ -454,8 +460,15 @@ bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) rcb->rxq->rxbuf_alloc_failed++; goto finishing; } + dma_addr = 
dma_map_single(&bnad->pcidev->dev, skb->data, buff_sz, DMA_FROM_DEVICE); + if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, rxbuf_map_failed); + rcb->rxq->rxbuf_map_failed++; + goto finishing; + } unmap->skb = skb; dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); @@ -3025,6 +3038,11 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) unmap = head_unmap; dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_map_failed); + return NETDEV_TX_OK; + } BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr); txqent->vector[0].length = htons(len); dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr); @@ -3056,6 +3074,15 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, 0, size, DMA_TO_DEVICE); + if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { + /* Undo the changes starting at tcb->producer_index */ + bnad_tx_buff_unmap(bnad, unmap_q, q_depth, + tcb->producer_index); + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_map_failed); + return NETDEV_TX_OK; + } + dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size); BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); txqent->vector[vect_id].length = htons(size); diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h index faedbf24777e..f4ed816b93ee 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.h +++ b/drivers/net/ethernet/brocade/bna/bnad.h @@ -175,6 +175,7 @@ struct bnad_drv_stats { u64 tx_skb_headlen_zero; u64 tx_skb_frag_zero; u64 tx_skb_len_mismatch; + u64 tx_skb_map_failed; u64 hw_stats_updates; u64 netif_rx_dropped; @@ -189,6 +190,7 @@ struct bnad_drv_stats { u64 rx_unmap_q_alloc_failed; u64 rxbuf_alloc_failed; + u64 rxbuf_map_failed; }; /* Complete driver stats */ diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index 2bdfc5dff4b1..0e4fdc3dd729 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -90,6 +90,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { "tx_skb_headlen_zero", "tx_skb_frag_zero", "tx_skb_len_mismatch", + "tx_skb_map_failed", "hw_stats_updates", "netif_rx_dropped", @@ -102,6 +103,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { "tx_unmap_q_alloc_failed", "rx_unmap_q_alloc_failed", "rxbuf_alloc_failed", + "rxbuf_map_failed", "mac_stats_clr_cnt", "mac_frame_64", @@ -807,6 +809,7 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi) rx_packets_with_error; buf[bi++] = rcb->rxq-> rxbuf_alloc_failed; + buf[bi++] = rcb->rxq->rxbuf_map_failed; buf[bi++] = rcb->producer_index; buf[bi++] = rcb->consumer_index; } @@ -821,6 +824,7 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi) rx_packets_with_error; buf[bi++] = rcb->rxq-> rxbuf_alloc_failed; + buf[bi++] = rcb->rxq->rxbuf_map_failed; buf[bi++] = rcb->producer_index; buf[bi++] = rcb->consumer_index; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 8353a6cbfcc2..03ed00c49823 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -157,6 +157,11 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN 
CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */ CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */ CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5093), /* Custom T580-LP-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5094), /* Custom T540-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5095), /* Custom T540-CR-SO */ + CH_PCI_ID_TABLE_FENTRY(0x5096), /* Custom T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5097), /* Custom T520-KR */ /* T6 adapters: */ diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 0a27805cbbbd..d463563e1f70 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -582,6 +582,7 @@ struct be_adapter { u16 pvid; __be16 vxlan_port; int vxlan_port_count; + int vxlan_port_aliases; struct phy_info phy; u8 wol_cap; bool wol_en; @@ -591,6 +592,7 @@ struct be_adapter { int be_get_temp_freq; struct be_hwmon hwmon_info; u8 pf_number; + u8 pci_func_num; struct rss_info rss_info; /* Filters for packets that need to be sent to BMC */ u32 bmc_filt_mask; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index eb323913cd39..1795c935ff02 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -851,8 +851,10 @@ static int be_cmd_notify_wait(struct be_adapter *adapter, return status; dest_wrb = be_cmd_copy(adapter, wrb); - if (!dest_wrb) - return -EBUSY; + if (!dest_wrb) { + status = -EBUSY; + goto unlock; + } if (use_mcc(adapter)) status = be_mcc_notify_wait(adapter); @@ -862,6 +864,7 @@ static int be_cmd_notify_wait(struct be_adapter *adapter, if (!status) memcpy(wrb, dest_wrb, sizeof(*wrb)); +unlock: be_cmd_unlock(adapter); return status; } @@ -1984,6 +1987,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) be_if_cap_flags(adapter)); } flags &= be_if_cap_flags(adapter); + if (!flags) + return -ENOTSUPP; return __be_cmd_rx_filter(adapter, flags, value); } @@ -2887,6 +2892,7 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) if (!status) { attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr); adapter->hba_port_num = attribs->hba_attribs.phy_port; + adapter->pci_func_num = attribs->pci_func_num; serial_num = attribs->hba_attribs.controller_serial_number; for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++) adapter->serial_num[i] = le32_to_cpu(serial_num[i]) & @@ -3709,7 +3715,6 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) status = -EINVAL; goto err; } - adapter->pf_number = desc->pf_num; be_copy_nic_desc(res, desc); } @@ -3721,7 +3726,10 @@ err: return status; } -/* Will use MBOX only if MCCQ has not been created */ +/* Will use MBOX only if MCCQ has not been created + * non-zero domain => a PF is querying this on behalf of a VF + * zero domain => a PF or a VF is querying this for itself + */ int be_cmd_get_profile_config(struct be_adapter *adapter, struct be_resources *res, u8 query, u8 domain) { @@ -3748,10 +3756,15 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, OPCODE_COMMON_GET_PROFILE_CONFIG, cmd.size, &wrb, &cmd); - req->hdr.domain = domain; if (!lancer_chip(adapter)) req->hdr.version = 1; req->type = ACTIVE_PROFILE_TYPE; + /* When a function is querying profile information relating to + * itself hdr.pf_number must be set to it's pci_func_num + 1 + */ + req->hdr.domain = domain; + if (domain == 0) + req->hdr.pf_num = adapter->pci_func_num + 1; /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns 
the * descriptors with all bits set to "1" for the fields which can be @@ -3921,12 +3934,16 @@ static void be_fill_vf_res_template(struct be_adapter *adapter, vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS); } - - nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags); } else { num_vf_qs = 1; } + if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) { + nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT); + vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS; + } + + nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags); nic_vft->rq_count = cpu_to_le16(num_vf_qs); nic_vft->txq_count = cpu_to_le16(num_vf_qs); nic_vft->rssq_count = cpu_to_le16(num_vf_qs); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 7d178bdb112e..91155ea74f34 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -289,7 +289,9 @@ struct be_cmd_req_hdr { u32 timeout; /* dword 1 */ u32 request_length; /* dword 2 */ u8 version; /* dword 3 */ - u8 rsvd[3]; /* dword 3 */ + u8 rsvd1; /* dword 3 */ + u8 pf_num; /* dword 3 */ + u8 rsvd2; /* dword 3 */ }; #define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */ @@ -1652,7 +1654,11 @@ struct mgmt_hba_attribs { struct mgmt_controller_attrib { struct mgmt_hba_attribs hba_attribs; - u32 rsvd0[10]; + u32 rsvd0[2]; + u16 rsvd1; + u8 pci_func_num; + u8 rsvd2; + u32 rsvd3[7]; } __packed; struct be_cmd_req_cntl_attribs { diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 12687bf52b95..eb48a977f8da 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1123,11 +1123,12 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, struct sk_buff *skb, struct be_wrb_params *wrb_params) { - /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or - * less may cause a transmit stall on that port. So the work-around is - * to pad short packets (<= 32 bytes) to a 36-byte length. + /* Lancer, SH and BE3 in SRIOV mode have a bug wherein + * packets that are 32b or less may cause a transmit stall + * on that port. The workaround is to pad such packets + * (len <= 32 bytes) to a minimum length of 36b. */ - if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) { + if (skb->len <= 32) { if (skb_put_padto(skb, 36)) return NULL; } @@ -4205,10 +4206,6 @@ static int be_get_config(struct be_adapter *adapter) int status, level; u16 profile_id; - status = be_cmd_get_cntl_attributes(adapter); - if (status) - return status; - status = be_cmd_query_fw_cfg(adapter); if (status) return status; @@ -4407,6 +4404,11 @@ static int be_setup(struct be_adapter *adapter) if (!lancer_chip(adapter)) be_cmd_req_native_mode(adapter); + /* Need to invoke this cmd first to get the PCI Function Number */ + status = be_cmd_get_cntl_attributes(adapter); + if (status) + return status; + if (!BE2_chip(adapter) && be_physfn(adapter)) be_alloc_sriov_res(adapter); @@ -4999,7 +5001,15 @@ static bool be_check_ufi_compatibility(struct be_adapter *adapter, return false; } - return (fhdr->asic_type_rev >= adapter->asic_rev); + /* In BE3 FW images the "asic_type_rev" field doesn't track the + * asic_rev of the chips it is compatible with. 
+ * When asic_type_rev is 0 the image is compatible only with + * pre-BE3-R chips (asic_rev < 0x10) + */ + if (BEx_chip(adapter) && fhdr->asic_type_rev == 0) + return adapter->asic_rev < 0x10; + else + return (fhdr->asic_type_rev >= adapter->asic_rev); } static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) @@ -5176,6 +5186,11 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) return; + if (adapter->vxlan_port == port && adapter->vxlan_port_count) { + adapter->vxlan_port_aliases++; + return; + } + if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { dev_info(dev, "Only one UDP port supported for VxLAN offloads\n"); @@ -5226,6 +5241,11 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, if (adapter->vxlan_port != port) goto done; + if (adapter->vxlan_port_aliases) { + adapter->vxlan_port_aliases--; + return; + } + be_disable_vxlan_offloads(adapter); dev_info(&adapter->pdev->dev, diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 3c40f6b99224..55c36230e176 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -198,11 +198,13 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus) #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) /* + * Return the TBIPA address, starting from the address + * of the mapped GFAR MDIO registers (struct gfar) * This is mildly evil, but so is our hardware for doing this. * Also, we have to cast back to struct gfar because of * definition weirdness done in gianfar.h. */ -static uint32_t __iomem *get_gfar_tbipa(void __iomem *p) +static uint32_t __iomem *get_gfar_tbipa_from_mdio(void __iomem *p) { struct gfar __iomem *enet_regs = p; @@ -210,6 +212,15 @@ static uint32_t __iomem *get_gfar_tbipa(void __iomem *p) } /* + * Return the TBIPA address, starting from the address + * of the mapped GFAR MII registers (gfar_mii_regs[] within struct gfar) + */ +static uint32_t __iomem *get_gfar_tbipa_from_mii(void __iomem *p) +{ + return get_gfar_tbipa_from_mdio(container_of(p, struct gfar, gfar_mii_regs)); +} + +/* * Return the TBIPAR address for an eTSEC2 node */ static uint32_t __iomem *get_etsec_tbipa(void __iomem *p) @@ -220,11 +231,12 @@ static uint32_t __iomem *get_etsec_tbipa(void __iomem *p) #if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) /* - * Return the TBIPAR address for a QE MDIO node + * Return the TBIPAR address for a QE MDIO node, starting from the address + * of the mapped MII registers (struct fsl_pq_mii) */ static uint32_t __iomem *get_ucc_tbipa(void __iomem *p) { - struct fsl_pq_mdio __iomem *mdio = p; + struct fsl_pq_mdio __iomem *mdio = container_of(p, struct fsl_pq_mdio, mii); return &mdio->utbipar; } @@ -300,14 +312,14 @@ static const struct of_device_id fsl_pq_mdio_match[] = { .compatible = "fsl,gianfar-tbi", .data = &(struct fsl_pq_mdio_data) { .mii_offset = 0, - .get_tbipa = get_gfar_tbipa, + .get_tbipa = get_gfar_tbipa_from_mii, }, }, { .compatible = "fsl,gianfar-mdio", .data = &(struct fsl_pq_mdio_data) { .mii_offset = 0, - .get_tbipa = get_gfar_tbipa, + .get_tbipa = get_gfar_tbipa_from_mii, }, }, { @@ -315,7 +327,7 @@ static const struct of_device_id fsl_pq_mdio_match[] = { .compatible = "gianfar", .data = &(struct fsl_pq_mdio_data) { .mii_offset = offsetof(struct fsl_pq_mdio, mii), - .get_tbipa = get_gfar_tbipa, + .get_tbipa = get_gfar_tbipa_from_mdio, }, }, 
{ @@ -445,6 +457,16 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) tbipa = data->get_tbipa(priv->map); + /* + * Add consistency check to make sure TBI is contained + * within the mapped range (not because we would get a + * segfault, rather to catch bugs in computing TBI + * address). Print error message but continue anyway. + */ + if ((void *)tbipa > priv->map + resource_size(&res) - 4) + dev_err(&pdev->dev, "invalid register map (should be at least 0x%04x to contain TBI address)\n", + ((void *)tbipa - priv->map) + 4); + iowrite32be(be32_to_cpup(prop), tbipa); } } diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 4b69d061d90f..710715fcb23d 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1710,8 +1710,10 @@ static void gfar_configure_serdes(struct net_device *dev) * everything for us? Resetting it takes the link down and requires * several seconds for it to come back. */ - if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) + if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) { + put_device(&tbiphy->dev); return; + } /* Single clk mode, mii mode off(for serdes communication) */ phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); @@ -1723,6 +1725,8 @@ static void gfar_configure_serdes(struct net_device *dev) phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); + + put_device(&tbiphy->dev); } static int __gfar_is_rx_idle(struct gfar_private *priv) @@ -1970,8 +1974,7 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) /* Install our interrupt handlers for Error, * Transmit, and Receive */ - err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, - IRQF_NO_SUSPEND, + err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, gfar_irq(grp, ER)->name, grp); if (err < 0) { netif_err(priv, intr, dev, "Can't get IRQ %d\n", @@ -1979,6 +1982,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) goto err_irq_fail; } + enable_irq_wake(gfar_irq(grp, ER)->irq); + err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, gfar_irq(grp, TX)->name, grp); if (err < 0) { @@ -1994,14 +1999,14 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) goto rx_irq_fail; } } else { - err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, - IRQF_NO_SUSPEND, + err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, gfar_irq(grp, TX)->name, grp); if (err < 0) { netif_err(priv, intr, dev, "Can't get IRQ %d\n", gfar_irq(grp, TX)->irq); goto err_irq_fail; } + enable_irq_wake(gfar_irq(grp, TX)->irq); } return 0; diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 8e3cd77aa347..664d0c261269 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -557,6 +557,7 @@ static const struct of_device_id match_table[] = { { .compatible = "fsl,etsec-ptp" }, {}, }; +MODULE_DEVICE_TABLE(of, match_table); static struct platform_driver gianfar_ptp_driver = { .driver = { diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 4dd40e057f40..650f7888e32b 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -1384,6 +1384,8 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth) value = phy_read(tbiphy, ENET_TBI_MII_CR); value &= ~0x1000; /* Turn off autonegotiation */ phy_write(tbiphy, ENET_TBI_MII_CR, value); + + put_device(&tbiphy->dev); } 
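The gianfar and ucc_geth hunks around this point all apply the same pattern: the TBI PHY returned by the bus lookup carries a device reference, so every exit path must drop it with put_device(). The following is an illustrative sketch of that pattern only, not part of the patches above; the helper name example_touch_tbi is hypothetical, and it assumes the 4.3-era phylib API (of_phy_find_device(), phy_read(), and a struct device embedded as phy_device->dev, as used by the hunks themselves).

#include <linux/of_mdio.h>
#include <linux/phy.h>

/*
 * Illustrative sketch only (not part of the patches above): look up a TBI
 * PHY from a device-tree node, poke one register, and drop the reference
 * that of_phy_find_device() took on every exit path.
 */
static int example_touch_tbi(struct device_node *tbi_np)
{
	struct phy_device *tbiphy = of_phy_find_device(tbi_np);
	int ret = 0;

	if (!tbiphy)
		return -ENODEV;

	/* Nothing to do if the link is already up; still drop the ref. */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		goto out;

	ret = phy_write(tbiphy, MII_BMCR,
			BMCR_ANENABLE | BMCR_ANRESTART |
			BMCR_FULLDPLX | BMCR_SPEED1000);
out:
	put_device(&tbiphy->dev);	/* balances of_phy_find_device() */
	return ret;
}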
init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); @@ -1702,8 +1704,10 @@ static void uec_configure_serdes(struct net_device *dev) * everything for us? Resetting it takes the link down and requires * several seconds for it to come back. */ - if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) + if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) { + put_device(&tbiphy->dev); return; + } /* Single clk mode, mii mode off(for serdes communication) */ phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS); @@ -1711,6 +1715,8 @@ static void uec_configure_serdes(struct net_device *dev) phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT); phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS); + + put_device(&tbiphy->dev); } /* Configure the PHY for dev. diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index cc2d8b4b18e3..253f8ed0537a 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -816,7 +816,7 @@ static int hip04_mac_probe(struct platform_device *pdev) struct net_device *ndev; struct hip04_priv *priv; struct resource *res; - unsigned int irq; + int irq; int ret; ndev = alloc_etherdev(sizeof(struct hip04_priv)); diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index 28df37420da9..ac02c675c59c 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h @@ -460,8 +460,8 @@ struct emac_ethtool_regs_subhdr { u32 index; }; -#define EMAC_ETHTOOL_REGS_VER 0 -#define EMAC4_ETHTOOL_REGS_VER 1 -#define EMAC4SYNC_ETHTOOL_REGS_VER 2 +#define EMAC_ETHTOOL_REGS_VER 3 +#define EMAC4_ETHTOOL_REGS_VER 4 +#define EMAC4SYNC_ETHTOOL_REGS_VER 5 #endif /* __IBM_NEWEMAC_CORE_H */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 3e0d20037675..c0e943aecd13 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -386,7 +386,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) hw->aq.asq.next_to_use = 0; hw->aq.asq.next_to_clean = 0; - hw->aq.asq.count = hw->aq.num_asq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_asq_ring(hw); @@ -404,6 +403,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! */ + hw->aq.asq.count = hw->aq.num_asq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -445,7 +445,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) hw->aq.arq.next_to_use = 0; hw->aq.arq.next_to_clean = 0; - hw->aq.arq.count = hw->aq.num_arq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_arq_ring(hw); @@ -463,6 +462,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! */ + hw->aq.arq.count = hw->aq.num_arq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -946,6 +946,13 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, /* take the lock before we start messing with the ring */ mutex_lock(&hw->aq.arq_mutex); + if (hw->aq.arq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQRX: Admin queue not initialized.\n"); + ret_code = I40E_ERR_QUEUE_EMPTY; + goto clean_arq_element_err; + } + /* set next_to_use to head */ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); if (ntu == ntc) { @@ -1007,6 +1014,8 @@ clean_arq_element_out: /* Set pending if needed, unlock and return */ if (pending != NULL) *pending = (ntc > ntu ? 
hw->aq.arq.count : 0) + (ntu - ntc); + +clean_arq_element_err: mutex_unlock(&hw->aq.arq_mutex); if (i40e_is_nvm_update_op(&e->desc)) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 851c1a159be8..dd44fafd8798 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2672,7 +2672,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) rx_ctx.lrxqthresh = 2; rx_ctx.crcstrip = 1; rx_ctx.l2tsel = 1; - rx_ctx.showiv = 1; + /* this controls whether VLAN is stripped from inner headers */ + rx_ctx.showiv = 0; #ifdef I40E_FCOE rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); #endif @@ -8388,6 +8389,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE | NETIF_F_TSO; netdev->features = NETIF_F_SG | @@ -8395,6 +8397,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_SCTP_CSUM | NETIF_F_HIGHDMA | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index f08450b90774..a23ebfd5cd25 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -373,7 +373,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) hw->aq.asq.next_to_use = 0; hw->aq.asq.next_to_clean = 0; - hw->aq.asq.count = hw->aq.num_asq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_asq_ring(hw); @@ -391,6 +390,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! */ + hw->aq.asq.count = hw->aq.num_asq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -432,7 +432,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) hw->aq.arq.next_to_use = 0; hw->aq.arq.next_to_clean = 0; - hw->aq.arq.count = hw->aq.num_arq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_arq_ring(hw); @@ -450,6 +449,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! */ + hw->aq.arq.count = hw->aq.num_arq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -887,6 +887,13 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, /* take the lock before we start messing with the ring */ mutex_lock(&hw->aq.arq_mutex); + if (hw->aq.arq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQRX: Admin queue not initialized.\n"); + ret_code = I40E_ERR_QUEUE_EMPTY; + goto clean_arq_element_err; + } + /* set next_to_use to head */ ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK); if (ntu == ntc) { @@ -948,6 +955,8 @@ clean_arq_element_out: /* Set pending if needed, unlock and return */ if (pending != NULL) *pending = (ntc > ntu ? 
hw->aq.arq.count : 0) + (ntu - ntc); + +clean_arq_element_err: mutex_unlock(&hw->aq.arq_mutex); return ret_code; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index fe2299ac4f5c..514df76fc70f 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1479,6 +1479,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); struct sk_buff *skb; unsigned char *data; + dma_addr_t phys_addr; u32 rx_status; int rx_bytes, err; @@ -1486,6 +1487,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, rx_status = rx_desc->status; rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); data = (unsigned char *)rx_desc->buf_cookie; + phys_addr = rx_desc->buf_phys_addr; if (!mvneta_rxq_desc_is_first_last(rx_status) || (rx_status & MVNETA_RXD_ERR_SUMMARY)) { @@ -1534,7 +1536,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, if (!skb) goto err_drop_frame; - dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr, + dma_unmap_single(dev->dev.parent, phys_addr, MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); rcvd_pkts++; @@ -3173,6 +3175,8 @@ static int mvneta_probe(struct platform_device *pdev) struct phy_device *phy = of_phy_find_device(dn); mvneta_fixed_link_update(pp, phy); + + put_device(&phy->dev); } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 4402a1e48c9b..e7a5000aa12c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -1047,13 +1047,15 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) /* If we used up all the quota - we're probably not done yet... 
*/ if (done == budget) { - int cpu_curr; const struct cpumask *aff; + struct irq_data *idata; + int cpu_curr; INC_PERF_COUNTER(priv->pstats.napi_quota); cpu_curr = smp_processor_id(); - aff = irq_desc_get_irq_data(cq->irq_desc)->affinity; + idata = irq_desc_get_irq_data(cq->irq_desc); + aff = irq_data_get_affinity_mask(idata); if (likely(cpumask_test_cpu(cpu_curr, aff))) return budget; @@ -1268,8 +1270,6 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) rss_context->hash_fn = MLX4_RSS_HASH_TOP; memcpy(rss_context->rss_key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE); - netdev_rss_key_fill(rss_context->rss_key, - MLX4_EN_RSS_KEY_SIZE); } else { en_err(priv, "Unknown RSS hash function requested\n"); err = -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 8e81e53c370e..c34488479365 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -1364,6 +1364,10 @@ int mlx4_test_interrupts(struct mlx4_dev *dev) * and performing a NOP command */ for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) { + /* Make sure request_irq was called */ + if (!priv->eq_table.eq[i].have_irq) + continue; + /* Temporary use polling for command completions */ mlx4_cmd_use_polling(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 006757f80988..cc3a9897574c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2669,14 +2669,11 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) if (msi_x) { int nreq = dev->caps.num_ports * num_online_cpus() + 1; - bool shared_ports = false; nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, nreq); - if (nreq > MAX_MSIX) { + if (nreq > MAX_MSIX) nreq = MAX_MSIX; - shared_ports = true; - } entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); if (!entries) @@ -2699,9 +2696,6 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports, dev->caps.num_ports); - if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) - shared_ports = true; - for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) { if (i == MLX4_EQ_ASYNC) continue; @@ -2709,7 +2703,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) priv->eq_table.eq[i].irq = entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector; - if (shared_ports) { + if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) { bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, dev->caps.num_ports); /* We don't set affinity hint when there diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index bd9ea0d01aae..1d4e2e054647 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -1184,10 +1184,11 @@ out: if (prot == MLX4_PROT_ETH) { /* manage the steering entry for promisc mode */ if (new_entry) - new_steering_entry(dev, port, steer, index, qp->qpn); + err = new_steering_entry(dev, port, steer, + index, qp->qpn); else - existing_steering_entry(dev, port, steer, - index, qp->qpn); + err = existing_steering_entry(dev, port, steer, + index, qp->qpn); } if (err && link && index != -1) { if (index < dev->caps.num_mgms) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c index e71563ce05d1..22d603f78273 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c @@ -598,6 +598,8 @@ 
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) return; priv->vlan.filter_disabled = false; + if (priv->netdev->flags & IFF_PROMISC) + return; mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); } @@ -607,6 +609,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) return; priv->vlan.filter_disabled = true; + if (priv->netdev->flags & IFF_PROMISC) + return; mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); } @@ -717,8 +721,12 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled; bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; - if (enable_promisc) + if (enable_promisc) { mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC); + if (!priv->vlan.filter_disabled) + mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); + } if (enable_allmulti) mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); if (enable_broadcast) @@ -730,8 +738,12 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast); if (disable_allmulti) mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti); - if (disable_promisc) + if (disable_promisc) { + if (!priv->vlan.filter_disabled) + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc); + } ea->promisc_enabled = promisc_enabled; ea->allmulti_enabled = allmulti_enabled; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index aa0d5ffe92d8..9335e5ae18cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -200,25 +200,3 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev) return err; } - -int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey) -{ - struct mlx5_cmd_query_special_contexts_mbox_in in; - struct mlx5_cmd_query_special_contexts_mbox_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); - - *rsvd_lkey = be32_to_cpu(out.resd_lkey); - - return err; -} -EXPORT_SYMBOL(mlx5_core_query_special_context); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 821caaab9bfb..3b9480fa3403 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -311,7 +311,7 @@ static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc, int err; memset(in, 0, sizeof(in)); - MLX5_SET(ptys_reg, in, local_port, local_port); + MLX5_SET(pvlc_reg, in, local_port, local_port); err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc, pvlc_size, MLX5_REG_PVLC, 0, 0); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index dbcaf5df8967..28c19cc1a17c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -374,26 +374,31 @@ static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, int err; int ret; + mlxsw_core->emad.trans_active = true; + err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info); if (err) { dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n", mlxsw_core->emad.tid); 
dev_kfree_skb(skb); - return err; + goto trans_inactive_out; } - mlxsw_core->emad.trans_active = true; ret = wait_event_timeout(mlxsw_core->emad.wait, !(mlxsw_core->emad.trans_active), msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS)); if (!ret) { dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n", mlxsw_core->emad.tid); - mlxsw_core->emad.trans_active = false; - return -EIO; + err = -EIO; + goto trans_inactive_out; } return 0; + +trans_inactive_out: + mlxsw_core->emad.trans_active = false; + return err; } static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core, diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h index ffd55d030ce2..36fb1cec53c9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/item.h +++ b/drivers/net/ethernet/mellanox/mlxsw/item.h @@ -187,6 +187,7 @@ __mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift) { u16 max_index, be_index; u16 offset; /* byte offset inside the array */ + u8 in_byte_index; BUG_ON(index && !item->element_size); if (item->offset % sizeof(u32) != 0 || @@ -199,7 +200,8 @@ __mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift) max_index = (item->size.bytes << 3) / item->element_size - 1; be_index = max_index - index; offset = be_index * item->element_size >> 3; - *shift = index % (BITS_PER_BYTE / item->element_size) << 1; + in_byte_index = index % (BITS_PER_BYTE / item->element_size); + *shift = in_byte_index * item->element_size; return item->offset + offset; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 462cea31ecbb..cef866c37648 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1582,11 +1582,11 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod, if (in_mbox) memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size); - mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, in_mapaddr >> 32); - mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, in_mapaddr); + mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr)); + mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr)); - mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, out_mapaddr >> 32); - mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, out_mapaddr); + mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr)); + mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr)); mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod); mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0); diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 3e52ee93438c..62cbbd1ada8d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -1069,9 +1069,9 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) return 0; err_register_netdev: -err_port_admin_status_set: err_port_mac_learning_mode_set: err_port_stp_state_set: +err_port_admin_status_set: err_port_mtu_set: err_port_speed_set: err_port_swid_set: diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index 66d4ab703f45..60f43ec22175 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -1601,6 +1601,7 @@ static const struct of_device_id ks8851_match_table[] = { { .compatible = "micrel,ks8851" }, { } }; +MODULE_DEVICE_TABLE(of, ks8851_match_table); static struct 
spi_driver ks8851_driver = { .driver = { diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index becbb5f1f5a7..a10c928bbd6b 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -552,6 +552,7 @@ static const struct of_device_id moxart_mac_match[] = { { .compatible = "moxa,moxart-mac" }, { } }; +MODULE_DEVICE_TABLE(of, moxart_mac_match); static struct platform_driver moxart_mac_driver = { .probe = moxart_mac_probe, diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 66fd868152e5..b159ef8303cc 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -476,13 +476,12 @@ static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac) mac[5] = tmp >> 8; } -static void __lpc_eth_clock_enable(struct netdata_local *pldat, - bool enable) +static void __lpc_eth_clock_enable(struct netdata_local *pldat, bool enable) { if (enable) - clk_enable(pldat->clk); + clk_prepare_enable(pldat->clk); else - clk_disable(pldat->clk); + clk_disable_unprepare(pldat->clk); } static void __lpc_params_setup(struct netdata_local *pldat) @@ -1494,7 +1493,7 @@ err_out_free_irq: err_out_iounmap: iounmap(pldat->net_base); err_out_disable_clocks: - clk_disable(pldat->clk); + clk_disable_unprepare(pldat->clk); clk_put(pldat->clk); err_out_free_dev: free_netdev(ndev); @@ -1519,7 +1518,7 @@ static int lpc_eth_drv_remove(struct platform_device *pdev) iounmap(pldat->net_base); mdiobus_unregister(pldat->mii_bus); mdiobus_free(pldat->mii_bus); - clk_disable(pldat->clk); + clk_disable_unprepare(pldat->clk); clk_put(pldat->clk); free_netdev(ndev); @@ -1540,7 +1539,7 @@ static int lpc_eth_drv_suspend(struct platform_device *pdev, if (netif_running(ndev)) { netif_device_detach(ndev); __lpc_eth_shutdown(pldat); - clk_disable(pldat->clk); + clk_disable_unprepare(pldat->clk); /* * Reset again now clock is disable to be sure diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 06bcc734fe8d..d6696cfa11d2 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -536,6 +536,7 @@ struct qlcnic_hardware_context { u8 extend_lb_time; u8 phys_port_id[ETH_ALEN]; u8 lb_mode; + u8 vxlan_port_count; u16 vxlan_port; struct device *hwmon_dev; u32 post_mode; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 8b08b20e8b30..d4481454b5f8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -483,11 +483,17 @@ static void qlcnic_add_vxlan_port(struct net_device *netdev, /* Adapter supports only one VXLAN port. 
Use very first port * for enabling offload */ - if (!qlcnic_encap_rx_offload(adapter) || ahw->vxlan_port) + if (!qlcnic_encap_rx_offload(adapter)) return; + if (!ahw->vxlan_port_count) { + ahw->vxlan_port_count = 1; + ahw->vxlan_port = ntohs(port); + adapter->flags |= QLCNIC_ADD_VXLAN_PORT; + return; + } + if (ahw->vxlan_port == ntohs(port)) + ahw->vxlan_port_count++; - ahw->vxlan_port = ntohs(port); - adapter->flags |= QLCNIC_ADD_VXLAN_PORT; } static void qlcnic_del_vxlan_port(struct net_device *netdev, @@ -496,11 +502,13 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev, struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_hardware_context *ahw = adapter->ahw; - if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port || + if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port_count || (ahw->vxlan_port != ntohs(port))) return; - adapter->flags |= QLCNIC_DEL_VXLAN_PORT; + ahw->vxlan_port_count--; + if (!ahw->vxlan_port_count) + adapter->flags |= QLCNIC_DEL_VXLAN_PORT; } static netdev_features_t qlcnic_features_check(struct sk_buff *skb, diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index d79e33b3c191..686334f4588d 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -157,6 +157,7 @@ enum { NWayAdvert = 0x66, /* MII ADVERTISE */ NWayLPAR = 0x68, /* MII LPA */ NWayExpansion = 0x6A, /* MII Expansion */ + TxDmaOkLowDesc = 0x82, /* Low 16 bit address of a Tx descriptor. */ Config5 = 0xD8, /* Config5 */ TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */ RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */ @@ -341,6 +342,7 @@ struct cp_private { unsigned tx_tail; struct cp_desc *tx_ring; struct sk_buff *tx_skb[CP_TX_RING_SIZE]; + u32 tx_opts[CP_TX_RING_SIZE]; unsigned rx_buf_sz; unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? 
*/ @@ -665,7 +667,7 @@ static void cp_tx (struct cp_private *cp) BUG_ON(!skb); dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), - le32_to_cpu(txd->opts1) & 0xffff, + cp->tx_opts[tx_tail] & 0xffff, PCI_DMA_TODEVICE); if (status & LastFrag) { @@ -733,7 +735,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, { struct cp_private *cp = netdev_priv(dev); unsigned entry; - u32 eor, flags; + u32 eor, opts1; unsigned long intr_flags; __le32 opts2; int mss = 0; @@ -753,6 +755,21 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, mss = skb_shinfo(skb)->gso_size; opts2 = cpu_to_le32(cp_tx_vlan_tag(skb)); + opts1 = DescOwn; + if (mss) + opts1 |= LargeSend | ((mss & MSSMask) << MSSShift); + else if (skb->ip_summed == CHECKSUM_PARTIAL) { + const struct iphdr *ip = ip_hdr(skb); + if (ip->protocol == IPPROTO_TCP) + opts1 |= IPCS | TCPCS; + else if (ip->protocol == IPPROTO_UDP) + opts1 |= IPCS | UDPCS; + else { + WARN_ONCE(1, + "Net bug: asked to checksum invalid Legacy IP packet\n"); + goto out_dma_error; + } + } if (skb_shinfo(skb)->nr_frags == 0) { struct cp_desc *txd = &cp->tx_ring[entry]; @@ -768,31 +785,20 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, txd->addr = cpu_to_le64(mapping); wmb(); - flags = eor | len | DescOwn | FirstFrag | LastFrag; - - if (mss) - flags |= LargeSend | ((mss & MSSMask) << MSSShift); - else if (skb->ip_summed == CHECKSUM_PARTIAL) { - const struct iphdr *ip = ip_hdr(skb); - if (ip->protocol == IPPROTO_TCP) - flags |= IPCS | TCPCS; - else if (ip->protocol == IPPROTO_UDP) - flags |= IPCS | UDPCS; - else - WARN_ON(1); /* we need a WARN() */ - } + opts1 |= eor | len | FirstFrag | LastFrag; - txd->opts1 = cpu_to_le32(flags); + txd->opts1 = cpu_to_le32(opts1); wmb(); cp->tx_skb[entry] = skb; - entry = NEXT_TX(entry); + cp->tx_opts[entry] = opts1; + netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n", + entry, skb->len); } else { struct cp_desc *txd; - u32 first_len, first_eor; + u32 first_len, first_eor, ctrl; dma_addr_t first_mapping; int frag, first_entry = entry; - const struct iphdr *ip = ip_hdr(skb); /* We must give this initial chunk to the device last. * Otherwise we could race with the device. @@ -805,14 +811,14 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, goto out_dma_error; cp->tx_skb[entry] = skb; - entry = NEXT_TX(entry); for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; u32 len; - u32 ctrl; dma_addr_t mapping; + entry = NEXT_TX(entry); + len = skb_frag_size(this_frag); mapping = dma_map_single(&cp->pdev->dev, skb_frag_address(this_frag), @@ -824,19 +830,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, eor = (entry == (CP_TX_RING_SIZE - 1)) ? 
RingEnd : 0; - ctrl = eor | len | DescOwn; - - if (mss) - ctrl |= LargeSend | - ((mss & MSSMask) << MSSShift); - else if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (ip->protocol == IPPROTO_TCP) - ctrl |= IPCS | TCPCS; - else if (ip->protocol == IPPROTO_UDP) - ctrl |= IPCS | UDPCS; - else - BUG(); - } + ctrl = opts1 | eor | len; if (frag == skb_shinfo(skb)->nr_frags - 1) ctrl |= LastFrag; @@ -849,8 +843,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, txd->opts1 = cpu_to_le32(ctrl); wmb(); + cp->tx_opts[entry] = ctrl; cp->tx_skb[entry] = skb; - entry = NEXT_TX(entry); } txd = &cp->tx_ring[first_entry]; @@ -858,27 +852,17 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, txd->addr = cpu_to_le64(first_mapping); wmb(); - if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (ip->protocol == IPPROTO_TCP) - txd->opts1 = cpu_to_le32(first_eor | first_len | - FirstFrag | DescOwn | - IPCS | TCPCS); - else if (ip->protocol == IPPROTO_UDP) - txd->opts1 = cpu_to_le32(first_eor | first_len | - FirstFrag | DescOwn | - IPCS | UDPCS); - else - BUG(); - } else - txd->opts1 = cpu_to_le32(first_eor | first_len | - FirstFrag | DescOwn); + ctrl = opts1 | first_eor | first_len | FirstFrag; + txd->opts1 = cpu_to_le32(ctrl); wmb(); + + cp->tx_opts[first_entry] = ctrl; + netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n", + first_entry, entry, skb->len); } - cp->tx_head = entry; + cp->tx_head = NEXT_TX(entry); netdev_sent_queue(dev, skb->len); - netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n", - entry, skb->len); if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) netif_stop_queue(dev); @@ -1115,6 +1099,7 @@ static int cp_init_rings (struct cp_private *cp) { memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd); + memset(cp->tx_opts, 0, sizeof(cp->tx_opts)); cp_init_rings_index(cp); @@ -1151,7 +1136,7 @@ static void cp_clean_rings (struct cp_private *cp) desc = cp->rx_ring + i; dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), cp->rx_buf_sz, PCI_DMA_FROMDEVICE); - dev_kfree_skb(cp->rx_skb[i]); + dev_kfree_skb_any(cp->rx_skb[i]); } } @@ -1164,7 +1149,7 @@ static void cp_clean_rings (struct cp_private *cp) le32_to_cpu(desc->opts1) & 0xffff, PCI_DMA_TODEVICE); if (le32_to_cpu(desc->opts1) & LastFrag) - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); cp->dev->stats.tx_dropped++; } } @@ -1172,6 +1157,7 @@ static void cp_clean_rings (struct cp_private *cp) memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); + memset(cp->tx_opts, 0, sizeof(cp->tx_opts)); memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE); memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE); @@ -1249,7 +1235,7 @@ static void cp_tx_timeout(struct net_device *dev) { struct cp_private *cp = netdev_priv(dev); unsigned long flags; - int rc; + int rc, i; netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n", cpr8(Cmd), cpr16(CpCmd), @@ -1257,13 +1243,26 @@ static void cp_tx_timeout(struct net_device *dev) spin_lock_irqsave(&cp->lock, flags); + netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n", + cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc)); + for (i = 0; i < CP_TX_RING_SIZE; i++) { + netif_dbg(cp, tx_err, cp->dev, + "TX slot %d @%p: %08x (%08x) %08x %llx %p\n", + i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1), + cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2), + 
le64_to_cpu(cp->tx_ring[i].addr), + cp->tx_skb[i]); + } + cp_stop_hw(cp); cp_clean_rings(cp); rc = cp_init_rings(cp); cp_start_hw(cp); - cp_enable_irq(cp); + __cp_set_rx_mode(dev); + cpw16_f(IntrMask, cp_norx_intr_mask); netif_wake_queue(dev); + napi_schedule_irqoff(&cp->napi); spin_unlock_irqrestore(&cp->lock, flags); } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 2b32e0c5a0b4..b4f21232019a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -6081,7 +6081,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev; - u16 rg_saw_cnt; + int rg_saw_cnt; u32 data; static const struct ephy_info e_info_8168h_1[] = { { 0x1e, 0x0800, 0x0001 }, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index b735fa22ac95..ebf6abc4853f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -161,11 +161,16 @@ int stmmac_mdio_reset(struct mii_bus *bus) if (!gpio_request(reset_gpio, "mdio-reset")) { gpio_direction_output(reset_gpio, active_low ? 1 : 0); - udelay(data->delays[0]); + if (data->delays[0]) + msleep(DIV_ROUND_UP(data->delays[0], 1000)); + gpio_set_value(reset_gpio, active_low ? 0 : 1); - udelay(data->delays[1]); + if (data->delays[1]) + msleep(DIV_ROUND_UP(data->delays[1], 1000)); + gpio_set_value(reset_gpio, active_low ? 1 : 0); - udelay(data->delays[2]); + if (data->delays[2]) + msleep(DIV_ROUND_UP(data->delays[2], 1000)); } } #endif diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 53fe200e0b79..cc106d892e29 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -1756,7 +1756,8 @@ static const struct net_device_ops vnet_ops = { #endif }; -static struct vnet *vnet_new(const u64 *local_mac) +static struct vnet *vnet_new(const u64 *local_mac, + struct vio_dev *vdev) { struct net_device *dev; struct vnet *vp; @@ -1790,6 +1791,8 @@ static struct vnet *vnet_new(const u64 *local_mac) NETIF_F_HW_CSUM | NETIF_F_SG; dev->features = dev->hw_features; + SET_NETDEV_DEV(dev, &vdev->dev); + err = register_netdev(dev); if (err) { pr_err("Cannot register net device, aborting\n"); @@ -1808,7 +1811,8 @@ err_out_free_dev: return ERR_PTR(err); } -static struct vnet *vnet_find_or_create(const u64 *local_mac) +static struct vnet *vnet_find_or_create(const u64 *local_mac, + struct vio_dev *vdev) { struct vnet *iter, *vp; @@ -1821,7 +1825,7 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac) } } if (!vp) - vp = vnet_new(local_mac); + vp = vnet_new(local_mac, vdev); mutex_unlock(&vnet_list_mutex); return vp; @@ -1848,7 +1852,8 @@ static void vnet_cleanup(void) static const char *local_mac_prop = "local-mac-address"; static struct vnet *vnet_find_parent(struct mdesc_handle *hp, - u64 port_node) + u64 port_node, + struct vio_dev *vdev) { const u64 *local_mac = NULL; u64 a; @@ -1869,7 +1874,7 @@ static struct vnet *vnet_find_parent(struct mdesc_handle *hp, if (!local_mac) return ERR_PTR(-ENODEV); - return vnet_find_or_create(local_mac); + return vnet_find_or_create(local_mac, vdev); } static struct ldc_channel_config vnet_ldc_cfg = { @@ -1923,7 +1928,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) hp = mdesc_grab(); - vp = vnet_find_parent(hp, vdev->mp); + vp = vnet_find_parent(hp, vdev->mp, 
vdev); if (IS_ERR(vp)) { pr_err("Cannot find port parent vnet\n"); err = PTR_ERR(vp); diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 1a5aca55ea9f..9f9832f0dea9 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -291,13 +291,6 @@ static int netcp_module_probe(struct netcp_device *netcp_device, interface_list) { struct netcp_intf_modpriv *intf_modpriv; - /* If interface not registered then register now */ - if (!netcp_intf->netdev_registered) - ret = netcp_register_interface(netcp_intf); - - if (ret) - return -ENODEV; - intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv), GFP_KERNEL); if (!intf_modpriv) @@ -306,6 +299,11 @@ static int netcp_module_probe(struct netcp_device *netcp_device, interface = of_parse_phandle(netcp_intf->node_interface, module->name, 0); + if (!interface) { + devm_kfree(dev, intf_modpriv); + continue; + } + intf_modpriv->netcp_priv = netcp_intf; intf_modpriv->netcp_module = module; list_add_tail(&intf_modpriv->intf_list, @@ -323,6 +321,18 @@ static int netcp_module_probe(struct netcp_device *netcp_device, continue; } } + + /* Now register the interface with netdev */ + list_for_each_entry(netcp_intf, + &netcp_device->interface_head, + interface_list) { + /* If interface not registered then register now */ + if (!netcp_intf->netdev_registered) { + ret = netcp_register_interface(netcp_intf); + if (ret) + return -ENODEV; + } + } return 0; } @@ -357,7 +367,6 @@ int netcp_register_module(struct netcp_module *module) if (ret < 0) goto fail; } - mutex_unlock(&netcp_modules_lock); return 0; @@ -796,7 +805,7 @@ static void netcp_rxpool_free(struct netcp_intf *netcp) netcp->rx_pool = NULL; } -static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) +static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) { struct knav_dma_desc *hwdesc; unsigned int buf_len, dma_sz; @@ -810,7 +819,7 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) hwdesc = knav_pool_desc_get(netcp->rx_pool); if (IS_ERR_OR_NULL(hwdesc)) { dev_dbg(netcp->ndev_dev, "out of rx pool desc\n"); - return; + return -ENOMEM; } if (likely(fdq == 0)) { @@ -862,25 +871,26 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma, &dma_sz); knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0); - return; + return 0; fail: knav_pool_desc_put(netcp->rx_pool, hwdesc); + return -ENOMEM; } /* Refill Rx FDQ with descriptors & attached buffers */ static void netcp_rxpool_refill(struct netcp_intf *netcp) { u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0}; - int i; + int i, ret = 0; /* Calculate the FDQ deficit and refill */ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) { fdq_deficit[i] = netcp->rx_queue_depths[i] - knav_queue_get_count(netcp->rx_fdq[i]); - while (fdq_deficit[i]--) - netcp_allocate_rx_buf(netcp, i); + while (fdq_deficit[i]-- && !ret) + ret = netcp_allocate_rx_buf(netcp, i); } /* end for fdqs */ } @@ -893,12 +903,12 @@ static int netcp_rx_poll(struct napi_struct *napi, int budget) packets = netcp_process_rx_packets(netcp, budget); + netcp_rxpool_refill(netcp); if (packets < budget) { napi_complete(&netcp->rx_napi); knav_queue_enable_notify(netcp->rx_queue); } - netcp_rxpool_refill(netcp); return packets; } @@ -1384,7 +1394,6 @@ static void netcp_addr_sweep_del(struct netcp_intf *netcp) continue; dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n", naddr->addr, naddr->type); 
- mutex_lock(&netcp_modules_lock); for_each_module(netcp, priv) { module = priv->netcp_module; if (!module->del_addr) @@ -1393,7 +1402,6 @@ static void netcp_addr_sweep_del(struct netcp_intf *netcp) naddr); WARN_ON(error); } - mutex_unlock(&netcp_modules_lock); netcp_addr_del(netcp, naddr); } } @@ -1410,7 +1418,7 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp) continue; dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n", naddr->addr, naddr->type); - mutex_lock(&netcp_modules_lock); + for_each_module(netcp, priv) { module = priv->netcp_module; if (!module->add_addr) @@ -1418,7 +1426,6 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp) error = module->add_addr(priv->module_priv, naddr); WARN_ON(error); } - mutex_unlock(&netcp_modules_lock); } } @@ -1432,6 +1439,7 @@ static void netcp_set_rx_mode(struct net_device *ndev) ndev->flags & IFF_ALLMULTI || netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR); + spin_lock(&netcp->lock); /* first clear all marks */ netcp_addr_clear_mark(netcp); @@ -1450,6 +1458,7 @@ static void netcp_set_rx_mode(struct net_device *ndev) /* finally sweep and callout into modules */ netcp_addr_sweep_del(netcp); netcp_addr_sweep_add(netcp); + spin_unlock(&netcp->lock); } static void netcp_free_navigator_resources(struct netcp_intf *netcp) @@ -1614,7 +1623,6 @@ static int netcp_ndo_open(struct net_device *ndev) goto fail; } - mutex_lock(&netcp_modules_lock); for_each_module(netcp, intf_modpriv) { module = intf_modpriv->netcp_module; if (module->open) { @@ -1625,7 +1633,6 @@ static int netcp_ndo_open(struct net_device *ndev) } } } - mutex_unlock(&netcp_modules_lock); napi_enable(&netcp->rx_napi); napi_enable(&netcp->tx_napi); @@ -1642,7 +1649,6 @@ fail_open: if (module->close) module->close(intf_modpriv->module_priv, ndev); } - mutex_unlock(&netcp_modules_lock); fail: netcp_free_navigator_resources(netcp); @@ -1666,7 +1672,6 @@ static int netcp_ndo_stop(struct net_device *ndev) napi_disable(&netcp->rx_napi); napi_disable(&netcp->tx_napi); - mutex_lock(&netcp_modules_lock); for_each_module(netcp, intf_modpriv) { module = intf_modpriv->netcp_module; if (module->close) { @@ -1675,7 +1680,6 @@ static int netcp_ndo_stop(struct net_device *ndev) dev_err(netcp->ndev_dev, "Close failed\n"); } } - mutex_unlock(&netcp_modules_lock); /* Recycle Rx descriptors from completion queue */ netcp_empty_rx_queue(netcp); @@ -1703,7 +1707,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev, if (!netif_running(ndev)) return -EINVAL; - mutex_lock(&netcp_modules_lock); for_each_module(netcp, intf_modpriv) { module = intf_modpriv->netcp_module; if (!module->ioctl) @@ -1719,7 +1722,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev, } out: - mutex_unlock(&netcp_modules_lock); return (ret == 0) ? 
0 : err; } @@ -1754,11 +1756,12 @@ static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) struct netcp_intf *netcp = netdev_priv(ndev); struct netcp_intf_modpriv *intf_modpriv; struct netcp_module *module; + unsigned long flags; int err = 0; dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid); - mutex_lock(&netcp_modules_lock); + spin_lock_irqsave(&netcp->lock, flags); for_each_module(netcp, intf_modpriv) { module = intf_modpriv->netcp_module; if ((module->add_vid) && (vid != 0)) { @@ -1770,7 +1773,8 @@ static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) } } } - mutex_unlock(&netcp_modules_lock); + spin_unlock_irqrestore(&netcp->lock, flags); + return err; } @@ -1779,11 +1783,12 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) struct netcp_intf *netcp = netdev_priv(ndev); struct netcp_intf_modpriv *intf_modpriv; struct netcp_module *module; + unsigned long flags; int err = 0; dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid); - mutex_lock(&netcp_modules_lock); + spin_lock_irqsave(&netcp->lock, flags); for_each_module(netcp, intf_modpriv) { module = intf_modpriv->netcp_module; if (module->del_vid) { @@ -1795,7 +1800,7 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) } } } - mutex_unlock(&netcp_modules_lock); + spin_unlock_irqrestore(&netcp->lock, flags); return err; } @@ -2040,7 +2045,6 @@ static int netcp_probe(struct platform_device *pdev) struct device_node *child, *interfaces; struct netcp_device *netcp_device; struct device *dev = &pdev->dev; - struct netcp_module *module; int ret; if (!node) { @@ -2087,14 +2091,6 @@ static int netcp_probe(struct platform_device *pdev) /* Add the device instance to the list */ list_add_tail(&netcp_device->device_list, &netcp_devices); - /* Probe & attach any modules already registered */ - mutex_lock(&netcp_modules_lock); - for_each_netcp_module(module) { - ret = netcp_module_probe(netcp_device, module); - if (ret < 0) - dev_err(dev, "module(%s) probe failed\n", module->name); - } - mutex_unlock(&netcp_modules_lock); return 0; probe_quit_interface: diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 6f16d6aaf7b7..6bff8d82ceab 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -77,6 +77,7 @@ #define GBENU_ALE_OFFSET 0x1e000 #define GBENU_HOST_PORT_NUM 0 #define GBENU_NUM_ALE_ENTRIES 1024 +#define GBENU_SGMII_MODULE_SIZE 0x100 /* 10G Ethernet SS defines */ #define XGBE_MODULE_NAME "netcp-xgbe" @@ -149,8 +150,8 @@ #define XGBE_STATS2_MODULE 2 /* s: 0-based slave_port */ -#define SGMII_BASE(s) \ - (((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs) +#define SGMII_BASE(d, s) \ + (((s) < 2) ? 
(d)->sgmii_port_regs : (d)->sgmii_port34_regs) #define GBE_TX_QUEUE 648 #define GBE_TXHOOK_ORDER 0 @@ -1997,13 +1998,8 @@ static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev, return; if (!SLAVE_LINK_IS_XGMII(slave)) { - if (gbe_dev->ss_version == GBE_SS_VERSION_14) - sgmii_link_state = - netcp_sgmii_get_port_link(SGMII_BASE(sp), sp); - else - sgmii_link_state = - netcp_sgmii_get_port_link( - gbe_dev->sgmii_port_regs, sp); + sgmii_link_state = + netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp); } phy_link_state = gbe_phy_link_status(slave); @@ -2100,17 +2096,11 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave, static void gbe_sgmii_rtreset(struct gbe_priv *priv, struct gbe_slave *slave, bool set) { - void __iomem *sgmii_port_regs; - if (SLAVE_LINK_IS_XGMII(slave)) return; - if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2)) - sgmii_port_regs = priv->sgmii_port34_regs; - else - sgmii_port_regs = priv->sgmii_port_regs; - - netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set); + netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num), + slave->slave_num, set); } static void gbe_slave_stop(struct gbe_intf *intf) @@ -2136,17 +2126,12 @@ static void gbe_slave_stop(struct gbe_intf *intf) static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave) { - void __iomem *sgmii_port_regs; - - sgmii_port_regs = priv->sgmii_port_regs; - if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2)) - sgmii_port_regs = priv->sgmii_port34_regs; + if (SLAVE_LINK_IS_XGMII(slave)) + return; - if (!SLAVE_LINK_IS_XGMII(slave)) { - netcp_sgmii_reset(sgmii_port_regs, slave->slave_num); - netcp_sgmii_config(sgmii_port_regs, slave->slave_num, - slave->link_interface); - } + netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num); + netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num, + slave->link_interface); } static int gbe_slave_open(struct gbe_intf *gbe_intf) @@ -2997,6 +2982,14 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, gbe_dev->switch_regs = regs; gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET; + + /* Although sgmii modules are mem mapped to one contiguous + * region on GBENU devices, setting sgmii_port34_regs allows + * consistent code when accessing sgmii api + */ + gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs + + (2 * GBENU_SGMII_MODULE_SIZE); + gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET; for (i = 0; i < (gbe_dev->max_num_ports); i++) diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig index 2f1264b882b9..d3d094742a7e 100644 --- a/drivers/net/ethernet/via/Kconfig +++ b/drivers/net/ethernet/via/Kconfig @@ -17,7 +17,7 @@ if NET_VENDOR_VIA config VIA_RHINE tristate "VIA Rhine support" - depends on (PCI || OF_IRQ) + depends on PCI || (OF_IRQ && GENERIC_PCI_IOMAP) depends on HAS_DMA select CRC32 select MII diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index a83263743665..2b7550c43f78 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -2134,10 +2134,11 @@ static int rhine_rx(struct net_device *dev, int limit) } skb_put(skb, pkt_len); - skb->protocol = eth_type_trans(skb, dev); rhine_rx_vlan_tag(skb, desc, data_size); + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); u64_stats_update_begin(&rp->rx_stats.syncp); diff --git 
a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 6008eee01a33..cf468c87ce57 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -828,6 +828,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) if (!phydev) dev_info(dev, "MDIO of the phy is not registered yet\n"); + else + put_device(&phydev->dev); return 0; } diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c index b5f4a78da828..2d3848c9dc35 100644 --- a/drivers/net/fjes/fjes_hw.c +++ b/drivers/net/fjes/fjes_hw.c @@ -1011,11 +1011,11 @@ static void fjes_hw_update_zone_task(struct work_struct *work) set_bit(epidx, &irq_bit); break; } - } - - hw->ep_shm_info[epidx].es_status = info[epidx].es_status; - hw->ep_shm_info[epidx].zone = info[epidx].zone; + hw->ep_shm_info[epidx].es_status = + info[epidx].es_status; + hw->ep_shm_info[epidx].zone = info[epidx].zone; + } break; } diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index da3259ce7c8d..cde29f8a37bf 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -126,6 +126,8 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb) __be32 addr; int err; + iph = ip_hdr(skb); /* outer IP header... */ + if (gs->collect_md) { static u8 zero_vni[3]; @@ -133,7 +135,6 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb) addr = 0; } else { vni = gnvh->vni; - iph = ip_hdr(skb); /* Still outer IP header... */ addr = iph->saddr; } @@ -178,7 +179,6 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb) skb_reset_network_header(skb); - iph = ip_hdr(skb); /* Now inner IP header... */ err = IP_ECN_decapsulate(iph, skb); if (unlikely(err)) { @@ -626,6 +626,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) struct geneve_sock *gs = geneve->sock; struct ip_tunnel_info *info = NULL; struct rtable *rt = NULL; + const struct iphdr *iip; /* interior IP header */ struct flowi4 fl4; __u8 tos, ttl; __be16 sport; @@ -653,6 +654,8 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); skb_reset_mac_header(skb); + iip = ip_hdr(skb); + if (info) { const struct ip_tunnel_key *key = &info->key; u8 *opts = NULL; @@ -668,19 +671,16 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(err)) goto err; - tos = key->tos; + tos = ip_tunnel_ecn_encap(key->tos, iip, skb); ttl = key->ttl; df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? 
htons(IP_DF) : 0; } else { - const struct iphdr *iip; /* interior IP header */ - udp_csum = false; err = geneve_build_skb(rt, skb, 0, geneve->vni, 0, NULL, udp_csum); if (unlikely(err)) goto err; - iip = ip_hdr(skb); tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb); ttl = geneve->ttl; if (!ttl && IN_MULTICAST(ntohl(fl4.daddr))) @@ -748,12 +748,8 @@ static void geneve_setup(struct net_device *dev) dev->features |= NETIF_F_RXCSUM; dev->features |= NETIF_F_GSO_SOFTWARE; - dev->vlan_features = dev->features; - dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; - dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; dev->hw_features |= NETIF_F_GSO_SOFTWARE; - dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; netif_keep_dst(dev); dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; @@ -819,7 +815,7 @@ static struct geneve_dev *geneve_find_dev(struct geneve_net *gn, static int geneve_configure(struct net *net, struct net_device *dev, __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos, - __u16 dst_port, bool metadata) + __be16 dst_port, bool metadata) { struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_dev *t, *geneve = netdev_priv(dev); @@ -844,10 +840,10 @@ static int geneve_configure(struct net *net, struct net_device *dev, geneve->ttl = ttl; geneve->tos = tos; - geneve->dst_port = htons(dst_port); + geneve->dst_port = dst_port; geneve->collect_md = metadata; - t = geneve_find_dev(gn, htons(dst_port), rem_addr, geneve->vni, + t = geneve_find_dev(gn, dst_port, rem_addr, geneve->vni, &tun_on_same_port, &tun_collect_md); if (t) return -EBUSY; @@ -871,17 +867,17 @@ static int geneve_configure(struct net *net, struct net_device *dev, static int geneve_newlink(struct net *net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { - __u16 dst_port = GENEVE_UDP_PORT; + __be16 dst_port = htons(GENEVE_UDP_PORT); __u8 ttl = 0, tos = 0; bool metadata = false; - __be32 rem_addr; - __u32 vni; + __be32 rem_addr = 0; + __u32 vni = 0; - if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE]) - return -EINVAL; + if (data[IFLA_GENEVE_ID]) + vni = nla_get_u32(data[IFLA_GENEVE_ID]); - vni = nla_get_u32(data[IFLA_GENEVE_ID]); - rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]); + if (data[IFLA_GENEVE_REMOTE]) + rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]); if (data[IFLA_GENEVE_TTL]) ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); @@ -890,7 +886,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev, tos = nla_get_u8(data[IFLA_GENEVE_TOS]); if (data[IFLA_GENEVE_PORT]) - dst_port = nla_get_u16(data[IFLA_GENEVE_PORT]); + dst_port = nla_get_be16(data[IFLA_GENEVE_PORT]); if (data[IFLA_GENEVE_COLLECT_METADATA]) metadata = true; @@ -913,7 +909,7 @@ static size_t geneve_get_size(const struct net_device *dev) nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */ - nla_total_size(sizeof(__u16)) + /* IFLA_GENEVE_PORT */ + nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */ nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */ 0; } @@ -935,7 +931,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos)) goto nla_put_failure; - if (nla_put_u16(skb, IFLA_GENEVE_PORT, ntohs(geneve->dst_port))) + if (nla_put_be16(skb, IFLA_GENEVE_PORT, geneve->dst_port)) goto nla_put_failure; if (geneve->collect_md) { @@ -975,7 +971,7 @@ struct 
net_device *geneve_dev_create_fb(struct net *net, const char *name, if (IS_ERR(dev)) return dev; - err = geneve_configure(net, dev, 0, 0, 0, 0, dst_port, true); + err = geneve_configure(net, dev, 0, 0, 0, 0, htons(dst_port), true); if (err) { free_netdev(dev); return ERR_PTR(err); diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c index 58ae11a14bb6..64bb44d5d867 100644 --- a/drivers/net/irda/ali-ircc.c +++ b/drivers/net/irda/ali-ircc.c @@ -1031,7 +1031,6 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud) static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) { struct ali_ircc_cb *self = priv; - unsigned long flags; int iobase; int fcr; /* FIFO control reg */ int lcr; /* Line control reg */ @@ -1061,8 +1060,6 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) /* Update accounting for new speed */ self->io.speed = speed; - spin_lock_irqsave(&self->lock, flags); - divisor = 115200/speed; fcr = UART_FCR_ENABLE_FIFO; @@ -1089,9 +1086,6 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) /* without this, the connection will be broken after come back from FIR speed, but with this, the SIR connection is harder to established */ outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR); - - spin_unlock_irqrestore(&self->lock, flags); - } static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed) diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index edd77342773a..248478c6f6e4 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -1111,10 +1111,10 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, return 0; case TUNSETSNDBUF: - if (get_user(u, up)) + if (get_user(s, sp)) return -EFAULT; - q->sk.sk_sndbuf = u; + q->sk.sk_sndbuf = s; return 0; case TUNGETVNETHDRSZ: diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index c5ad98ace5d0..11e3975485c1 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -168,8 +168,6 @@ config MDIO_OCTEON busses. It is required by the Octeon and ThunderX ethernet device drivers. - If in doubt, say Y. 
- config MDIO_SUN4I tristate "Allwinner sun4i MDIO interface support" depends on ARCH_SUNXI diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index fb1299c6326e..e23bf5b90e17 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -220,7 +220,7 @@ int fixed_phy_update_state(struct phy_device *phydev, struct fixed_mdio_bus *fmb = &platform_fmb; struct fixed_phy *fp; - if (!phydev || !phydev->bus) + if (!phydev || phydev->bus != fmb->mii_bus) return -EINVAL; list_for_each_entry(fp, &fmb->phys, node) { diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index e6897b6a8a53..5de8d5827536 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -785,6 +785,7 @@ static int marvell_read_status(struct phy_device *phydev) int adv; int err; int lpa; + int lpagb; int status = 0; /* Update the link, but return if there @@ -802,10 +803,17 @@ static int marvell_read_status(struct phy_device *phydev) if (lpa < 0) return lpa; + lpagb = phy_read(phydev, MII_STAT1000); + if (lpagb < 0) + return lpagb; + adv = phy_read(phydev, MII_ADVERTISE); if (adv < 0) return adv; + phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb) | + mii_lpa_to_ethtool_lpa_t(lpa); + lpa &= adv; if (status & MII_M1011_PHY_STATUS_FULLDUPLEX) @@ -853,6 +861,7 @@ static int marvell_read_status(struct phy_device *phydev) phydev->speed = SPEED_10; phydev->pause = phydev->asym_pause = 0; + phydev->lp_advertising = 0; } return 0; diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 6a52a7f0fa0d..4bde5e728fe0 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -244,6 +244,7 @@ static const struct of_device_id unimac_mdio_ids[] = { { .compatible = "brcm,unimac-mdio", }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, unimac_mdio_ids); static struct platform_driver unimac_mdio_driver = { .driver = { diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 7dc21e56a7aa..3bc9f03349f3 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -261,6 +261,7 @@ static const struct of_device_id mdio_gpio_of_match[] = { { .compatible = "virtual,mdio-gpio", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, mdio_gpio_of_match); static struct platform_driver mdio_gpio_driver = { .probe = mdio_gpio_probe, diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c index 4d4d25efc1e1..280c7c311f72 100644 --- a/drivers/net/phy/mdio-mux.c +++ b/drivers/net/phy/mdio-mux.c @@ -113,18 +113,18 @@ int mdio_mux_init(struct device *dev, if (!parent_bus_node) return -ENODEV; - parent_bus = of_mdio_find_bus(parent_bus_node); - if (parent_bus == NULL) { - ret_val = -EPROBE_DEFER; - goto err_parent_bus; - } - pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL); if (pb == NULL) { ret_val = -ENOMEM; goto err_parent_bus; } + parent_bus = of_mdio_find_bus(parent_bus_node); + if (parent_bus == NULL) { + ret_val = -EPROBE_DEFER; + goto err_parent_bus; + } + pb->switch_data = data; pb->switch_fn = switch_fn; pb->current_child = -1; @@ -173,6 +173,10 @@ int mdio_mux_init(struct device *dev, dev_info(dev, "Version " DRV_VERSION "\n"); return 0; } + + /* balance the reference of_mdio_find_bus() took */ + put_device(&pb->mii_bus->dev); + err_parent_bus: of_node_put(parent_bus_node); return ret_val; @@ -189,6 +193,9 @@ void mdio_mux_uninit(void *mux_handle) mdiobus_free(cb->mii_bus); cb = cb->next; } + + /* balance the reference of_mdio_find_bus() in mdio_mux_init() took */ + 
put_device(&pb->mii_bus->dev); } EXPORT_SYMBOL_GPL(mdio_mux_uninit); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 02a4615b65f8..12f44c53cc8e 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -167,7 +167,9 @@ static int of_mdio_bus_match(struct device *dev, const void *mdio_bus_np) * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. * @mdio_bus_np: Pointer to the mii_bus. * - * Returns a pointer to the mii_bus, or NULL if none found. + * Returns a reference to the mii_bus, or NULL if none found. The + * embedded struct device will have its reference count incremented, + * and this must be put once the bus is finished with. * * Because the association of a device_node and mii_bus is made via * of_mdiobus_register(), the mii_bus cannot be found before it is @@ -234,15 +236,18 @@ static inline void of_mdiobus_link_phydev(struct mii_bus *mdio, #endif /** - * mdiobus_register - bring up all the PHYs on a given bus and attach them to bus + * __mdiobus_register - bring up all the PHYs on a given bus and attach them to bus * @bus: target mii_bus + * @owner: module containing bus accessor functions * * Description: Called by a bus driver to bring up all the PHYs - * on a given bus, and attach them to the bus. + * on a given bus, and attach them to the bus. Drivers should use + * mdiobus_register() rather than __mdiobus_register() unless they + * need to pass a specific owner module. * * Returns 0 on success or < 0 on error. */ -int mdiobus_register(struct mii_bus *bus) +int __mdiobus_register(struct mii_bus *bus, struct module *owner) { int i, err; @@ -253,6 +258,7 @@ int mdiobus_register(struct mii_bus *bus) BUG_ON(bus->state != MDIOBUS_ALLOCATED && bus->state != MDIOBUS_UNREGISTERED); + bus->owner = owner; bus->dev.parent = bus->parent; bus->dev.class = &mdio_bus_class; bus->dev.groups = NULL; @@ -288,13 +294,16 @@ int mdiobus_register(struct mii_bus *bus) error: while (--i >= 0) { - if (bus->phy_map[i]) - device_unregister(&bus->phy_map[i]->dev); + struct phy_device *phydev = bus->phy_map[i]; + if (phydev) { + phy_device_remove(phydev); + phy_device_free(phydev); + } } device_del(&bus->dev); return err; } -EXPORT_SYMBOL(mdiobus_register); +EXPORT_SYMBOL(__mdiobus_register); void mdiobus_unregister(struct mii_bus *bus) { @@ -304,9 +313,11 @@ void mdiobus_unregister(struct mii_bus *bus) bus->state = MDIOBUS_UNREGISTERED; for (i = 0; i < PHY_MAX_ADDR; i++) { - if (bus->phy_map[i]) - device_unregister(&bus->phy_map[i]->dev); - bus->phy_map[i] = NULL; + struct phy_device *phydev = bus->phy_map[i]; + if (phydev) { + phy_device_remove(phydev); + phy_device_free(phydev); + } } device_del(&bus->dev); } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index c0f211127274..f761288abe66 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -384,6 +384,24 @@ int phy_device_register(struct phy_device *phydev) EXPORT_SYMBOL(phy_device_register); /** + * phy_device_remove - Remove a previously registered phy device from the MDIO bus + * @phydev: phy_device structure to remove + * + * This doesn't free the phy_device itself, it merely reverses the effects + * of phy_device_register(). Use phy_device_free() to free the device + * after calling this function. 
+ */ +void phy_device_remove(struct phy_device *phydev) +{ + struct mii_bus *bus = phydev->bus; + int addr = phydev->addr; + + device_del(&phydev->dev); + bus->phy_map[addr] = NULL; +} +EXPORT_SYMBOL(phy_device_remove); + +/** * phy_find_first - finds the first PHY device on the bus * @bus: the target MII bus */ @@ -578,14 +596,22 @@ EXPORT_SYMBOL(phy_init_hw); * generic driver is used. The phy_device is given a ptr to * the attaching device, and given a callback for link status * change. The phy_device is returned to the attaching driver. + * This function takes a reference on the phy device. */ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, u32 flags, phy_interface_t interface) { + struct mii_bus *bus = phydev->bus; struct device *d = &phydev->dev; - struct module *bus_module; int err; + if (!try_module_get(bus->owner)) { + dev_err(&dev->dev, "failed to get the bus module\n"); + return -EIO; + } + + get_device(d); + /* Assume that if there is no driver, that it doesn't * exist, and we should use the genphy driver. */ @@ -600,20 +626,13 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, err = device_bind_driver(d); if (err) - return err; + goto error; } if (phydev->attached_dev) { dev_err(&dev->dev, "PHY already attached\n"); - return -EBUSY; - } - - /* Increment the bus module reference count */ - bus_module = phydev->bus->dev.driver ? - phydev->bus->dev.driver->owner : NULL; - if (!try_module_get(bus_module)) { - dev_err(&dev->dev, "failed to get the bus module\n"); - return -EIO; + err = -EBUSY; + goto error; } phydev->attached_dev = dev; @@ -636,6 +655,11 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, phy_resume(phydev); return err; + +error: + put_device(d); + module_put(bus->owner); + return err; } EXPORT_SYMBOL(phy_attach_direct); @@ -677,14 +701,15 @@ EXPORT_SYMBOL(phy_attach); /** * phy_detach - detach a PHY device from its network device * @phydev: target phy_device struct + * + * This detaches the phy device from its network device and the phy + * driver, and drops the reference count taken in phy_attach_direct(). */ void phy_detach(struct phy_device *phydev) { + struct mii_bus *bus; int i; - if (phydev->bus->dev.driver) - module_put(phydev->bus->dev.driver->owner); - phydev->attached_dev->phydev = NULL; phydev->attached_dev = NULL; phy_suspend(phydev); @@ -700,6 +725,15 @@ void phy_detach(struct phy_device *phydev) break; } } + + /* + * The phydev might go away on the put_device() below, so avoid + * a use-after-free bug by reading the underlying bus first. 
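Taken together, the phy_device.c hunks above and below change the ownership rules for attached PHYs: phy_attach_direct() now pins both the PHY's struct device and the MDIO bus driver module (bus->owner) for as long as a MAC driver is attached, and phy_detach() drops both again. A MAC driver therefore needs no extra refcounting of its own around connect/disconnect; a hedged sketch of the usual pairing, with all foo_* names hypothetical:

#include <linux/netdevice.h>
#include <linux/phy.h>

/* Hypothetical link-change callback required by phy_connect_direct(). */
static void foo_adjust_link(struct net_device *ndev)
{
        /* react to phydev->link / speed / duplex changes */
}

/* Attach: the PHY core takes the device and bus-module references. */
static int foo_open_phy(struct net_device *ndev, struct phy_device *phydev)
{
        int err;

        err = phy_connect_direct(ndev, phydev, foo_adjust_link,
                                 PHY_INTERFACE_MODE_RGMII);
        if (err)
                return err;

        phy_start(phydev);
        return 0;
}

/* Detach: phy_disconnect() ends in phy_detach(), which drops both references. */
static void foo_close_phy(struct phy_device *phydev)
{
        phy_stop(phydev);
        phy_disconnect(phydev);
}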
+ */ + bus = phydev->bus; + + put_device(&phydev->dev); + module_put(bus->owner); } EXPORT_SYMBOL(phy_detach); diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index 17cad185169d..76cad712ddb2 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -66,7 +66,6 @@ #define PHY_ID_VSC8244 0x000fc6c0 #define PHY_ID_VSC8514 0x00070670 #define PHY_ID_VSC8574 0x000704a0 -#define PHY_ID_VSC8641 0x00070431 #define PHY_ID_VSC8662 0x00070660 #define PHY_ID_VSC8221 0x000fc550 #define PHY_ID_VSC8211 0x000fc4b0 @@ -273,18 +272,6 @@ static struct phy_driver vsc82xx_driver[] = { .config_intr = &vsc82xx_config_intr, .driver = { .owner = THIS_MODULE,}, }, { - .phy_id = PHY_ID_VSC8641, - .name = "Vitesse VSC8641", - .phy_id_mask = 0x000ffff0, - .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, - .config_init = &vsc824x_config_init, - .config_aneg = &vsc82x4_config_aneg, - .read_status = &genphy_read_status, - .ack_interrupt = &vsc824x_ack_interrupt, - .config_intr = &vsc82xx_config_intr, - .driver = { .owner = THIS_MODULE,}, -}, { .phy_id = PHY_ID_VSC8662, .name = "Vitesse VSC8662", .phy_id_mask = 0x000ffff0, @@ -331,7 +318,6 @@ static struct mdio_device_id __maybe_unused vitesse_tbl[] = { { PHY_ID_VSC8244, 0x000fffc0 }, { PHY_ID_VSC8514, 0x000ffff0 }, { PHY_ID_VSC8574, 0x000ffff0 }, - { PHY_ID_VSC8641, 0x000ffff0 }, { PHY_ID_VSC8662, 0x000ffff0 }, { PHY_ID_VSC8221, 0x000ffff0 }, { PHY_ID_VSC8211, 0x000ffff0 }, diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 0481daf9201a..ed00446759b2 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -2755,6 +2755,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit, */ dev_net_set(dev, net); + rtnl_lock(); mutex_lock(&pn->all_ppp_mutex); if (unit < 0) { @@ -2785,7 +2786,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit, ppp->file.index = unit; sprintf(dev->name, "ppp%d", unit); - ret = register_netdev(dev); + ret = register_netdevice(dev); if (ret != 0) { unit_put(&pn->units_idr, unit); netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n", @@ -2797,6 +2798,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit, atomic_inc(&ppp_unit_count); mutex_unlock(&pn->all_ppp_mutex); + rtnl_unlock(); *retp = 0; return ppp; diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 3837ae344f63..2ed75060da50 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -313,7 +313,6 @@ static void pppoe_flush_dev(struct net_device *dev) if (po->pppoe_dev == dev && sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) { pppox_unbind_sock(sk); - sk->sk_state = PPPOX_ZOMBIE; sk->sk_state_change(sk); po->pppoe_dev = NULL; dev_put(dev); diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 1610b79ae386..e66805eeffb4 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -164,6 +164,7 @@ config USB_NET_AX8817X * Aten UC210T * ASIX AX88172 * Billionton Systems, USB2AR + * Billionton Systems, GUSB2AM-1G-B * Buffalo LUA-U2-KTX * Corega FEther USB2-TX * D-Link DUB-E100 @@ -583,4 +584,15 @@ config USB_VL600 http://ubuntuforums.org/showpost.php?p=10589647&postcount=17 +config USB_NET_CH9200 + tristate "QingHeng CH9200 USB ethernet support" + depends on USB_USBNET + select MII + help + Choose this option if you have a USB ethernet adapter with a QinHeng + CH9200 chipset. + + To compile this driver as a module, choose M here: the + module will be called ch9200. 
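Stepping back to the ppp_generic.c hunk above: once ppp_create_interface() takes rtnl_lock() itself, it has to switch from register_netdev() to register_netdevice(), because register_netdev() is only a wrapper that grabs RTNL around register_netdevice() and would deadlock against the lock already held. A minimal reminder of that pattern, with the helper name hypothetical:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Hypothetical registration helper where the caller manages RTNL itself. */
static int foo_register_under_rtnl(struct net_device *dev)
{
        int err;

        rtnl_lock();
        /* ... other setup that must run under RTNL ... */
        err = register_netdevice(dev);  /* RTNL already held; not register_netdev() */
        rtnl_unlock();

        return err;
}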
+ endif # USB_NET_DRIVERS diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile index cf6a0e610a7f..b5f04068dbe4 100644 --- a/drivers/net/usb/Makefile +++ b/drivers/net/usb/Makefile @@ -38,4 +38,4 @@ obj-$(CONFIG_USB_NET_HUAWEI_CDC_NCM) += huawei_cdc_ncm.o obj-$(CONFIG_USB_VL600) += lg-vl600.o obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o - +obj-$(CONFIG_USB_NET_CH9200) += ch9200.o diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 75d6f26729a3..079069a060a6 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -91,8 +91,10 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, } rx->ax_skb = netdev_alloc_skb_ip_align(dev->net, rx->size); - if (!rx->ax_skb) + if (!rx->ax_skb) { + rx->size = 0; return 0; + } } if (rx->size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) { diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 1173a24feda3..5cabefc23494 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -959,6 +959,10 @@ static const struct usb_device_id products [] = { USB_DEVICE (0x08dd, 0x90ff), .driver_info = (unsigned long) &ax8817x_info, }, { + // Billionton Systems, GUSB2AM-1G-B + USB_DEVICE(0x08dd, 0x0114), + .driver_info = (unsigned long) &ax88178_info, +}, { // ATEN UC210T USB_DEVICE (0x0557, 0x2009), .driver_info = (unsigned long) &ax8817x_info, diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c new file mode 100644 index 000000000000..5e151e6a3e09 --- /dev/null +++ b/drivers/net/usb/ch9200.c @@ -0,0 +1,432 @@ +/* + * USB 10M/100M ethernet adapter + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/stddef.h> +#include <linux/init.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/usb.h> +#include <linux/crc32.h> +#include <linux/usb/usbnet.h> +#include <linux/slab.h> + +#define CH9200_VID 0x1A86 +#define CH9200_PID_E092 0xE092 + +#define CTRL_TIMEOUT_MS 1000 + +#define CONTROL_TIMEOUT_MS 1000 + +#define REQUEST_READ 0x0E +#define REQUEST_WRITE 0x0F + +/* Address space: + * 00-63 : MII + * 64-128: MAC + * + * Note: all accesses must be 16-bit + */ + +#define MAC_REG_CTRL 64 +#define MAC_REG_STATUS 66 +#define MAC_REG_INTERRUPT_MASK 68 +#define MAC_REG_PHY_COMMAND 70 +#define MAC_REG_PHY_DATA 72 +#define MAC_REG_STATION_L 74 +#define MAC_REG_STATION_M 76 +#define MAC_REG_STATION_H 78 +#define MAC_REG_HASH_L 80 +#define MAC_REG_HASH_M1 82 +#define MAC_REG_HASH_M2 84 +#define MAC_REG_HASH_H 86 +#define MAC_REG_THRESHOLD 88 +#define MAC_REG_FIFO_DEPTH 90 +#define MAC_REG_PAUSE 92 +#define MAC_REG_FLOW_CONTROL 94 + +/* Control register bits + * + * Note: bits 13 and 15 are reserved + */ +#define LOOPBACK (0x01 << 14) +#define BASE100X (0x01 << 12) +#define MBPS_10 (0x01 << 11) +#define DUPLEX_MODE (0x01 << 10) +#define PAUSE_FRAME (0x01 << 9) +#define PROMISCUOUS (0x01 << 8) +#define MULTICAST (0x01 << 7) +#define BROADCAST (0x01 << 6) +#define HASH (0x01 << 5) +#define APPEND_PAD (0x01 << 4) +#define APPEND_CRC (0x01 << 3) +#define TRANSMITTER_ACTION (0x01 << 2) +#define RECEIVER_ACTION (0x01 << 1) +#define DMA_ACTION (0x01 << 0) + +/* Status register bits + * + * 
Note: bits 7-15 are reserved + */ +#define ALIGNMENT (0x01 << 6) +#define FIFO_OVER_RUN (0x01 << 5) +#define FIFO_UNDER_RUN (0x01 << 4) +#define RX_ERROR (0x01 << 3) +#define RX_COMPLETE (0x01 << 2) +#define TX_ERROR (0x01 << 1) +#define TX_COMPLETE (0x01 << 0) + +/* FIFO depth register bits + * + * Note: bits 6 and 14 are reserved + */ + +#define ETH_TXBD (0x01 << 15) +#define ETN_TX_FIFO_DEPTH (0x01 << 8) +#define ETH_RXBD (0x01 << 7) +#define ETH_RX_FIFO_DEPTH (0x01 << 0) + +static int control_read(struct usbnet *dev, + unsigned char request, unsigned short value, + unsigned short index, void *data, unsigned short size, + int timeout) +{ + unsigned char *buf = NULL; + unsigned char request_type; + int err = 0; + + if (request == REQUEST_READ) + request_type = (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER); + else + request_type = (USB_DIR_IN | USB_TYPE_VENDOR | + USB_RECIP_DEVICE); + + netdev_dbg(dev->net, "Control_read() index=0x%02x size=%d\n", + index, size); + + buf = kmalloc(size, GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto err_out; + } + + err = usb_control_msg(dev->udev, + usb_rcvctrlpipe(dev->udev, 0), + request, request_type, value, index, buf, size, + timeout); + if (err == size) + memcpy(data, buf, size); + else if (err >= 0) + err = -EINVAL; + kfree(buf); + + return err; + +err_out: + return err; +} + +static int control_write(struct usbnet *dev, unsigned char request, + unsigned short value, unsigned short index, + void *data, unsigned short size, int timeout) +{ + unsigned char *buf = NULL; + unsigned char request_type; + int err = 0; + + if (request == REQUEST_WRITE) + request_type = (USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_OTHER); + else + request_type = (USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_DEVICE); + + netdev_dbg(dev->net, "Control_write() index=0x%02x size=%d\n", + index, size); + + if (data) { + buf = kmalloc(size, GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto err_out; + } + memcpy(buf, data, size); + } + + err = usb_control_msg(dev->udev, + usb_sndctrlpipe(dev->udev, 0), + request, request_type, value, index, buf, size, + timeout); + if (err >= 0 && err < size) + err = -EINVAL; + kfree(buf); + + return 0; + +err_out: + return err; +} + +static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc) +{ + struct usbnet *dev = netdev_priv(netdev); + unsigned char buff[2]; + + netdev_dbg(netdev, "ch9200_mdio_read phy_id:%02x loc:%02x\n", + phy_id, loc); + + if (phy_id != 0) + return -ENODEV; + + control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02, + CONTROL_TIMEOUT_MS); + + return (buff[0] | buff[1] << 8); +} + +static void ch9200_mdio_write(struct net_device *netdev, + int phy_id, int loc, int val) +{ + struct usbnet *dev = netdev_priv(netdev); + unsigned char buff[2]; + + netdev_dbg(netdev, "ch9200_mdio_write() phy_id=%02x loc:%02x\n", + phy_id, loc); + + if (phy_id != 0) + return; + + buff[0] = (unsigned char)val; + buff[1] = (unsigned char)(val >> 8); + + control_write(dev, REQUEST_WRITE, 0, loc * 2, buff, 0x02, + CONTROL_TIMEOUT_MS); +} + +static int ch9200_link_reset(struct usbnet *dev) +{ + struct ethtool_cmd ecmd; + + mii_check_media(&dev->mii, 1, 1); + mii_ethtool_gset(&dev->mii, &ecmd); + + netdev_dbg(dev->net, "link_reset() speed:%d duplex:%d\n", + ecmd.speed, ecmd.duplex); + + return 0; +} + +static void ch9200_status(struct usbnet *dev, struct urb *urb) +{ + int link; + unsigned char *buf; + + if (urb->actual_length < 16) + return; + + buf = urb->transfer_buffer; + link = !!(buf[0] & 0x01); + + if (link) { + 
netif_carrier_on(dev->net); + usbnet_defer_kevent(dev, EVENT_LINK_RESET); + } else { + netif_carrier_off(dev->net); + } +} + +static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb, + gfp_t flags) +{ + int i = 0; + int len = 0; + int tx_overhead = 0; + + tx_overhead = 0x40; + + len = skb->len; + if (skb_headroom(skb) < tx_overhead) { + struct sk_buff *skb2; + + skb2 = skb_copy_expand(skb, tx_overhead, 0, flags); + dev_kfree_skb_any(skb); + skb = skb2; + if (!skb) + return NULL; + } + + __skb_push(skb, tx_overhead); + /* usbnet adds padding if length is a multiple of packet size + * if so, adjust length value in header + */ + if ((skb->len % dev->maxpacket) == 0) + len++; + + skb->data[0] = len; + skb->data[1] = len >> 8; + skb->data[2] = 0x00; + skb->data[3] = 0x80; + + for (i = 4; i < 48; i++) + skb->data[i] = 0x00; + + skb->data[48] = len; + skb->data[49] = len >> 8; + skb->data[50] = 0x00; + skb->data[51] = 0x80; + + for (i = 52; i < 64; i++) + skb->data[i] = 0x00; + + return skb; +} + +static int ch9200_rx_fixup(struct usbnet *dev, struct sk_buff *skb) +{ + int len = 0; + int rx_overhead = 0; + + rx_overhead = 64; + + if (unlikely(skb->len < rx_overhead)) { + dev_err(&dev->udev->dev, "unexpected tiny rx frame\n"); + return 0; + } + + len = (skb->data[skb->len - 16] | skb->data[skb->len - 15] << 8); + skb_trim(skb, len); + + return 1; +} + +static int get_mac_address(struct usbnet *dev, unsigned char *data) +{ + int err = 0; + unsigned char mac_addr[0x06]; + int rd_mac_len = 0; + + netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n", + dev->udev->descriptor.idVendor, + dev->udev->descriptor.idProduct); + + memset(mac_addr, 0, sizeof(mac_addr)); + rd_mac_len = control_read(dev, REQUEST_READ, 0, + MAC_REG_STATION_L, mac_addr, 0x02, + CONTROL_TIMEOUT_MS); + rd_mac_len += control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_M, + mac_addr + 2, 0x02, CONTROL_TIMEOUT_MS); + rd_mac_len += control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_H, + mac_addr + 4, 0x02, CONTROL_TIMEOUT_MS); + if (rd_mac_len != ETH_ALEN) + err = -EINVAL; + + data[0] = mac_addr[5]; + data[1] = mac_addr[4]; + data[2] = mac_addr[3]; + data[3] = mac_addr[2]; + data[4] = mac_addr[1]; + data[5] = mac_addr[0]; + + return err; +} + +static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf) +{ + int retval = 0; + unsigned char data[2]; + + retval = usbnet_get_endpoints(dev, intf); + if (retval) + return retval; + + dev->mii.dev = dev->net; + dev->mii.mdio_read = ch9200_mdio_read; + dev->mii.mdio_write = ch9200_mdio_write; + dev->mii.reg_num_mask = 0x1f; + + dev->mii.phy_id_mask = 0x1f; + + dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; + dev->rx_urb_size = 24 * 64 + 16; + mii_nway_restart(&dev->mii); + + data[0] = 0x01; + data[1] = 0x0F; + retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_THRESHOLD, data, + 0x02, CONTROL_TIMEOUT_MS); + + data[0] = 0xA0; + data[1] = 0x90; + retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_FIFO_DEPTH, data, + 0x02, CONTROL_TIMEOUT_MS); + + data[0] = 0x30; + data[1] = 0x00; + retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_PAUSE, data, + 0x02, CONTROL_TIMEOUT_MS); + + data[0] = 0x17; + data[1] = 0xD8; + retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_FLOW_CONTROL, + data, 0x02, CONTROL_TIMEOUT_MS); + + /* Undocumented register */ + data[0] = 0x01; + data[1] = 0x00; + retval = control_write(dev, REQUEST_WRITE, 0, 254, data, 0x02, + CONTROL_TIMEOUT_MS); + + data[0] = 0x5F; + data[1] = 0x0D; + retval = 
control_write(dev, REQUEST_WRITE, 0, MAC_REG_CTRL, data, 0x02, + CONTROL_TIMEOUT_MS); + + retval = get_mac_address(dev, dev->net->dev_addr); + + return retval; +} + +static const struct driver_info ch9200_info = { + .description = "CH9200 USB to Network Adaptor", + .flags = FLAG_ETHER, + .bind = ch9200_bind, + .rx_fixup = ch9200_rx_fixup, + .tx_fixup = ch9200_tx_fixup, + .status = ch9200_status, + .link_reset = ch9200_link_reset, + .reset = ch9200_link_reset, +}; + +static const struct usb_device_id ch9200_products[] = { + { + USB_DEVICE(0x1A86, 0xE092), + .driver_info = (unsigned long)&ch9200_info, + }, + {}, +}; + +MODULE_DEVICE_TABLE(usb, ch9200_products); + +static struct usb_driver ch9200_driver = { + .name = "ch9200", + .id_table = ch9200_products, + .probe = usbnet_probe, + .disconnect = usbnet_disconnect, + .suspend = usbnet_suspend, + .resume = usbnet_resume, +}; + +module_usb_driver(ch9200_driver); + +MODULE_DESCRIPTION("QinHeng CH9200 USB Network device"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index e7094fbd7568..488c6f50df73 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -193,7 +193,8 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, .flowi4_oif = vrf_dev->ifindex, .flowi4_iif = LOOPBACK_IFINDEX, .flowi4_tos = RT_TOS(ip4h->tos), - .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC, + .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC | + FLOWI_FLAG_SKIP_NH_OIF, .daddr = ip4h->daddr, }; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index cf8b7f0473b3..afdc65fd5bc5 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2392,10 +2392,6 @@ static void vxlan_setup(struct net_device *dev) eth_hw_addr_random(dev); ether_setup(dev); - if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6) - dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM; - else - dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM; dev->netdev_ops = &vxlan_netdev_ops; dev->destructor = free_netdev; @@ -2640,8 +2636,11 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, dst->remote_ip.sa.sa_family = AF_INET; if (dst->remote_ip.sa.sa_family == AF_INET6 || - vxlan->cfg.saddr.sa.sa_family == AF_INET6) + vxlan->cfg.saddr.sa.sa_family == AF_INET6) { + if (!IS_ENABLED(CONFIG_IPV6)) + return -EPFNOSUPPORT; use_ipv6 = true; + } if (conf->remote_ifindex) { struct net_device *lowerdev @@ -2670,8 +2669,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, dev->needed_headroom = lowerdev->hard_header_len + (use_ipv6 ? 
VXLAN6_HEADROOM : VXLAN_HEADROOM); - } else if (use_ipv6) + } else if (use_ipv6) { vxlan->flags |= VXLAN_F_IPV6; + dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM; + } else { + dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM; + } memcpy(&vxlan->cfg, conf, sizeof(*conf)); if (!vxlan->cfg.dst_port) @@ -2742,11 +2745,10 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, struct vxlan_config conf; int err; - if (!data[IFLA_VXLAN_ID]) - return -EINVAL; - memset(&conf, 0, sizeof(conf)); - conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]); + + if (data[IFLA_VXLAN_ID]) + conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]); if (data[IFLA_VXLAN_GROUP]) { conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]); diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 23afcda2de96..678d72af4a9d 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -337,7 +337,7 @@ enum ath10k_hw_rate_cck { #define TARGET_10X_MAX_FRAG_ENTRIES 0 /* 10.2 parameters */ -#define TARGET_10_2_DMA_BURST_SIZE 1 +#define TARGET_10_2_DMA_BURST_SIZE 0 /* Target specific defines for WMI-TLV firmware */ #define TARGET_TLV_NUM_VDEVS 4 @@ -391,7 +391,7 @@ enum ath10k_hw_rate_cck { #define TARGET_10_4_TX_DBG_LOG_SIZE 1024 #define TARGET_10_4_NUM_WDS_ENTRIES 32 -#define TARGET_10_4_DMA_BURST_SIZE 1 +#define TARGET_10_4_DMA_BURST_SIZE 0 #define TARGET_10_4_MAC_AGGR_DELIM 0 #define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1 #define TARGET_10_4_VOW_CONFIG 0 diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 57f95f2dca5b..90eb75012e4f 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -880,6 +880,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) hw->max_rate_tries = 10; hw->sta_data_size = sizeof(struct ath_node); hw->vif_data_size = sizeof(struct ath_vif); + hw->extra_tx_headroom = 4; hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1; hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1; diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 28490702124a..71d3e9adbf3c 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -120,6 +120,7 @@ MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even it if over #ifdef CONFIG_B43_BCMA static const struct bcma_device_id b43_bcma_tbl[] = { BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS), + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x15, BCMA_ANY_CLASS), BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS), BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS), BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1C, BCMA_ANY_CLASS), diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c index ab45819c1fbb..e18629a16fb0 100644 --- a/drivers/net/wireless/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/iwlwifi/dvm/lib.c @@ -1020,7 +1020,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw, u8 *pn = seq.ccmp.pn; ieee80211_get_key_rx_seq(key, i, &seq); - aes_sc->pn = cpu_to_le64( + aes_sc[i].pn = cpu_to_le64( (u64)pn[5] | ((u64)pn[4] << 8) | ((u64)pn[3] << 16) | diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 6951aba620eb..3fb327d5a911 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ 
b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -348,6 +348,6 @@ const struct iwl_cfg iwl7265d_n_cfg = { }; MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); -MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); +MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 04264e417c1c..576187611e61 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -274,18 +274,13 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, break; case WLAN_CIPHER_SUITE_CCMP: if (sta) { - u8 *pn = seq.ccmp.pn; + u64 pn64; aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; - ieee80211_get_key_tx_seq(key, &seq); - aes_tx_sc->pn = cpu_to_le64((u64)pn[5] | - ((u64)pn[4] << 8) | - ((u64)pn[3] << 16) | - ((u64)pn[2] << 24) | - ((u64)pn[1] << 32) | - ((u64)pn[0] << 40)); + pn64 = atomic64_read(&key->tx_pn); + aes_tx_sc->pn = cpu_to_le64(pn64); } else { aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; } @@ -298,12 +293,12 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, u8 *pn = seq.ccmp.pn; ieee80211_get_key_rx_seq(key, i, &seq); - aes_sc->pn = cpu_to_le64((u64)pn[5] | - ((u64)pn[4] << 8) | - ((u64)pn[3] << 16) | - ((u64)pn[2] << 24) | - ((u64)pn[1] << 32) | - ((u64)pn[0] << 40)); + aes_sc[i].pn = cpu_to_le64((u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); } data->use_rsc_tsc = true; break; @@ -1453,15 +1448,15 @@ static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw, switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: - iwl_mvm_aes_sc_to_seq(&sc->aes.tsc, &seq); iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key); + atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn)); break; case WLAN_CIPHER_SUITE_TKIP: iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq); iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key); + ieee80211_set_key_tx_seq(key, &seq); break; } - ieee80211_set_key_tx_seq(key, &seq); /* that's it for this key */ return; diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index 4a0ce83315bd..5c7f7cc9ffcc 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -703,7 +703,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) * abort after reading the nvm in case RF Kill is on, we will complete * the init seq later when RF kill will switch to off */ - if (iwl_mvm_is_radio_killed(mvm)) { + if (iwl_mvm_is_radio_hw_killed(mvm)) { IWL_DEBUG_RF_KILL(mvm, "jump over all phy activities due to RF kill\n"); iwl_remove_notification(&mvm->notif_wait, &calib_wait); @@ -736,7 +736,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait, MVM_UCODE_CALIB_TIMEOUT); - if (ret && iwl_mvm_is_radio_killed(mvm)) { + if (ret && iwl_mvm_is_radio_hw_killed(mvm)) { IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); ret = 1; } diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index aa8c2b7f23c7..7c2944a72470 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -2388,6 +2388,7 @@ static void 
iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); RCU_INIT_POINTER(mvm->csa_vif, NULL); + mvmvif->csa_countdown = false; } if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) { diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index b95a07ec9e36..c754051a4cea 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -860,6 +860,11 @@ static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); } +static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm) +{ + return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); +} + /* Must be called with rcu_read_lock() held and it can only be * released when mvmsta is not needed anymore. */ diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index a37de3f410a0..f0cb092f980e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -590,6 +590,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, ieee80211_unregister_hw(mvm->hw); iwl_mvm_leds_exit(mvm); out_free: + flush_delayed_work(&mvm->fw_dump_wk); iwl_phy_db_free(mvm->phy_db); kfree(mvm->scan_cmd); if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name) diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index b0825c402c73..644b58bc5226 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -414,6 +414,11 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)}, /* 8000 Series */ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 5932306084fd..bf9afbf46c1b 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -1114,6 +1114,7 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x0db0, 0x871c) }, { USB_DEVICE(0x0db0, 0x899a) }, /* Ovislink */ + { USB_DEVICE(0x1b75, 0x3070) }, { USB_DEVICE(0x1b75, 0x3071) }, { USB_DEVICE(0x1b75, 0x3072) }, { USB_DEVICE(0x1b75, 0xa200) }, diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h index d4567d12e07e..5da6703942d9 100644 --- a/drivers/net/wireless/rtlwifi/pci.h +++ b/drivers/net/wireless/rtlwifi/pci.h @@ -247,6 +247,8 @@ struct rtl_pci { /* MSI support */ bool msi_support; bool using_msi; + /* interrupt clear before set */ + bool int_clear; }; struct mp_adapter { diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c index b7f18e2155eb..6e9418ed90c2 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c @@ -2253,11 +2253,28 @@ void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci) } } +static void rtl8821ae_clear_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 tmp = rtl_read_dword(rtlpriv, REG_HISR); + + 
rtl_write_dword(rtlpriv, REG_HISR, tmp); + + tmp = rtl_read_dword(rtlpriv, REG_HISRE); + rtl_write_dword(rtlpriv, REG_HISRE, tmp); + + tmp = rtl_read_dword(rtlpriv, REG_HSISR); + rtl_write_dword(rtlpriv, REG_HSISR, tmp); +} + void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + if (!rtlpci->int_clear) + rtl8821ae_clear_interrupt(hw);/*clear it here first*/ + rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); rtlpci->irq_enabled = true; diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c index a4988121e1ab..8ee141a55bc5 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c @@ -96,6 +96,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtl8821ae_bt_reg_init(hw); rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; + rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear; rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); rtlpriv->dm.dm_initialgain_enable = 1; @@ -167,6 +168,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; + rtlpci->msi_support = rtlpriv->cfg->mod_params->int_clear; if (rtlpriv->cfg->mod_params->disable_watchdog) pr_info("watchdog disabled\n"); rtlpriv->psc.reg_fwctrl_lps = 3; @@ -308,6 +310,7 @@ static struct rtl_mod_params rtl8821ae_mod_params = { .swctrl_lps = false, .fwctrl_lps = true, .msi_support = true, + .int_clear = true, .debug = DBG_EMERG, .disable_watchdog = 0, }; @@ -437,6 +440,7 @@ module_param_named(fwlps, rtl8821ae_mod_params.fwctrl_lps, bool, 0444); module_param_named(msi, rtl8821ae_mod_params.msi_support, bool, 0444); module_param_named(disable_watchdog, rtl8821ae_mod_params.disable_watchdog, bool, 0444); +module_param_named(int_clear, rtl8821ae_mod_params.int_clear, bool, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); @@ -444,6 +448,7 @@ MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); +MODULE_PARM_DESC(int_clear, "Set to 1 to disable interrupt clear before set (default 0)\n"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h index b90ca618b123..4544752a2ba8 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/rtlwifi/wifi.h @@ -2249,6 +2249,9 @@ struct rtl_mod_params { /* default 0: 1 means disable */ bool disable_watchdog; + + /* default 0: 1 means do not disable interrupts */ + bool int_clear; }; struct rtl_hal_usbint_cfg { diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 929a6e7e5ecf..56ebd8267386 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -788,6 +788,12 @@ static void connect(struct backend_info 
*be) /* Use the number of queues requested by the frontend */ be->vif->queues = vzalloc(requested_num_queues * sizeof(struct xenvif_queue)); + if (!be->vif->queues) { + xenbus_dev_fatal(dev, -ENOMEM, + "allocating queues"); + return; + } + be->vif->num_queues = requested_num_queues; be->vif->stalled_queues = requested_num_queues; diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index 59ad54a63d9f..cb477518dd0e 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c @@ -128,13 +128,13 @@ static ssize_t namespace_store(struct device *dev, struct nd_btt *nd_btt = to_nd_btt(dev); ssize_t rc; - nvdimm_bus_lock(dev); device_lock(dev); + nvdimm_bus_lock(dev); rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len); dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, rc, buf, buf[len - 1] == '\n' ? "" : "\n"); - device_unlock(dev); nvdimm_bus_unlock(dev); + device_unlock(dev); return rc; } diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 3fd7d0d81a47..71805a1aa0f3 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -148,13 +148,13 @@ static ssize_t namespace_store(struct device *dev, struct nd_pfn *nd_pfn = to_nd_pfn(dev); ssize_t rc; - nvdimm_bus_lock(dev); device_lock(dev); + nvdimm_bus_lock(dev); rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len); dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, rc, buf, buf[len - 1] == '\n' ? "" : "\n"); - device_unlock(dev); nvdimm_bus_unlock(dev); + device_unlock(dev); return rc; } diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index b9525385c0dc..0ba6a978f227 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -92,6 +92,8 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector, struct pmem_device *pmem = bdev->bd_disk->private_data; pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector); + if (rw & WRITE) + wmb_pmem(); page_endio(page, rw & WRITE, 0); return 0; diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index d3c6676b3c0c..6fd4e5a5ef4a 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -67,7 +67,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, int rc; /* Stop the user from reading */ - if (pos > nvmem->size) + if (pos >= nvmem->size) return 0; if (pos + count > nvmem->size) @@ -92,7 +92,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, int rc; /* Stop the user from writing */ - if (pos > nvmem->size) + if (pos >= nvmem->size) return 0; if (pos + count > nvmem->size) @@ -825,7 +825,7 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem, return rc; /* shift bits in-place */ - if (cell->bit_offset || cell->bit_offset) + if (cell->bit_offset || cell->nbits) nvmem_shift_read_buffer_in_place(cell, buf); *len = cell->bytes; @@ -938,7 +938,7 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes); /* free the tmp buffer */ - if (cell->bit_offset) + if (cell->bit_offset || cell->nbits) kfree(buf); if (IS_ERR_VALUE(rc)) diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c index 14777dd5212d..cfa3b85064dd 100644 --- a/drivers/nvmem/sunxi_sid.c +++ b/drivers/nvmem/sunxi_sid.c @@ -103,7 +103,7 @@ static int sunxi_sid_probe(struct platform_device *pdev) struct nvmem_device *nvmem; struct regmap *regmap; struct sunxi_sid *sid; - int i, size; + int ret, i, size; char *randomness; sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL); @@ -131,6 +131,11 @@ 
static int sunxi_sid_probe(struct platform_device *pdev) return PTR_ERR(nvmem); randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL); + if (!randomness) { + ret = -EINVAL; + goto err_unreg_nvmem; + } + for (i = 0; i < size; i++) randomness[i] = sunxi_sid_read_byte(sid, i); @@ -140,6 +145,10 @@ static int sunxi_sid_probe(struct platform_device *pdev) platform_set_drvdata(pdev, nvmem); return 0; + +err_unreg_nvmem: + nvmem_unregister(nvmem); + return ret; } static int sunxi_sid_remove(struct platform_device *pdev) diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 1350fa25cdb0..a87a868fed64 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -197,7 +197,8 @@ static int of_phy_match(struct device *dev, void *phy_np) * of_phy_find_device - Give a PHY node, find the phy_device * @phy_np: Pointer to the phy's device tree node * - * Returns a pointer to the phy_device. + * If successful, returns a pointer to the phy_device with the embedded + * struct device refcount incremented by one, or NULL on failure. */ struct phy_device *of_phy_find_device(struct device_node *phy_np) { @@ -217,7 +218,9 @@ EXPORT_SYMBOL(of_phy_find_device); * @hndlr: Link state callback for the network device * @iface: PHY data interface type * - * Returns a pointer to the phy_device if successful. NULL otherwise + * If successful, returns a pointer to the phy_device with the embedded + * struct device refcount incremented by one, or NULL on failure. The + * refcount must be dropped by calling phy_disconnect() or phy_detach(). */ struct phy_device *of_phy_connect(struct net_device *dev, struct device_node *phy_np, @@ -225,13 +228,19 @@ struct phy_device *of_phy_connect(struct net_device *dev, phy_interface_t iface) { struct phy_device *phy = of_phy_find_device(phy_np); + int ret; if (!phy) return NULL; phy->dev_flags = flags; - return phy_connect_direct(dev, phy, hndlr, iface) ? NULL : phy; + ret = phy_connect_direct(dev, phy, hndlr, iface); + + /* refcount is held by phy_connect_direct() on success */ + put_device(&phy->dev); + + return ret ? NULL : phy; } EXPORT_SYMBOL(of_phy_connect); @@ -241,17 +250,27 @@ EXPORT_SYMBOL(of_phy_connect); * @phy_np: Node pointer for the PHY * @flags: flags to pass to the PHY * @iface: PHY data interface type + * + * If successful, returns a pointer to the phy_device with the embedded + * struct device refcount incremented by one, or NULL on failure. The + * refcount must be dropped by calling phy_disconnect() or phy_detach(). */ struct phy_device *of_phy_attach(struct net_device *dev, struct device_node *phy_np, u32 flags, phy_interface_t iface) { struct phy_device *phy = of_phy_find_device(phy_np); + int ret; if (!phy) return NULL; - return phy_attach_direct(dev, phy, flags, iface) ? NULL : phy; + ret = phy_attach_direct(dev, phy, flags, iface); + + /* refcount is held by phy_attach_direct() on success */ + put_device(&phy->dev); + + return ret ? NULL : phy; } EXPORT_SYMBOL(of_phy_attach); diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c index 1710d9dc7fc2..2306313c0029 100644 --- a/drivers/of/of_pci_irq.c +++ b/drivers/of/of_pci_irq.c @@ -38,8 +38,8 @@ int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq */ rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); if (rc != 0) - return rc; - /* No pin, exit */ + goto err; + /* No pin, exit with no error message. 
*/ if (pin == 0) return -ENODEV; @@ -53,8 +53,10 @@ int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq ppnode = pci_bus_to_OF_node(pdev->bus); /* No node for host bridge ? give up */ - if (ppnode == NULL) - return -EINVAL; + if (ppnode == NULL) { + rc = -EINVAL; + goto err; + } } else { /* We found a P2P bridge, check if it has a node */ ppnode = pci_device_to_OF_node(ppdev); @@ -86,7 +88,13 @@ int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq out_irq->args[0] = pin; laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8)); laddr[1] = laddr[2] = cpu_to_be32(0); - return of_irq_parse_raw(laddr, out_irq); + rc = of_irq_parse_raw(laddr, out_irq); + if (rc) + goto err; + return 0; +err: + dev_err(&pdev->dev, "of_irq_parse_pci() failed with rc=%d\n", rc); + return rc; } EXPORT_SYMBOL_GPL(of_irq_parse_pci); @@ -105,10 +113,8 @@ int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin) int ret; ret = of_irq_parse_pci(dev, &oirq); - if (ret) { - dev_err(&dev->dev, "of_irq_parse_pci() failed with rc=%d\n", ret); + if (ret) return 0; /* Proper return code 0 == NO_IRQ */ - } return irq_create_of_mapping(&oirq); } diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index baec33c4e698..a0580afe1713 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c @@ -560,6 +560,9 @@ dino_fixup_bus(struct pci_bus *bus) } else if (bus->parent) { int i; + pci_read_bridge_bases(bus); + + for(i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { if((bus->self->resource[i].flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0) diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index 7b9e89ba0465..a32c1f6c252c 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c @@ -693,6 +693,7 @@ lba_fixup_bus(struct pci_bus *bus) if (bus->parent) { int i; /* PCI-PCI Bridge */ + pci_read_bridge_bases(bus); for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) pci_claim_bridge_resource(bus->self, i); } else { diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 769f7e35f1a2..59ac36fe7c42 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -442,7 +442,8 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = { static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count, void *arg) { - struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn)); + struct pci_dev *tdev = pci_get_slot(dev->bus, + PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); ssize_t ret; if (!tdev) @@ -456,7 +457,8 @@ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count, static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count, const void *arg) { - struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn)); + struct pci_dev *tdev = pci_get_slot(dev->bus, + PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); ssize_t ret; if (!tdev) @@ -473,22 +475,6 @@ static const struct pci_vpd_ops pci_vpd_f0_ops = { .release = pci_vpd_pci22_release, }; -static int pci_vpd_f0_dev_check(struct pci_dev *dev) -{ - struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn)); - int ret = 0; - - if (!tdev) - return -ENODEV; - if (!tdev->vpd || !tdev->multifunction || - dev->class != tdev->class || dev->vendor != tdev->vendor || - dev->device != tdev->device) - ret = -ENODEV; - - pci_dev_put(tdev); - return ret; -} - int pci_vpd_pci22_init(struct pci_dev *dev) { struct pci_vpd_pci22 *vpd; @@ -497,12 +483,7 @@ int pci_vpd_pci22_init(struct pci_dev *dev) cap = 
pci_find_capability(dev, PCI_CAP_ID_VPD); if (!cap) return -ENODEV; - if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) { - int ret = pci_vpd_f0_dev_check(dev); - if (ret) - return ret; - } vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC); if (!vpd) return -ENOMEM; diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 6fbd3f2b5992..d3346d23963b 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -256,6 +256,8 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx) res->start = start; res->end = end; + res->flags &= ~IORESOURCE_UNSET; + orig_res.flags &= ~IORESOURCE_UNSET; dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n", &orig_res, res); diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c index 81253e70b1c5..0aa81bd3de12 100644 --- a/drivers/pci/host/pci-keystone.c +++ b/drivers/pci/host/pci-keystone.c @@ -110,7 +110,7 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) return -EINVAL; } -static void ks_pcie_msi_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void ks_pcie_msi_irq_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); @@ -138,8 +138,7 @@ static void ks_pcie_msi_irq_handler(unsigned int __irq, struct irq_desc *desc) * Traverse through pending legacy interrupts and invoke handler for each. Also * takes care of interrupt controller level mask/ack operation. */ -static void ks_pcie_legacy_irq_handler(unsigned int __irq, - struct irq_desc *desc) +static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c index 367e28fa7564..c4f64bfee551 100644 --- a/drivers/pci/host/pci-rcar-gen2.c +++ b/drivers/pci/host/pci-rcar-gen2.c @@ -362,6 +362,7 @@ static int rcar_pci_probe(struct platform_device *pdev) static struct of_device_id rcar_pci_of_match[] = { { .compatible = "renesas,pci-r8a7790", }, { .compatible = "renesas,pci-r8a7791", }, + { .compatible = "renesas,pci-r8a7794", }, { }, }; diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c index 996327cfa1e1..e491681daf22 100644 --- a/drivers/pci/host/pci-xgene-msi.c +++ b/drivers/pci/host/pci-xgene-msi.c @@ -295,7 +295,7 @@ static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi) return 0; } -static void xgene_msi_isr(unsigned int irq, struct irq_desc *desc) +static void xgene_msi_isr(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct xgene_msi_group *msi_groups; diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index d4497141d083..4a7da3c3e035 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1243,6 +1243,10 @@ static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info) BUG_ON(!chip); if (!chip->irq_write_msi_msg) chip->irq_write_msi_msg = pci_msi_domain_write_msg; + if (!chip->irq_mask) + chip->irq_mask = pci_msi_mask_irq; + if (!chip->irq_unmask) + chip->irq_unmask = pci_msi_unmask_irq; } /** diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index dd652f2ae03d..108a3118ace7 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -299,9 +299,10 @@ static long local_pci_probe(void *_ddi) * Unbound PCI devices are always put in D0, regardless of * runtime PM status. During probe, the device is set to * active and the usage count is incremented. 
If the driver - * supports runtime PM, it should call pm_runtime_put_noidle() - * in its probe routine and pm_runtime_get_noresume() in its - * remove routine. + * supports runtime PM, it should call pm_runtime_put_noidle(), + * or any other runtime PM helper function decrementing the usage + * count, in its probe routine and pm_runtime_get_noresume() in + * its remove routine. */ pm_runtime_get_sync(dev); pci_dev->driver = pci_drv; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 0b2be174d981..8361d27e5eca 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -676,15 +676,20 @@ static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus) static void pci_set_bus_msi_domain(struct pci_bus *bus) { struct irq_domain *d; + struct pci_bus *b; /* - * Either bus is the root, and we must obtain it from the - * firmware, or we inherit it from the bridge device. + * The bus can be a root bus, a subordinate bus, or a virtual bus + * created by an SR-IOV device. Walk up to the first bridge device + * found or derive the domain from the host bridge. */ - if (pci_is_root_bus(bus)) - d = pci_host_bridge_msi_domain(bus); - else - d = dev_get_msi_domain(&bus->self->dev); + for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) { + if (b->self) + d = dev_get_msi_domain(&b->self->dev); + } + + if (!d) + d = pci_host_bridge_msi_domain(b); dev_set_msi_domain(&bus->dev, d); } @@ -855,9 +860,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) child->bridge_ctl = bctl; } - /* Read and initialize bridge resources */ - pci_read_bridge_bases(child); - cmax = pci_scan_child_bus(child); if (cmax > subordinate) dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n", @@ -918,9 +920,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) if (!is_cardbus) { child->bridge_ctl = bctl; - - /* Read and initialize bridge resources */ - pci_read_bridge_bases(child); max = pci_scan_child_bus(child); } else { /* diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 6a30252cd79f..b03373fd05ca 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1907,11 +1907,27 @@ static void quirk_netmos(struct pci_dev *dev) DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos); +/* + * Quirk non-zero PCI functions to route VPD access through function 0 for + * devices that share VPD resources between functions. The functions are + * expected to be identical devices. 
+ */ static void quirk_f0_vpd_link(struct pci_dev *dev) { - if (!dev->multifunction || !PCI_FUNC(dev->devfn)) + struct pci_dev *f0; + + if (!PCI_FUNC(dev->devfn)) return; - dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0; + + f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); + if (!f0) + return; + + if (f0->vpd && dev->class == f0->class && + dev->vendor == f0->vendor && dev->device == f0->device) + dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0; + + pci_dev_put(f0); } DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link); diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 2365a32a595e..be3755c973e9 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -823,9 +823,15 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu) } /* Now look up the logical CPU number */ - for_each_possible_cpu(cpu) - if (dn == of_cpu_device_node_get(cpu)) + for_each_possible_cpu(cpu) { + struct device_node *cpu_dn; + + cpu_dn = of_cpu_device_node_get(cpu); + of_node_put(cpu_dn); + + if (dn == cpu_dn) break; + } if (cpu >= nr_cpu_ids) { pr_warn("Failed to find logical CPU for %s\n", diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c index 0062027afb1e..77a2e054fdea 100644 --- a/drivers/phy/phy-berlin-sata.c +++ b/drivers/phy/phy-berlin-sata.c @@ -276,6 +276,7 @@ static const struct of_device_id phy_berlin_sata_of_match[] = { { .compatible = "marvell,berlin2q-sata-phy" }, { }, }; +MODULE_DEVICE_TABLE(of, phy_berlin_sata_of_match); static struct platform_driver phy_berlin_sata_driver = { .probe = phy_berlin_sata_probe, diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c index 49a1ed0cef56..107cb57c3513 100644 --- a/drivers/phy/phy-qcom-ufs.c +++ b/drivers/phy/phy-qcom-ufs.c @@ -432,6 +432,7 @@ out_disable_src: out: return ret; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk); static int ufs_qcom_phy_disable_vreg(struct phy *phy, @@ -474,6 +475,7 @@ void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy) phy->is_ref_clk_enabled = false; } } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk); #define UFS_REF_CLK_EN (1 << 5) @@ -517,11 +519,13 @@ void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy) { ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true); } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk); void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy) { ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false); } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk); /* Turn ON M-PHY RMMI interface clocks */ int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy) @@ -550,6 +554,7 @@ int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy) out: return ret; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk); /* Turn OFF M-PHY RMMI interface clocks */ void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy) @@ -562,6 +567,7 @@ void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy) phy->is_iface_clk_enabled = false; } } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk); int ufs_qcom_phy_start_serdes(struct phy *generic_phy) { @@ -578,6 +584,7 @@ int ufs_qcom_phy_start_serdes(struct phy *generic_phy) return ret; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes); int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes) { @@ -595,6 +602,7 @@ int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes) return ret; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable); void ufs_qcom_phy_save_controller_version(struct phy *generic_phy, u8 
major, u16 minor, u16 step) @@ -605,6 +613,7 @@ void ufs_qcom_phy_save_controller_version(struct phy *generic_phy, ufs_qcom_phy->host_ctrl_rev_minor = minor; ufs_qcom_phy->host_ctrl_rev_step = step; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version); int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B) { @@ -625,6 +634,7 @@ int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B) return ret; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy); int ufs_qcom_phy_remove(struct phy *generic_phy, struct ufs_qcom_phy *ufs_qcom_phy) @@ -662,6 +672,7 @@ int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy) return ufs_qcom_phy->phy_spec_ops-> is_physical_coding_sublayer_ready(ufs_qcom_phy); } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready); int ufs_qcom_phy_power_on(struct phy *generic_phy) { diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c index 5a5c073e72fe..91d6f342c565 100644 --- a/drivers/phy/phy-rockchip-usb.c +++ b/drivers/phy/phy-rockchip-usb.c @@ -98,6 +98,7 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev) struct device_node *child; struct regmap *grf; unsigned int reg_offset; + int err; grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); if (IS_ERR(grf)) { @@ -129,6 +130,11 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev) return PTR_ERR(rk_phy->phy); } phy_set_drvdata(rk_phy->phy, rk_phy); + + /* only power up usb phy when it use, so disable it when init*/ + err = rockchip_usb_phy_power(rk_phy, 1); + if (err) + return err; } phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c index 7d9482bf8252..1ca783098e47 100644 --- a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c +++ b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c @@ -143,7 +143,7 @@ static inline bool cygnus_get_bit(struct cygnus_gpio *chip, unsigned int reg, return !!(readl(chip->base + offset) & BIT(shift)); } -static void cygnus_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void cygnus_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct cygnus_gpio *chip = to_cygnus_gpio(gc); diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 69723e07036b..9638a00c67c2 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -349,6 +349,9 @@ static bool pinctrl_ready_for_gpio_range(unsigned gpio) struct pinctrl_gpio_range *range = NULL; struct gpio_chip *chip = gpio_to_chip(gpio); + if (WARN(!chip, "no gpio_chip for gpio%i?", gpio)) + return false; + mutex_lock(&pinctrldev_list_mutex); /* Loop over the pin controllers */ diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c index faf635654312..293ed4381cc0 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx25.c +++ b/drivers/pinctrl/freescale/pinctrl-imx25.c @@ -26,7 +26,8 @@ #include "pinctrl-imx.h" enum imx25_pads { - MX25_PAD_RESERVE0 = 1, + MX25_PAD_RESERVE0 = 0, + MX25_PAD_RESERVE1 = 1, MX25_PAD_A10 = 2, MX25_PAD_A13 = 3, MX25_PAD_A14 = 4, @@ -169,6 +170,7 @@ enum imx25_pads { /* Pad names for the pinmux subsystem */ static const struct pinctrl_pin_desc imx25_pinctrl_pads[] = { IMX_PINCTRL_PIN(MX25_PAD_RESERVE0), + IMX_PINCTRL_PIN(MX25_PAD_RESERVE1), IMX_PINCTRL_PIN(MX25_PAD_A10), IMX_PINCTRL_PIN(MX25_PAD_A13), IMX_PINCTRL_PIN(MX25_PAD_A14), diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c 
b/drivers/pinctrl/intel/pinctrl-baytrail.c index dac4865f3203..f79ea430f651 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -425,7 +425,7 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) } } -static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc) +static void byt_gpio_irq_handler(struct irq_desc *desc) { struct irq_data *data = irq_desc_get_irq_data(desc); struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc)); diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 2d5d3ddc36e5..270c127e03ea 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1414,7 +1414,7 @@ static struct irq_chip chv_gpio_irqchip = { .flags = IRQCHIP_SKIP_SET_WAKE, }; -static void chv_gpio_irq_handler(unsigned irq, struct irq_desc *desc) +static void chv_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc); diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index bb377c110541..54848b8decef 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -836,7 +836,7 @@ static void intel_gpio_community_irq_handler(struct gpio_chip *gc, } } -static void intel_gpio_irq_handler(unsigned irq, struct irq_desc *desc) +static void intel_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc); diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index 7726c6caaf83..1b22f96ba839 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c @@ -1190,7 +1190,7 @@ mtk_eint_debounce_process(struct mtk_pinctrl *pctl, int index) } } -static void mtk_eint_irq_handler(unsigned irq, struct irq_desc *desc) +static void mtk_eint_irq_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct mtk_pinctrl *pctl = irq_desc_get_handler_data(desc); diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index 352ede13a9e9..96cf03908e93 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c @@ -860,7 +860,7 @@ static void __nmk_gpio_irq_handler(struct irq_desc *desc, u32 status) chained_irq_exit(host_chip, desc); } -static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void nmk_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *chip = irq_desc_get_handler_data(desc); struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); @@ -873,7 +873,7 @@ static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) __nmk_gpio_irq_handler(desc, status); } -static void nmk_gpio_latent_irq_handler(unsigned int irq, struct irq_desc *desc) +static void nmk_gpio_latent_irq_handler(struct irq_desc *desc) { struct gpio_chip *chip = irq_desc_get_handler_data(desc); struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c index a5976ebc4482..f6be68518c87 100644 --- a/drivers/pinctrl/pinctrl-adi2.c +++ b/drivers/pinctrl/pinctrl-adi2.c @@ -530,8 +530,7 @@ static inline void preflow_handler(struct 
irq_desc *desc) static inline void preflow_handler(struct irq_desc *desc) { } #endif -static void adi_gpio_handle_pint_irq(unsigned int inta_irq, - struct irq_desc *desc) +static void adi_gpio_handle_pint_irq(struct irq_desc *desc) { u32 request; u32 level_mask, hwirq; diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 5e86bb8ca80e..3318f1d6193c 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -492,15 +492,15 @@ static struct irq_chip amd_gpio_irqchip = { .irq_set_type = amd_gpio_irq_set_type, }; -static void amd_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void amd_gpio_irq_handler(struct irq_desc *desc) { - unsigned int irq = irq_desc_get_irq(desc); u32 i; u32 off; u32 reg; u32 pin_reg; u64 reg64; int handled = 0; + unsigned int irq; unsigned long flags; struct irq_chip *chip = irq_desc_get_chip(desc); struct gpio_chip *gc = irq_desc_get_handler_data(desc); @@ -541,7 +541,7 @@ static void amd_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc) } if (handled == 0) - handle_bad_irq(irq, desc); + handle_bad_irq(desc); spin_lock_irqsave(&gpio_dev->lock, flags); reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG); diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index bae0012ee356..b0fde0f385e6 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -1585,7 +1585,7 @@ static struct irq_chip gpio_irqchip = { .irq_set_wake = gpio_irq_set_wake, }; -static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) +static void gpio_irq_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct gpio_chip *gpio_chip = irq_desc_get_handler_data(desc); diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c index 3731cc67a88b..9c9b88934bcc 100644 --- a/drivers/pinctrl/pinctrl-coh901.c +++ b/drivers/pinctrl/pinctrl-coh901.c @@ -519,7 +519,7 @@ static struct irq_chip u300_gpio_irqchip = { .irq_set_type = u300_gpio_irq_type, }; -static void u300_gpio_irq_handler(unsigned __irq, struct irq_desc *desc) +static void u300_gpio_irq_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct irq_chip *parent_chip = irq_desc_get_chip(desc); diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c index 461fffc4c62a..11f8b835d3b6 100644 --- a/drivers/pinctrl/pinctrl-digicolor.c +++ b/drivers/pinctrl/pinctrl-digicolor.c @@ -337,9 +337,9 @@ static int dc_pinctrl_probe(struct platform_device *pdev) pmap->dev = &pdev->dev; pmap->pctl = pinctrl_register(pctl_desc, &pdev->dev, pmap); - if (!pmap->pctl) { + if (IS_ERR(pmap->pctl)) { dev_err(&pdev->dev, "pinctrl driver registration failed\n"); - return -EINVAL; + return PTR_ERR(pmap->pctl); } ret = dc_gpiochip_add(pmap, pdev->dev.of_node); diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c index 3dc2ae15f3a1..952b1c623887 100644 --- a/drivers/pinctrl/pinctrl-pistachio.c +++ b/drivers/pinctrl/pinctrl-pistachio.c @@ -1303,20 +1303,18 @@ static int pistachio_gpio_irq_set_type(struct irq_data *data, unsigned int type) } if (type & IRQ_TYPE_LEVEL_MASK) - __irq_set_handler_locked(data->irq, handle_level_irq); + irq_set_handler_locked(data, handle_level_irq); else - __irq_set_handler_locked(data->irq, handle_edge_irq); + irq_set_handler_locked(data, handle_edge_irq); return 0; } -static void pistachio_gpio_irq_handler(unsigned int __irq, - struct irq_desc *desc) +static void 
pistachio_gpio_irq_handler(struct irq_desc *desc) { - unsigned int irq = irq_desc_get_irq(desc); struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct pistachio_gpio_bank *bank = gc_to_bank(gc); - struct irq_chip *chip = irq_get_chip(irq); + struct irq_chip *chip = irq_desc_get_chip(desc); unsigned long pending; unsigned int pin; diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index c5246c05f70c..88bb707e107a 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -1475,7 +1475,7 @@ static const struct gpio_chip rockchip_gpiolib_chip = { * Interrupt handling */ -static void rockchip_irq_demux(unsigned int __irq, struct irq_desc *desc) +static void rockchip_irq_demux(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct rockchip_pin_bank *bank = irq_desc_get_handler_data(desc); diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index bf548c2a7a9d..ef04b962c3d5 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -1679,7 +1679,7 @@ static irqreturn_t pcs_irq_handler(int irq, void *d) * Use this if you have a separate interrupt for each * pinctrl-single instance. */ -static void pcs_irq_chain_handler(unsigned int irq, struct irq_desc *desc) +static void pcs_irq_chain_handler(struct irq_desc *desc) { struct pcs_soc_data *pcs_soc = irq_desc_get_handler_data(desc); struct irq_chip *chip; diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c index f8338d2e6b6b..389526e704fb 100644 --- a/drivers/pinctrl/pinctrl-st.c +++ b/drivers/pinctrl/pinctrl-st.c @@ -1460,7 +1460,7 @@ static void __gpio_irq_handler(struct st_gpio_bank *bank) } } -static void st_gpio_irq_handler(unsigned irq, struct irq_desc *desc) +static void st_gpio_irq_handler(struct irq_desc *desc) { /* interrupt dedicated per bank */ struct irq_chip *chip = irq_desc_get_chip(desc); @@ -1472,7 +1472,7 @@ static void st_gpio_irq_handler(unsigned irq, struct irq_desc *desc) chained_irq_exit(chip, desc); } -static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc) +static void st_gpio_irqmux_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct st_pinctrl *info = irq_desc_get_handler_data(desc); diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c index 67e08cb315c4..29984b36926a 100644 --- a/drivers/pinctrl/pinmux.c +++ b/drivers/pinctrl/pinmux.c @@ -313,8 +313,7 @@ static int pinmux_func_name_to_selector(struct pinctrl_dev *pctldev, /* See if this pctldev has this function */ while (selector < nfuncs) { - const char *fname = ops->get_function_name(pctldev, - selector); + const char *fname = ops->get_function_name(pctldev, selector); if (!strcmp(function, fname)) return selector; diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 492cdd51dc5c..a0c7407c1cac 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -765,9 +765,8 @@ static struct irq_chip msm_gpio_irq_chip = { .irq_set_wake = msm_gpio_irq_set_wake, }; -static void msm_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc) +static void msm_gpio_irq_handler(struct irq_desc *desc) { - unsigned int irq = irq_desc_get_irq(desc); struct gpio_chip *gc = irq_desc_get_handler_data(desc); const struct msm_pingroup *g; struct msm_pinctrl *pctrl = to_msm_pinctrl(gc); @@ -795,7 +794,7 @@ static void msm_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc) 
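For orientation, the pinctrl conversions above and below all follow one pattern: chained IRQ flow handlers drop the unsigned int irq argument and recover everything from the irq_desc instead, and handle_bad_irq() likewise now takes only the descriptor. Below is a minimal sketch of a GPIO demultiplex handler in the new style; the foo_* names and the FOO_IRQ_STATUS register are hypothetical placeholders, while irq_desc_get_chip(), irq_desc_get_handler_data(), chained_irq_enter()/chained_irq_exit(), irq_find_mapping() and generic_handle_irq() are the helpers the converted drivers themselves use.

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>

/* hypothetical driver state, standing in for the per-driver structs above */
struct foo_gpio {
	void __iomem *base;
	struct irq_domain *domain;
	unsigned int ngpio;
};

#define FOO_IRQ_STATUS	0x10	/* hypothetical pending-status register */

static void foo_gpio_irq_handler(struct irq_desc *desc)
{
	/* everything comes from the descriptor, not from an irq number */
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct foo_gpio *fg = irq_desc_get_handler_data(desc);
	unsigned long pending;
	unsigned int offset;

	chained_irq_enter(chip, desc);

	pending = readl(fg->base + FOO_IRQ_STATUS);
	if (!pending) {
		handle_bad_irq(desc);	/* no irq argument any more */
	} else {
		for_each_set_bit(offset, &pending, fg->ngpio)
			generic_handle_irq(irq_find_mapping(fg->domain,
							    offset));
	}

	chained_irq_exit(chip, desc);
}

Registration is unchanged, e.g. irq_set_chained_handler_and_data(parent_irq, foo_gpio_irq_handler, fg); only the handler prototype and the handle_bad_irq() call sites move to the descriptor-based form.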
/* No interrupts were flagged */ if (handled == 0) - handle_bad_irq(irq, desc); + handle_bad_irq(desc); chained_irq_exit(chip, desc); } diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c index c978b311031b..e1a3721bc8e5 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c @@ -723,9 +723,9 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev) #endif pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl); - if (!pctrl->pctrl) { + if (IS_ERR(pctrl->pctrl)) { dev_err(&pdev->dev, "couldn't register pm8xxx gpio driver\n"); - return -ENODEV; + return PTR_ERR(pctrl->pctrl); } pctrl->chip = pm8xxx_gpio_template; diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c index 2d1b69f171be..6652b8d7f707 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c @@ -814,9 +814,9 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev) #endif pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl); - if (!pctrl->pctrl) { + if (IS_ERR(pctrl->pctrl)) { dev_err(&pdev->dev, "couldn't register pm8xxx mpp driver\n"); - return -ENODEV; + return PTR_ERR(pctrl->pctrl); } pctrl->chip = pm8xxx_mpp_template; diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index 5f45caaef46d..71ccf6a90b22 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -419,7 +419,7 @@ static const struct of_device_id exynos_wkup_irq_ids[] = { }; /* interrupt handler for wakeup interrupts 0..15 */ -static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc) +static void exynos_irq_eint0_15(struct irq_desc *desc) { struct exynos_weint_data *eintd = irq_desc_get_handler_data(desc); struct samsung_pin_bank *bank = eintd->bank; @@ -451,7 +451,7 @@ static inline void exynos_irq_demux_eint(unsigned long pend, } /* interrupt handler for wakeup interrupt 16 */ -static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc) +static void exynos_irq_demux_eint16_31(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc); diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c index 019844d479bb..3d92f827da7a 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c @@ -240,7 +240,7 @@ static struct irq_chip s3c2410_eint0_3_chip = { .irq_set_type = s3c24xx_eint_type, }; -static void s3c2410_demux_eint0_3(unsigned int irq, struct irq_desc *desc) +static void s3c2410_demux_eint0_3(struct irq_desc *desc) { struct irq_data *data = irq_desc_get_irq_data(desc); struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc); @@ -295,7 +295,7 @@ static struct irq_chip s3c2412_eint0_3_chip = { .irq_set_type = s3c24xx_eint_type, }; -static void s3c2412_demux_eint0_3(unsigned int irq, struct irq_desc *desc) +static void s3c2412_demux_eint0_3(struct irq_desc *desc) { struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc); struct irq_data *data = irq_desc_get_irq_data(desc); @@ -361,7 +361,7 @@ static inline void s3c24xx_demux_eint(struct irq_desc *desc, u32 offset, u32 range) { struct s3c24xx_eint_data *data = irq_desc_get_handler_data(desc); - struct irq_chip *chip = irq_desc_get_irq_chip(desc); + struct irq_chip *chip = 
irq_desc_get_chip(desc); struct samsung_pinctrl_drv_data *d = data->drvdata; unsigned int pend, mask; @@ -388,12 +388,12 @@ static inline void s3c24xx_demux_eint(struct irq_desc *desc, chained_irq_exit(chip, desc); } -static void s3c24xx_demux_eint4_7(unsigned int irq, struct irq_desc *desc) +static void s3c24xx_demux_eint4_7(struct irq_desc *desc) { s3c24xx_demux_eint(desc, 0, 0xf0); } -static void s3c24xx_demux_eint8_23(unsigned int irq, struct irq_desc *desc) +static void s3c24xx_demux_eint8_23(struct irq_desc *desc) { s3c24xx_demux_eint(desc, 8, 0xffff00); } diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c index f5ea40a69711..43407ab248f5 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c @@ -407,7 +407,7 @@ static const struct irq_domain_ops s3c64xx_gpio_irqd_ops = { .xlate = irq_domain_xlate_twocell, }; -static void s3c64xx_eint_gpio_irq(unsigned int irq, struct irq_desc *desc) +static void s3c64xx_eint_gpio_irq(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct s3c64xx_eint_gpio_data *data = irq_desc_get_handler_data(desc); @@ -631,22 +631,22 @@ static inline void s3c64xx_irq_demux_eint(struct irq_desc *desc, u32 range) chained_irq_exit(chip, desc); } -static void s3c64xx_demux_eint0_3(unsigned int irq, struct irq_desc *desc) +static void s3c64xx_demux_eint0_3(struct irq_desc *desc) { s3c64xx_irq_demux_eint(desc, 0xf); } -static void s3c64xx_demux_eint4_11(unsigned int irq, struct irq_desc *desc) +static void s3c64xx_demux_eint4_11(struct irq_desc *desc) { s3c64xx_irq_demux_eint(desc, 0xff0); } -static void s3c64xx_demux_eint12_19(unsigned int irq, struct irq_desc *desc) +static void s3c64xx_demux_eint12_19(struct irq_desc *desc) { s3c64xx_irq_demux_eint(desc, 0xff000); } -static void s3c64xx_demux_eint20_27(unsigned int irq, struct irq_desc *desc) +static void s3c64xx_demux_eint20_27(struct irq_desc *desc) { s3c64xx_irq_demux_eint(desc, 0xff00000); } diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c index 9df0c5f25824..0d24d9e4b70c 100644 --- a/drivers/pinctrl/sirf/pinctrl-atlas7.c +++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c @@ -4489,7 +4489,7 @@ static struct irq_chip atlas7_gpio_irq_chip = { .irq_set_type = atlas7_gpio_irq_type, }; -static void atlas7_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc) +static void atlas7_gpio_handle_irq(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct atlas7_gpio_chip *a7gc = to_atlas7_gpio(gc); @@ -4512,7 +4512,7 @@ static void atlas7_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc) if (!status) { pr_warn("%s: gpio [%s] status %#x no interrupt is flaged\n", __func__, gc->label, status); - handle_bad_irq(irq, desc); + handle_bad_irq(desc); return; } diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c index f8bd9fb52033..2a8d69725de8 100644 --- a/drivers/pinctrl/sirf/pinctrl-sirf.c +++ b/drivers/pinctrl/sirf/pinctrl-sirf.c @@ -545,7 +545,7 @@ static struct irq_chip sirfsoc_irq_chip = { .irq_set_type = sirfsoc_gpio_irq_type, }; -static void sirfsoc_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc) +static void sirfsoc_gpio_handle_irq(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct gpio_chip *gc = irq_desc_get_handler_data(desc); @@ -570,7 +570,7 @@ static void sirfsoc_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc) printk(KERN_WARNING "%s: 
gpio id %d status %#x no interrupt is flagged\n", __func__, bank->id, status); - handle_bad_irq(irq, desc); + handle_bad_irq(desc); return; } diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c index ae8f29fb5536..1f0af250dbb5 100644 --- a/drivers/pinctrl/spear/pinctrl-plgpio.c +++ b/drivers/pinctrl/spear/pinctrl-plgpio.c @@ -356,7 +356,7 @@ static struct irq_chip plgpio_irqchip = { .irq_set_type = plgpio_irq_set_type, }; -static void plgpio_irq_handler(unsigned irq, struct irq_desc *desc) +static void plgpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct plgpio *plgpio = container_of(gc, struct plgpio, chip); diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c b/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c index 63676617bc59..f9a3f8f446f7 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c @@ -653,7 +653,7 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = { SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "spi1"), /* CS1 */ - SUNXI_FUNCTION(0x3, "uart3"), /* PWM1 */ + SUNXI_FUNCTION(0x3, "pwm"), /* PWM1 */ SUNXI_FUNCTION(0x5, "uart2"), /* CTS */ SUNXI_FUNCTION_IRQ(0x6, 13)), /* EINT13 */ }; diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index fb4669c0ce0e..38e0c7bdd2ac 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -617,13 +617,11 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type) spin_lock_irqsave(&pctl->lock, flags); if (type & IRQ_TYPE_LEVEL_MASK) - __irq_set_chip_handler_name_locked(d->irq, - &sunxi_pinctrl_level_irq_chip, - handle_fasteoi_irq, NULL); + irq_set_chip_handler_name_locked(d, &sunxi_pinctrl_level_irq_chip, + handle_fasteoi_irq, NULL); else - __irq_set_chip_handler_name_locked(d->irq, - &sunxi_pinctrl_edge_irq_chip, - handle_edge_irq, NULL); + irq_set_chip_handler_name_locked(d, &sunxi_pinctrl_edge_irq_chip, + handle_edge_irq, NULL); regval = readl(pctl->membase + reg); regval &= ~(IRQ_CFG_IRQ_MASK << index); @@ -742,7 +740,7 @@ static struct irq_domain_ops sunxi_pinctrl_irq_domain_ops = { .xlate = sunxi_pinctrl_irq_of_xlate, }; -static void sunxi_pinctrl_irq_handler(unsigned __irq, struct irq_desc *desc) +static void sunxi_pinctrl_irq_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct irq_chip *chip = irq_desc_get_chip(desc); diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c b/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c index 7e9dae54fcb2..2df8bbecebfc 100644 --- a/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c +++ b/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c @@ -22,49 +22,49 @@ #define DRIVER_NAME "ph1-sld8-pinctrl" static const struct pinctrl_pin_desc ph1_sld8_pins[] = { - UNIPHIER_PINCTRL_PIN(0, "PCA00", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(0, "PCA00", 0, 15, UNIPHIER_PIN_DRV_4_8, 15, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(1, "PCA01", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(1, "PCA01", 0, 16, UNIPHIER_PIN_DRV_4_8, 16, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(2, "PCA02", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(2, "PCA02", 0, 17, UNIPHIER_PIN_DRV_4_8, 17, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(3, "PCA03", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(3, "PCA03", 0, 18, UNIPHIER_PIN_DRV_4_8, 18, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(4, "PCA04", 
UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(4, "PCA04", 0, 19, UNIPHIER_PIN_DRV_4_8, 19, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(5, "PCA05", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(5, "PCA05", 0, 20, UNIPHIER_PIN_DRV_4_8, 20, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(6, "PCA06", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(6, "PCA06", 0, 21, UNIPHIER_PIN_DRV_4_8, 21, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(7, "PCA07", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(7, "PCA07", 0, 22, UNIPHIER_PIN_DRV_4_8, 22, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(8, "PCA08", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(8, "PCA08", 0, 23, UNIPHIER_PIN_DRV_4_8, 23, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(9, "PCA09", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(9, "PCA09", 0, 24, UNIPHIER_PIN_DRV_4_8, 24, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(10, "PCA10", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(10, "PCA10", 0, 25, UNIPHIER_PIN_DRV_4_8, 25, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(11, "PCA11", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(11, "PCA11", 0, 26, UNIPHIER_PIN_DRV_4_8, 26, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(12, "PCA12", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(12, "PCA12", 0, 27, UNIPHIER_PIN_DRV_4_8, 27, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(13, "PCA13", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(13, "PCA13", 0, 28, UNIPHIER_PIN_DRV_4_8, 28, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(14, "PCA14", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(14, "PCA14", 0, 29, UNIPHIER_PIN_DRV_4_8, 29, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(15, "XNFRE_GB", UNIPHIER_PIN_IECTRL_NONE, @@ -118,199 +118,199 @@ static const struct pinctrl_pin_desc ph1_sld8_pins[] = { UNIPHIER_PINCTRL_PIN(31, "NFD7_GB", UNIPHIER_PIN_IECTRL_NONE, 36, UNIPHIER_PIN_DRV_8_12_16_20, 128, UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(32, "SDCLK", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(32, "SDCLK", 8, 40, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(33, "SDCMD", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(33, "SDCMD", 8, 44, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(34, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(34, "SDDAT0", 8, 48, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(35, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(35, "SDDAT1", 8, 52, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(36, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(36, "SDDAT2", 8, 56, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(37, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(37, "SDDAT3", 8, 60, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(38, "SDCD", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(38, "SDCD", 8, -1, UNIPHIER_PIN_DRV_FIXED_4, 129, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(39, "SDWP", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(39, "SDWP", 8, -1, UNIPHIER_PIN_DRV_FIXED_4, 130, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(40, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(40, "SDVOLC", 9, -1, UNIPHIER_PIN_DRV_FIXED_4, 131, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(41, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(41, "USB0VBUS", 0, 37, UNIPHIER_PIN_DRV_4_8, 37, 
UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(42, "USB0OD", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(42, "USB0OD", 0, 38, UNIPHIER_PIN_DRV_4_8, 38, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(43, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(43, "USB1VBUS", 0, 39, UNIPHIER_PIN_DRV_4_8, 39, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(44, "USB1OD", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(44, "USB1OD", 0, 40, UNIPHIER_PIN_DRV_4_8, 40, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(45, "PCRESET", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(45, "PCRESET", 0, 41, UNIPHIER_PIN_DRV_4_8, 41, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(46, "PCREG", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(46, "PCREG", 0, 42, UNIPHIER_PIN_DRV_4_8, 42, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(47, "PCCE2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(47, "PCCE2", 0, 43, UNIPHIER_PIN_DRV_4_8, 43, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(48, "PCVS1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(48, "PCVS1", 0, 44, UNIPHIER_PIN_DRV_4_8, 44, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(49, "PCCD2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(49, "PCCD2", 0, 45, UNIPHIER_PIN_DRV_4_8, 45, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(50, "PCCD1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(50, "PCCD1", 0, 46, UNIPHIER_PIN_DRV_4_8, 46, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(51, "PCREADY", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(51, "PCREADY", 0, 47, UNIPHIER_PIN_DRV_4_8, 47, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(52, "PCDOE", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(52, "PCDOE", 0, 48, UNIPHIER_PIN_DRV_4_8, 48, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(53, "PCCE1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(53, "PCCE1", 0, 49, UNIPHIER_PIN_DRV_4_8, 49, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(54, "PCWE", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(54, "PCWE", 0, 50, UNIPHIER_PIN_DRV_4_8, 50, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(55, "PCOE", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(55, "PCOE", 0, 51, UNIPHIER_PIN_DRV_4_8, 51, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(56, "PCWAIT", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(56, "PCWAIT", 0, 52, UNIPHIER_PIN_DRV_4_8, 52, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(57, "PCIOWR", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(57, "PCIOWR", 0, 53, UNIPHIER_PIN_DRV_4_8, 53, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(58, "PCIORD", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(58, "PCIORD", 0, 54, UNIPHIER_PIN_DRV_4_8, 54, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(59, "HS0DIN0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(59, "HS0DIN0", 0, 55, UNIPHIER_PIN_DRV_4_8, 55, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(60, "HS0DIN1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(60, "HS0DIN1", 0, 56, UNIPHIER_PIN_DRV_4_8, 56, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(61, "HS0DIN2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(61, "HS0DIN2", 0, 57, UNIPHIER_PIN_DRV_4_8, 57, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(62, "HS0DIN3", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(62, "HS0DIN3", 0, 58, UNIPHIER_PIN_DRV_4_8, 58, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(63, "HS0DIN4", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(63, "HS0DIN4", 0, 59, UNIPHIER_PIN_DRV_4_8, 59, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(64, "HS0DIN5", 
UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(64, "HS0DIN5", 0, 60, UNIPHIER_PIN_DRV_4_8, 60, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(65, "HS0DIN6", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(65, "HS0DIN6", 0, 61, UNIPHIER_PIN_DRV_4_8, 61, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(66, "HS0DIN7", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(66, "HS0DIN7", 0, 62, UNIPHIER_PIN_DRV_4_8, 62, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(67, "HS0BCLKIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(67, "HS0BCLKIN", 0, 63, UNIPHIER_PIN_DRV_4_8, 63, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(68, "HS0VALIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(68, "HS0VALIN", 0, 64, UNIPHIER_PIN_DRV_4_8, 64, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(69, "HS0SYNCIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(69, "HS0SYNCIN", 0, 65, UNIPHIER_PIN_DRV_4_8, 65, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(70, "HSDOUT0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(70, "HSDOUT0", 0, 66, UNIPHIER_PIN_DRV_4_8, 66, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(71, "HSDOUT1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(71, "HSDOUT1", 0, 67, UNIPHIER_PIN_DRV_4_8, 67, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(72, "HSDOUT2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(72, "HSDOUT2", 0, 68, UNIPHIER_PIN_DRV_4_8, 68, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(73, "HSDOUT3", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(73, "HSDOUT3", 0, 69, UNIPHIER_PIN_DRV_4_8, 69, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(74, "HSDOUT4", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(74, "HSDOUT4", 0, 70, UNIPHIER_PIN_DRV_4_8, 70, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(75, "HSDOUT5", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(75, "HSDOUT5", 0, 71, UNIPHIER_PIN_DRV_4_8, 71, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(76, "HSDOUT6", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(76, "HSDOUT6", 0, 72, UNIPHIER_PIN_DRV_4_8, 72, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(77, "HSDOUT7", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(77, "HSDOUT7", 0, 73, UNIPHIER_PIN_DRV_4_8, 73, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(78, "HSBCLKOUT", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(78, "HSBCLKOUT", 0, 74, UNIPHIER_PIN_DRV_4_8, 74, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(79, "HSVALOUT", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(79, "HSVALOUT", 0, 75, UNIPHIER_PIN_DRV_4_8, 75, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(80, "HSSYNCOUT", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(80, "HSSYNCOUT", 0, 76, UNIPHIER_PIN_DRV_4_8, 76, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(81, "HS1DIN0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(81, "HS1DIN0", 0, 77, UNIPHIER_PIN_DRV_4_8, 77, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(82, "HS1DIN1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(82, "HS1DIN1", 0, 78, UNIPHIER_PIN_DRV_4_8, 78, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(83, "HS1DIN2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(83, "HS1DIN2", 0, 79, UNIPHIER_PIN_DRV_4_8, 79, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(84, "HS1DIN3", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(84, "HS1DIN3", 0, 80, UNIPHIER_PIN_DRV_4_8, 80, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(85, "HS1DIN4", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(85, "HS1DIN4", 0, 81, UNIPHIER_PIN_DRV_4_8, 81, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(86, "HS1DIN5", 
UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(86, "HS1DIN5", 0, 82, UNIPHIER_PIN_DRV_4_8, 82, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(87, "HS1DIN6", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(87, "HS1DIN6", 0, 83, UNIPHIER_PIN_DRV_4_8, 83, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(88, "HS1DIN7", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(88, "HS1DIN7", 0, 84, UNIPHIER_PIN_DRV_4_8, 84, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(89, "HS1BCLKIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(89, "HS1BCLKIN", 0, 85, UNIPHIER_PIN_DRV_4_8, 85, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(90, "HS1VALIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(90, "HS1VALIN", 0, 86, UNIPHIER_PIN_DRV_4_8, 86, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(91, "HS1SYNCIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(91, "HS1SYNCIN", 0, 87, UNIPHIER_PIN_DRV_4_8, 87, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(92, "AGCI", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(92, "AGCI", 3, -1, UNIPHIER_PIN_DRV_FIXED_4, 132, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(93, "AGCR", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(93, "AGCR", 4, -1, UNIPHIER_PIN_DRV_FIXED_4, 133, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(94, "AGCBS", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(94, "AGCBS", 5, -1, UNIPHIER_PIN_DRV_FIXED_4, 134, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(95, "IECOUT", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(95, "IECOUT", 0, 88, UNIPHIER_PIN_DRV_4_8, 88, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(96, "ASMCK", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(96, "ASMCK", 0, 89, UNIPHIER_PIN_DRV_4_8, 89, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(97, "ABCKO", UNIPHIER_PIN_IECTRL_NONE, @@ -325,31 +325,31 @@ static const struct pinctrl_pin_desc ph1_sld8_pins[] = { UNIPHIER_PINCTRL_PIN(100, "ASDOUT1", UNIPHIER_PIN_IECTRL_NONE, 93, UNIPHIER_PIN_DRV_4_8, 93, UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(101, "ARCOUT", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(101, "ARCOUT", 0, 94, UNIPHIER_PIN_DRV_4_8, 94, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(102, "SDA0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(102, "SDA0", 10, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(103, "SCL0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(103, "SCL0", 10, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(104, "SDA1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(104, "SDA1", 11, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(105, "SCL1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(105, "SCL1", 11, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", 12, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", 12, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", 13, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", 13, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(110, "SBO0", UNIPHIER_PIN_IECTRL_NONE, @@ -358,76 +358,76 @@ static const struct 
pinctrl_pin_desc ph1_sld8_pins[] = { UNIPHIER_PINCTRL_PIN(111, "SBI0", UNIPHIER_PIN_IECTRL_NONE, 96, UNIPHIER_PIN_DRV_4_8, 96, UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(112, "SBO1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(112, "SBO1", 0, 97, UNIPHIER_PIN_DRV_4_8, 97, UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(113, "SBI1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(113, "SBI1", 0, 98, UNIPHIER_PIN_DRV_4_8, 98, UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(114, "TXD1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(114, "TXD1", 0, 99, UNIPHIER_PIN_DRV_4_8, 99, UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(115, "RXD1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(115, "RXD1", 0, 100, UNIPHIER_PIN_DRV_4_8, 100, UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(116, "HIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(116, "HIN", 1, -1, UNIPHIER_PIN_DRV_FIXED_5, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(117, "VIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(117, "VIN", 2, -1, UNIPHIER_PIN_DRV_FIXED_5, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(118, "TCON0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(118, "TCON0", 0, 101, UNIPHIER_PIN_DRV_4_8, 101, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(119, "TCON1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(119, "TCON1", 0, 102, UNIPHIER_PIN_DRV_4_8, 102, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(120, "TCON2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(120, "TCON2", 0, 103, UNIPHIER_PIN_DRV_4_8, 103, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(121, "TCON3", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(121, "TCON3", 0, 104, UNIPHIER_PIN_DRV_4_8, 104, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(122, "TCON4", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(122, "TCON4", 0, 105, UNIPHIER_PIN_DRV_4_8, 105, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(123, "TCON5", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(123, "TCON5", 0, 106, UNIPHIER_PIN_DRV_4_8, 106, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(124, "TCON6", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(124, "TCON6", 0, 107, UNIPHIER_PIN_DRV_4_8, 107, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(125, "TCON7", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(125, "TCON7", 0, 108, UNIPHIER_PIN_DRV_4_8, 108, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(126, "TCON8", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(126, "TCON8", 0, 109, UNIPHIER_PIN_DRV_4_8, 109, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(127, "PWMA", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(127, "PWMA", 0, 110, UNIPHIER_PIN_DRV_4_8, 110, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(128, "XIRQ0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(128, "XIRQ0", 0, 111, UNIPHIER_PIN_DRV_4_8, 111, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(129, "XIRQ1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(129, "XIRQ1", 0, 112, UNIPHIER_PIN_DRV_4_8, 112, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(130, "XIRQ2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(130, "XIRQ2", 0, 113, UNIPHIER_PIN_DRV_4_8, 113, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(131, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(131, "XIRQ3", 0, 114, UNIPHIER_PIN_DRV_4_8, 114, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(132, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(132, "XIRQ4", 0, 115, UNIPHIER_PIN_DRV_4_8, 115, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(133, "XIRQ5", UNIPHIER_PIN_IECTRL_NONE, + 
UNIPHIER_PINCTRL_PIN(133, "XIRQ5", 0, 116, UNIPHIER_PIN_DRV_4_8, 116, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(134, "XIRQ6", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(134, "XIRQ6", 0, 117, UNIPHIER_PIN_DRV_4_8, 117, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(135, "XIRQ7", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(135, "XIRQ7", 0, 118, UNIPHIER_PIN_DRV_4_8, 118, UNIPHIER_PIN_PULL_DOWN), }; diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index abdaed34c728..131fee2b093e 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -128,6 +128,24 @@ static const struct dmi_system_id asus_quirks[] = { }, { .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. X456UA", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X456UA"), + }, + .driver_data = &quirk_asus_wapf4, + }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. X456UF", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X456UF"), + }, + .driver_data = &quirk_asus_wapf4, + }, + { + .callback = dmi_matched, .ident = "ASUSTeK COMPUTER INC. X501U", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 06697315a088..fb4dd7b3ee71 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -54,8 +54,9 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4"); #define HPWMI_HARDWARE_QUERY 0x4 #define HPWMI_WIRELESS_QUERY 0x5 #define HPWMI_BIOS_QUERY 0x9 +#define HPWMI_FEATURE_QUERY 0xb #define HPWMI_HOTKEY_QUERY 0xc -#define HPWMI_FEATURE_QUERY 0xd +#define HPWMI_FEATURE2_QUERY 0xd #define HPWMI_WIRELESS2_QUERY 0x1b #define HPWMI_POSTCODEERROR_QUERY 0x2a @@ -295,25 +296,33 @@ static int hp_wmi_tablet_state(void) return (state & 0x4) ? 1 : 0; } -static int __init hp_wmi_bios_2009_later(void) +static int __init hp_wmi_bios_2008_later(void) { int state = 0; int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, 0, &state, sizeof(state), sizeof(state)); - if (ret) - return ret; + if (!ret) + return 1; - return (state & 0x10) ? 1 : 0; + return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO; } -static int hp_wmi_enable_hotkeys(void) +static int __init hp_wmi_bios_2009_later(void) { - int ret; - int query = 0x6e; + int state = 0; + int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, 0, &state, + sizeof(state), sizeof(state)); + if (!ret) + return 1; - ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query), - 0); + return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 
0 : -ENXIO; +} +static int __init hp_wmi_enable_hotkeys(void) +{ + int value = 0x6e; + int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &value, + sizeof(value), 0); if (ret) return -EINVAL; return 0; @@ -663,7 +672,7 @@ static int __init hp_wmi_input_setup(void) hp_wmi_tablet_state()); input_sync(hp_wmi_input_dev); - if (hp_wmi_bios_2009_later() == 4) + if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later()) hp_wmi_enable_hotkeys(); status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL); diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 6740c513919c..f2372f400ddb 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -938,7 +938,7 @@ static int toshiba_usb_sleep_music_get(struct toshiba_acpi_dev *dev, u32 *state) else if (result == TOS_NOT_SUPPORTED) return -ENODEV; - return result = TOS_SUCCESS ? 0 : -EIO; + return result == TOS_SUCCESS ? 0 : -EIO; } static int toshiba_usb_sleep_music_set(struct toshiba_acpi_dev *dev, u32 state) @@ -2398,11 +2398,9 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev) if (error) return error; - error = toshiba_hotkey_event_type_get(dev, &events_type); - if (error) { - pr_err("Unable to query Hotkey Event Type\n"); - return error; - } + if (toshiba_hotkey_event_type_get(dev, &events_type)) + pr_notice("Unable to query Hotkey Event Type\n"); + dev->hotkey_event_type = events_type; dev->hotkey_dev = input_allocate_device(); diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index aac47573f9ed..eb391a281833 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -194,34 +194,6 @@ static bool wmi_parse_guid(const u8 *src, u8 *dest) return true; } -/* - * Convert a raw GUID to the ACII string representation - */ -static int wmi_gtoa(const char *in, char *out) -{ - int i; - - for (i = 3; i >= 0; i--) - out += sprintf(out, "%02X", in[i] & 0xFF); - - out += sprintf(out, "-"); - out += sprintf(out, "%02X", in[5] & 0xFF); - out += sprintf(out, "%02X", in[4] & 0xFF); - out += sprintf(out, "-"); - out += sprintf(out, "%02X", in[7] & 0xFF); - out += sprintf(out, "%02X", in[6] & 0xFF); - out += sprintf(out, "-"); - out += sprintf(out, "%02X", in[8] & 0xFF); - out += sprintf(out, "%02X", in[9] & 0xFF); - out += sprintf(out, "-"); - - for (i = 10; i <= 15; i++) - out += sprintf(out, "%02X", in[i] & 0xFF); - - *out = '\0'; - return 0; -} - static bool find_guid(const char *guid_string, struct wmi_block **out) { char tmp[16], guid_input[16]; @@ -457,11 +429,7 @@ EXPORT_SYMBOL_GPL(wmi_set_block); static void wmi_dump_wdg(const struct guid_block *g) { - char guid_string[37]; - - wmi_gtoa(g->guid, guid_string); - - pr_info("%s:\n", guid_string); + pr_info("%pUL:\n", g->guid); pr_info("\tobject_id: %c%c\n", g->object_id[0], g->object_id[1]); pr_info("\tnotify_id: %02X\n", g->notify_id); pr_info("\treserved: %02X\n", g->reserved); @@ -661,7 +629,6 @@ EXPORT_SYMBOL_GPL(wmi_has_guid); static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { - char guid_string[37]; struct wmi_block *wblock; wblock = dev_get_drvdata(dev); @@ -670,9 +637,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, return strlen(buf); } - wmi_gtoa(wblock->gblock.guid, guid_string); - - return sprintf(buf, "wmi:%s\n", guid_string); + return sprintf(buf, "wmi:%pUL\n", wblock->gblock.guid); } static DEVICE_ATTR_RO(modalias); @@ -695,7 +660,7 @@ static int wmi_dev_uevent(struct device 
*dev, struct kobj_uevent_env *env) if (!wblock) return -ENOMEM; - wmi_gtoa(wblock->gblock.guid, guid_string); + sprintf(guid_string, "%pUL", wblock->gblock.guid); strcpy(&env->buf[env->buflen - 1], "wmi:"); memcpy(&env->buf[env->buflen - 1 + 4], guid_string, 36); @@ -721,12 +686,9 @@ static struct class wmi_class = { static int wmi_create_device(const struct guid_block *gblock, struct wmi_block *wblock, acpi_handle handle) { - char guid_string[37]; - wblock->dev.class = &wmi_class; - wmi_gtoa(gblock->guid, guid_string); - dev_set_name(&wblock->dev, "%s", guid_string); + dev_set_name(&wblock->dev, "%pUL", gblock->guid); dev_set_drvdata(&wblock->dev, wblock); @@ -877,7 +839,6 @@ static void acpi_wmi_notify(struct acpi_device *device, u32 event) struct guid_block *block; struct wmi_block *wblock; struct list_head *p; - char guid_string[37]; list_for_each(p, &wmi_block_list) { wblock = list_entry(p, struct wmi_block, list); @@ -888,8 +849,8 @@ static void acpi_wmi_notify(struct acpi_device *device, u32 event) if (wblock->handler) wblock->handler(event, wblock->handler_data); if (debug_event) { - wmi_gtoa(wblock->gblock.guid, guid_string); - pr_info("DEBUG Event GUID: %s\n", guid_string); + pr_info("DEBUG Event GUID: %pUL\n", + wblock->gblock.guid); } acpi_bus_generate_netlink_event( diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c index f4f2c1f76c32..74f2d3ff1d7c 100644 --- a/drivers/power/twl4030_charger.c +++ b/drivers/power/twl4030_charger.c @@ -91,7 +91,7 @@ #define TWL4030_MSTATEC_COMPLETE1 0x0b #define TWL4030_MSTATEC_COMPLETE4 0x0e -#if IS_ENABLED(CONFIG_TWL4030_MADC) +#if IS_REACHABLE(CONFIG_TWL4030_MADC) /* * If AC (Accessory Charger) voltage exceeds 4.5V (MADC 11) * then AC is available. @@ -1057,13 +1057,9 @@ static int twl4030_bci_probe(struct platform_device *pdev) phynode = of_find_compatible_node(bci->dev->of_node->parent, NULL, "ti,twl4030-usb"); - if (phynode) { + if (phynode) bci->transceiver = devm_usb_get_phy_by_node( bci->dev, phynode, &bci->usb_nb); - if (IS_ERR(bci->transceiver) && - PTR_ERR(bci->transceiver) == -EPROBE_DEFER) - return -EPROBE_DEFER; - } } /* Enable interrupts now. 
*/ diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c index 738adfa5332b..52ea605f8130 100644 --- a/drivers/regulator/anatop-regulator.c +++ b/drivers/regulator/anatop-regulator.c @@ -318,6 +318,7 @@ static const struct of_device_id of_anatop_regulator_match_tbl[] = { { .compatible = "fsl,anatop-regulator", }, { /* end */ } }; +MODULE_DEVICE_TABLE(of, of_anatop_regulator_match_tbl); static struct platform_driver anatop_regulator_driver = { .driver = { diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 01bf3476a791..a9567af7cec0 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -192,9 +192,9 @@ static const struct regulator_desc axp22x_regulators[] = { AXP_DESC(AXP22X, DCDC3, "dcdc3", "vin3", 600, 1860, 20, AXP22X_DCDC3_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)), AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20, - AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)), + AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(4)), AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50, - AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(4)), + AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)), /* secondary switchable output of DCDC1 */ AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", "dcdc1", 1600, 3400, 100, AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(7)), diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 7a85ac9e32c5..8a34f6acc801 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1394,15 +1394,19 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) return 0; r = regulator_dev_lookup(dev, rdev->supply_name, &ret); - if (ret == -ENODEV) { - /* - * No supply was specified for this regulator and - * there will never be one. - */ - return 0; - } - if (!r) { + if (ret == -ENODEV) { + /* + * No supply was specified for this regulator and + * there will never be one. + */ + return 0; + } + + /* Did the lookup explicitly defer for us? 
*/ + if (ret == -EPROBE_DEFER) + return ret; + if (have_full_constraints()) { r = dummy_regulator_rdev; } else { @@ -1422,11 +1426,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) return ret; /* Cascade always-on state to supply */ - if (_regulator_is_enabled(rdev)) { + if (_regulator_is_enabled(rdev) && rdev->supply) { ret = regulator_enable(rdev->supply); if (ret < 0) { - if (rdev->supply) - _regulator_put(rdev->supply); + _regulator_put(rdev->supply); return ret; } } diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c index 464018de7e97..7bba8b747f30 100644 --- a/drivers/regulator/gpio-regulator.c +++ b/drivers/regulator/gpio-regulator.c @@ -394,6 +394,7 @@ static const struct of_device_id regulator_gpio_of_match[] = { { .compatible = "regulator-gpio", }, {}, }; +MODULE_DEVICE_TABLE(of, regulator_gpio_of_match); #endif static struct platform_driver gpio_regulator_driver = { diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c index 4fa7bcaf454e..f9d74d63be7c 100644 --- a/drivers/regulator/pbias-regulator.c +++ b/drivers/regulator/pbias-regulator.c @@ -45,6 +45,10 @@ struct pbias_regulator_data { int voltage; }; +struct pbias_of_data { + unsigned int offset; +}; + static const unsigned int pbias_volt_table[] = { 1800000, 3000000 @@ -102,8 +106,35 @@ static struct of_regulator_match pbias_matches[] = { }; #define PBIAS_NUM_REGS ARRAY_SIZE(pbias_matches) +/* Offset from SCM general area (and syscon) base */ + +static const struct pbias_of_data pbias_of_data_omap2 = { + .offset = 0x230, +}; + +static const struct pbias_of_data pbias_of_data_omap3 = { + .offset = 0x2b0, +}; + +static const struct pbias_of_data pbias_of_data_omap4 = { + .offset = 0x60, +}; + +static const struct pbias_of_data pbias_of_data_omap5 = { + .offset = 0x60, +}; + +static const struct pbias_of_data pbias_of_data_dra7 = { + .offset = 0xe00, +}; + static const struct of_device_id pbias_of_match[] = { { .compatible = "ti,pbias-omap", }, + { .compatible = "ti,pbias-omap2", .data = &pbias_of_data_omap2, }, + { .compatible = "ti,pbias-omap3", .data = &pbias_of_data_omap3, }, + { .compatible = "ti,pbias-omap4", .data = &pbias_of_data_omap4, }, + { .compatible = "ti,pbias-omap5", .data = &pbias_of_data_omap5, }, + { .compatible = "ti,pbias-dra7", .data = &pbias_of_data_dra7, }, {}, }; MODULE_DEVICE_TABLE(of, pbias_of_match); @@ -118,6 +149,9 @@ static int pbias_regulator_probe(struct platform_device *pdev) const struct pbias_reg_info *info; int ret = 0; int count, idx, data_idx = 0; + const struct of_device_id *match; + const struct pbias_of_data *data; + unsigned int offset; count = of_regulator_match(&pdev->dev, np, pbias_matches, PBIAS_NUM_REGS); @@ -133,6 +167,20 @@ static int pbias_regulator_probe(struct platform_device *pdev) if (IS_ERR(syscon)) return PTR_ERR(syscon); + match = of_match_device(of_match_ptr(pbias_of_match), &pdev->dev); + if (match && match->data) { + data = match->data; + offset = data->offset; + } else { + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + offset = res->start; + dev_WARN(&pdev->dev, + "using legacy dt data for pbias offset\n"); + } + cfg.regmap = syscon; cfg.dev = &pdev->dev; @@ -145,10 +193,6 @@ static int pbias_regulator_probe(struct platform_device *pdev) if (!info) return -ENODEV; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -EINVAL; - drvdata[data_idx].syscon = syscon; drvdata[data_idx].info = info; 
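The pbias rework in this hunk is the usual of_device_id .data lookup: each new compatible string carries a small per-SoC data block, and the old reg-based path is kept only as a deprecated fallback; the same of_device_id tables are what the MODULE_DEVICE_TABLE(of, ...) additions in the neighbouring regulator hunks export for module autoloading. A rough sketch of the pattern, with hypothetical "vendor,foo" compatibles and a foo_of_data type standing in for the real ones:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_of_data {			/* hypothetical per-compatible data */
	unsigned int offset;
};

static const struct foo_of_data foo_data_v2 = { .offset = 0x230 };

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo" },	/* legacy binding, no data */
	{ .compatible = "vendor,foo-v2", .data = &foo_data_v2 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static int foo_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	unsigned int offset;

	match = of_match_device(foo_of_match, &pdev->dev);
	if (match && match->data) {
		const struct foo_of_data *data = match->data;

		offset = data->offset;
	} else {
		/* deprecated: derive the offset from the reg property */
		struct resource *res =
			platform_get_resource(pdev, IORESOURCE_MEM, 0);

		if (!res)
			return -EINVAL;
		offset = res->start;
		dev_warn(&pdev->dev, "using legacy offset lookup\n");
	}

	dev_dbg(&pdev->dev, "register block offset: %#x\n", offset);
	return 0;
}

Keeping the fallback lets existing device trees keep working while new ones move to the dedicated compatibles, which matches the dev_WARN("using legacy dt data ...") path in the hunk above.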
drvdata[data_idx].desc.name = info->name; @@ -158,9 +202,9 @@ static int pbias_regulator_probe(struct platform_device *pdev) drvdata[data_idx].desc.volt_table = pbias_volt_table; drvdata[data_idx].desc.n_voltages = 2; drvdata[data_idx].desc.enable_time = info->enable_time; - drvdata[data_idx].desc.vsel_reg = res->start; + drvdata[data_idx].desc.vsel_reg = offset; drvdata[data_idx].desc.vsel_mask = info->vmode; - drvdata[data_idx].desc.enable_reg = res->start; + drvdata[data_idx].desc.enable_reg = offset; drvdata[data_idx].desc.enable_mask = info->enable_mask; drvdata[data_idx].desc.enable_val = info->enable; drvdata[data_idx].desc.disable_val = info->disable_val; diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c index 7f97223f95c5..a02c1b961039 100644 --- a/drivers/regulator/tps65218-regulator.c +++ b/drivers/regulator/tps65218-regulator.c @@ -73,7 +73,7 @@ static const struct regulator_linear_range dcdc4_ranges[] = { }; static struct tps_info tps65218_pmic_regs[] = { - TPS65218_INFO(DCDC1, "DCDC1", 850000, 167500), + TPS65218_INFO(DCDC1, "DCDC1", 850000, 1675000), TPS65218_INFO(DCDC2, "DCDC2", 850000, 1675000), TPS65218_INFO(DCDC3, "DCDC3", 900000, 3400000), TPS65218_INFO(DCDC4, "DCDC4", 1175000, 3400000), diff --git a/drivers/regulator/vexpress.c b/drivers/regulator/vexpress.c index bed9d3ee4198..c810cbbd463f 100644 --- a/drivers/regulator/vexpress.c +++ b/drivers/regulator/vexpress.c @@ -103,6 +103,7 @@ static const struct of_device_id vexpress_regulator_of_match[] = { { .compatible = "arm,vexpress-volt", }, { } }; +MODULE_DEVICE_TABLE(of, vexpress_regulator_of_match); static struct platform_driver vexpress_regulator_driver = { .probe = vexpress_regulator_probe, diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index f8d8fdb26b72..e9fae30fafda 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -400,12 +400,16 @@ static bool virtio_ccw_kvm_notify(struct virtqueue *vq) static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev, struct ccw1 *ccw, int index) { + int ret; + vcdev->config_block->index = index; ccw->cmd_code = CCW_CMD_READ_VQ_CONF; ccw->flags = 0; ccw->count = sizeof(struct vq_config_block); ccw->cda = (__u32)(unsigned long)(vcdev->config_block); - ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF); + ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF); + if (ret) + return ret; return vcdev->config_block->num; } @@ -503,6 +507,10 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, goto out_err; } info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i); + if (info->num < 0) { + err = info->num; + goto out_err; + } size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN)); info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); if (info->queue == NULL) { diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index add419d6ff34..a56a7b243e91 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -212,6 +212,17 @@ static const struct file_operations twa_fops = { .llseek = noop_llseek, }; +/* + * The controllers use an inline buffer instead of a mapped SGL for small, + * single entry buffers. Note that we treat a zero-length transfer like + * a mapped SGL. 
+ */ +static bool twa_command_mapped(struct scsi_cmnd *cmd) +{ + return scsi_sg_count(cmd) != 1 || + scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH; +} + /* This function will complete an aen request from the isr */ static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id) { @@ -1339,7 +1350,8 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance) } /* Now complete the io */ - scsi_dma_unmap(cmd); + if (twa_command_mapped(cmd)) + scsi_dma_unmap(cmd); cmd->scsi_done(cmd); tw_dev->state[request_id] = TW_S_COMPLETED; twa_free_request_id(tw_dev, request_id); @@ -1582,7 +1594,8 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev) struct scsi_cmnd *cmd = tw_dev->srb[i]; cmd->result = (DID_RESET << 16); - scsi_dma_unmap(cmd); + if (twa_command_mapped(cmd)) + scsi_dma_unmap(cmd); cmd->scsi_done(cmd); } } @@ -1765,12 +1778,14 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); switch (retval) { case SCSI_MLQUEUE_HOST_BUSY: - scsi_dma_unmap(SCpnt); + if (twa_command_mapped(SCpnt)) + scsi_dma_unmap(SCpnt); twa_free_request_id(tw_dev, request_id); break; case 1: SCpnt->result = (DID_ERROR << 16); - scsi_dma_unmap(SCpnt); + if (twa_command_mapped(SCpnt)) + scsi_dma_unmap(SCpnt); done(SCpnt); tw_dev->state[request_id] = TW_S_COMPLETED; twa_free_request_id(tw_dev, request_id); @@ -1831,8 +1846,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, /* Map sglist from scsi layer to cmd packet */ if (scsi_sg_count(srb)) { - if ((scsi_sg_count(srb) == 1) && - (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) { + if (!twa_command_mapped(srb)) { if (srb->sc_data_direction == DMA_TO_DEVICE || srb->sc_data_direction == DMA_BIDIRECTIONAL) scsi_sg_copy_to_buffer(srb, @@ -1905,7 +1919,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re { struct scsi_cmnd *cmd = tw_dev->srb[request_id]; - if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH && + if (!twa_command_mapped(cmd) && (cmd->sc_data_direction == DMA_FROM_DEVICE || cmd->sc_data_direction == DMA_BIDIRECTIONAL)) { if (scsi_sg_count(cmd) == 1) { diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 33c74d3436c9..6bffd91b973a 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -976,13 +976,13 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) wake_up(&conn->ehwait); } -static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) +static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) { struct iscsi_nopout hdr; struct iscsi_task *task; if (!rhdr && conn->ping_task) - return; + return -EINVAL; memset(&hdr, 0, sizeof(struct iscsi_nopout)); hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE; @@ -996,13 +996,16 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) hdr.ttt = RESERVED_ITT; task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); - if (!task) + if (!task) { iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); - else if (!rhdr) { + return -EIO; + } else if (!rhdr) { /* only track our nops */ conn->ping_task = task; conn->last_ping = jiffies; } + + return 0; } static int iscsi_nop_out_rsp(struct iscsi_task *task, @@ -2092,8 +2095,10 @@ static void iscsi_check_transport_timeouts(unsigned long data) if (time_before_eq(last_recv + recv_timeout, jiffies)) { /* send a ping to try to provoke some traffic */ ISCSI_DBG_CONN(conn, 
"Sending nopout as ping\n"); - iscsi_send_nopout(conn, NULL); - next_timeout = conn->last_ping + (conn->ping_timeout * HZ); + if (iscsi_send_nopout(conn, NULL)) + next_timeout = jiffies + (1 * HZ); + else + next_timeout = conn->last_ping + (conn->ping_timeout * HZ); } else next_timeout = last_recv + recv_timeout; diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index edb044a7b56d..0a2168e69bbc 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -111,7 +111,7 @@ static struct scsi_device_handler *scsi_dh_lookup(const char *name) dh = __scsi_dh_lookup(name); if (!dh) { - request_module(name); + request_module("scsi_dh_%s", name); dh = __scsi_dh_lookup(name); } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index cbfc5990052b..126a48c6431e 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1957,7 +1957,7 @@ static int scsi_mq_prep_fn(struct request *req) static void scsi_mq_done(struct scsi_cmnd *cmd) { trace_scsi_dispatch_cmd_done(cmd); - blk_mq_complete_request(cmd->request); + blk_mq_complete_request(cmd->request, cmd->request->errors); } static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c index 043419dcee92..8e72bcbd3d6d 100644 --- a/drivers/sh/intc/core.c +++ b/drivers/sh/intc/core.c @@ -65,7 +65,7 @@ void intc_set_prio_level(unsigned int irq, unsigned int level) raw_spin_unlock_irqrestore(&intc_big_lock, flags); } -static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc) +static void intc_redirect_irq(struct irq_desc *desc) { generic_handle_irq((unsigned int)irq_desc_get_handler_data(desc)); } diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h index 7dff08e2a071..6ce7f0d26dcf 100644 --- a/drivers/sh/intc/internals.h +++ b/drivers/sh/intc/internals.h @@ -99,15 +99,7 @@ static inline struct intc_desc_int *get_intc_desc(unsigned int irq) */ static inline void activate_irq(int irq) { -#ifdef CONFIG_ARM - /* ARM requires an extra step to clear IRQ_NOREQUEST, which it - * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE. 
- */ - set_irq_flags(irq, IRQF_VALID); -#else - /* same effect on other architectures */ - irq_set_noprobe(irq); -#endif + irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE); } static inline int intc_handle_int_cmp(const void *a, const void *b) diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c index bafc51c6f0ba..e7899624aa0b 100644 --- a/drivers/sh/intc/virq.c +++ b/drivers/sh/intc/virq.c @@ -109,7 +109,7 @@ static int add_virq_to_pirq(unsigned int irq, unsigned int virq) return 0; } -static void intc_virq_handler(unsigned int __irq, struct irq_desc *desc) +static void intc_virq_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct irq_data *data = irq_desc_get_irq_data(desc); @@ -127,7 +127,7 @@ static void intc_virq_handler(unsigned int __irq, struct irq_desc *desc) handle = (unsigned long)irq_desc_get_handler_data(vdesc); addr = INTC_REG(d, _INTC_ADDR_E(handle), 0); if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0)) - generic_handle_irq_desc(entry->irq, vdesc); + generic_handle_irq_desc(vdesc); } } diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c index d3d1891cda3c..25abd4eb7d10 100644 --- a/drivers/sh/pm_runtime.c +++ b/drivers/sh/pm_runtime.c @@ -35,20 +35,11 @@ static struct pm_clk_notifier_block platform_bus_notifier = { static int __init sh_pm_runtime_init(void) { if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { - if (!of_machine_is_compatible("renesas,emev2") && - !of_machine_is_compatible("renesas,r7s72100") && -#ifndef CONFIG_PM_GENERIC_DOMAINS_OF - !of_machine_is_compatible("renesas,r8a73a4") && - !of_machine_is_compatible("renesas,r8a7740") && - !of_machine_is_compatible("renesas,sh73a0") && -#endif - !of_machine_is_compatible("renesas,r8a7778") && - !of_machine_is_compatible("renesas,r8a7779") && - !of_machine_is_compatible("renesas,r8a7790") && - !of_machine_is_compatible("renesas,r8a7791") && - !of_machine_is_compatible("renesas,r8a7792") && - !of_machine_is_compatible("renesas,r8a7793") && - !of_machine_is_compatible("renesas,r8a7794")) + if (!of_find_compatible_node(NULL, NULL, + "renesas,cpg-mstp-clocks")) + return 0; + if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS_OF) && + of_find_node_with_property(NULL, "#power-domain-cells")) return 0; } diff --git a/drivers/soc/dove/pmu.c b/drivers/soc/dove/pmu.c index 6792aae9e2e5..052aecf29893 100644 --- a/drivers/soc/dove/pmu.c +++ b/drivers/soc/dove/pmu.c @@ -222,9 +222,9 @@ static void __pmu_domain_register(struct pmu_domain *domain, } /* PMU IRQ controller */ -static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc) +static void pmu_irq_handler(struct irq_desc *desc) { - struct pmu_data *pmu = irq_get_handler_data(irq); + struct pmu_data *pmu = irq_desc_get_handler_data(desc); struct irq_chip_generic *gc = pmu->irq_gc; struct irq_domain *domain = pmu->irq_domain; void __iomem *base = gc->reg_base; @@ -232,7 +232,7 @@ static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc) u32 done = ~0; if (stat == 0) { - handle_bad_irq(irq, desc); + handle_bad_irq(desc); return; } diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index bf9ed380bb1c..41e37a6a1368 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -871,14 +871,7 @@ static int atmel_spi_set_xfer_speed(struct atmel_spi *as, * Calculate the lowest divider that satisfies the * constraint, assuming div32/fdiv/mbz == 0. */ - if (xfer->speed_hz) - scbr = DIV_ROUND_UP(bus_hz, xfer->speed_hz); - else - /* - * This can happend if max_speed is null. 
- * In this case, we set the lowest possible speed - */ - scbr = 0xff; + scbr = DIV_ROUND_UP(bus_hz, xfer->speed_hz); /* * If the resulting divider doesn't fit into the @@ -1300,14 +1293,12 @@ static int atmel_spi_one_transfer(struct spi_master *master, return -EINVAL; } - if (xfer->bits_per_word) { - asd = spi->controller_state; - bits = (asd->csr >> 4) & 0xf; - if (bits != xfer->bits_per_word - 8) { - dev_dbg(&spi->dev, + asd = spi->controller_state; + bits = (asd->csr >> 4) & 0xf; + if (bits != xfer->bits_per_word - 8) { + dev_dbg(&spi->dev, "you can't yet change bits_per_word in transfers\n"); - return -ENOPROTOOPT; - } + return -ENOPROTOOPT; } /* @@ -1720,6 +1711,7 @@ static int atmel_spi_runtime_resume(struct device *dev) return clk_prepare_enable(as->clk); } +#ifdef CONFIG_PM_SLEEP static int atmel_spi_suspend(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); @@ -1756,6 +1748,7 @@ static int atmel_spi_resume(struct device *dev) return ret; } +#endif static const struct dev_pm_ops atmel_spi_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume) diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index e7874a6171ec..3e8eeb23d4e9 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -386,14 +386,14 @@ static bool bcm2835_spi_can_dma(struct spi_master *master, /* otherwise we only allow transfers within the same page * to avoid wasting time on dma_mapping when it is not practical */ - if (((size_t)tfr->tx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) { + if (((size_t)tfr->tx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) { dev_warn_once(&spi->dev, "Unaligned spi tx-transfer bridging page\n"); return false; } - if (((size_t)tfr->rx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) { + if (((size_t)tfr->rx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) { dev_warn_once(&spi->dev, - "Unaligned spi tx-transfer bridging page\n"); + "Unaligned spi rx-transfer bridging page\n"); return false; } diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 3cf9faa6cc3f..a85d863d4a44 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -992,11 +992,12 @@ static int davinci_spi_probe(struct platform_device *pdev) goto free_master; } - dspi->irq = platform_get_irq(pdev, 0); - if (dspi->irq <= 0) { + ret = platform_get_irq(pdev, 0); + if (ret == 0) ret = -EINVAL; + if (ret < 0) goto free_master; - } + dspi->irq = ret; ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq, dummy_thread_fn, 0, dev_name(&pdev->dev), dspi); diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c index 5468fc70dbf8..2465259f6241 100644 --- a/drivers/spi/spi-meson-spifc.c +++ b/drivers/spi/spi-meson-spifc.c @@ -444,6 +444,7 @@ static const struct of_device_id meson_spifc_dt_match[] = { { .compatible = "amlogic,meson6-spifc", }, { }, }; +MODULE_DEVICE_TABLE(of, meson_spifc_dt_match); static struct platform_driver meson_spifc_driver = { .probe = meson_spifc_probe, diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 5f6315c47920..ecb6c58238c4 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -85,7 +85,7 @@ struct mtk_spi { void __iomem *base; u32 state; u32 pad_sel; - struct clk *spi_clk, *parent_clk; + struct clk *parent_clk, *sel_clk, *spi_clk; struct spi_transfer *cur_transfer; u32 xfer_len; struct scatterlist *tx_sgl, *rx_sgl; @@ -173,22 +173,6 @@ static void mtk_spi_config(struct mtk_spi *mdata, writel(mdata->pad_sel, mdata->base + SPI_PAD_SEL_REG); } 
-static int mtk_spi_prepare_hardware(struct spi_master *master) -{ - struct spi_transfer *trans; - struct mtk_spi *mdata = spi_master_get_devdata(master); - struct spi_message *msg = master->cur_msg; - - trans = list_first_entry(&msg->transfers, struct spi_transfer, - transfer_list); - if (!trans->cs_change) { - mdata->state = MTK_SPI_IDLE; - mtk_spi_reset(mdata); - } - - return 0; -} - static int mtk_spi_prepare_message(struct spi_master *master, struct spi_message *msg) { @@ -228,11 +212,15 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable) struct mtk_spi *mdata = spi_master_get_devdata(spi->master); reg_val = readl(mdata->base + SPI_CMD_REG); - if (!enable) + if (!enable) { reg_val |= SPI_CMD_PAUSE_EN; - else + writel(reg_val, mdata->base + SPI_CMD_REG); + } else { reg_val &= ~SPI_CMD_PAUSE_EN; - writel(reg_val, mdata->base + SPI_CMD_REG); + writel(reg_val, mdata->base + SPI_CMD_REG); + mdata->state = MTK_SPI_IDLE; + mtk_spi_reset(mdata); + } } static void mtk_spi_prepare_transfer(struct spi_master *master, @@ -509,7 +497,6 @@ static int mtk_spi_probe(struct platform_device *pdev) master->mode_bits = SPI_CPOL | SPI_CPHA; master->set_cs = mtk_spi_set_cs; - master->prepare_transfer_hardware = mtk_spi_prepare_hardware; master->prepare_message = mtk_spi_prepare_message; master->transfer_one = mtk_spi_transfer_one; master->can_dma = mtk_spi_can_dma; @@ -576,13 +563,6 @@ static int mtk_spi_probe(struct platform_device *pdev) goto err_put_master; } - mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk"); - if (IS_ERR(mdata->spi_clk)) { - ret = PTR_ERR(mdata->spi_clk); - dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret); - goto err_put_master; - } - mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk"); if (IS_ERR(mdata->parent_clk)) { ret = PTR_ERR(mdata->parent_clk); @@ -590,13 +570,27 @@ static int mtk_spi_probe(struct platform_device *pdev) goto err_put_master; } + mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk"); + if (IS_ERR(mdata->sel_clk)) { + ret = PTR_ERR(mdata->sel_clk); + dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret); + goto err_put_master; + } + + mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk"); + if (IS_ERR(mdata->spi_clk)) { + ret = PTR_ERR(mdata->spi_clk); + dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret); + goto err_put_master; + } + ret = clk_prepare_enable(mdata->spi_clk); if (ret < 0) { dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret); goto err_put_master; } - ret = clk_set_parent(mdata->spi_clk, mdata->parent_clk); + ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk); if (ret < 0) { dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret); goto err_disable_clk; @@ -630,7 +624,6 @@ static int mtk_spi_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); mtk_spi_reset(mdata); - clk_disable_unprepare(mdata->spi_clk); spi_master_put(master); return 0; diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index fdd791977041..a8ef38ebb9c9 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -654,6 +654,10 @@ static irqreturn_t ssp_int(int irq, void *dev_id) if (!(sccr1_reg & SSCR1_TIE)) mask &= ~SSSR_TFS; + /* Ignore RX timeout interrupt if it is disabled */ + if (!(sccr1_reg & SSCR1_TINTE)) + mask &= ~SSSR_TINT; + if (!(status & mask)) return IRQ_NONE; diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c index 2e32ea2f194f..be6155cba9de 100644 --- a/drivers/spi/spi-xtensa-xtfpga.c +++ b/drivers/spi/spi-xtensa-xtfpga.c @@ -34,13 +34,13 @@ 
struct xtfpga_spi { static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi, unsigned addr, u32 val) { - iowrite32(val, spi->regs + addr); + __raw_writel(val, spi->regs + addr); } static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi, unsigned addr) { - return ioread32(spi->regs + addr); + return __raw_readl(spi->regs + addr); } static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi) diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 3abb3903f2ad..a5f53de813d3 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1610,8 +1610,7 @@ static struct class spi_master_class = { * * The caller is responsible for assigning the bus number and initializing * the master's methods before calling spi_register_master(); and (after errors - * adding the device) calling spi_master_put() and kfree() to prevent a memory - * leak. + * adding the device) calling spi_master_put() to prevent a memory leak. */ struct spi_master *spi_alloc_master(struct device *dev, unsigned size) { diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index fba92a526531..ef008e52f953 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -651,7 +651,8 @@ static int spidev_release(struct inode *inode, struct file *filp) kfree(spidev->rx_buffer); spidev->rx_buffer = NULL; - spidev->speed_hz = spidev->spi->max_speed_hz; + if (spidev->spi) + spidev->speed_hz = spidev->spi->max_speed_hz; /* ... after we unbound from the underlying device? */ spin_lock_irq(&spidev->spi_lock); diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index bdfb3c84c3cb..4a3cf9ba152f 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -451,7 +451,7 @@ static void periph_interrupt(struct spmi_pmic_arb_dev *pa, u8 apid) } } -static void pmic_arb_chained_irq(unsigned int irq, struct irq_desc *desc) +static void pmic_arb_chained_irq(struct irq_desc *desc) { struct spmi_pmic_arb_dev *pa = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO index 20288fc53946..8f3ac37bfe12 100644 --- a/drivers/staging/android/TODO +++ b/drivers/staging/android/TODO @@ -5,5 +5,25 @@ TODO: - add proper arch dependencies as needed - audit userspace interfaces to make sure they are sane + +ion/ + - Remove ION_IOC_SYNC: Flushing for devices should be purely a kernel internal + interface on top of dma-buf. flush_for_device needs to be added to dma-buf + first. + - Remove ION_IOC_CUSTOM: Atm used for cache flushing for cpu access in some + vendor trees. Should be replaced with an ioctl on the dma-buf to expose the + begin/end_cpu_access hooks to userspace. + - Clarify the tricks ion plays with explicitly managing coherency behind the + dma api's back (this is absolutely needed for high-perf gpu drivers): Add an + explicit coherency management mode to flush_for_device to be used by drivers + which want to manage caches themselves and which indicates whether cpu caches + need flushing. + - With those removed there's probably no use for ION_IOC_IMPORT anymore either + since ion would just be the central allocator for shared buffers. + - Add dt-binding to expose cma regions as ion heaps, with the rule that any + such cma regions must already be used by some device for dma. I.e. ion only + exposes existing cma regions and doesn't reserve unecessarily memory when + booting a system which doesn't use ion. 
+ Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc: Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com> diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 217aa537c4eb..6e8d8392ca38 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -1179,13 +1179,13 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd) mutex_unlock(&client->lock); goto end; } - mutex_unlock(&client->lock); handle = ion_handle_create(client, buffer); - if (IS_ERR(handle)) + if (IS_ERR(handle)) { + mutex_unlock(&client->lock); goto end; + } - mutex_lock(&client->lock); ret = ion_handle_add(client, handle); mutex_unlock(&client->lock); if (ret) { diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c index 32f3a9d921d6..5cafa50d1fac 100644 --- a/drivers/staging/fbtft/fb_uc1611.c +++ b/drivers/staging/fbtft/fb_uc1611.c @@ -76,7 +76,7 @@ static int init_display(struct fbtft_par *par) /* Set CS active high */ par->spi->mode |= SPI_CS_HIGH; - ret = par->spi->master->setup(par->spi); + ret = spi_setup(par->spi); if (ret) { dev_err(par->info->device, "Could not set SPI_CS_HIGH\n"); return ret; } diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c index 88fb2c0132d5..8eae6ef25846 100644 --- a/drivers/staging/fbtft/fb_watterott.c +++ b/drivers/staging/fbtft/fb_watterott.c @@ -169,7 +169,7 @@ static int init_display(struct fbtft_par *par) /* enable SPI interface by having CS and MOSI low during reset */ save_mode = par->spi->mode; par->spi->mode |= SPI_CS_HIGH; - ret = par->spi->master->setup(par->spi); /* set CS inactive low */ + ret = spi_setup(par->spi); /* set CS inactive low */ if (ret) { dev_err(par->info->device, "Could not set SPI_CS_HIGH\n"); return ret; @@ -180,7 +180,7 @@ static int init_display(struct fbtft_par *par) par->fbtftops.reset(par); mdelay(1000); par->spi->mode = save_mode; - ret = par->spi->master->setup(par->spi); + ret = spi_setup(par->spi); if (ret) { dev_err(par->info->device, "Could not restore SPI mode\n"); return ret; } diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index 23392eb6799e..7f5fa3d1cab0 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -1436,15 +1436,11 @@ int fbtft_probe_common(struct fbtft_display *display, /* 9-bit SPI setup */ if (par->spi && display->buswidth == 9) { - par->spi->bits_per_word = 9; - ret = par->spi->master->setup(par->spi); - if (ret) { + if (par->spi->master->bits_per_word_mask & SPI_BPW_MASK(9)) { + par->spi->bits_per_word = 9; + } else { dev_warn(&par->spi->dev, "9-bit SPI not available, emulating using 8-bit.\n"); - par->spi->bits_per_word = 8; - ret = par->spi->master->setup(par->spi); - if (ret) - goto out_release; /* allocate buffer with room for dc bits */ par->extra = devm_kzalloc(par->info->device, par->txbuf.len + (par->txbuf.len / 8) + 8, diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c index c763efc5de7d..3f380a0086c3 100644 --- a/drivers/staging/fbtft/flexfb.c +++ b/drivers/staging/fbtft/flexfb.c @@ -463,15 +463,12 @@ static int flexfb_probe_common(struct spi_device *sdev, } par->fbtftops.write_register = fbtft_write_reg8_bus9; par->fbtftops.write_vmem = fbtft_write_vmem16_bus9; - sdev->bits_per_word = 9; - ret = sdev->master->setup(sdev); - if (ret) { + if (par->spi->master->bits_per_word_mask + & SPI_BPW_MASK(9)) { + par->spi->bits_per_word = 9; + }
else { dev_warn(dev, "9-bit SPI not available, emulating using 8-bit.\n"); - sdev->bits_per_word = 8; - ret = sdev->master->setup(sdev); - if (ret) - goto out_release; /* allocate buffer with room for dc bits */ par->extra = devm_kzalloc(par->info->device, par->txbuf.len + (par->txbuf.len / 8) + 8, diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c index 23685e74917e..bd2c69f85949 100644 --- a/drivers/staging/iio/accel/sca3000_ring.c +++ b/drivers/staging/iio/accel/sca3000_ring.c @@ -116,7 +116,7 @@ static int sca3000_read_first_n_hw_rb(struct iio_buffer *r, if (ret) goto error_ret; - for (i = 0; i < num_read; i++) + for (i = 0; i < num_read / sizeof(u16); i++) *(((u16 *)rx) + i) = be16_to_cpup((__be16 *)rx + i); if (copy_to_user(buf, rx, num_read)) diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c index 3f7715c9968b..47fc00a3f63b 100644 --- a/drivers/staging/iio/adc/mxs-lradc.c +++ b/drivers/staging/iio/adc/mxs-lradc.c @@ -915,11 +915,12 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev, case IIO_CHAN_INFO_OFFSET: if (chan->type == IIO_TEMP) { /* The calculated value from the ADC is in Kelvin, we - * want Celsius for hwmon so the offset is - * -272.15 * scale + * want Celsius for hwmon so the offset is -273.15 + * The offset is applied before scaling so it is + * actually -213.15 * 4 / 1.012 = -1079.644268 */ - *val = -1075; - *val2 = 691699; + *val = -1079; + *val2 = 644268; return IIO_VAL_INT_PLUS_MICRO; } diff --git a/drivers/staging/lustre/README.txt b/drivers/staging/lustre/README.txt index cf0ca50ff83b..0676243eea9e 100644 --- a/drivers/staging/lustre/README.txt +++ b/drivers/staging/lustre/README.txt @@ -14,10 +14,8 @@ Unlike shared disk storage cluster filesystems (e.g. OCFS2, GFS, GPFS), Lustre has independent Metadata and Data servers that clients can access in parallel to maximize performance. -In order to use Lustre client you will need to download lustre client -tools from -https://downloads.hpdd.intel.com/public/lustre/latest-feature-release/ -the package name is lustre-client. +In order to use Lustre client you will need to download the "lustre-client" +package that contains the userspace tools from http://lustre.org/download/ You will need to install and configure your Lustre servers separately. 
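The mxs-lradc hunk above encodes the Kelvin-to-Celsius offset with the channel scale already folded in: -273.15 * 4 / 1.012 ≈ -1079.644268, which the driver splits into an integer part of -1079 and 644268 micro-units (the val/val2 pair in the hunk). A small user-space C check of that constant, illustrative only and not part of any patch in this series:

	/*
	 * Check the mxs-lradc temperature offset constant above.
	 * Plain user-space C; build with -lm.
	 */
	#include <stdio.h>
	#include <math.h>

	int main(void)
	{
		/* Kelvin -> Celsius offset, applied before the 4 / 1.012 scale */
		double offset = -273.15 * 4.0 / 1.012;	/* ~ -1079.6442688 */
		int whole = (int)trunc(offset);		/* -1079 */
		int micro = (int)trunc(fabs(offset - whole) * 1e6); /* 644268 */

		printf("offset = %.7f -> val = %d, val2 = %d\n",
		       offset, whole, micro);
		return 0;
	}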
@@ -76,12 +74,10 @@ Mount Options More Information ================ -You can get more information at -OpenSFS website: http://lustre.opensfs.org/about/ -Intel HPDD wiki: https://wiki.hpdd.intel.com +You can get more information at the Lustre website: http://wiki.lustre.org/ -Out of tree Lustre client and server code is available at: -http://git.whamcloud.com/fs/lustre-release.git +Source for the userspace tools and out-of-tree client and server code +is available at: http://git.hpdd.intel.com/fs/lustre-release.git Latest binary packages: -http://lustre.opensfs.org/download-lustre/ +http://lustre.org/download/ diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c index 769b61193d87..a9bc6e23fc25 100644 --- a/drivers/staging/lustre/lustre/llite/dir.c +++ b/drivers/staging/lustre/lustre/llite/dir.c @@ -224,7 +224,7 @@ static int ll_dir_filler(void *_hash, struct page *page0) prefetchw(&page->flags); ret = add_to_page_cache_lru(page, inode->i_mapping, offset, - GFP_KERNEL); + GFP_NOFS); if (ret == 0) { unlock_page(page); } else { diff --git a/drivers/staging/most/Kconfig b/drivers/staging/most/Kconfig index d50de03de7b9..0b9b9b539f70 100644 --- a/drivers/staging/most/Kconfig +++ b/drivers/staging/most/Kconfig @@ -1,5 +1,6 @@ menuconfig MOST tristate "MOST driver" + depends on HAS_DMA select MOSTCORE default n ---help--- diff --git a/drivers/staging/most/hdm-dim2/Kconfig b/drivers/staging/most/hdm-dim2/Kconfig index 1d4ad1d67758..fc548769479b 100644 --- a/drivers/staging/most/hdm-dim2/Kconfig +++ b/drivers/staging/most/hdm-dim2/Kconfig @@ -5,6 +5,7 @@ config HDM_DIM2 tristate "DIM2 HDM" depends on AIM_NETWORK + depends on HAS_IOMEM ---help--- Say Y here if you want to connect via MediaLB to network transceiver. diff --git a/drivers/staging/most/hdm-usb/Kconfig b/drivers/staging/most/hdm-usb/Kconfig index a482c3fdf34b..ec1546312ee6 100644 --- a/drivers/staging/most/hdm-usb/Kconfig +++ b/drivers/staging/most/hdm-usb/Kconfig @@ -4,7 +4,7 @@ config HDM_USB tristate "USB HDM" - depends on USB + depends on USB && NET select AIM_NETWORK ---help--- Say Y here if you want to connect via USB to network tranceiver. diff --git a/drivers/staging/most/mostcore/Kconfig b/drivers/staging/most/mostcore/Kconfig index 38abf1b21b66..47172546d728 100644 --- a/drivers/staging/most/mostcore/Kconfig +++ b/drivers/staging/most/mostcore/Kconfig @@ -4,6 +4,7 @@ config MOSTCORE tristate "MOST Core" + depends on HAS_DMA ---help--- Say Y here if you want to enable MOST support. 
diff --git a/drivers/staging/rdma/Kconfig b/drivers/staging/rdma/Kconfig index cf5fe9bb87a1..d7f62359d743 100644 --- a/drivers/staging/rdma/Kconfig +++ b/drivers/staging/rdma/Kconfig @@ -24,6 +24,8 @@ if STAGING_RDMA source "drivers/staging/rdma/amso1100/Kconfig" +source "drivers/staging/rdma/ehca/Kconfig" + source "drivers/staging/rdma/hfi1/Kconfig" source "drivers/staging/rdma/ipath/Kconfig" diff --git a/drivers/staging/rdma/Makefile b/drivers/staging/rdma/Makefile index cbd915ac7f20..139d78ef2c24 100644 --- a/drivers/staging/rdma/Makefile +++ b/drivers/staging/rdma/Makefile @@ -1,4 +1,5 @@ # Entries for RDMA_STAGING tree obj-$(CONFIG_INFINIBAND_AMSO1100) += amso1100/ +obj-$(CONFIG_INFINIBAND_EHCA) += ehca/ obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/ obj-$(CONFIG_INFINIBAND_IPATH) += ipath/ diff --git a/drivers/infiniband/hw/ehca/Kconfig b/drivers/staging/rdma/ehca/Kconfig index 59f807d8d58e..3fadd2ad6426 100644 --- a/drivers/infiniband/hw/ehca/Kconfig +++ b/drivers/staging/rdma/ehca/Kconfig @@ -2,7 +2,8 @@ config INFINIBAND_EHCA tristate "eHCA support" depends on IBMEBUS ---help--- - This driver supports the IBM pSeries eHCA InfiniBand adapter. + This driver supports the deprecated IBM pSeries eHCA InfiniBand + adapter. To compile the driver as a module, choose M here. The module will be called ib_ehca. diff --git a/drivers/infiniband/hw/ehca/Makefile b/drivers/staging/rdma/ehca/Makefile index 74d284e46a40..74d284e46a40 100644 --- a/drivers/infiniband/hw/ehca/Makefile +++ b/drivers/staging/rdma/ehca/Makefile diff --git a/drivers/staging/rdma/ehca/TODO b/drivers/staging/rdma/ehca/TODO new file mode 100644 index 000000000000..199a4a600142 --- /dev/null +++ b/drivers/staging/rdma/ehca/TODO @@ -0,0 +1,4 @@ +9/2015 + +The ehca driver has been deprecated and moved to drivers/staging/rdma. +It will be removed in the 4.6 merge window. 
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/staging/rdma/ehca/ehca_av.c index 465926319f3d..465926319f3d 100644 --- a/drivers/infiniband/hw/ehca/ehca_av.c +++ b/drivers/staging/rdma/ehca/ehca_av.c diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/staging/rdma/ehca/ehca_classes.h index bd45e0f3923f..bd45e0f3923f 100644 --- a/drivers/infiniband/hw/ehca/ehca_classes.h +++ b/drivers/staging/rdma/ehca/ehca_classes.h diff --git a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h b/drivers/staging/rdma/ehca/ehca_classes_pSeries.h index 689c35786dd2..689c35786dd2 100644 --- a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h +++ b/drivers/staging/rdma/ehca/ehca_classes_pSeries.h diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/staging/rdma/ehca/ehca_cq.c index 9b68b175069b..9b68b175069b 100644 --- a/drivers/infiniband/hw/ehca/ehca_cq.c +++ b/drivers/staging/rdma/ehca/ehca_cq.c diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/staging/rdma/ehca/ehca_eq.c index 90da6747d395..90da6747d395 100644 --- a/drivers/infiniband/hw/ehca/ehca_eq.c +++ b/drivers/staging/rdma/ehca/ehca_eq.c diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/staging/rdma/ehca/ehca_hca.c index e8b1bb65797a..e8b1bb65797a 100644 --- a/drivers/infiniband/hw/ehca/ehca_hca.c +++ b/drivers/staging/rdma/ehca/ehca_hca.c diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/staging/rdma/ehca/ehca_irq.c index 8615d7cf7e01..8615d7cf7e01 100644 --- a/drivers/infiniband/hw/ehca/ehca_irq.c +++ b/drivers/staging/rdma/ehca/ehca_irq.c diff --git a/drivers/infiniband/hw/ehca/ehca_irq.h b/drivers/staging/rdma/ehca/ehca_irq.h index 5370199f08c7..5370199f08c7 100644 --- a/drivers/infiniband/hw/ehca/ehca_irq.h +++ b/drivers/staging/rdma/ehca/ehca_irq.h diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/staging/rdma/ehca/ehca_iverbs.h index 80e6a3d5df3e..80e6a3d5df3e 100644 --- a/drivers/infiniband/hw/ehca/ehca_iverbs.h +++ b/drivers/staging/rdma/ehca/ehca_iverbs.h diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/staging/rdma/ehca/ehca_main.c index 8246418cd4e0..8246418cd4e0 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/staging/rdma/ehca/ehca_main.c diff --git a/drivers/infiniband/hw/ehca/ehca_mcast.c b/drivers/staging/rdma/ehca/ehca_mcast.c index cec181532924..cec181532924 100644 --- a/drivers/infiniband/hw/ehca/ehca_mcast.c +++ b/drivers/staging/rdma/ehca/ehca_mcast.c diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/staging/rdma/ehca/ehca_mrmw.c index f914b30999f8..f914b30999f8 100644 --- a/drivers/infiniband/hw/ehca/ehca_mrmw.c +++ b/drivers/staging/rdma/ehca/ehca_mrmw.c diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.h b/drivers/staging/rdma/ehca/ehca_mrmw.h index 50d8b51306dd..50d8b51306dd 100644 --- a/drivers/infiniband/hw/ehca/ehca_mrmw.h +++ b/drivers/staging/rdma/ehca/ehca_mrmw.h diff --git a/drivers/infiniband/hw/ehca/ehca_pd.c b/drivers/staging/rdma/ehca/ehca_pd.c index 351577a6670a..351577a6670a 100644 --- a/drivers/infiniband/hw/ehca/ehca_pd.c +++ b/drivers/staging/rdma/ehca/ehca_pd.c diff --git a/drivers/infiniband/hw/ehca/ehca_qes.h b/drivers/staging/rdma/ehca/ehca_qes.h index 90c4efa67586..90c4efa67586 100644 --- a/drivers/infiniband/hw/ehca/ehca_qes.h +++ b/drivers/staging/rdma/ehca/ehca_qes.h diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/staging/rdma/ehca/ehca_qp.c index 2e89356c46fa..2e89356c46fa 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/drivers/staging/rdma/ehca/ehca_qp.c diff 
--git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/staging/rdma/ehca/ehca_reqs.c index 47f94984353d..47f94984353d 100644 --- a/drivers/infiniband/hw/ehca/ehca_reqs.c +++ b/drivers/staging/rdma/ehca/ehca_reqs.c diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/staging/rdma/ehca/ehca_sqp.c index 376b031c2c7f..376b031c2c7f 100644 --- a/drivers/infiniband/hw/ehca/ehca_sqp.c +++ b/drivers/staging/rdma/ehca/ehca_sqp.c diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/staging/rdma/ehca/ehca_tools.h index d280b12aae64..d280b12aae64 100644 --- a/drivers/infiniband/hw/ehca/ehca_tools.h +++ b/drivers/staging/rdma/ehca/ehca_tools.h diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/staging/rdma/ehca/ehca_uverbs.c index 1a1d5d99fcf9..1a1d5d99fcf9 100644 --- a/drivers/infiniband/hw/ehca/ehca_uverbs.c +++ b/drivers/staging/rdma/ehca/ehca_uverbs.c diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/staging/rdma/ehca/hcp_if.c index 89517ffb4389..89517ffb4389 100644 --- a/drivers/infiniband/hw/ehca/hcp_if.c +++ b/drivers/staging/rdma/ehca/hcp_if.c diff --git a/drivers/infiniband/hw/ehca/hcp_if.h b/drivers/staging/rdma/ehca/hcp_if.h index a46e514c367b..a46e514c367b 100644 --- a/drivers/infiniband/hw/ehca/hcp_if.h +++ b/drivers/staging/rdma/ehca/hcp_if.h diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/staging/rdma/ehca/hcp_phyp.c index 077376ff3d28..077376ff3d28 100644 --- a/drivers/infiniband/hw/ehca/hcp_phyp.c +++ b/drivers/staging/rdma/ehca/hcp_phyp.c diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.h b/drivers/staging/rdma/ehca/hcp_phyp.h index d1b029910249..d1b029910249 100644 --- a/drivers/infiniband/hw/ehca/hcp_phyp.h +++ b/drivers/staging/rdma/ehca/hcp_phyp.h diff --git a/drivers/infiniband/hw/ehca/hipz_fns.h b/drivers/staging/rdma/ehca/hipz_fns.h index 9dac93d02140..9dac93d02140 100644 --- a/drivers/infiniband/hw/ehca/hipz_fns.h +++ b/drivers/staging/rdma/ehca/hipz_fns.h diff --git a/drivers/infiniband/hw/ehca/hipz_fns_core.h b/drivers/staging/rdma/ehca/hipz_fns_core.h index 868735fd3187..868735fd3187 100644 --- a/drivers/infiniband/hw/ehca/hipz_fns_core.h +++ b/drivers/staging/rdma/ehca/hipz_fns_core.h diff --git a/drivers/infiniband/hw/ehca/hipz_hw.h b/drivers/staging/rdma/ehca/hipz_hw.h index bf996c7acc42..bf996c7acc42 100644 --- a/drivers/infiniband/hw/ehca/hipz_hw.h +++ b/drivers/staging/rdma/ehca/hipz_hw.h diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/staging/rdma/ehca/ipz_pt_fn.c index 7ffc748cb973..7ffc748cb973 100644 --- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c +++ b/drivers/staging/rdma/ehca/ipz_pt_fn.c diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/staging/rdma/ehca/ipz_pt_fn.h index a801274ea337..a801274ea337 100644 --- a/drivers/infiniband/hw/ehca/ipz_pt_fn.h +++ b/drivers/staging/rdma/ehca/ipz_pt_fn.h diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 654eafef1d30..aa58e597df06 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -2710,7 +2710,7 @@ int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok) if (sleep_ok) { mutex_lock(&ppd->hls_lock); } else { - while (mutex_trylock(&ppd->hls_lock) == EBUSY) + while (!mutex_trylock(&ppd->hls_lock)) udelay(1); } @@ -2758,7 +2758,7 @@ int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok) if (sleep_ok) { mutex_lock(&dd->pport->hls_lock); } else { - while (mutex_trylock(&dd->pport->hls_lock) == EBUSY) + while (!mutex_trylock(&dd->pport->hls_lock)) udelay(1); } diff 
--git a/drivers/staging/rdma/hfi1/device.c b/drivers/staging/rdma/hfi1/device.c index 07c87a87775f..bc26a5392712 100644 --- a/drivers/staging/rdma/hfi1/device.c +++ b/drivers/staging/rdma/hfi1/device.c @@ -57,11 +57,13 @@ #include "device.h" static struct class *class; +static struct class *user_class; static dev_t hfi1_dev; int hfi1_cdev_init(int minor, const char *name, const struct file_operations *fops, - struct cdev *cdev, struct device **devp) + struct cdev *cdev, struct device **devp, + bool user_accessible) { const dev_t dev = MKDEV(MAJOR(hfi1_dev), minor); struct device *device = NULL; @@ -78,7 +80,11 @@ int hfi1_cdev_init(int minor, const char *name, goto done; } - device = device_create(class, NULL, dev, NULL, "%s", name); + if (user_accessible) + device = device_create(user_class, NULL, dev, NULL, "%s", name); + else + device = device_create(class, NULL, dev, NULL, "%s", name); + if (!IS_ERR(device)) goto done; ret = PTR_ERR(device); @@ -110,6 +116,26 @@ const char *class_name(void) return hfi1_class_name; } +static char *hfi1_devnode(struct device *dev, umode_t *mode) +{ + if (mode) + *mode = 0600; + return kasprintf(GFP_KERNEL, "%s", dev_name(dev)); +} + +static const char *hfi1_class_name_user = "hfi1_user"; +const char *class_name_user(void) +{ + return hfi1_class_name_user; +} + +static char *hfi1_user_devnode(struct device *dev, umode_t *mode) +{ + if (mode) + *mode = 0666; + return kasprintf(GFP_KERNEL, "%s", dev_name(dev)); +} + int __init dev_init(void) { int ret; @@ -125,7 +151,22 @@ int __init dev_init(void) ret = PTR_ERR(class); pr_err("Could not create device class (err %d)\n", -ret); unregister_chrdev_region(hfi1_dev, HFI1_NMINORS); + goto done; } + class->devnode = hfi1_devnode; + + user_class = class_create(THIS_MODULE, class_name_user()); + if (IS_ERR(user_class)) { + ret = PTR_ERR(user_class); + pr_err("Could not create device class for user accessible files (err %d)\n", + -ret); + class_destroy(class); + class = NULL; + user_class = NULL; + unregister_chrdev_region(hfi1_dev, HFI1_NMINORS); + goto done; + } + user_class->devnode = hfi1_user_devnode; done: return ret; @@ -133,10 +174,11 @@ done: void dev_cleanup(void) { - if (class) { - class_destroy(class); - class = NULL; - } + class_destroy(class); + class = NULL; + + class_destroy(user_class); + user_class = NULL; unregister_chrdev_region(hfi1_dev, HFI1_NMINORS); } diff --git a/drivers/staging/rdma/hfi1/device.h b/drivers/staging/rdma/hfi1/device.h index 98caecd3d807..2850ff739d81 100644 --- a/drivers/staging/rdma/hfi1/device.h +++ b/drivers/staging/rdma/hfi1/device.h @@ -52,7 +52,8 @@ int hfi1_cdev_init(int minor, const char *name, const struct file_operations *fops, - struct cdev *cdev, struct device **devp); + struct cdev *cdev, struct device **devp, + bool user_accessible); void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp); const char *class_name(void); int __init dev_init(void); diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c index 6777d6b659cf..3e8d5ac4c626 100644 --- a/drivers/staging/rdma/hfi1/diag.c +++ b/drivers/staging/rdma/hfi1/diag.c @@ -292,7 +292,7 @@ int hfi1_diag_add(struct hfi1_devdata *dd) if (atomic_inc_return(&diagpkt_count) == 1) { ret = hfi1_cdev_init(HFI1_DIAGPKT_MINOR, name, &diagpkt_file_ops, &diagpkt_cdev, - &diagpkt_device); + &diagpkt_device, false); } return ret; @@ -592,7 +592,8 @@ static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name) ret = hfi1_cdev_init(HFI1_SNOOP_CAPTURE_BASE + dd->unit, name, &snoop_file_ops, - 
&dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev); + &dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev, + false); if (ret) { dd_dev_err(dd, "Couldn't create %s device: %d", name, ret); @@ -1012,11 +1013,10 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) case HFI1_SNOOP_IOCSETLINKSTATE_EXTRA: memset(&link_info, 0, sizeof(link_info)); - ret = copy_from_user(&link_info, + if (copy_from_user(&link_info, (struct hfi1_link_info __user *)arg, - sizeof(link_info)); - if (ret) - break; + sizeof(link_info))) + ret = -EFAULT; value = link_info.port_state; index = link_info.port_number; @@ -1080,9 +1080,10 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) case HFI1_SNOOP_IOCGETLINKSTATE_EXTRA: if (cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) { memset(&link_info, 0, sizeof(link_info)); - ret = copy_from_user(&link_info, + if (copy_from_user(&link_info, (struct hfi1_link_info __user *)arg, - sizeof(link_info)); + sizeof(link_info))) + ret = -EFAULT; index = link_info.port_number; } else { ret = __get_user(index, (int __user *) arg); @@ -1114,9 +1115,10 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) ppd->link_speed_active; link_info.link_width_active = ppd->link_width_active; - ret = copy_to_user( + if (copy_to_user( (struct hfi1_link_info __user *)arg, - &link_info, sizeof(link_info)); + &link_info, sizeof(link_info))) + ret = -EFAULT; } else { ret = __put_user(value, (int __user *)arg); } @@ -1142,10 +1144,9 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) snoop_dbg("Setting filter"); /* just copy command structure */ argp = (unsigned long *)arg; - ret = copy_from_user(&filter_cmd, (void __user *)argp, - sizeof(filter_cmd)); - if (ret < 0) { - pr_alert("Error copying filter command\n"); + if (copy_from_user(&filter_cmd, (void __user *)argp, + sizeof(filter_cmd))) { + ret = -EFAULT; break; } if (filter_cmd.opcode >= HFI1_MAX_FILTERS) { @@ -1167,12 +1168,11 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) break; } /* copy remaining data from userspace */ - ret = copy_from_user((u8 *)filter_value, + if (copy_from_user((u8 *)filter_value, (void __user *)filter_cmd.value_ptr, - filter_cmd.length); - if (ret < 0) { + filter_cmd.length)) { kfree(filter_value); - pr_alert("Error copying filter data\n"); + ret = -EFAULT; break; } /* Drain packets first */ diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index 469861750b76..72d38500d8ce 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -1181,6 +1181,7 @@ static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len) struct hfi1_filedata *fd = fp->private_data; int ret = 0; + memset(&cinfo, 0, sizeof(cinfo)); ret = hfi1_get_base_kinfo(uctxt, &cinfo); if (ret < 0) goto done; @@ -2089,14 +2090,16 @@ static int user_add(struct hfi1_devdata *dd) if (atomic_inc_return(&user_count) == 1) { ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops, - &wildcard_cdev, &wildcard_device); + &wildcard_cdev, &wildcard_device, + true); if (ret) goto done; } snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit); ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops, - &dd->user_cdev, &dd->user_device); + &dd->user_cdev, &dd->user_device, + true); if (ret) goto done; @@ -2104,7 +2107,8 @@ static int user_add(struct hfi1_devdata *dd) snprintf(name, sizeof(name), "%s_ui%d", class_name(), dd->unit); ret = hfi1_cdev_init(dd->unit + 
UI_OFFSET, name, &ui_file_ops, - &dd->ui_cdev, &dd->ui_device); + &dd->ui_cdev, &dd->ui_device, + false); if (ret) goto done; } diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 37269eb90c34..b2c1b72d38ce 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -1717,9 +1717,9 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data, psi->port_states.portphysstate_portstate = (hfi1_ibphys_portstate(ppd) << 4) | (lstate & 0xf); psi->link_width_downgrade_tx_active = - ppd->link_width_downgrade_tx_active; + cpu_to_be16(ppd->link_width_downgrade_tx_active); psi->link_width_downgrade_rx_active = - ppd->link_width_downgrade_rx_active; + cpu_to_be16(ppd->link_width_downgrade_rx_active); if (resp_len) *resp_len += sizeof(struct opa_port_state_info); diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c index a8c903caecce..aecd1a74741c 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/staging/rdma/hfi1/sdma.c @@ -737,7 +737,7 @@ u16 sdma_get_descq_cnt(void) */ if (!is_power_of_2(count)) return SDMA_DESCQ_CNT; - if (count < 64 && count > 32768) + if (count < 64 || count > 32768) return SDMA_DESCQ_CNT; return count; } @@ -1848,7 +1848,7 @@ static void dump_sdma_state(struct sdma_engine *sde) dd_dev_err(sde->dd, "\taidx: %u amode: %u alen: %u\n", (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK) - >> SDMA_DESC1_HEADER_INDEX_MASK), + >> SDMA_DESC1_HEADER_INDEX_SHIFT), (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK) >> SDMA_DESC1_HEADER_MODE_SHIFT), (u8)((desc[1] & SDMA_DESC1_HEADER_DWS_SMASK) @@ -1926,7 +1926,7 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde) if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n", (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK) - >> SDMA_DESC1_HEADER_INDEX_MASK), + >> SDMA_DESC1_HEADER_INDEX_SHIFT), (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK) >> SDMA_DESC1_HEADER_MODE_SHIFT)); head = (head + 1) & sde->sdma_mask; diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h index 1e613fcd8f4c..496086903891 100644 --- a/drivers/staging/rdma/hfi1/sdma.h +++ b/drivers/staging/rdma/hfi1/sdma.h @@ -109,53 +109,53 @@ /* * Bits defined in the send DMA descriptor. 
*/ -#define SDMA_DESC0_FIRST_DESC_FLAG (1ULL<<63) -#define SDMA_DESC0_LAST_DESC_FLAG (1ULL<<62) +#define SDMA_DESC0_FIRST_DESC_FLAG (1ULL << 63) +#define SDMA_DESC0_LAST_DESC_FLAG (1ULL << 62) #define SDMA_DESC0_BYTE_COUNT_SHIFT 48 #define SDMA_DESC0_BYTE_COUNT_WIDTH 14 #define SDMA_DESC0_BYTE_COUNT_MASK \ - ((1ULL<<SDMA_DESC0_BYTE_COUNT_WIDTH)-1ULL) + ((1ULL << SDMA_DESC0_BYTE_COUNT_WIDTH) - 1) #define SDMA_DESC0_BYTE_COUNT_SMASK \ - (SDMA_DESC0_BYTE_COUNT_MASK<<SDMA_DESC0_BYTE_COUNT_SHIFT) + (SDMA_DESC0_BYTE_COUNT_MASK << SDMA_DESC0_BYTE_COUNT_SHIFT) #define SDMA_DESC0_PHY_ADDR_SHIFT 0 #define SDMA_DESC0_PHY_ADDR_WIDTH 48 #define SDMA_DESC0_PHY_ADDR_MASK \ - ((1ULL<<SDMA_DESC0_PHY_ADDR_WIDTH)-1ULL) + ((1ULL << SDMA_DESC0_PHY_ADDR_WIDTH) - 1) #define SDMA_DESC0_PHY_ADDR_SMASK \ - (SDMA_DESC0_PHY_ADDR_MASK<<SDMA_DESC0_PHY_ADDR_SHIFT) + (SDMA_DESC0_PHY_ADDR_MASK << SDMA_DESC0_PHY_ADDR_SHIFT) #define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32 #define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32 #define SDMA_DESC1_HEADER_UPDATE1_MASK \ - ((1ULL<<SDMA_DESC1_HEADER_UPDATE1_WIDTH)-1ULL) + ((1ULL << SDMA_DESC1_HEADER_UPDATE1_WIDTH) - 1) #define SDMA_DESC1_HEADER_UPDATE1_SMASK \ - (SDMA_DESC1_HEADER_UPDATE1_MASK<<SDMA_DESC1_HEADER_UPDATE1_SHIFT) + (SDMA_DESC1_HEADER_UPDATE1_MASK << SDMA_DESC1_HEADER_UPDATE1_SHIFT) #define SDMA_DESC1_HEADER_MODE_SHIFT 13 #define SDMA_DESC1_HEADER_MODE_WIDTH 3 #define SDMA_DESC1_HEADER_MODE_MASK \ - ((1ULL<<SDMA_DESC1_HEADER_MODE_WIDTH)-1ULL) + ((1ULL << SDMA_DESC1_HEADER_MODE_WIDTH) - 1) #define SDMA_DESC1_HEADER_MODE_SMASK \ - (SDMA_DESC1_HEADER_MODE_MASK<<SDMA_DESC1_HEADER_MODE_SHIFT) + (SDMA_DESC1_HEADER_MODE_MASK << SDMA_DESC1_HEADER_MODE_SHIFT) #define SDMA_DESC1_HEADER_INDEX_SHIFT 8 #define SDMA_DESC1_HEADER_INDEX_WIDTH 5 #define SDMA_DESC1_HEADER_INDEX_MASK \ - ((1ULL<<SDMA_DESC1_HEADER_INDEX_WIDTH)-1ULL) + ((1ULL << SDMA_DESC1_HEADER_INDEX_WIDTH) - 1) #define SDMA_DESC1_HEADER_INDEX_SMASK \ - (SDMA_DESC1_HEADER_INDEX_MASK<<SDMA_DESC1_HEADER_INDEX_SHIFT) + (SDMA_DESC1_HEADER_INDEX_MASK << SDMA_DESC1_HEADER_INDEX_SHIFT) #define SDMA_DESC1_HEADER_DWS_SHIFT 4 #define SDMA_DESC1_HEADER_DWS_WIDTH 4 #define SDMA_DESC1_HEADER_DWS_MASK \ - ((1ULL<<SDMA_DESC1_HEADER_DWS_WIDTH)-1ULL) + ((1ULL << SDMA_DESC1_HEADER_DWS_WIDTH) - 1) #define SDMA_DESC1_HEADER_DWS_SMASK \ - (SDMA_DESC1_HEADER_DWS_MASK<<SDMA_DESC1_HEADER_DWS_SHIFT) + (SDMA_DESC1_HEADER_DWS_MASK << SDMA_DESC1_HEADER_DWS_SHIFT) #define SDMA_DESC1_GENERATION_SHIFT 2 #define SDMA_DESC1_GENERATION_WIDTH 2 #define SDMA_DESC1_GENERATION_MASK \ - ((1ULL<<SDMA_DESC1_GENERATION_WIDTH)-1ULL) + ((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1) #define SDMA_DESC1_GENERATION_SMASK \ - (SDMA_DESC1_GENERATION_MASK<<SDMA_DESC1_GENERATION_SHIFT) -#define SDMA_DESC1_INT_REQ_FLAG (1ULL<<1) -#define SDMA_DESC1_HEAD_TO_HOST_FLAG (1ULL<<0) + (SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT) +#define SDMA_DESC1_INT_REQ_FLAG (1ULL << 1) +#define SDMA_DESC1_HEAD_TO_HOST_FLAG (1ULL << 0) enum sdma_states { sdma_state_s00_hw_down, diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 53ac21431542..41bb59eb001c 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -749,11 +749,13 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, struct verbs_txreq *tx; tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC); - if (!tx) + if (!tx) { /* call slow path to get the lock */ tx = __get_txreq(dev, qp); - if (tx) - tx->qp = qp; + if (IS_ERR(tx)) + return tx; + } + 
tx->qp = qp; return tx; } diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c index 4299cf45f947..5e1f16c36b49 100644 --- a/drivers/staging/speakup/fakekey.c +++ b/drivers/staging/speakup/fakekey.c @@ -81,6 +81,7 @@ void speakup_fake_down_arrow(void) __this_cpu_write(reporting_keystroke, true); input_report_key(virt_keyboard, KEY_DOWN, PRESSED); input_report_key(virt_keyboard, KEY_DOWN, RELEASED); + input_sync(virt_keyboard); __this_cpu_write(reporting_keystroke, false); /* reenable preemption */ diff --git a/drivers/staging/unisys/visorbus/Makefile b/drivers/staging/unisys/visorbus/Makefile index fa27ee5f336c..fc790e7592fc 100644 --- a/drivers/staging/unisys/visorbus/Makefile +++ b/drivers/staging/unisys/visorbus/Makefile @@ -10,4 +10,3 @@ visorbus-y += visorchipset.o visorbus-y += periodic_work.o ccflags-y += -Idrivers/staging/unisys/include -ccflags-y += -Idrivers/staging/unisys/visorutil diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c index 2309f5f2b238..a272b48bab28 100644 --- a/drivers/staging/unisys/visorbus/visorbus_main.c +++ b/drivers/staging/unisys/visorbus/visorbus_main.c @@ -37,6 +37,8 @@ static int visorbus_debugref; #define POLLJIFFIES_TESTWORK 100 #define POLLJIFFIES_NORMALCHANNEL 10 +static int busreg_rc = -ENODEV; /* stores the result from bus registration */ + static int visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env); static int visorbus_match(struct device *xdev, struct device_driver *xdrv); static void fix_vbus_dev_info(struct visor_device *visordev); @@ -863,6 +865,9 @@ int visorbus_register_visor_driver(struct visor_driver *drv) { int rc = 0; + if (busreg_rc < 0) + return -ENODEV; /*can't register on a nonexistent bus*/ + drv->driver.name = drv->name; drv->driver.bus = &visorbus_type; drv->driver.probe = visordriver_probe_device; @@ -885,6 +890,8 @@ int visorbus_register_visor_driver(struct visor_driver *drv) if (rc < 0) return rc; rc = register_driver_attributes(drv); + if (rc < 0) + driver_unregister(&drv->driver); return rc; } EXPORT_SYMBOL_GPL(visorbus_register_visor_driver); @@ -1260,10 +1267,8 @@ remove_bus_instance(struct visor_device *dev) static int create_bus_type(void) { - int rc = 0; - - rc = bus_register(&visorbus_type); - return rc; + busreg_rc = bus_register(&visorbus_type); + return busreg_rc; } /** Remove the one-and-only one instance of the visor bus type (visorbus_type). 
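The visorbus_main.c change above stores the bus_register() result in busreg_rc so that visorbus_register_visor_driver() can refuse to register a driver against a bus that never came up. A minimal, self-contained sketch of that guard pattern, using hypothetical mybus_* names rather than the real visorbus interfaces:

	/*
	 * Sketch of the "remember whether the bus registered" guard;
	 * the mybus_* names are hypothetical, not part of visorbus.
	 */
	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/module.h>

	static int mybus_reg_rc = -ENODEV;	/* result of bus_register() */

	static struct bus_type mybus_type = {
		.name = "mybus",
	};

	static int __init mybus_init(void)
	{
		mybus_reg_rc = bus_register(&mybus_type);
		return mybus_reg_rc;
	}
	module_init(mybus_init);

	int mybus_register_driver(struct device_driver *drv)
	{
		if (mybus_reg_rc < 0)
			return -ENODEV;	/* can't register on a nonexistent bus */

		drv->bus = &mybus_type;
		return driver_register(drv);
	}

	MODULE_LICENSE("GPL");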
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c index 8c9da7ea7845..9d3c1e282062 100644 --- a/drivers/staging/unisys/visornic/visornic_main.c +++ b/drivers/staging/unisys/visornic/visornic_main.c @@ -1189,16 +1189,16 @@ visornic_rx(struct uiscmdrsp *cmdrsp) spin_lock_irqsave(&devdata->priv_lock, flags); atomic_dec(&devdata->num_rcvbuf_in_iovm); - /* update rcv stats - call it with priv_lock held */ - devdata->net_stats.rx_packets++; - devdata->net_stats.rx_bytes = skb->len; - /* set length to how much was ACTUALLY received - * NOTE: rcv_done_len includes actual length of data rcvd * including ethhdr */ skb->len = cmdrsp->net.rcv.rcv_done_len; + /* update rcv stats - call it with priv_lock held */ + devdata->net_stats.rx_packets++; + devdata->net_stats.rx_bytes += skb->len; + /* test enabled while holding lock */ if (!(devdata->enabled && devdata->enab_dis_acked)) { /* don't process it unless we're in enable mode and until @@ -1924,13 +1924,16 @@ static int visornic_probe(struct visor_device *dev) "%s debugfs_create_dir %s failed\n", __func__, netdev->name); err = -ENOMEM; - goto cleanup_xmit_cmdrsp; + goto cleanup_register_netdev; } dev_info(&dev->device, "%s success netdev=%s\n", __func__, netdev->name); return 0; +cleanup_register_netdev: + unregister_netdev(netdev); + cleanup_napi_add: del_timer_sync(&devdata->irq_poll_timer); netif_napi_del(&devdata->napi); @@ -2128,8 +2131,9 @@ static int visornic_init(void) if (!dev_num_pool) goto cleanup_workqueue; - visorbus_register_visor_driver(&visornic_driver); - return 0; + err = visorbus_register_visor_driver(&visornic_driver); + if (!err) + return 0; cleanup_workqueue: if (visornic_timeout_reset_workqueue) { diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index e8a52f7d6204..51d1734d5390 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -407,6 +407,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr) TYPERANGE_UTF8, USE_INITIAL_ONLY); if (!param) goto out; + /* * Extra parameters for ISER from RFC-5046 */ @@ -496,9 +497,9 @@ int iscsi_set_keys_to_negotiate( } else if (!strcmp(param->name, SESSIONTYPE)) { SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, IFMARKER)) { - SET_PSTATE_NEGOTIATE(param); + SET_PSTATE_REJECT(param); } else if (!strcmp(param->name, OFMARKER)) { - SET_PSTATE_NEGOTIATE(param); + SET_PSTATE_REJECT(param); } else if (!strcmp(param->name, IFMARKINT)) { SET_PSTATE_REJECT(param); } else if (!strcmp(param->name, OFMARKINT)) { diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index dcc424ac35d4..88ea4e4f124b 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -62,22 +62,13 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) struct se_session *se_sess = se_cmd->se_sess; struct se_node_acl *nacl = se_sess->se_node_acl; struct se_dev_entry *deve; + sense_reason_t ret = TCM_NO_SENSE; rcu_read_lock(); deve = target_nacl_find_deve(nacl, unpacked_lun); if (deve) { atomic_long_inc(&deve->total_cmds); - if ((se_cmd->data_direction == DMA_TO_DEVICE) && - (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { - pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" - " Access for 0x%08llx\n", - se_cmd->se_tfo->get_fabric_name(), - unpacked_lun); - rcu_read_unlock(); - return TCM_WRITE_PROTECTED; - } - if 
(se_cmd->data_direction == DMA_TO_DEVICE) atomic_long_add(se_cmd->data_length, &deve->write_bytes); @@ -93,6 +84,17 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) percpu_ref_get(&se_lun->lun_ref); se_cmd->lun_ref_active = true; + + if ((se_cmd->data_direction == DMA_TO_DEVICE) && + (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { + pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" + " Access for 0x%08llx\n", + se_cmd->se_tfo->get_fabric_name(), + unpacked_lun); + rcu_read_unlock(); + ret = TCM_WRITE_PROTECTED; + goto ref_dev; + } } rcu_read_unlock(); @@ -109,12 +111,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) unpacked_lun); return TCM_NON_EXISTENT_LUN; } - /* - * Force WRITE PROTECT for virtual LUN 0 - */ - if ((se_cmd->data_direction != DMA_FROM_DEVICE) && - (se_cmd->data_direction != DMA_NONE)) - return TCM_WRITE_PROTECTED; se_lun = se_sess->se_tpg->tpg_virt_lun0; se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0; @@ -123,6 +119,15 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) percpu_ref_get(&se_lun->lun_ref); se_cmd->lun_ref_active = true; + + /* + * Force WRITE PROTECT for virtual LUN 0 + */ + if ((se_cmd->data_direction != DMA_FROM_DEVICE) && + (se_cmd->data_direction != DMA_NONE)) { + ret = TCM_WRITE_PROTECTED; + goto ref_dev; + } } /* * RCU reference protected by percpu se_lun->lun_ref taken above that @@ -130,6 +135,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) * pointer can be kfree_rcu() by the final se_lun->lun_group put via * target_core_fabric_configfs.c:target_fabric_port_release */ +ref_dev: se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev); atomic_long_inc(&se_cmd->se_dev->num_cmds); @@ -140,7 +146,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) atomic_long_add(se_cmd->data_length, &se_cmd->se_dev->read_bytes); - return 0; + return ret; } EXPORT_SYMBOL(transport_lookup_cmd_lun); @@ -427,8 +433,6 @@ void core_disable_device_list_for_node( hlist_del_rcu(&orig->link); clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags); - rcu_assign_pointer(orig->se_lun, NULL); - rcu_assign_pointer(orig->se_lun_acl, NULL); orig->lun_flags = 0; orig->creation_time = 0; orig->attach_count--; @@ -439,6 +443,9 @@ void core_disable_device_list_for_node( kref_put(&orig->pr_kref, target_pr_kref_release); wait_for_completion(&orig->pr_comp); + rcu_assign_pointer(orig->se_lun, NULL); + rcu_assign_pointer(orig->se_lun_acl, NULL); + kfree_rcu(orig, rcu_head); core_scsi3_free_pr_reg_from_nacl(dev, nacl); diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c index 9522960c7fdd..22390e0e046c 100644 --- a/drivers/target/target_core_hba.c +++ b/drivers/target/target_core_hba.c @@ -187,5 +187,5 @@ core_delete_hba(struct se_hba *hba) bool target_sense_desc_format(struct se_device *dev) { - return dev->transport->get_blocks(dev) > U32_MAX; + return (dev) ? 
dev->transport->get_blocks(dev) > U32_MAX : false; } diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 5a9982f5d5d6..0f19e11acac2 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -105,6 +105,8 @@ static int iblock_configure_device(struct se_device *dev) mode = FMODE_READ|FMODE_EXCL; if (!ib_dev->ibd_readonly) mode |= FMODE_WRITE; + else + dev->dev_flags |= DF_READ_ONLY; bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev); if (IS_ERR(bd)) { diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 5ab7100de17e..e7933115087a 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -618,7 +618,7 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration( struct se_device *dev, struct se_node_acl *nacl, struct se_lun *lun, - struct se_dev_entry *deve, + struct se_dev_entry *dest_deve, u64 mapped_lun, unsigned char *isid, u64 sa_res_key, @@ -640,7 +640,29 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration( INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list); atomic_set(&pr_reg->pr_res_holders, 0); pr_reg->pr_reg_nacl = nacl; - pr_reg->pr_reg_deve = deve; + /* + * For destination registrations for ALL_TG_PT=1 and SPEC_I_PT=1, + * the se_dev_entry->pr_ref will have been already obtained by + * core_get_se_deve_from_rtpi() or __core_scsi3_alloc_registration(). + * + * Otherwise, locate se_dev_entry now and obtain a reference until + * registration completes in __core_scsi3_add_registration(). + */ + if (dest_deve) { + pr_reg->pr_reg_deve = dest_deve; + } else { + rcu_read_lock(); + pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun); + if (!pr_reg->pr_reg_deve) { + rcu_read_unlock(); + pr_err("Unable to locate PR deve %s mapped_lun: %llu\n", + nacl->initiatorname, mapped_lun); + kmem_cache_free(t10_pr_reg_cache, pr_reg); + return NULL; + } + kref_get(&pr_reg->pr_reg_deve->pr_kref); + rcu_read_unlock(); + } pr_reg->pr_res_mapped_lun = mapped_lun; pr_reg->pr_aptpl_target_lun = lun->unpacked_lun; pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi; @@ -936,17 +958,29 @@ static int __core_scsi3_check_aptpl_registration( !(strcmp(pr_reg->pr_tport, t_port)) && (pr_reg->pr_reg_tpgt == tpgt) && (pr_reg->pr_aptpl_target_lun == target_lun)) { + /* + * Obtain the ->pr_reg_deve pointer + reference, that + * is released by __core_scsi3_add_registration() below. + */ + rcu_read_lock(); + pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun); + if (!pr_reg->pr_reg_deve) { + pr_err("Unable to locate PR APTPL %s mapped_lun:" + " %llu\n", nacl->initiatorname, mapped_lun); + rcu_read_unlock(); + continue; + } + kref_get(&pr_reg->pr_reg_deve->pr_kref); + rcu_read_unlock(); pr_reg->pr_reg_nacl = nacl; pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi; - list_del(&pr_reg->pr_reg_aptpl_list); spin_unlock(&pr_tmpl->aptpl_reg_lock); /* * At this point all of the pointers in *pr_reg will * be setup, so go ahead and add the registration. */ - __core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0); /* * If this registration is the reservation holder, @@ -1044,18 +1078,11 @@ static void __core_scsi3_add_registration( __core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type); spin_unlock(&pr_tmpl->registration_lock); - - rcu_read_lock(); - deve = pr_reg->pr_reg_deve; - if (deve) - set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags); - rcu_read_unlock(); - /* * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE. 
*/ if (!pr_reg->pr_reg_all_tg_pt || register_move) - return; + goto out; /* * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1 * allocated in __core_scsi3_alloc_registration() @@ -1075,19 +1102,31 @@ static void __core_scsi3_add_registration( __core_scsi3_dump_registration(tfo, dev, nacl_tmp, pr_reg_tmp, register_type); spin_unlock(&pr_tmpl->registration_lock); - + /* + * Drop configfs group dependency reference and deve->pr_kref + * obtained from __core_scsi3_alloc_registration() code. + */ rcu_read_lock(); deve = pr_reg_tmp->pr_reg_deve; - if (deve) + if (deve) { set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags); + core_scsi3_lunacl_undepend_item(deve); + pr_reg_tmp->pr_reg_deve = NULL; + } rcu_read_unlock(); - - /* - * Drop configfs group dependency reference from - * __core_scsi3_alloc_registration() - */ - core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve); } +out: + /* + * Drop deve->pr_kref obtained in __core_scsi3_do_alloc_registration() + */ + rcu_read_lock(); + deve = pr_reg->pr_reg_deve; + if (deve) { + set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags); + kref_put(&deve->pr_kref, target_pr_kref_release); + pr_reg->pr_reg_deve = NULL; + } + rcu_read_unlock(); } static int core_scsi3_alloc_registration( @@ -1785,9 +1824,11 @@ core_scsi3_decode_spec_i_port( dest_node_acl->initiatorname, i_buf, (dest_se_deve) ? dest_se_deve->mapped_lun : 0); - if (!dest_se_deve) + if (!dest_se_deve) { + kref_put(&local_pr_reg->pr_reg_deve->pr_kref, + target_pr_kref_release); continue; - + } core_scsi3_lunacl_undepend_item(dest_se_deve); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); @@ -1823,9 +1864,11 @@ out: kmem_cache_free(t10_pr_reg_cache, dest_pr_reg); - if (!dest_se_deve) + if (!dest_se_deve) { + kref_put(&local_pr_reg->pr_reg_deve->pr_kref, + target_pr_kref_release); continue; - + } core_scsi3_lunacl_undepend_item(dest_se_deve); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 2d0381dd105c..5fb9dd7f08bb 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -668,7 +668,10 @@ int core_tpg_add_lun( list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list); spin_unlock(&dev->se_port_lock); - lun->lun_access = lun_access; + if (dev->dev_flags & DF_READ_ONLY) + lun->lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; + else + lun->lun_access = lun_access; if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist); mutex_unlock(&tpg->tpg_lun_mutex); diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 039004400987..5aabc4bc0d75 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -163,7 +163,7 @@ config THERMAL_EMULATION config HISI_THERMAL tristate "Hisilicon thermal driver" - depends on ARCH_HISI && CPU_THERMAL && OF + depends on (ARCH_HISI && CPU_THERMAL && OF) || COMPILE_TEST help Enable this to plug hisilicon's thermal sensor driver into the Linux thermal framework. 
cpufreq is used as the cooling device to throttle @@ -182,7 +182,7 @@ config IMX_THERMAL config SPEAR_THERMAL bool "SPEAr thermal sensor driver" - depends on PLAT_SPEAR + depends on PLAT_SPEAR || COMPILE_TEST depends on OF help Enable this to plug the SPEAr thermal sensor driver into the Linux @@ -190,7 +190,7 @@ config SPEAR_THERMAL config ROCKCHIP_THERMAL tristate "Rockchip thermal driver" - depends on ARCH_ROCKCHIP + depends on ARCH_ROCKCHIP || COMPILE_TEST depends on RESET_CONTROLLER help Rockchip thermal driver provides support for Temperature sensor @@ -208,7 +208,7 @@ config RCAR_THERMAL config KIRKWOOD_THERMAL tristate "Temperature sensor on Marvell Kirkwood SoCs" - depends on MACH_KIRKWOOD + depends on MACH_KIRKWOOD || COMPILE_TEST depends on OF help Support for the Kirkwood thermal sensor driver into the Linux thermal @@ -216,7 +216,7 @@ config KIRKWOOD_THERMAL config DOVE_THERMAL tristate "Temperature sensor on Marvell Dove SoCs" - depends on ARCH_DOVE || MACH_DOVE + depends on ARCH_DOVE || MACH_DOVE || COMPILE_TEST depends on OF help Support for the Dove thermal sensor driver in the Linux thermal @@ -234,7 +234,7 @@ config DB8500_THERMAL config ARMADA_THERMAL tristate "Armada 370/XP thermal management" - depends on ARCH_MVEBU + depends on ARCH_MVEBU || COMPILE_TEST depends on OF help Enable this option if you want to have support for thermal management @@ -349,11 +349,12 @@ config INTEL_PCH_THERMAL programmable trip points and other information. menu "Texas Instruments thermal drivers" +depends on ARCH_HAS_BANDGAP || COMPILE_TEST source "drivers/thermal/ti-soc-thermal/Kconfig" endmenu menu "Samsung thermal drivers" -depends on ARCH_EXYNOS +depends on ARCH_EXYNOS || COMPILE_TEST source "drivers/thermal/samsung/Kconfig" endmenu @@ -364,7 +365,7 @@ endmenu config QCOM_SPMI_TEMP_ALARM tristate "Qualcomm SPMI PMIC Temperature Alarm" - depends on OF && SPMI && IIO + depends on OF && (SPMI || COMPILE_TEST) && IIO select REGMAP_SPMI help This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP) diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 620dcd405ff6..42c6f71bdcc1 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -262,7 +262,9 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb, * efficiently. Power is stored in mW, frequency in KHz. The * resulting table is in ascending order. * - * Return: 0 on success, -E* on error. + * Return: 0 on success, -EINVAL if there are no OPPs for any CPUs, + * -ENOMEM if we run out of memory or -EAGAIN if an OPP was + * added/enabled while the function was executing. 
*/ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device, u32 capacitance) @@ -273,8 +275,6 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device, int num_opps = 0, cpu, i, ret = 0; unsigned long freq; - rcu_read_lock(); - for_each_cpu(cpu, &cpufreq_device->allowed_cpus) { dev = get_cpu_device(cpu); if (!dev) { @@ -284,24 +284,20 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device, } num_opps = dev_pm_opp_get_opp_count(dev); - if (num_opps > 0) { + if (num_opps > 0) break; - } else if (num_opps < 0) { - ret = num_opps; - goto unlock; - } + else if (num_opps < 0) + return num_opps; } - if (num_opps == 0) { - ret = -EINVAL; - goto unlock; - } + if (num_opps == 0) + return -EINVAL; power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL); - if (!power_table) { - ret = -ENOMEM; - goto unlock; - } + if (!power_table) + return -ENOMEM; + + rcu_read_lock(); for (freq = 0, i = 0; opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp); @@ -309,6 +305,12 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device, u32 freq_mhz, voltage_mv; u64 power; + if (i >= num_opps) { + rcu_read_unlock(); + ret = -EAGAIN; + goto free_power_table; + } + freq_mhz = freq / 1000000; voltage_mv = dev_pm_opp_get_voltage(opp) / 1000; @@ -326,17 +328,22 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device, power_table[i].power = power; } - if (i == 0) { + rcu_read_unlock(); + + if (i != num_opps) { ret = PTR_ERR(opp); - goto unlock; + goto free_power_table; } cpufreq_device->cpu_dev = dev; cpufreq_device->dyn_power_table = power_table; cpufreq_device->dyn_power_table_entries = i; -unlock: - rcu_read_unlock(); + return 0; + +free_power_table: + kfree(power_table); + return ret; } @@ -847,7 +854,7 @@ __cpufreq_cooling_register(struct device_node *np, ret = get_idr(&cpufreq_idr, &cpufreq_dev->id); if (ret) { cool_dev = ERR_PTR(ret); - goto free_table; + goto free_power_table; } snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", @@ -889,6 +896,8 @@ __cpufreq_cooling_register(struct device_node *np, remove_idr: release_idr(&cpufreq_idr, cpufreq_dev->id); +free_power_table: + kfree(cpufreq_dev->dyn_power_table); free_table: kfree(cpufreq_dev->freq_table); free_time_in_idle_timestamp: @@ -1039,6 +1048,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) thermal_cooling_device_unregister(cpufreq_dev->cool_dev); release_idr(&cpufreq_idr, cpufreq_dev->id); + kfree(cpufreq_dev->dyn_power_table); kfree(cpufreq_dev->time_in_idle_timestamp); kfree(cpufreq_dev->time_in_idle); kfree(cpufreq_dev->freq_table); diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c index 607b62c7e611..e58bd0b658b5 100644 --- a/drivers/thermal/db8500_cpufreq_cooling.c +++ b/drivers/thermal/db8500_cpufreq_cooling.c @@ -72,6 +72,7 @@ static const struct of_device_id db8500_cpufreq_cooling_match[] = { { .compatible = "stericsson,db8500-cpufreq-cooling" }, {}, }; +MODULE_DEVICE_TABLE(of, db8500_cpufreq_cooling_match); #endif static struct platform_driver db8500_cpufreq_cooling_driver = { diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c index 9c8a7aad0252..e570ff084add 100644 --- a/drivers/thermal/power_allocator.c +++ b/drivers/thermal/power_allocator.c @@ -24,6 +24,8 @@ #include "thermal_core.h" +#define INVALID_TRIP -1 + #define FRAC_BITS 10 #define int_to_frac(x) ((x) << FRAC_BITS) #define frac_to_int(x) ((x) 
>> FRAC_BITS) @@ -56,16 +58,21 @@ static inline s64 div_frac(s64 x, s64 y) /** * struct power_allocator_params - parameters for the power allocator governor + * @allocated_tzp: whether we have allocated tzp for this thermal zone and + * it needs to be freed on unbind * @err_integral: accumulated error in the PID controller. * @prev_err: error in the previous iteration of the PID controller. * Used to calculate the derivative term. * @trip_switch_on: first passive trip point of the thermal zone. The * governor switches on when this trip point is crossed. + * If the thermal zone only has one passive trip point, + * @trip_switch_on should be INVALID_TRIP. * @trip_max_desired_temperature: last passive trip point of the thermal * zone. The temperature we are * controlling for. */ struct power_allocator_params { + bool allocated_tzp; s64 err_integral; s32 prev_err; int trip_switch_on; @@ -73,6 +80,98 @@ struct power_allocator_params { }; /** + * estimate_sustainable_power() - Estimate the sustainable power of a thermal zone + * @tz: thermal zone we are operating in + * + * For thermal zones that don't provide a sustainable_power in their + * thermal_zone_params, estimate one. Calculate it using the minimum + * power of all the cooling devices as that gives a valid value that + * can give some degree of functionality. For optimal performance of + * this governor, provide a sustainable_power in the thermal zone's + * thermal_zone_params. + */ +static u32 estimate_sustainable_power(struct thermal_zone_device *tz) +{ + u32 sustainable_power = 0; + struct thermal_instance *instance; + struct power_allocator_params *params = tz->governor_data; + + list_for_each_entry(instance, &tz->thermal_instances, tz_node) { + struct thermal_cooling_device *cdev = instance->cdev; + u32 min_power; + + if (instance->trip != params->trip_max_desired_temperature) + continue; + + if (power_actor_get_min_power(cdev, tz, &min_power)) + continue; + + sustainable_power += min_power; + } + + return sustainable_power; +} + +/** + * estimate_pid_constants() - Estimate the constants for the PID controller + * @tz: thermal zone for which to estimate the constants + * @sustainable_power: sustainable power for the thermal zone + * @trip_switch_on: trip point number for the switch on temperature + * @control_temp: target temperature for the power allocator governor + * @force: whether to force the update of the constants + * + * This function is used to update the estimation of the PID + * controller constants in struct thermal_zone_parameters. + * Sustainable power is provided in case it was estimated. The + * estimated sustainable_power should not be stored in the + * thermal_zone_parameters so it has to be passed explicitly to this + * function. + * + * If @force is not set, the values in the thermal zone's parameters + * are preserved if they are not zero. If @force is set, the values + * in thermal zone's parameters are overwritten. + */ +static void estimate_pid_constants(struct thermal_zone_device *tz, + u32 sustainable_power, int trip_switch_on, + int control_temp, bool force) +{ + int ret; + int switch_on_temp; + u32 temperature_threshold; + + ret = tz->ops->get_trip_temp(tz, trip_switch_on, &switch_on_temp); + if (ret) + switch_on_temp = 0; + + temperature_threshold = control_temp - switch_on_temp; + /* + * estimate_pid_constants() tries to find appropriate default + * values for thermal zones that don't provide them. 
If a + * system integrator has configured a thermal zone with two + * passive trip points at the same temperature, that person + * hasn't put any effort to set up the thermal zone properly + * so just give up. + */ + if (!temperature_threshold) + return; + + if (!tz->tzp->k_po || force) + tz->tzp->k_po = int_to_frac(sustainable_power) / + temperature_threshold; + + if (!tz->tzp->k_pu || force) + tz->tzp->k_pu = int_to_frac(2 * sustainable_power) / + temperature_threshold; + + if (!tz->tzp->k_i || force) + tz->tzp->k_i = int_to_frac(10) / 1000; + /* + * The default for k_d and integral_cutoff is 0, so we can + * leave them as they are. + */ +} + +/** * pid_controller() - PID controller * @tz: thermal zone we are operating in * @current_temp: the current temperature in millicelsius @@ -98,10 +197,20 @@ static u32 pid_controller(struct thermal_zone_device *tz, { s64 p, i, d, power_range; s32 err, max_power_frac; + u32 sustainable_power; struct power_allocator_params *params = tz->governor_data; max_power_frac = int_to_frac(max_allocatable_power); + if (tz->tzp->sustainable_power) { + sustainable_power = tz->tzp->sustainable_power; + } else { + sustainable_power = estimate_sustainable_power(tz); + estimate_pid_constants(tz, sustainable_power, + params->trip_switch_on, control_temp, + true); + } + err = control_temp - current_temp; err = int_to_frac(err); @@ -139,7 +248,7 @@ static u32 pid_controller(struct thermal_zone_device *tz, power_range = p + i + d; /* feed-forward the known sustainable dissipatable power */ - power_range = tz->tzp->sustainable_power + frac_to_int(power_range); + power_range = sustainable_power + frac_to_int(power_range); power_range = clamp(power_range, (s64)0, (s64)max_allocatable_power); @@ -247,6 +356,11 @@ static int allocate_power(struct thermal_zone_device *tz, } } + if (!num_actors) { + ret = -ENODEV; + goto unlock; + } + /* * We need to allocate five arrays of the same size: * req_power, max_power, granted_power, extra_actor_power and @@ -340,43 +454,66 @@ unlock: return ret; } -static int get_governor_trips(struct thermal_zone_device *tz, - struct power_allocator_params *params) +/** + * get_governor_trips() - get the number of the two trip points that are key for this governor + * @tz: thermal zone to operate on + * @params: pointer to private data for this governor + * + * The power allocator governor works optimally with two trips points: + * a "switch on" trip point and a "maximum desired temperature". These + * are defined as the first and last passive trip points. + * + * If there is only one trip point, then that's considered to be the + * "maximum desired temperature" trip point and the governor is always + * on. If there are no passive or active trip points, then the + * governor won't do anything. In fact, its throttle function + * won't be called at all. 
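To make the arithmetic in estimate_pid_constants() and pid_controller() above concrete, here is a standalone sketch of the same 10-bit fixed-point scheme: the proportional gains default to sustainable_power (and twice it) divided by the gap between the switch-on and control temperatures, and the PID output is added as a correction on top of the sustainable power before clamping. The zone figures, the 6000 mW ceiling and the omission of the derivative term and integral cutoff are simplifications for illustration, not values from the patch.

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 10
#define int_to_frac(x) ((int64_t)(x) << FRAC_BITS)
#define frac_to_int(x) ((x) >> FRAC_BITS)

static int64_t mul_frac(int64_t x, int64_t y)
{
	return (x * y) >> FRAC_BITS;
}

static int64_t clamp_s64(int64_t v, int64_t lo, int64_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	/* assumed zone figures: 3.5 W sustainable, switch-on 75 C, control 85 C */
	int64_t sustainable_power = 3500;                 /* mW */
	int switch_on_temp = 75000, control_temp = 85000; /* millicelsius */
	int current_temp = 88000;
	int64_t threshold = control_temp - switch_on_temp;

	/* defaults as in estimate_pid_constants() */
	int64_t k_po = int_to_frac(sustainable_power) / threshold;
	int64_t k_pu = int_to_frac(2 * sustainable_power) / threshold;
	int64_t k_i  = int_to_frac(10) / 1000;

	/* one pid_controller() step (derivative term and integral cutoff omitted) */
	int64_t err = int_to_frac(control_temp - current_temp);
	int64_t p = mul_frac(err < 0 ? k_po : k_pu, err);
	int64_t err_integral = err;                       /* first iteration */
	int64_t i = mul_frac(k_i, err_integral);

	/* feed-forward the sustainable power, then clamp to what the cdevs offer */
	int64_t max_allocatable_power = 6000;             /* mW, assumed */
	int64_t power = sustainable_power + frac_to_int(p + i);

	power = clamp_s64(power, 0, max_allocatable_power);
	printf("granted power: %lld mW\n", (long long)power);
	return 0;
}

With the figures above the zone is 3 C over the control temperature, so the negative proportional and integral terms pull the grant below the sustainable 3500 mW.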
+ */ +static void get_governor_trips(struct thermal_zone_device *tz, + struct power_allocator_params *params) { - int i, ret, last_passive; + int i, last_active, last_passive; bool found_first_passive; found_first_passive = false; - last_passive = -1; - ret = -EINVAL; + last_active = INVALID_TRIP; + last_passive = INVALID_TRIP; for (i = 0; i < tz->trips; i++) { enum thermal_trip_type type; + int ret; ret = tz->ops->get_trip_type(tz, i, &type); - if (ret) - return ret; + if (ret) { + dev_warn(&tz->device, + "Failed to get trip point %d type: %d\n", i, + ret); + continue; + } - if (!found_first_passive) { - if (type == THERMAL_TRIP_PASSIVE) { + if (type == THERMAL_TRIP_PASSIVE) { + if (!found_first_passive) { params->trip_switch_on = i; found_first_passive = true; + } else { + last_passive = i; } - } else if (type == THERMAL_TRIP_PASSIVE) { - last_passive = i; + } else if (type == THERMAL_TRIP_ACTIVE) { + last_active = i; } else { break; } } - if (last_passive != -1) { + if (last_passive != INVALID_TRIP) { params->trip_max_desired_temperature = last_passive; - ret = 0; + } else if (found_first_passive) { + params->trip_max_desired_temperature = params->trip_switch_on; + params->trip_switch_on = INVALID_TRIP; } else { - ret = -EINVAL; + params->trip_switch_on = INVALID_TRIP; + params->trip_max_desired_temperature = last_active; } - - return ret; } static void reset_pid_controller(struct power_allocator_params *params) @@ -405,60 +542,45 @@ static void allow_maximum_power(struct thermal_zone_device *tz) * power_allocator_bind() - bind the power_allocator governor to a thermal zone * @tz: thermal zone to bind it to * - * Check that the thermal zone is valid for this governor, that is, it - * has two thermal trips. If so, initialize the PID controller - * parameters and bind it to the thermal zone. + * Initialize the PID controller parameters and bind it to the thermal + * zone. * - * Return: 0 on success, -EINVAL if the trips were invalid or -ENOMEM - * if we ran out of memory. + * Return: 0 on success, or -ENOMEM if we ran out of memory. 
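The rewritten get_governor_trips() above tolerates zones with fewer trip points. A standalone sketch of the same selection rules over a plain array of trip types (the enum values and the example zone are invented): the first passive trip becomes the "switch on" trip and the last passive the control trip; with a single passive trip the governor is always on; with no passive trips it falls back to the last active one.

#include <stdio.h>

#define INVALID_TRIP -1

enum trip_type { TRIP_ACTIVE, TRIP_PASSIVE, TRIP_HOT, TRIP_CRITICAL };

static void pick_governor_trips(const enum trip_type *trips, int n,
				int *switch_on, int *max_desired)
{
	int i, first_passive = INVALID_TRIP;
	int last_passive = INVALID_TRIP, last_active = INVALID_TRIP;

	for (i = 0; i < n; i++) {
		if (trips[i] == TRIP_PASSIVE) {
			if (first_passive == INVALID_TRIP)
				first_passive = i;
			else
				last_passive = i;
		} else if (trips[i] == TRIP_ACTIVE) {
			last_active = i;
		} else {
			break;                  /* HOT/CRITICAL end the scan */
		}
	}

	if (last_passive != INVALID_TRIP) {
		*switch_on = first_passive;
		*max_desired = last_passive;
	} else if (first_passive != INVALID_TRIP) {
		/* one passive trip: always on, control for that trip */
		*switch_on = INVALID_TRIP;
		*max_desired = first_passive;
	} else {
		*switch_on = INVALID_TRIP;
		*max_desired = last_active;
	}
}

int main(void)
{
	enum trip_type zone[] = { TRIP_ACTIVE, TRIP_PASSIVE, TRIP_PASSIVE, TRIP_CRITICAL };
	int switch_on, max_desired;

	pick_governor_trips(zone, 4, &switch_on, &max_desired);
	printf("switch_on=%d max_desired=%d\n", switch_on, max_desired); /* 1 and 2 */
	return 0;
}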
*/ static int power_allocator_bind(struct thermal_zone_device *tz) { int ret; struct power_allocator_params *params; - int switch_on_temp, control_temp; - u32 temperature_threshold; - - if (!tz->tzp || !tz->tzp->sustainable_power) { - dev_err(&tz->device, - "power_allocator: missing sustainable_power\n"); - return -EINVAL; - } + int control_temp; params = kzalloc(sizeof(*params), GFP_KERNEL); if (!params) return -ENOMEM; - ret = get_governor_trips(tz, params); - if (ret) { - dev_err(&tz->device, - "thermal zone %s has wrong trip setup for power allocator\n", - tz->type); - goto free; - } + if (!tz->tzp) { + tz->tzp = kzalloc(sizeof(*tz->tzp), GFP_KERNEL); + if (!tz->tzp) { + ret = -ENOMEM; + goto free_params; + } - ret = tz->ops->get_trip_temp(tz, params->trip_switch_on, - &switch_on_temp); - if (ret) - goto free; + params->allocated_tzp = true; + } - ret = tz->ops->get_trip_temp(tz, params->trip_max_desired_temperature, - &control_temp); - if (ret) - goto free; + if (!tz->tzp->sustainable_power) + dev_warn(&tz->device, "power_allocator: sustainable_power will be estimated\n"); - temperature_threshold = control_temp - switch_on_temp; + get_governor_trips(tz, params); - tz->tzp->k_po = tz->tzp->k_po ?: - int_to_frac(tz->tzp->sustainable_power) / temperature_threshold; - tz->tzp->k_pu = tz->tzp->k_pu ?: - int_to_frac(2 * tz->tzp->sustainable_power) / - temperature_threshold; - tz->tzp->k_i = tz->tzp->k_i ?: int_to_frac(10) / 1000; - /* - * The default for k_d and integral_cutoff is 0, so we can - * leave them as they are. - */ + if (tz->trips > 0) { + ret = tz->ops->get_trip_temp(tz, + params->trip_max_desired_temperature, + &control_temp); + if (!ret) + estimate_pid_constants(tz, tz->tzp->sustainable_power, + params->trip_switch_on, + control_temp, false); + } reset_pid_controller(params); @@ -466,14 +588,23 @@ static int power_allocator_bind(struct thermal_zone_device *tz) return 0; -free: +free_params: kfree(params); + return ret; } static void power_allocator_unbind(struct thermal_zone_device *tz) { + struct power_allocator_params *params = tz->governor_data; + dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id); + + if (params->allocated_tzp) { + kfree(tz->tzp); + tz->tzp = NULL; + } + kfree(tz->governor_data); tz->governor_data = NULL; } @@ -499,13 +630,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip) ret = tz->ops->get_trip_temp(tz, params->trip_switch_on, &switch_on_temp); - if (ret) { - dev_warn(&tz->device, - "Failed to get switch on temperature: %d\n", ret); - return ret; - } - - if (current_temp < switch_on_temp) { + if (!ret && (current_temp < switch_on_temp)) { tz->passive = 0; reset_pid_controller(params); allow_maximum_power(tz); diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index 0bae8cc6c23a..ca920b0ecf8f 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -932,7 +932,7 @@ static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data, if (data->soc == SOC_ARCH_EXYNOS5260) emul_con = EXYNOS5260_EMUL_CON; - if (data->soc == SOC_ARCH_EXYNOS5433) + else if (data->soc == SOC_ARCH_EXYNOS5433) emul_con = EXYNOS5433_TMU_EMUL_CON; else if (data->soc == SOC_ARCH_EXYNOS7) emul_con = EXYNOS7_TMU_REG_EMUL_CON; diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 5e5fc7015c7f..d9e525cc9c1c 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -1013,6 +1013,34 @@ int 
power_actor_get_max_power(struct thermal_cooling_device *cdev, } /** + * power_actor_get_min_power() - get the mainimum power that a cdev can consume + * @cdev: pointer to &thermal_cooling_device + * @tz: a valid thermal zone device pointer + * @min_power: pointer in which to store the minimum power + * + * Calculate the minimum power consumption in milliwatts that the + * cooling device can currently consume and store it in @min_power. + * + * Return: 0 on success, -EINVAL if @cdev doesn't support the + * power_actor API or -E* on other error. + */ +int power_actor_get_min_power(struct thermal_cooling_device *cdev, + struct thermal_zone_device *tz, u32 *min_power) +{ + unsigned long max_state; + int ret; + + if (!cdev_is_power_actor(cdev)) + return -EINVAL; + + ret = cdev->ops->get_max_state(cdev, &max_state); + if (ret) + return ret; + + return cdev->ops->state2power(cdev, tz, max_state, min_power); +} + +/** * power_actor_set_power() - limit the maximum power that a cooling device can consume * @cdev: pointer to &thermal_cooling_device * @instance: thermal instance to update diff --git a/drivers/thermal/ti-soc-thermal/Kconfig b/drivers/thermal/ti-soc-thermal/Kconfig index bd4c7beba679..cb6686ff09ae 100644 --- a/drivers/thermal/ti-soc-thermal/Kconfig +++ b/drivers/thermal/ti-soc-thermal/Kconfig @@ -1,7 +1,5 @@ config TI_SOC_THERMAL tristate "Texas Instruments SoCs temperature sensor driver" - depends on THERMAL - depends on ARCH_HAS_BANDGAP help If you say yes here you get support for the Texas Instruments OMAP4460+ on die bandgap temperature sensor support. The register @@ -24,7 +22,7 @@ config TI_THERMAL config OMAP4_THERMAL bool "Texas Instruments OMAP4 thermal support" depends on TI_SOC_THERMAL - depends on ARCH_OMAP4 + depends on ARCH_OMAP4 || COMPILE_TEST help If you say yes here you get thermal support for the Texas Instruments OMAP4 SoC family. The current chip supported are: @@ -38,7 +36,7 @@ config OMAP4_THERMAL config OMAP5_THERMAL bool "Texas Instruments OMAP5 thermal support" depends on TI_SOC_THERMAL - depends on SOC_OMAP5 + depends on SOC_OMAP5 || COMPILE_TEST help If you say yes here you get thermal support for the Texas Instruments OMAP5 SoC family. The current chip supported are: @@ -50,7 +48,7 @@ config OMAP5_THERMAL config DRA752_THERMAL bool "Texas Instruments DRA752 thermal support" depends on TI_SOC_THERMAL - depends on SOC_DRA7XX + depends on SOC_DRA7XX || COMPILE_TEST help If you say yes here you get thermal support for the Texas Instruments DRA752 SoC family. 
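power_actor_get_min_power() above leans on the convention that a cooling device's deepest state (max_state) is its lowest power draw, so the minimum power is simply state2power evaluated at max_state. A hedged sketch of that call shape with stubbed ops; the cdev_stub type and the "100 mW floor plus 50 mW per unreclaimed step" model are assumptions for illustration, not taken from any driver.

#include <stdio.h>

struct cdev_stub;

struct cdev_ops {
	int (*get_max_state)(struct cdev_stub *cdev, unsigned long *state);
	int (*state2power)(struct cdev_stub *cdev, unsigned long state,
			   unsigned int *power_mw);
};

struct cdev_stub {
	const struct cdev_ops *ops;
	unsigned long nr_states;
};

static int stub_get_max_state(struct cdev_stub *cdev, unsigned long *state)
{
	*state = cdev->nr_states - 1;
	return 0;
}

static int stub_state2power(struct cdev_stub *cdev, unsigned long state,
			    unsigned int *power_mw)
{
	/* deeper throttling state => less power: 100 mW floor + 50 mW per step left */
	*power_mw = 100 + (unsigned int)((cdev->nr_states - 1 - state) * 50);
	return 0;
}

/* mirrors the shape of power_actor_get_min_power(): power at max_state */
static int get_min_power(struct cdev_stub *cdev, unsigned int *min_power_mw)
{
	unsigned long max_state;
	int ret = cdev->ops->get_max_state(cdev, &max_state);

	if (ret)
		return ret;
	return cdev->ops->state2power(cdev, max_state, min_power_mw);
}

int main(void)
{
	const struct cdev_ops ops = { stub_get_max_state, stub_state2power };
	struct cdev_stub cdev = { &ops, 5 };
	unsigned int min_power;

	if (!get_min_power(&cdev, &min_power))
		printf("min power: %u mW\n", min_power); /* 100 mW at the deepest state */
	return 0;
}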
The current chip supported are: diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index c68fe1222c16..20a41f7de76f 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -643,7 +643,7 @@ static struct pci_device_id nhi_ids[] = { { .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, .vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c, - .subvendor = 0x2222, .subdevice = 0x1111, + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { 0,} }; diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 20932cc9c8f7..b09023b07169 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -343,8 +343,7 @@ static void n_tty_packet_mode_flush(struct tty_struct *tty) spin_lock_irqsave(&tty->ctrl_lock, flags); tty->ctrl_status |= TIOCPKT_FLUSHREAD; spin_unlock_irqrestore(&tty->ctrl_lock, flags); - if (waitqueue_active(&tty->link->read_wait)) - wake_up_interruptible(&tty->link->read_wait); + wake_up_interruptible(&tty->link->read_wait); } } @@ -1382,8 +1381,7 @@ handle_newline: put_tty_queue(c, ldata); smp_store_release(&ldata->canon_head, ldata->read_head); kill_fasync(&tty->fasync, SIGIO, POLL_IN); - if (waitqueue_active(&tty->read_wait)) - wake_up_interruptible_poll(&tty->read_wait, POLLIN); + wake_up_interruptible_poll(&tty->read_wait, POLLIN); return 0; } } @@ -1667,8 +1665,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp, if ((read_cnt(ldata) >= ldata->minimum_to_wake) || L_EXTPROC(tty)) { kill_fasync(&tty->fasync, SIGIO, POLL_IN); - if (waitqueue_active(&tty->read_wait)) - wake_up_interruptible_poll(&tty->read_wait, POLLIN); + wake_up_interruptible_poll(&tty->read_wait, POLLIN); } } @@ -1887,10 +1884,8 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) } /* The termios change make the tty ready for I/O */ - if (waitqueue_active(&tty->write_wait)) - wake_up_interruptible(&tty->write_wait); - if (waitqueue_active(&tty->read_wait)) - wake_up_interruptible(&tty->read_wait); + wake_up_interruptible(&tty->write_wait); + wake_up_interruptible(&tty->read_wait); } /** diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c index 21d01a491405..e508939daea3 100644 --- a/drivers/tty/serial/8250/8250_dma.c +++ b/drivers/tty/serial/8250/8250_dma.c @@ -80,10 +80,6 @@ int serial8250_tx_dma(struct uart_8250_port *p) return 0; dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); - if (dma->tx_size < p->port.fifosize) { - ret = -EINVAL; - goto err; - } desc = dmaengine_prep_slave_single(dma->txchan, dma->tx_addr + xmit->tail, diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 54e6c8ddef5d..0bbf34035d6a 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -261,6 +261,14 @@ configured less than Maximum supported fifo bytes */ UART_FCR7_64BYTE, .flags = UART_CAP_FIFO, }, + [PORT_RT2880] = { + .name = "Palmchip BK-3103", + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .rxtrig_bytes = {1, 4, 8, 14}, + .flags = UART_CAP_FIFO, + }, }; /* Uart divisor latch read */ @@ -2910,3 +2918,5 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe) } #endif /* CONFIG_SERIAL_8250_CONSOLE */ + +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 5ca5cf3e9359..538ea03bc101 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -2786,7 +2786,7 @@ 
static int atmel_serial_probe(struct platform_device *pdev) ret = atmel_init_gpios(port, &pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "Failed to initialize GPIOs."); - goto err; + goto err_clear_bit; } ret = atmel_init_port(port, pdev); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index fe3d41cc8416..d0388a071ba1 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -1631,12 +1631,12 @@ imx_console_write(struct console *co, const char *s, unsigned int count) int locked = 1; int retval; - retval = clk_prepare_enable(sport->clk_per); + retval = clk_enable(sport->clk_per); if (retval) return; - retval = clk_prepare_enable(sport->clk_ipg); + retval = clk_enable(sport->clk_ipg); if (retval) { - clk_disable_unprepare(sport->clk_per); + clk_disable(sport->clk_per); return; } @@ -1675,8 +1675,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count) if (locked) spin_unlock_irqrestore(&sport->port.lock, flags); - clk_disable_unprepare(sport->clk_ipg); - clk_disable_unprepare(sport->clk_per); + clk_disable(sport->clk_ipg); + clk_disable(sport->clk_per); } /* @@ -1777,7 +1777,15 @@ imx_console_setup(struct console *co, char *options) retval = uart_set_options(&sport->port, co, baud, parity, bits, flow); - clk_disable_unprepare(sport->clk_ipg); + clk_disable(sport->clk_ipg); + if (retval) { + clk_unprepare(sport->clk_ipg); + goto error_console; + } + + retval = clk_prepare(sport->clk_per); + if (retval) + clk_disable_unprepare(sport->clk_ipg); error_console: return retval; diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index 5a3fa8913880..a660ab181cca 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -242,7 +242,10 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld) atomic_inc(&buf->priority); mutex_lock(&buf->lock); - while ((next = buf->head->next) != NULL) { + /* paired w/ release in __tty_buffer_request_room; ensures there are + * no pending memory accesses to the freed buffer + */ + while ((next = smp_load_acquire(&buf->head->next)) != NULL) { tty_buffer_free(port, buf->head); buf->head = next; } @@ -290,7 +293,10 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size, if (n != NULL) { n->flags = flags; buf->tail = n; - b->commit = b->used; + /* paired w/ acquire in flush_to_ldisc(); ensures + * flush_to_ldisc() sees buffer data. + */ + smp_store_release(&b->commit, b->used); /* paired w/ acquire in flush_to_ldisc(); ensures the * latest commit value can be read before the head is * advanced to the next buffer @@ -393,7 +399,10 @@ void tty_schedule_flip(struct tty_port *port) { struct tty_bufhead *buf = &port->buf; - buf->tail->commit = buf->tail->used; + /* paired w/ acquire in flush_to_ldisc(); ensures + * flush_to_ldisc() sees buffer data. 
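The tty_buffer hunks here replace plain assignments of the commit index with smp_store_release()/smp_load_acquire() pairs, so a consumer that observes the new commit value is guaranteed to also observe the bytes written before it. A userspace analogue of that pairing with C11 atomics; the buffer size and the fill/drain helpers are invented for illustration, and the kernel primitives map onto the same release/acquire semantics used here.

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE 256

static char data[BUF_SIZE];
static atomic_size_t commit;    /* index one past the last published byte */
static size_t read_idx;

/* producer: write the payload first, then publish it with release ordering */
static void producer_fill(const char *payload, size_t len)
{
	size_t used = atomic_load_explicit(&commit, memory_order_relaxed);

	memcpy(data + used, payload, len);
	atomic_store_explicit(&commit, used + len, memory_order_release);
}

/* consumer: acquire-load the commit index; bytes below it are then valid */
static size_t consumer_drain(char *out, size_t out_len)
{
	size_t committed = atomic_load_explicit(&commit, memory_order_acquire);
	size_t count = committed - read_idx;

	if (count > out_len)
		count = out_len;
	memcpy(out, data + read_idx, count);
	read_idx += count;
	return count;
}

int main(void)
{
	char out[BUF_SIZE];
	size_t n;

	producer_fill("hello", 5);
	n = consumer_drain(out, sizeof(out));
	printf("drained %zu bytes\n", n);
	return 0;
}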
+ */ + smp_store_release(&buf->tail->commit, buf->tail->used); schedule_work(&buf->work); } EXPORT_SYMBOL(tty_schedule_flip); @@ -467,7 +476,7 @@ static void flush_to_ldisc(struct work_struct *work) struct tty_struct *tty; struct tty_ldisc *disc; - tty = port->itty; + tty = READ_ONCE(port->itty); if (tty == NULL) return; @@ -491,7 +500,10 @@ static void flush_to_ldisc(struct work_struct *work) * is advancing to the next buffer */ next = smp_load_acquire(&head->next); - count = head->commit - head->read; + /* paired w/ release in __tty_buffer_request_room() or in + * tty_buffer_flush(); ensures we see the committed buffer data + */ + count = smp_load_acquire(&head->commit) - head->read; if (!count) { if (next == NULL) { check_other_closed(tty); diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 02785d844354..2eefaa6e3e3a 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -2128,8 +2128,24 @@ retry_open: if (!noctty && current->signal->leader && !current->signal->tty && - tty->session == NULL) - __proc_set_tty(tty); + tty->session == NULL) { + /* + * Don't let a process that only has write access to the tty + * obtain the privileges associated with having a tty as + * controlling terminal (being able to reopen it with full + * access through /dev/tty, being able to perform pushback). + * Many distributions set the group of all ttys to "tty" and + * grant write-only access to all terminals for setgid tty + * binaries, which should not imply full privileges on all ttys. + * + * This could theoretically break old code that performs open() + * on a write-only file descriptor. In that case, it might be + * necessary to also permit this if + * inode_permission(inode, MAY_READ) == 0. + */ + if (filp->f_mode & FMODE_READ) + __proc_set_tty(tty); + } spin_unlock_irq(&current->sighand->siglock); read_unlock(&tasklist_lock); tty_unlock(tty); @@ -2418,7 +2434,7 @@ static int fionbio(struct file *file, int __user *p) * Takes ->siglock() when updating signal->tty */ -static int tiocsctty(struct tty_struct *tty, int arg) +static int tiocsctty(struct tty_struct *tty, struct file *file, int arg) { int ret = 0; @@ -2452,6 +2468,13 @@ static int tiocsctty(struct tty_struct *tty, int arg) goto unlock; } } + + /* See the comment in tty_open().
*/ + if ((file->f_mode & FMODE_READ) == 0 && !capable(CAP_SYS_ADMIN)) { + ret = -EPERM; + goto unlock; + } + proc_set_tty(tty); unlock: read_unlock(&tasklist_lock); @@ -2844,7 +2867,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) no_tty(); return 0; case TIOCSCTTY: - return tiocsctty(tty, arg); + return tiocsctty(tty, file, arg); case TIOCGPGRP: return tiocgpgrp(tty, real_tty, p); case TIOCSPGRP: @@ -3151,13 +3174,18 @@ struct class *tty_class; static int tty_cdev_add(struct tty_driver *driver, dev_t dev, unsigned int index, unsigned int count) { + int err; + /* init here, since reused cdevs cause crashes */ driver->cdevs[index] = cdev_alloc(); if (!driver->cdevs[index]) return -ENOMEM; - cdev_init(driver->cdevs[index], &tty_fops); + driver->cdevs[index]->ops = &tty_fops; driver->cdevs[index]->owner = driver->owner; - return cdev_add(driver->cdevs[index], dev, count); + err = cdev_add(driver->cdevs[index], dev, count); + if (err) + kobject_put(&driver->cdevs[index]->kobj); + return err; } /** diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index 867e9f3f3859..dcc50c878159 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -61,7 +61,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = { { .compatible = "fsl,imx27-usb", .data = &imx27_usb_data}, { .compatible = "fsl,imx6q-usb", .data = &imx6q_usb_data}, { .compatible = "fsl,imx6sl-usb", .data = &imx6sl_usb_data}, - { .compatible = "fsl,imx6sx-usb", .data = &imx6sl_usb_data}, + { .compatible = "fsl,imx6sx-usb", .data = &imx6sx_usb_data}, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids); diff --git a/drivers/usb/chipidea/ci_hdrc_usb2.c b/drivers/usb/chipidea/ci_hdrc_usb2.c index 9eae1a16cef9..4456d2cf80ff 100644 --- a/drivers/usb/chipidea/ci_hdrc_usb2.c +++ b/drivers/usb/chipidea/ci_hdrc_usb2.c @@ -12,6 +12,7 @@ #include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_platform.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/usb/chipidea.h> @@ -30,18 +31,36 @@ static const struct ci_hdrc_platform_data ci_default_pdata = { .flags = CI_HDRC_DISABLE_STREAMING, }; +static struct ci_hdrc_platform_data ci_zynq_pdata = { + .capoffset = DEF_CAPOFFSET, +}; + +static const struct of_device_id ci_hdrc_usb2_of_match[] = { + { .compatible = "chipidea,usb2"}, + { .compatible = "xlnx,zynq-usb-2.20a", .data = &ci_zynq_pdata}, + { } +}; +MODULE_DEVICE_TABLE(of, ci_hdrc_usb2_of_match); + static int ci_hdrc_usb2_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ci_hdrc_usb2_priv *priv; struct ci_hdrc_platform_data *ci_pdata = dev_get_platdata(dev); int ret; + const struct of_device_id *match; if (!ci_pdata) { ci_pdata = devm_kmalloc(dev, sizeof(*ci_pdata), GFP_KERNEL); *ci_pdata = ci_default_pdata; /* struct copy */ } + match = of_match_device(ci_hdrc_usb2_of_match, &pdev->dev); + if (match && match->data) { + /* struct copy */ + *ci_pdata = *(struct ci_hdrc_platform_data *)match->data; + } + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; @@ -96,12 +115,6 @@ static int ci_hdrc_usb2_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id ci_hdrc_usb2_of_match[] = { - { .compatible = "chipidea,usb2" }, - { } -}; -MODULE_DEVICE_TABLE(of, ci_hdrc_usb2_of_match); - static struct platform_driver ci_hdrc_usb2_driver = { .probe = ci_hdrc_usb2_probe, .remove = ci_hdrc_usb2_remove, 
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index a637da25dda0..8223fe73ea85 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -656,6 +656,44 @@ __acquires(hwep->lock) return 0; } +static int _ep_set_halt(struct usb_ep *ep, int value, bool check_transfer) +{ + struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); + int direction, retval = 0; + unsigned long flags; + + if (ep == NULL || hwep->ep.desc == NULL) + return -EINVAL; + + if (usb_endpoint_xfer_isoc(hwep->ep.desc)) + return -EOPNOTSUPP; + + spin_lock_irqsave(hwep->lock, flags); + + if (value && hwep->dir == TX && check_transfer && + !list_empty(&hwep->qh.queue) && + !usb_endpoint_xfer_control(hwep->ep.desc)) { + spin_unlock_irqrestore(hwep->lock, flags); + return -EAGAIN; + } + + direction = hwep->dir; + do { + retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value); + + if (!value) + hwep->wedge = 0; + + if (hwep->type == USB_ENDPOINT_XFER_CONTROL) + hwep->dir = (hwep->dir == TX) ? RX : TX; + + } while (hwep->dir != direction); + + spin_unlock_irqrestore(hwep->lock, flags); + return retval; +} + + /** * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts * @gadget: gadget @@ -1051,7 +1089,7 @@ __acquires(ci->lock) num += ci->hw_ep_max / 2; spin_unlock(&ci->lock); - err = usb_ep_set_halt(&ci->ci_hw_ep[num].ep); + err = _ep_set_halt(&ci->ci_hw_ep[num].ep, 1, false); spin_lock(&ci->lock); if (!err) isr_setup_status_phase(ci); @@ -1117,8 +1155,8 @@ delegate: if (err < 0) { spin_unlock(&ci->lock); - if (usb_ep_set_halt(&hwep->ep)) - dev_err(ci->dev, "error: ep_set_halt\n"); + if (_ep_set_halt(&hwep->ep, 1, false)) + dev_err(ci->dev, "error: _ep_set_halt\n"); spin_lock(&ci->lock); } } @@ -1149,9 +1187,9 @@ __acquires(ci->lock) err = isr_setup_status_phase(ci); if (err < 0) { spin_unlock(&ci->lock); - if (usb_ep_set_halt(&hwep->ep)) + if (_ep_set_halt(&hwep->ep, 1, false)) dev_err(ci->dev, - "error: ep_set_halt\n"); + "error: _ep_set_halt\n"); spin_lock(&ci->lock); } } @@ -1397,41 +1435,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req) */ static int ep_set_halt(struct usb_ep *ep, int value) { - struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); - int direction, retval = 0; - unsigned long flags; - - if (ep == NULL || hwep->ep.desc == NULL) - return -EINVAL; - - if (usb_endpoint_xfer_isoc(hwep->ep.desc)) - return -EOPNOTSUPP; - - spin_lock_irqsave(hwep->lock, flags); - -#ifndef STALL_IN - /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */ - if (value && hwep->type == USB_ENDPOINT_XFER_BULK && hwep->dir == TX && - !list_empty(&hwep->qh.queue)) { - spin_unlock_irqrestore(hwep->lock, flags); - return -EAGAIN; - } -#endif - - direction = hwep->dir; - do { - retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value); - - if (!value) - hwep->wedge = 0; - - if (hwep->type == USB_ENDPOINT_XFER_CONTROL) - hwep->dir = (hwep->dir == TX) ? 
RX : TX; - - } while (hwep->dir != direction); - - spin_unlock_irqrestore(hwep->lock, flags); - return retval; + return _ep_set_halt(ep, value, true); } /** diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index b2a540b43f97..b9ddf0c1ffe5 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -112,7 +112,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, cfgno, inum, asnum, ep->desc.bEndpointAddress); ep->ss_ep_comp.bmAttributes = 16; } else if (usb_endpoint_xfer_isoc(&ep->desc) && - desc->bmAttributes > 2) { + USB_SS_MULT(desc->bmAttributes) > 3) { dev_warn(ddev, "Isoc endpoint has Mult of %d in " "config %d interface %d altsetting %d ep %d: " "setting to 3\n", desc->bmAttributes + 1, @@ -121,7 +121,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, } if (usb_endpoint_xfer_isoc(&ep->desc)) - max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) * + max_tx = (desc->bMaxBurst + 1) * + (USB_SS_MULT(desc->bmAttributes)) * usb_endpoint_maxp(&ep->desc); else if (usb_endpoint_xfer_int(&ep->desc)) max_tx = usb_endpoint_maxp(&ep->desc) * diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index d85abfed84cc..f5a381945db2 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -54,6 +54,13 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Logitech ConferenceCam CC3000e */ + { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT }, + + /* Logitech PTZ Pro Camera */ + { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Logitech Quickcam Fusion */ { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -78,6 +85,12 @@ static const struct usb_device_id usb_quirk_list[] = { /* Philips PSC805 audio device */ { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Plantronic Audio 655 DSP */ + { USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME }, + + /* Plantronic Audio 648 USB */ + { USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Artisman Watchdog Dongle */ { USB_DEVICE(0x04b4, 0x0526), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index a5a1b7c45743..22e9606d8e08 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c @@ -514,8 +514,6 @@ static int dwc3_omap_probe(struct platform_device *pdev) goto err1; } - dwc3_omap_enable_irqs(omap); - ret = dwc3_omap_extcon_register(omap); if (ret < 0) goto err2; @@ -526,6 +524,8 @@ static int dwc3_omap_probe(struct platform_device *pdev) goto err3; } + dwc3_omap_enable_irqs(omap); + return 0; err3: diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 0c25704dcb6b..1e8bdf817811 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2665,8 +2665,6 @@ static irqreturn_t dwc3_interrupt(int irq, void *_dwc) int i; irqreturn_t ret = IRQ_NONE; - spin_lock(&dwc->lock); - for (i = 0; i < dwc->num_event_buffers; i++) { irqreturn_t status; @@ -2675,8 +2673,6 @@ static irqreturn_t dwc3_interrupt(int irq, void *_dwc) ret = status; } - spin_unlock(&dwc->lock); - return ret; } diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c index 
978435a51038..6399c106a3a5 100644 --- a/drivers/usb/gadget/epautoconf.c +++ b/drivers/usb/gadget/epautoconf.c @@ -186,6 +186,7 @@ void usb_ep_autoconfig_reset (struct usb_gadget *gadget) list_for_each_entry (ep, &gadget->ep_list, ep_list) { ep->claimed = false; + ep->driver_data = NULL; } gadget->in_epnum = 0; gadget->out_epnum = 0; diff --git a/drivers/usb/gadget/udc/amd5536udc.c b/drivers/usb/gadget/udc/amd5536udc.c index fdacddb18c00..175ca93fe5e2 100644 --- a/drivers/usb/gadget/udc/amd5536udc.c +++ b/drivers/usb/gadget/udc/amd5536udc.c @@ -3138,8 +3138,8 @@ static void udc_pci_remove(struct pci_dev *pdev) writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg); if (dev->irq_registered) free_irq(pdev->irq, dev); - if (dev->regs) - iounmap(dev->regs); + if (dev->virt_addr) + iounmap(dev->virt_addr); if (dev->mem_region) release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); @@ -3226,17 +3226,13 @@ static int udc_pci_probe( /* init */ dev = kzalloc(sizeof(struct udc), GFP_KERNEL); - if (!dev) { - retval = -ENOMEM; - goto finished; - } + if (!dev) + return -ENOMEM; /* pci setup */ if (pci_enable_device(pdev) < 0) { - kfree(dev); - dev = NULL; retval = -ENODEV; - goto finished; + goto err_pcidev; } dev->active = 1; @@ -3246,28 +3242,22 @@ static int udc_pci_probe( if (!request_mem_region(resource, len, name)) { dev_dbg(&pdev->dev, "pci device used already\n"); - kfree(dev); - dev = NULL; retval = -EBUSY; - goto finished; + goto err_memreg; } dev->mem_region = 1; dev->virt_addr = ioremap_nocache(resource, len); if (dev->virt_addr == NULL) { dev_dbg(&pdev->dev, "start address cannot be mapped\n"); - kfree(dev); - dev = NULL; retval = -EFAULT; - goto finished; + goto err_ioremap; } if (!pdev->irq) { dev_err(&pdev->dev, "irq not set\n"); - kfree(dev); - dev = NULL; retval = -ENODEV; - goto finished; + goto err_irq; } spin_lock_init(&dev->lock); @@ -3283,10 +3273,8 @@ static int udc_pci_probe( if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) { dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq); - kfree(dev); - dev = NULL; retval = -EBUSY; - goto finished; + goto err_irq; } dev->irq_registered = 1; @@ -3314,8 +3302,17 @@ static int udc_pci_probe( return 0; finished: - if (dev) - udc_pci_remove(pdev); + udc_pci_remove(pdev); + return retval; + +err_irq: + iounmap(dev->virt_addr); +err_ioremap: + release_mem_region(resource, len); +err_memreg: + pci_disable_device(pdev); +err_pcidev: + kfree(dev); return retval; } diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 3dfada8d6061..f0f2b066ac08 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -2002,6 +2002,17 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev, ep->udc = udc; INIT_LIST_HEAD(&ep->queue); + if (ep->index == 0) { + ep->ep.caps.type_control = true; + } else { + ep->ep.caps.type_iso = ep->can_isoc; + ep->ep.caps.type_bulk = true; + ep->ep.caps.type_int = true; + } + + ep->ep.caps.dir_in = true; + ep->ep.caps.dir_out = true; + if (i) list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c index 5c8f4effb62a..ccb9c213cc9f 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_core.c +++ b/drivers/usb/gadget/udc/bdc/bdc_core.c @@ -324,8 +324,7 @@ static void bdc_mem_free(struct bdc *bdc) bdc->scratchpad.buff, bdc->scratchpad.sp_dma); /* Destroy the dma pools */ - if (bdc->bd_table_pool) - 
dma_pool_destroy(bdc->bd_table_pool); + dma_pool_destroy(bdc->bd_table_pool); /* Free the bdc_ep array */ kfree(bdc->bdc_ep_array); diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index d1b81539d632..d6199507f861 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c @@ -159,8 +159,10 @@ static int ep_bd_list_alloc(struct bdc_ep *ep) bd_table->start_bd = dma_pool_alloc(bdc->bd_table_pool, GFP_ATOMIC, &dma); - if (!bd_table->start_bd) + if (!bd_table->start_bd) { + kfree(bd_table); goto fail; + } bd_table->dma = dma; diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index 1379ad40d864..27af0f008b57 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -1348,6 +1348,7 @@ static int transfer(struct dummy_hcd *dum_hcd, struct urb *urb, { struct dummy *dum = dum_hcd->dum; struct dummy_request *req; + int sent = 0; top: /* if there's no request queued, the device is NAKing; return */ @@ -1385,12 +1386,15 @@ top: if (len == 0) break; - /* use an extra pass for the final short packet */ - if (len > ep->ep.maxpacket) { - rescan = 1; - len -= (len % ep->ep.maxpacket); + /* send multiple of maxpacket first, then remainder */ + if (len >= ep->ep.maxpacket) { + is_short = 0; + if (len % ep->ep.maxpacket) + rescan = 1; + len -= len % ep->ep.maxpacket; + } else { + is_short = 1; } - is_short = (len % ep->ep.maxpacket) != 0; len = dummy_perform_transfer(urb, req, len); @@ -1399,6 +1403,7 @@ top: req->req.status = len; } else { limit -= len; + sent += len; urb->actual_length += len; req->req.actual += len; } @@ -1421,7 +1426,7 @@ top: *status = -EOVERFLOW; else *status = 0; - } else if (!to_host) { + } else { *status = 0; if (host_len > dev_len) req->req.status = -EOVERFLOW; @@ -1429,15 +1434,24 @@ top: req->req.status = 0; } - /* many requests terminate without a short packet */ + /* + * many requests terminate without a short packet. + * send a zlp if demanded by flags. 
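The dummy_hcd rework above sends whole multiples of maxpacket first, leaves any remainder for a later short-packet pass, and queues a zero-length packet when the transfer ends on a packet boundary and the ZLP flag demands one. A small sketch of that chunking plan; plan_transfer and the example sizes are illustrative only, and the real transfer() additionally caps each pass by the URB and request limits while tracking actual/status fields.

#include <stdbool.h>
#include <stdio.h>

/* Print the per-pass chunk sizes: full maxpacket multiples, then the short
 * remainder, then an optional ZLP when the length is an exact multiple. */
static void plan_transfer(size_t total, size_t maxpacket, bool want_zlp)
{
	size_t sent = 0;

	while (sent < total) {
		size_t len = total - sent;

		if (len >= maxpacket)
			len -= len % maxpacket; /* full packets this pass */
		/* else: trailing short packet terminates the transfer */
		printf("pass: %zu bytes\n", len);
		sent += len;
	}
	if (want_zlp && total % maxpacket == 0)
		printf("pass: 0 bytes (ZLP)\n");
}

int main(void)
{
	plan_transfer(1000, 64, false);         /* 960 then 40 */
	plan_transfer(128, 64, true);           /* 128 then ZLP */
	return 0;
}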
+ */ } else { - if (req->req.length == req->req.actual - && !req->req.zero) - req->req.status = 0; - if (urb->transfer_buffer_length == urb->actual_length - && !(urb->transfer_flags - & URB_ZERO_PACKET)) - *status = 0; + if (req->req.length == req->req.actual) { + if (req->req.zero && to_host) + rescan = 1; + else + req->req.status = 0; + } + if (urb->transfer_buffer_length == urb->actual_length) { + if (urb->transfer_flags & URB_ZERO_PACKET && + !to_host) + rescan = 1; + else + *status = 0; + } } /* device side completion --> continuable */ @@ -1460,7 +1474,7 @@ top: if (rescan) goto top; } - return limit; + return sent; } static int periodic_bytes(struct dummy *dum, struct dummy_ep *ep) @@ -1890,7 +1904,7 @@ restart: default: treat_control_like_bulk: ep->last_io = jiffies; - total = transfer(dum_hcd, urb, ep, limit, &status); + total -= transfer(dum_hcd, urb, ep, limit, &status); break; } diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c index 8aa2593c2c36..b9429bc42511 100644 --- a/drivers/usb/gadget/udc/gr_udc.c +++ b/drivers/usb/gadget/udc/gr_udc.c @@ -2117,8 +2117,7 @@ static int gr_remove(struct platform_device *pdev) return -EBUSY; gr_dfs_delete(dev); - if (dev->desc_pool) - dma_pool_destroy(dev->desc_pool); + dma_pool_destroy(dev->desc_pool); platform_set_drvdata(pdev, NULL); gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req); diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c index 4c489692745e..dafe74eb9ade 100644 --- a/drivers/usb/gadget/udc/mv_u3d_core.c +++ b/drivers/usb/gadget/udc/mv_u3d_core.c @@ -1767,8 +1767,7 @@ static int mv_u3d_remove(struct platform_device *dev) usb_del_gadget_udc(&u3d->gadget); /* free memory allocated in probe */ - if (u3d->trb_pool) - dma_pool_destroy(u3d->trb_pool); + dma_pool_destroy(u3d->trb_pool); if (u3d->ep_context) dma_free_coherent(&dev->dev, u3d->ep_context_size, diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c index 339af51df57d..81b6229c7805 100644 --- a/drivers/usb/gadget/udc/mv_udc_core.c +++ b/drivers/usb/gadget/udc/mv_udc_core.c @@ -2100,8 +2100,7 @@ static int mv_udc_remove(struct platform_device *pdev) } /* free memory allocated in probe */ - if (udc->dtd_pool) - dma_pool_destroy(udc->dtd_pool); + dma_pool_destroy(udc->dtd_pool); if (udc->ep_dqh) dma_free_coherent(&pdev->dev, udc->ep_dqh_size, diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 9a8c936cd42c..41f841fa6c4d 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1498,10 +1498,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, * use Event Data TRBs, and we don't chain in a link TRB on short * transfers, we're basically dividing by 1. * - * xHCI 1.0 specification indicates that the Average TRB Length should - * be set to 8 for control endpoints. + * xHCI 1.0 and 1.1 specification indicates that the Average TRB Length + * should be set to 8 for control endpoints. 
*/ - if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100) + if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100) ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8)); else ep_ctx->tx_info |= @@ -1792,8 +1792,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) int size; int i, j, num_ports; - if (timer_pending(&xhci->cmd_timer)) - del_timer_sync(&xhci->cmd_timer); + del_timer_sync(&xhci->cmd_timer); /* Free the Event Ring Segment Table and the actual Event Ring */ size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); @@ -2321,6 +2320,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) INIT_LIST_HEAD(&xhci->cmd_list); + /* init command timeout timer */ + setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout, + (unsigned long)xhci); + page_size = readl(&xhci->op_regs->page_size); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Supported page size register = 0x%x", page_size); @@ -2505,10 +2508,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) "Wrote ERST address to ir_set 0."); xhci_print_ir_set(xhci, 0); - /* init command timeout timer */ - setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout, - (unsigned long)xhci); - /* * XXX: Might need to set the Interrupter Moderation Register to * something other than the default (~1ms minimum between interrupts). diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 5590eac2b22d..c47d3e480586 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -147,6 +147,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { xhci->quirks |= XHCI_SPURIOUS_REBOOT; + xhci->quirks |= XHCI_SPURIOUS_WAKEUP; } if (pdev->vendor == PCI_VENDOR_ID_INTEL && (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI || @@ -180,51 +181,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) "QUIRK: Resetting on resume"); } -/* - * In some Intel xHCI controllers, in order to get D3 working, - * through a vendor specific SSIC CONFIG register at offset 0x883c, - * SSIC PORT need to be marked as "unused" before putting xHCI - * into D3. After D3 exit, the SSIC port need to be marked as "used". - * Without this change, xHCI might not enter D3 state. 
- * Make sure PME works on some Intel xHCI controllers by writing 1 to clear - * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 - */ -static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend) -{ - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - struct pci_dev *pdev = to_pci_dev(hcd->self.controller); - u32 val; - void __iomem *reg; - - if (pdev->vendor == PCI_VENDOR_ID_INTEL && - pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) { - - reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2; - - /* Notify SSIC that SSIC profile programming is not done */ - val = readl(reg) & ~PROG_DONE; - writel(val, reg); - - /* Mark SSIC port as unused(suspend) or used(resume) */ - val = readl(reg); - if (suspend) - val |= SSIC_PORT_UNUSED; - else - val &= ~SSIC_PORT_UNUSED; - writel(val, reg); - - /* Notify SSIC that SSIC profile programming is done */ - val = readl(reg) | PROG_DONE; - writel(val, reg); - readl(reg); - } - - reg = (void __iomem *) xhci->cap_regs + 0x80a4; - val = readl(reg); - writel(val | BIT(28), reg); - readl(reg); -} - #ifdef CONFIG_ACPI static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { @@ -345,6 +301,51 @@ static void xhci_pci_remove(struct pci_dev *dev) } #ifdef CONFIG_PM +/* + * In some Intel xHCI controllers, in order to get D3 working, + * through a vendor specific SSIC CONFIG register at offset 0x883c, + * SSIC PORT need to be marked as "unused" before putting xHCI + * into D3. After D3 exit, the SSIC port need to be marked as "used". + * Without this change, xHCI might not enter D3 state. + * Make sure PME works on some Intel xHCI controllers by writing 1 to clear + * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 + */ +static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); + u32 val; + void __iomem *reg; + + if (pdev->vendor == PCI_VENDOR_ID_INTEL && + pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) { + + reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2; + + /* Notify SSIC that SSIC profile programming is not done */ + val = readl(reg) & ~PROG_DONE; + writel(val, reg); + + /* Mark SSIC port as unused(suspend) or used(resume) */ + val = readl(reg); + if (suspend) + val |= SSIC_PORT_UNUSED; + else + val &= ~SSIC_PORT_UNUSED; + writel(val, reg); + + /* Notify SSIC that SSIC profile programming is done */ + val = readl(reg) | PROG_DONE; + writel(val, reg); + readl(reg); + } + + reg = (void __iomem *) xhci->cap_regs + 0x80a4; + val = readl(reg); + writel(val | BIT(28), reg); + readl(reg); +} + static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index a47a1e897086..97ffe3997273 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -302,6 +302,15 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) ret = xhci_handshake(&xhci->op_regs->cmd_ring, CMD_RING_RUNNING, 0, 5 * 1000 * 1000); if (ret < 0) { + /* we are about to kill xhci, give it one more chance */ + xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, + &xhci->op_regs->cmd_ring); + udelay(1000); + ret = xhci_handshake(&xhci->op_regs->cmd_ring, + CMD_RING_RUNNING, 0, 3 * 1000 * 1000); + if (ret == 0) + return 0; + xhci_err(xhci, "Stopped the command ring failed, " "maybe the host is dead\n"); xhci->xhc_state |= XHCI_STATE_DYING; @@ -2182,6 +2191,10 @@ static int 
process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, } /* Fast path - was this the last TRB in the TD for this URB? */ } else if (event_trb == td->last_trb) { + if (td->urb_length_set && trb_comp_code == COMP_SHORT_TX) + return finish_td(xhci, td, event_trb, event, ep, + status, false); + if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { td->urb->actual_length = td->urb->transfer_buffer_length - @@ -2233,6 +2246,12 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, td->urb->actual_length += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + + if (trb_comp_code == COMP_SHORT_TX) { + xhci_dbg(xhci, "mid bulk/intr SP, wait for last TRB event\n"); + td->urb_length_set = true; + return 0; + } } return finish_td(xhci, td, event_trb, event, ep, status, false); @@ -2265,6 +2284,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, u32 trb_comp_code; int ret = 0; int td_num = 0; + bool handling_skipped_tds = false; slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); xdev = xhci->devs[slot_id]; @@ -2401,6 +2421,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, ep->skip = true; xhci_dbg(xhci, "Miss service interval error, set skip flag\n"); goto cleanup; + case COMP_PING_ERR: + ep->skip = true; + xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n"); + goto cleanup; default: if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { status = 0; @@ -2537,13 +2561,18 @@ static int handle_tx_event(struct xhci_hcd *xhci, ep, &status); cleanup: + + + handling_skipped_tds = ep->skip && + trb_comp_code != COMP_MISSED_INT && + trb_comp_code != COMP_PING_ERR; + /* - * Do not update event ring dequeue pointer if ep->skip is set. - * Will roll back to continue process missed tds. + * Do not update event ring dequeue pointer if we're in a loop + * processing missed tds. */ - if (trb_comp_code == COMP_MISSED_INT || !ep->skip) { + if (!handling_skipped_tds) inc_deq(xhci, xhci->event_ring); - } if (ret) { urb = td->urb; @@ -2578,7 +2607,7 @@ cleanup: * Process them as short transfer until reach the td pointed by * the event. */ - } while (ep->skip && trb_comp_code != COMP_MISSED_INT); + } while (handling_skipped_tds); return 0; } @@ -3461,8 +3490,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, if (start_cycle == 0) field |= 0x1; - /* xHCI 1.0 6.4.1.2.1: Transfer Type field */ - if (xhci->hci_version == 0x100) { + /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */ + if (xhci->hci_version >= 0x100) { if (urb->transfer_buffer_length > 0) { if (setup->bRequestType & USB_DIR_IN) field |= TRB_TX_TYPE(TRB_DATA_IN); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 6b0f4a47e402..9957bd96d4bc 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -146,7 +146,8 @@ static int xhci_start(struct xhci_hcd *xhci) "waited %u microseconds.\n", XHCI_MAX_HALT_USEC); if (!ret) - xhci->xhc_state &= ~XHCI_STATE_HALTED; + xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING); + return ret; } @@ -654,15 +655,6 @@ int xhci_run(struct usb_hcd *hcd) } EXPORT_SYMBOL_GPL(xhci_run); -static void xhci_only_stop_hcd(struct usb_hcd *hcd) -{ - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - - spin_lock_irq(&xhci->lock); - xhci_halt(xhci); - spin_unlock_irq(&xhci->lock); -} - /* * Stop xHCI driver. 
* @@ -677,12 +669,14 @@ void xhci_stop(struct usb_hcd *hcd) u32 temp; struct xhci_hcd *xhci = hcd_to_xhci(hcd); - if (!usb_hcd_is_primary_hcd(hcd)) { - xhci_only_stop_hcd(xhci->shared_hcd); + if (xhci->xhc_state & XHCI_STATE_HALTED) return; - } + mutex_lock(&xhci->mutex); spin_lock_irq(&xhci->lock); + xhci->xhc_state |= XHCI_STATE_HALTED; + xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; + /* Make sure the xHC is halted for a USB3 roothub * (xhci_stop() could be called as part of failed init). */ @@ -717,6 +711,7 @@ void xhci_stop(struct usb_hcd *hcd) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_stop completed - status = %x", readl(&xhci->op_regs->status)); + mutex_unlock(&xhci->mutex); } /* @@ -3793,6 +3788,9 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, mutex_lock(&xhci->mutex); + if (xhci->xhc_state) /* dying or halted */ + goto out; + if (!udev->slot_id) { xhci_dbg_trace(xhci, trace_xhci_dbg_address, "Bad Slot ID %d", udev->slot_id); diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c index 3ad5d19e4d04..23c794813e6a 100644 --- a/drivers/usb/misc/chaoskey.c +++ b/drivers/usb/misc/chaoskey.c @@ -472,7 +472,7 @@ static int chaoskey_rng_read(struct hwrng *rng, void *data, if (this_time > max) this_time = max; - memcpy(data, dev->buf, this_time); + memcpy(data, dev->buf + dev->used, this_time); dev->used += this_time; diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 514a6cdaeff6..4a518ff12310 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -1051,6 +1051,7 @@ void musb_start(struct musb *musb) * (c) peripheral initiates, using SRP */ if (musb->port_mode != MUSB_PORT_MODE_HOST && + musb->xceiv->otg->state != OTG_STATE_A_WAIT_BCON && (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) { musb->is_active = 1; } else { @@ -2448,6 +2449,9 @@ static int musb_suspend(struct device *dev) struct musb *musb = dev_to_musb(dev); unsigned long flags; + musb_platform_disable(musb); + musb_generic_disable(musb); + spin_lock_irqsave(&musb->lock, flags); if (is_peripheral_active(musb)) { @@ -2501,6 +2505,9 @@ static int musb_resume(struct device *dev) pm_runtime_disable(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); + + musb_start(musb); + return 0; } diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c index d07cafb7d5f5..e499b862a946 100644 --- a/drivers/usb/musb/musb_cppi41.c +++ b/drivers/usb/musb/musb_cppi41.c @@ -551,6 +551,9 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel) } else { cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE); + /* delay to drain to cppi dma pipeline for isoch */ + udelay(250); + csr = musb_readw(epio, MUSB_RXCSR); csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB); musb_writew(epio, MUSB_RXCSR, csr); diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index a0cfead6150f..84512d1d5eee 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -225,8 +225,11 @@ static void dsps_musb_enable(struct musb *musb) dsps_writel(reg_base, wrp->epintr_set, epmask); dsps_writel(reg_base, wrp->coreintr_set, coremask); - /* start polling for ID change. 
*/ - mod_timer(&glue->timer, jiffies + msecs_to_jiffies(wrp->poll_timeout)); + /* start polling for ID change in dual-role idle mode */ + if (musb->xceiv->otg->state == OTG_STATE_B_IDLE && + musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE) + mod_timer(&glue->timer, jiffies + + msecs_to_jiffies(wrp->poll_timeout)); dsps_musb_try_idle(musb, 0); } diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c index 39168fe9b406..b2685e75a683 100644 --- a/drivers/usb/musb/ux500.c +++ b/drivers/usb/musb/ux500.c @@ -379,6 +379,8 @@ static const struct of_device_id ux500_match[] = { {} }; +MODULE_DEVICE_TABLE(of, ux500_match); + static struct platform_driver ux500_driver = { .probe = ux500_probe, .remove = ux500_remove, diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index 7d3beee2a587..173132416170 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -155,7 +155,7 @@ config USB_MSM_OTG config USB_QCOM_8X16_PHY tristate "Qualcomm APQ8016/MSM8916 on-chip USB PHY controller support" depends on ARCH_QCOM || COMPILE_TEST - depends on RESET_CONTROLLER + depends on RESET_CONTROLLER && EXTCON select USB_PHY select USB_ULPI_VIEWPORT help diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c index ec6ecd03269c..5320cb8642cb 100644 --- a/drivers/usb/phy/phy-generic.c +++ b/drivers/usb/phy/phy-generic.c @@ -232,7 +232,8 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop, clk_rate = pdata->clk_rate; needs_vcc = pdata->needs_vcc; if (gpio_is_valid(pdata->gpio_reset)) { - err = devm_gpio_request_one(dev, pdata->gpio_reset, 0, + err = devm_gpio_request_one(dev, pdata->gpio_reset, + GPIOF_ACTIVE_LOW, dev_name(dev)); if (!err) nop->gpiod_reset = diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c index 8a55b37d1a02..db68156568e6 100644 --- a/drivers/usb/phy/phy-isp1301.c +++ b/drivers/usb/phy/phy-isp1301.c @@ -31,6 +31,7 @@ static const struct i2c_device_id isp1301_id[] = { { "isp1301", 0 }, { } }; +MODULE_DEVICE_TABLE(i2c, isp1301_id); static struct i2c_client *isp1301_i2c_client; diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 7b98e1d9194c..d82fa36c3465 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -476,6 +476,11 @@ static const struct of_device_id usbhs_of_match[] = { .compatible = "renesas,usbhs-r8a7794", .data = (void *)USBHS_TYPE_RCAR_GEN2, }, + { + /* Gen3 is compatible with Gen2 */ + .compatible = "renesas,usbhs-r8a7795", + .data = (void *)USBHS_TYPE_RCAR_GEN2, + }, { }, }; MODULE_DEVICE_TABLE(of, usbhs_of_match); @@ -493,7 +498,7 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev) return NULL; dparam = &info->driver_param; - dparam->type = of_id ? (u32)of_id->data : 0; + dparam->type = of_id ? 
(uintptr_t)of_id->data : 0; if (!of_property_read_u32(dev->of_node, "renesas,buswait", &tmp)) dparam->buswait_bwait = tmp; gpio = of_get_named_gpio_flags(dev->of_node, "renesas,enable-gpio", 0, diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 6d1941a2396a..6956c4f62216 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -278,6 +278,10 @@ static void option_instat_callback(struct urb *urb); #define ZTE_PRODUCT_MF622 0x0001 #define ZTE_PRODUCT_MF628 0x0015 #define ZTE_PRODUCT_MF626 0x0031 +#define ZTE_PRODUCT_ZM8620_X 0x0396 +#define ZTE_PRODUCT_ME3620_MBIM 0x0426 +#define ZTE_PRODUCT_ME3620_X 0x1432 +#define ZTE_PRODUCT_ME3620_L 0x1433 #define ZTE_PRODUCT_AC2726 0xfff1 #define ZTE_PRODUCT_MG880 0xfffd #define ZTE_PRODUCT_CDMA_TECH 0xfffe @@ -544,6 +548,18 @@ static const struct option_blacklist_info zte_mc2716_z_blacklist = { .sendsetup = BIT(1) | BIT(2) | BIT(3), }; +static const struct option_blacklist_info zte_me3620_mbim_blacklist = { + .reserved = BIT(2) | BIT(3) | BIT(4), +}; + +static const struct option_blacklist_info zte_me3620_xl_blacklist = { + .reserved = BIT(3) | BIT(4) | BIT(5), +}; + +static const struct option_blacklist_info zte_zm8620_x_blacklist = { + .reserved = BIT(3) | BIT(4) | BIT(5), +}; + static const struct option_blacklist_info huawei_cdc12_blacklist = { .reserved = BIT(1) | BIT(2), }; @@ -1591,6 +1607,14 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist }, + { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L), + .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist }, + { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM), + .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist }, + { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X), + .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist }, + { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X), + .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index 6c3734d2b45a..d3ea90bef84d 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c @@ -80,6 +80,8 @@ static int whiteheat_firmware_download(struct usb_serial *serial, static int whiteheat_firmware_attach(struct usb_serial *serial); /* function prototypes for the Connect Tech WhiteHEAT serial converter */ +static int whiteheat_probe(struct usb_serial *serial, + const struct usb_device_id *id); static int whiteheat_attach(struct usb_serial *serial); static void whiteheat_release(struct usb_serial *serial); static int whiteheat_port_probe(struct usb_serial_port *port); @@ -116,6 +118,7 @@ static struct usb_serial_driver whiteheat_device = { .description = "Connect Tech - WhiteHEAT", .id_table = id_table_std, .num_ports = 4, + .probe = whiteheat_probe, .attach = whiteheat_attach, .release = whiteheat_release, .port_probe = whiteheat_port_probe, @@ -217,6 +220,34 @@ static int whiteheat_firmware_attach(struct usb_serial *serial) /***************************************************************************** * Connect Tech's White Heat serial driver functions 
*****************************************************************************/ + +static int whiteheat_probe(struct usb_serial *serial, + const struct usb_device_id *id) +{ + struct usb_host_interface *iface_desc; + struct usb_endpoint_descriptor *endpoint; + size_t num_bulk_in = 0; + size_t num_bulk_out = 0; + size_t min_num_bulk; + unsigned int i; + + iface_desc = serial->interface->cur_altsetting; + + for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) { + endpoint = &iface_desc->endpoint[i].desc; + if (usb_endpoint_is_bulk_in(endpoint)) + ++num_bulk_in; + if (usb_endpoint_is_bulk_out(endpoint)) + ++num_bulk_out; + } + + min_num_bulk = COMMAND_PORT + 1; + if (num_bulk_in < min_num_bulk || num_bulk_out < min_num_bulk) + return -ENODEV; + + return 0; +} + static int whiteheat_attach(struct usb_serial *serial) { struct usb_serial_port *command_port; diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 7d137a43cc86..9eda69e40678 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -61,8 +61,7 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;" enum { VHOST_NET_FEATURES = VHOST_FEATURES | (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) | - (1ULL << VIRTIO_NET_F_MRG_RXBUF) | - (1ULL << VIRTIO_F_VERSION_1), + (1ULL << VIRTIO_NET_F_MRG_RXBUF) }; enum { diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index f114a9dbb48f..e25a23692822 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -166,9 +166,7 @@ enum { /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */ enum { VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) | - (1ULL << VIRTIO_SCSI_F_T10_PI) | - (1ULL << VIRTIO_F_ANY_LAYOUT) | - (1ULL << VIRTIO_F_VERSION_1) + (1ULL << VIRTIO_SCSI_F_T10_PI) }; #define VHOST_SCSI_MAX_TARGET 256 diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c index d9c501eaa6c3..f2882ac98726 100644 --- a/drivers/vhost/test.c +++ b/drivers/vhost/test.c @@ -277,10 +277,13 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl, return -EFAULT; return 0; case VHOST_SET_FEATURES: + printk(KERN_ERR "1\n"); if (copy_from_user(&features, featurep, sizeof features)) return -EFAULT; + printk(KERN_ERR "2\n"); if (features & ~VHOST_FEATURES) return -EOPNOTSUPP; + printk(KERN_ERR "3\n"); return vhost_test_set_features(n, features); case VHOST_RESET_OWNER: return vhost_test_reset_owner(n); diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index ce6f6da4b09f..4772862b71a7 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -173,7 +173,9 @@ enum { VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | (1ULL << VIRTIO_RING_F_EVENT_IDX) | - (1ULL << VHOST_F_LOG_ALL), + (1ULL << VHOST_F_LOG_ALL) | + (1ULL << VIRTIO_F_ANY_LAYOUT) | + (1ULL << VIRTIO_F_VERSION_1) }; static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit) diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 1aaf89300621..92f394927f24 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -1093,6 +1093,7 @@ static void fbcon_init(struct vc_data *vc, int init) con_copy_unimap(vc, svc); ops = info->fbcon_par; + ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); p->con_rotate = initial_rotation; set_blitting_type(vc, info); diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c index 0e5fde1d3ffb..9f9a7bef1ff6 100644 --- a/drivers/video/fbdev/broadsheetfb.c +++ b/drivers/video/fbdev/broadsheetfb.c @@ 
-752,7 +752,7 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev, if ((fw_entry->size < 8*1024) || (fw_entry->size > 64*1024)) { dev_err(dev, "Invalid waveform\n"); err = -EINVAL; - goto err_failed; + goto err_fw; } mutex_lock(&(par->io_lock)); @@ -762,13 +762,15 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev, mutex_unlock(&(par->io_lock)); if (err < 0) { dev_err(dev, "Failed to store broadsheet waveform\n"); - goto err_failed; + goto err_fw; } dev_info(dev, "Stored broadsheet waveform, size %zd\n", fw_entry->size); - return len; + err = len; +err_fw: + release_firmware(fw_entry); err_failed: return err; } diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c index 7fa2e6f9e322..b335c1ae8625 100644 --- a/drivers/video/fbdev/fsl-diu-fb.c +++ b/drivers/video/fbdev/fsl-diu-fb.c @@ -1628,9 +1628,16 @@ static int fsl_diu_suspend(struct platform_device *ofdev, pm_message_t state) static int fsl_diu_resume(struct platform_device *ofdev) { struct fsl_diu_data *data; + unsigned int i; data = dev_get_drvdata(&ofdev->dev); - enable_lcdc(data->fsl_diu_info); + + fsl_diu_enable_interrupts(data); + update_lcdc(data->fsl_diu_info); + for (i = 0; i < NUM_AOIS; i++) { + if (data->mfb[i].count) + fsl_diu_enable_panel(&data->fsl_diu_info[i]); + } return 0; } diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c index 9b8bebdf8f86..f9ec5c0484fa 100644 --- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c +++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c @@ -831,6 +831,7 @@ static struct of_device_id of_platform_mb862xx_tbl[] = { { .compatible = "fujitsu,coral", }, { /* end */ } }; +MODULE_DEVICE_TABLE(of, of_platform_mb862xx_tbl); static struct platform_driver of_platform_mb862xxfb_driver = { .driver = { diff --git a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c index a8ce920fa797..d811e6dcaef7 100644 --- a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c +++ b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c @@ -294,7 +294,7 @@ static int dvic_probe_of(struct platform_device *pdev) adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0); if (adapter_node) { - adapter = of_find_i2c_adapter_by_node(adapter_node); + adapter = of_get_i2c_adapter_by_node(adapter_node); if (adapter == NULL) { dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n"); omap_dss_put_device(ddata->in); diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c index 90cbc4c3406c..c581231c74a5 100644 --- a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c +++ b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c @@ -898,6 +898,7 @@ static const struct of_device_id acx565akm_of_match[] = { { .compatible = "omapdss,sony,acx565akm", }, {}, }; +MODULE_DEVICE_TABLE(of, acx565akm_of_match); static struct spi_driver acx565akm_driver = { .driver = { diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c index 7ed9a227f5ea..01b43e9ce941 100644 --- a/drivers/video/fbdev/tridentfb.c +++ b/drivers/video/fbdev/tridentfb.c @@ -226,7 +226,7 @@ static void blade_image_blit(struct tridentfb_par *par, const char *data, writemmr(par, DST1, point(x, y)); writemmr(par, DST2, point(x + w - 1, y + h - 1)); - memcpy(par->io_virt + 0x10000, data, 4 * size); + iowrite32_rep(par->io_virt + 0x10000, data, size); } static void blade_copy_rect(struct tridentfb_par 
*par, @@ -673,8 +673,14 @@ static int get_nativex(struct tridentfb_par *par) static inline void set_lwidth(struct tridentfb_par *par, int width) { write3X4(par, VGA_CRTC_OFFSET, width & 0xFF); - write3X4(par, AddColReg, - (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4)); + /* chips older than TGUI9660 have only 1 width bit in AddColReg */ + /* touching the other one breaks I2C/DDC */ + if (par->chip_id == TGUI9440 || par->chip_id == CYBER9320) + write3X4(par, AddColReg, + (read3X4(par, AddColReg) & 0xEF) | ((width & 0x100) >> 4)); + else + write3X4(par, AddColReg, + (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4)); } /* For resolutions smaller than FP resolution stretch */ diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c index 32d8275e4c88..8a1076beecd3 100644 --- a/drivers/video/of_display_timing.c +++ b/drivers/video/of_display_timing.c @@ -210,6 +210,7 @@ struct display_timings *of_get_display_timings(struct device_node *np) */ pr_err("%s: error in timing %d\n", of_node_full_name(np), disp->num_timings + 1); + kfree(dt); goto timingfail; } diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index c68edc16aa54..79e1aa1b0959 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -817,8 +817,9 @@ config ITCO_WDT tristate "Intel TCO Timer/Watchdog" depends on (X86 || IA64) && PCI select WATCHDOG_CORE + depends on I2C || I2C=n select LPC_ICH if !EXPERT - select I2C_I801 if !EXPERT + select I2C_I801 if !EXPERT && I2C ---help--- Hardware driver for the intel TCO timer based watchdog devices. These drivers are included in the Intel 82801 I/O Controller diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c index 66c3e656a616..8a5ce5b5a0b6 100644 --- a/drivers/watchdog/bcm2835_wdt.c +++ b/drivers/watchdog/bcm2835_wdt.c @@ -36,6 +36,13 @@ #define PM_RSTC_WRCFG_FULL_RESET 0x00000020 #define PM_RSTC_RESET 0x00000102 +/* + * The Raspberry Pi firmware uses the RSTS register to know which partition + * to boot from. The partition value is spread into bits 0, 2, 4, 6, 8, 10. + * Partition 63 is a special partition used by the firmware to indicate halt. + */ +#define PM_RSTS_RASPBERRYPI_HALT 0x555 + #define SECS_TO_WDOG_TICKS(x) ((x) << 16) #define WDOG_TICKS_TO_SECS(x) ((x) >> 16) @@ -151,8 +158,7 @@ static void bcm2835_power_off(void) * hard reset.
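* For illustration (editorial comment, not part of the patch): partition 63 is 0b111111, and spreading those six bits into bits 0, 2, 4, 6, 8 and 10 gives 0b010101010101 == 0x555, i.e. the PM_RSTS_RASPBERRYPI_HALT value written below to signal halt to the firmware.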
*/ val = readl_relaxed(wdt->base + PM_RSTS); - val &= PM_RSTC_WRCFG_CLR; - val |= PM_PASSWORD | PM_RSTS_HADWRH_SET; + val |= PM_PASSWORD | PM_RSTS_RASPBERRYPI_HALT; writel_relaxed(val, wdt->base + PM_RSTS); /* Continue with normal reset mechanism */ diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c index cc1bdfc2ff71..006e2348022c 100644 --- a/drivers/watchdog/gef_wdt.c +++ b/drivers/watchdog/gef_wdt.c @@ -303,6 +303,7 @@ static const struct of_device_id gef_wdt_ids[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, gef_wdt_ids); static struct platform_driver gef_wdt_driver = { .driver = { diff --git a/drivers/watchdog/mena21_wdt.c b/drivers/watchdog/mena21_wdt.c index 69013007dc47..098fa9c34d6d 100644 --- a/drivers/watchdog/mena21_wdt.c +++ b/drivers/watchdog/mena21_wdt.c @@ -253,6 +253,7 @@ static const struct of_device_id a21_wdt_ids[] = { { .compatible = "men,a021-wdt" }, { }, }; +MODULE_DEVICE_TABLE(of, a21_wdt_ids); static struct platform_driver a21_wdt_driver = { .probe = a21_wdt_probe, diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c index 2789da2c0515..60b0605bd7e6 100644 --- a/drivers/watchdog/moxart_wdt.c +++ b/drivers/watchdog/moxart_wdt.c @@ -168,6 +168,7 @@ static const struct of_device_id moxart_watchdog_match[] = { { .compatible = "moxa,moxart-watchdog" }, { }, }; +MODULE_DEVICE_TABLE(of, moxart_watchdog_match); static struct platform_driver moxart_wdt_driver = { .probe = moxart_wdt_probe, diff --git a/fs/block_dev.c b/fs/block_dev.c index 22ea424ee741..073bb57adab1 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1242,6 +1242,13 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) goto out_clear; } bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9); + /* + * If the partition is not aligned on a page + * boundary, we can't do dax I/O to it. 
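+ * For example, with 4 KiB pages PAGE_SIZE / 512 == 8, so a partition + * starting at sector 63 (63 % 8 != 0) has S_DAX cleared by the check below, + * while one starting at sector 2048 with a length that is a multiple of + * 8 sectors keeps it.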
+ */ + if ((bdev->bd_part->start_sect % (PAGE_SIZE / 512)) || + (bdev->bd_part->nr_sects % (PAGE_SIZE / 512))) + bdev->bd_inode->i_flags &= ~S_DAX; } } else { if (bdev->bd_contains == bdev) { diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index ecbc63d3143e..9a2ec79e8cfb 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1828,7 +1828,6 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root, int found = 0; struct extent_buffer *eb; struct btrfs_inode_extref *extref; - struct extent_buffer *leaf; u32 item_size; u32 cur_offset; unsigned long ptr; @@ -1856,9 +1855,8 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root, btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); btrfs_release_path(path); - leaf = path->nodes[0]; - item_size = btrfs_item_size_nr(leaf, slot); - ptr = btrfs_item_ptr_offset(leaf, slot); + item_size = btrfs_item_size_nr(eb, slot); + ptr = btrfs_item_ptr_offset(eb, slot); cur_offset = 0; while (cur_offset < item_size) { @@ -1872,7 +1870,7 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root, if (ret) break; - cur_offset += btrfs_inode_extref_name_len(leaf, extref); + cur_offset += btrfs_inode_extref_name_len(eb, extref); cur_offset += sizeof(*extref); } btrfs_tree_read_unlock_blocking(eb); diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 81220b2203c6..0ef5cc13fae2 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -44,8 +44,6 @@ #define BTRFS_INODE_IN_DELALLOC_LIST 9 #define BTRFS_INODE_READDIO_NEED_LOCK 10 #define BTRFS_INODE_HAS_PROPS 11 -/* DIO is ready to submit */ -#define BTRFS_INODE_DIO_READY 12 /* * The following 3 bits are meant only for the btree inode. * When any of them is set, it means an error happened while writing an diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 0d98aee34fee..1e60d00d4ea7 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2847,6 +2847,8 @@ int open_ctree(struct super_block *sb, !extent_buffer_uptodate(chunk_root->node)) { printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n", sb->s_id); + if (!IS_ERR(chunk_root->node)) + free_extent_buffer(chunk_root->node); chunk_root->node = NULL; goto fail_tree_roots; } @@ -2885,6 +2887,8 @@ retry_root_backup: !extent_buffer_uptodate(tree_root->node)) { printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n", sb->s_id); + if (!IS_ERR(tree_root->node)) + free_extent_buffer(tree_root->node); tree_root->node = NULL; goto recovery_tree_root; } @@ -3765,9 +3769,7 @@ void close_ctree(struct btrfs_root *root) * block groups queued for removal, the deletion will be * skipped when we quit the cleaner thread. 
*/ - mutex_lock(&root->fs_info->cleaner_mutex); btrfs_delete_unused_bgs(root->fs_info); - mutex_unlock(&root->fs_info->cleaner_mutex); ret = btrfs_commit_super(root); if (ret) diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 8d052209f473..2513a7f53334 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -112,11 +112,11 @@ static struct dentry *btrfs_fh_to_parent(struct super_block *sb, struct fid *fh, u32 generation; if (fh_type == FILEID_BTRFS_WITH_PARENT) { - if (fh_len != BTRFS_FID_SIZE_CONNECTABLE) + if (fh_len < BTRFS_FID_SIZE_CONNECTABLE) return NULL; root_objectid = fid->root_objectid; } else if (fh_type == FILEID_BTRFS_WITH_PARENT_ROOT) { - if (fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT) + if (fh_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT) return NULL; root_objectid = fid->parent_root_objectid; } else @@ -136,11 +136,11 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh, u32 generation; if ((fh_type != FILEID_BTRFS_WITH_PARENT || - fh_len != BTRFS_FID_SIZE_CONNECTABLE) && + fh_len < BTRFS_FID_SIZE_CONNECTABLE) && (fh_type != FILEID_BTRFS_WITH_PARENT_ROOT || - fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT) && + fh_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT) && (fh_type != FILEID_BTRFS_WITHOUT_PARENT || - fh_len != BTRFS_FID_SIZE_NON_CONNECTABLE)) + fh_len < BTRFS_FID_SIZE_NON_CONNECTABLE)) return NULL; objectid = fid->objectid; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 5411f0ab5683..601d7d45d164 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2828,6 +2828,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head; int ret; int run_all = count == (unsigned long)-1; + bool can_flush_pending_bgs = trans->can_flush_pending_bgs; /* We'll clean this up in btrfs_cleanup_transaction */ if (trans->aborted) @@ -2844,6 +2845,7 @@ again: #ifdef SCRAMBLE_DELAYED_REFS delayed_refs->run_delayed_start = find_middle(&delayed_refs->root); #endif + trans->can_flush_pending_bgs = false; ret = __btrfs_run_delayed_refs(trans, root, count); if (ret < 0) { btrfs_abort_transaction(trans, root, ret); @@ -2893,6 +2895,7 @@ again: } out: assert_qgroups_uptodate(trans); + trans->can_flush_pending_bgs = can_flush_pending_bgs; return 0; } @@ -3742,10 +3745,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, found->bytes_reserved = 0; found->bytes_readonly = 0; found->bytes_may_use = 0; - if (total_bytes > 0) - found->full = 0; - else - found->full = 1; + found->full = 0; found->force_alloc = CHUNK_ALLOC_NO_FORCE; found->chunk_alloc = 0; found->flush = 0; @@ -4309,7 +4309,8 @@ out: * the block groups that were made dirty during the lifetime of the * transaction. 
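* With this change the flush below additionally requires that the handle allows it (trans->can_flush_pending_bgs); the existing threshold of 2 MiB of reserved chunk metadata (2 * 1024 * 1024 bytes) is unchanged.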
*/ - if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) { + if (trans->can_flush_pending_bgs && + trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) { btrfs_create_pending_block_groups(trans, trans->root); btrfs_trans_release_chunk_metadata(trans); } @@ -8668,7 +8669,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, } if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) { - btrfs_drop_and_free_fs_root(tree_root->fs_info, root); + btrfs_add_dropped_root(trans, root); } else { free_extent_buffer(root->node); free_extent_buffer(root->commit_root); @@ -9563,7 +9564,9 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans, struct btrfs_block_group_item item; struct btrfs_key key; int ret = 0; + bool can_flush_pending_bgs = trans->can_flush_pending_bgs; + trans->can_flush_pending_bgs = false; list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) { if (ret) goto next; @@ -9584,6 +9587,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans, next: list_del_init(&block_group->bg_list); } + trans->can_flush_pending_bgs = can_flush_pending_bgs; } int btrfs_make_block_group(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index f1018cfbfefa..3915c9473e94 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2798,7 +2798,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, bio_end_io_t end_io_func, int mirror_num, unsigned long prev_bio_flags, - unsigned long bio_flags) + unsigned long bio_flags, + bool force_bio_submit) { int ret = 0; struct bio *bio; @@ -2814,6 +2815,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, contig = bio_end_sector(bio) == sector; if (prev_bio_flags != bio_flags || !contig || + force_bio_submit || merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) || bio_add_page(bio, page, page_size, offset) < page_size) { ret = submit_one_bio(rw, bio, mirror_num, @@ -2910,7 +2912,8 @@ static int __do_readpage(struct extent_io_tree *tree, get_extent_t *get_extent, struct extent_map **em_cached, struct bio **bio, int mirror_num, - unsigned long *bio_flags, int rw) + unsigned long *bio_flags, int rw, + u64 *prev_em_start) { struct inode *inode = page->mapping->host; u64 start = page_offset(page); @@ -2958,6 +2961,7 @@ static int __do_readpage(struct extent_io_tree *tree, } while (cur <= end) { unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; + bool force_bio_submit = false; if (cur >= last_byte) { char *userpage; @@ -3008,6 +3012,49 @@ static int __do_readpage(struct extent_io_tree *tree, block_start = em->block_start; if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) block_start = EXTENT_MAP_HOLE; + + /* + * If we have a file range that points to a compressed extent + * and it's followed by a consecutive file range that points to + * to the same compressed extent (possibly with a different + * offset and/or length, so it either points to the whole extent + * or only part of it), we must make sure we do not submit a + * single bio to populate the pages for the 2 ranges because + * this makes the compressed extent read zero out the pages + * belonging to the 2nd range. 
Imagine the following scenario: + * + * File layout + * [0 - 8K] [8K - 24K] + * | | + * | | + * points to extent X, points to extent X, + * offset 4K, length of 8K offset 0, length 16K + * + * [extent X, compressed length = 4K uncompressed length = 16K] + * + * If the bio to read the compressed extent covers both ranges, + * it will decompress extent X into the pages belonging to the + * first range and then it will stop, zeroing out the remaining + * pages that belong to the other range that points to extent X. + * So here we make sure we submit 2 bios, one for the first + * range and another one for the third range. Both will target + * the same physical extent from disk, but we can't currently + * make the compressed bio endio callback populate the pages + * for both ranges because each compressed bio is tightly + * coupled with a single extent map, and each range can have + * an extent map with a different offset value relative to the + * uncompressed data of our extent and different lengths. This + * is a corner case so we prioritize correctness over + * non-optimal behavior (submitting 2 bios for the same extent). + */ + if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) && + prev_em_start && *prev_em_start != (u64)-1 && + *prev_em_start != em->orig_start) + force_bio_submit = true; + + if (prev_em_start) + *prev_em_start = em->orig_start; + free_extent_map(em); em = NULL; @@ -3057,7 +3104,8 @@ static int __do_readpage(struct extent_io_tree *tree, bdev, bio, pnr, end_bio_extent_readpage, mirror_num, *bio_flags, - this_bio_flag); + this_bio_flag, + force_bio_submit); if (!ret) { nr++; *bio_flags = this_bio_flag; @@ -3084,7 +3132,8 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree, get_extent_t *get_extent, struct extent_map **em_cached, struct bio **bio, int mirror_num, - unsigned long *bio_flags, int rw) + unsigned long *bio_flags, int rw, + u64 *prev_em_start) { struct inode *inode; struct btrfs_ordered_extent *ordered; @@ -3104,7 +3153,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree, for (index = 0; index < nr_pages; index++) { __do_readpage(tree, pages[index], get_extent, em_cached, bio, - mirror_num, bio_flags, rw); + mirror_num, bio_flags, rw, prev_em_start); page_cache_release(pages[index]); } } @@ -3114,7 +3163,8 @@ static void __extent_readpages(struct extent_io_tree *tree, int nr_pages, get_extent_t *get_extent, struct extent_map **em_cached, struct bio **bio, int mirror_num, - unsigned long *bio_flags, int rw) + unsigned long *bio_flags, int rw, + u64 *prev_em_start) { u64 start = 0; u64 end = 0; @@ -3135,7 +3185,7 @@ static void __extent_readpages(struct extent_io_tree *tree, index - first_index, start, end, get_extent, em_cached, bio, mirror_num, bio_flags, - rw); + rw, prev_em_start); start = page_start; end = start + PAGE_CACHE_SIZE - 1; first_index = index; @@ -3146,7 +3196,8 @@ static void __extent_readpages(struct extent_io_tree *tree, __do_contiguous_readpages(tree, &pages[first_index], index - first_index, start, end, get_extent, em_cached, bio, - mirror_num, bio_flags, rw); + mirror_num, bio_flags, rw, + prev_em_start); } static int __extent_read_full_page(struct extent_io_tree *tree, @@ -3172,7 +3223,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, } ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num, - bio_flags, rw); + bio_flags, rw, NULL); return ret; } @@ -3198,7 +3249,7 @@ int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page, int ret; ret = 
__do_readpage(tree, page, get_extent, NULL, &bio, mirror_num, - &bio_flags, READ); + &bio_flags, READ, NULL); if (bio) ret = submit_one_bio(READ, bio, mirror_num, bio_flags); return ret; @@ -3451,7 +3502,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, sector, iosize, pg_offset, bdev, &epd->bio, max_nr, end_bio_extent_writepage, - 0, 0, 0); + 0, 0, 0, false); if (ret) SetPageError(page); } @@ -3754,7 +3805,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb, ret = submit_extent_page(rw, tree, wbc, p, offset >> 9, PAGE_CACHE_SIZE, 0, bdev, &epd->bio, -1, end_bio_extent_buffer_writepage, - 0, epd->bio_flags, bio_flags); + 0, epd->bio_flags, bio_flags, false); epd->bio_flags = bio_flags; if (ret) { set_btree_ioerr(p); @@ -4158,6 +4209,7 @@ int extent_readpages(struct extent_io_tree *tree, struct page *page; struct extent_map *em_cached = NULL; int nr = 0; + u64 prev_em_start = (u64)-1; for (page_idx = 0; page_idx < nr_pages; page_idx++) { page = list_entry(pages->prev, struct page, lru); @@ -4174,12 +4226,12 @@ int extent_readpages(struct extent_io_tree *tree, if (nr < ARRAY_SIZE(pagepool)) continue; __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, - &bio, 0, &bio_flags, READ); + &bio, 0, &bio_flags, READ, &prev_em_start); nr = 0; } if (nr) __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, - &bio, 0, &bio_flags, READ); + &bio, 0, &bio_flags, READ, &prev_em_start); if (em_cached) free_extent_map(em_cached); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index b823fac91c92..8c6f247ba81d 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -2584,7 +2584,7 @@ static long btrfs_fallocate(struct file *file, int mode, alloc_start); if (ret) goto out; - } else { + } else if (offset + len > inode->i_size) { /* * If we are fallocating from the end of the file onward we * need to zero out the end of the page if i_size lands in the diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a0fa7253a2d7..611b66d73e80 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5084,7 +5084,8 @@ void btrfs_evict_inode(struct inode *inode) goto no_delete; } /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */ - btrfs_wait_ordered_range(inode, 0, (u64)-1); + if (!special_file(inode->i_mode)) + btrfs_wait_ordered_range(inode, 0, (u64)-1); btrfs_free_io_failure_record(inode, 0, (u64)-1); @@ -7408,6 +7409,10 @@ static struct extent_map *create_pinned_em(struct inode *inode, u64 start, return em; } +struct btrfs_dio_data { + u64 outstanding_extents; + u64 reserve; +}; static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) @@ -7415,10 +7420,10 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, struct extent_map *em; struct btrfs_root *root = BTRFS_I(inode)->root; struct extent_state *cached_state = NULL; + struct btrfs_dio_data *dio_data = NULL; u64 start = iblock << inode->i_blkbits; u64 lockstart, lockend; u64 len = bh_result->b_size; - u64 *outstanding_extents = NULL; int unlock_bits = EXTENT_LOCKED; int ret = 0; @@ -7436,7 +7441,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, * that anything that needs to check if there's a transction doesn't get * confused. */ - outstanding_extents = current->journal_info; + dio_data = current->journal_info; current->journal_info = NULL; } @@ -7568,17 +7573,18 @@ unlock: * within our reservation, otherwise we need to adjust our inode * counter appropriately. 
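* (That count, together with the reserved byte total, now travels in the btrfs_dio_data struct stashed in current->journal_info rather than as a bare u64.)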
*/ - if (*outstanding_extents) { - (*outstanding_extents)--; + if (dio_data->outstanding_extents) { + (dio_data->outstanding_extents)--; } else { spin_lock(&BTRFS_I(inode)->lock); BTRFS_I(inode)->outstanding_extents++; spin_unlock(&BTRFS_I(inode)->lock); } - current->journal_info = outstanding_extents; btrfs_free_reserved_data_space(inode, len); - set_bit(BTRFS_INODE_DIO_READY, &BTRFS_I(inode)->runtime_flags); + WARN_ON(dio_data->reserve < len); + dio_data->reserve -= len; + current->journal_info = dio_data; } /* @@ -7601,8 +7607,8 @@ unlock: unlock_err: clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, unlock_bits, 1, 0, &cached_state, GFP_NOFS); - if (outstanding_extents) - current->journal_info = outstanding_extents; + if (dio_data) + current->journal_info = dio_data; return ret; } @@ -8329,7 +8335,8 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; - u64 outstanding_extents = 0; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_dio_data dio_data = { 0 }; size_t count = 0; int flags = 0; bool wakeup = true; @@ -8367,7 +8374,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, ret = btrfs_delalloc_reserve_space(inode, count); if (ret) goto out; - outstanding_extents = div64_u64(count + + dio_data.outstanding_extents = div64_u64(count + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE); @@ -8376,7 +8383,8 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, * do the accounting properly if we go over the number we * originally calculated. Abuse current->journal_info for this. */ - current->journal_info = &outstanding_extents; + dio_data.reserve = round_up(count, root->sectorsize); + current->journal_info = &dio_data; } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, &BTRFS_I(inode)->runtime_flags)) { inode_dio_end(inode); @@ -8391,16 +8399,9 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, if (iov_iter_rw(iter) == WRITE) { current->journal_info = NULL; if (ret < 0 && ret != -EIOCBQUEUED) { - /* - * If the error comes from submitting stage, - * btrfs_get_blocsk_direct() has free'd data space, - * and metadata space will be handled by - * finish_ordered_fn, don't do that again to make - * sure bytes_may_use is correct. 
- */ - if (!test_and_clear_bit(BTRFS_INODE_DIO_READY, - &BTRFS_I(inode)->runtime_flags)) - btrfs_delalloc_release_space(inode, count); + if (dio_data.reserve) + btrfs_delalloc_release_space(inode, + dio_data.reserve); } else if (ret >= 0 && (size_t)ret < count) btrfs_delalloc_release_space(inode, count - (size_t)ret); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 0adf5422fce9..8d20f3b1cab0 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -4639,6 +4639,11 @@ locked: bctl->flags |= BTRFS_BALANCE_TYPE_MASK; } + if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) { + ret = -EINVAL; + goto out_bctl; + } + do_balance: /* * Ownership of bctl and mutually_exclusive_operation_running @@ -4650,12 +4655,15 @@ do_balance: need_unlock = false; ret = btrfs_balance(bctl, bargs); + bctl = NULL; if (arg) { if (copy_to_user(arg, bargs, sizeof(*bargs))) ret = -EFAULT; } +out_bctl: + kfree(bctl); out_bargs: kfree(bargs); out_unlock: diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index aa72bfd28f7d..a739b825bdd3 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -1920,10 +1920,12 @@ static int did_overwrite_ref(struct send_ctx *sctx, /* * We know that it is or will be overwritten. Check this now. * The current inode being processed might have been the one that caused - * inode 'ino' to be orphanized, therefore ow_inode can actually be the - * same as sctx->send_progress. + * inode 'ino' to be orphanized, therefore check if ow_inode matches + * the current inode being processed. */ - if (ow_inode <= sctx->send_progress) + if ((ow_inode < sctx->send_progress) || + (ino != sctx->cur_ino && ow_inode == sctx->cur_ino && + gen == sctx->cur_inode_gen)) ret = 1; else ret = 0; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 2b07b3581781..11d1eab9234d 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1658,9 +1658,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) * groups on disk until we're mounted read-write again * unless we clean them up here. */ - mutex_lock(&root->fs_info->cleaner_mutex); btrfs_delete_unused_bgs(fs_info); - mutex_unlock(&root->fs_info->cleaner_mutex); btrfs_dev_replace_suspend_for_unmount(fs_info); btrfs_scrub_cancel(fs_info); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 8f259b3a66b3..a5b06442f0bf 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -117,6 +117,18 @@ static noinline void switch_commit_roots(struct btrfs_transaction *trans, btrfs_unpin_free_ino(root); clear_btree_io_tree(&root->dirty_log_pages); } + + /* We can free old roots now. 
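+ * (The lock is dropped around btrfs_drop_and_free_fs_root() for each root + * and re-taken before the list is examined again.)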
*/ + spin_lock(&trans->dropped_roots_lock); + while (!list_empty(&trans->dropped_roots)) { + root = list_first_entry(&trans->dropped_roots, + struct btrfs_root, root_list); + list_del_init(&root->root_list); + spin_unlock(&trans->dropped_roots_lock); + btrfs_drop_and_free_fs_root(fs_info, root); + spin_lock(&trans->dropped_roots_lock); + } + spin_unlock(&trans->dropped_roots_lock); up_write(&fs_info->commit_root_sem); } @@ -255,11 +267,13 @@ loop: INIT_LIST_HEAD(&cur_trans->pending_ordered); INIT_LIST_HEAD(&cur_trans->dirty_bgs); INIT_LIST_HEAD(&cur_trans->io_bgs); + INIT_LIST_HEAD(&cur_trans->dropped_roots); mutex_init(&cur_trans->cache_write_mutex); cur_trans->num_dirty_bgs = 0; spin_lock_init(&cur_trans->dirty_bgs_lock); INIT_LIST_HEAD(&cur_trans->deleted_bgs); spin_lock_init(&cur_trans->deleted_bgs_lock); + spin_lock_init(&cur_trans->dropped_roots_lock); list_add_tail(&cur_trans->list, &fs_info->trans_list); extent_io_tree_init(&cur_trans->dirty_pages, fs_info->btree_inode->i_mapping); @@ -336,6 +350,24 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans, } +void btrfs_add_dropped_root(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + struct btrfs_transaction *cur_trans = trans->transaction; + + /* Add ourselves to the transaction dropped list */ + spin_lock(&cur_trans->dropped_roots_lock); + list_add_tail(&root->root_list, &cur_trans->dropped_roots); + spin_unlock(&cur_trans->dropped_roots_lock); + + /* Make sure we don't try to update the root at commit time */ + spin_lock(&root->fs_info->fs_roots_radix_lock); + radix_tree_tag_clear(&root->fs_info->fs_roots_radix, + (unsigned long)root->root_key.objectid, + BTRFS_ROOT_TRANS_TAG); + spin_unlock(&root->fs_info->fs_roots_radix_lock); +} + int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, struct btrfs_root *root) { @@ -525,6 +557,7 @@ again: h->delayed_ref_elem.seq = 0; h->type = type; h->allocating_chunk = false; + h->can_flush_pending_bgs = true; h->reloc_reserved = false; h->sync = false; INIT_LIST_HEAD(&h->qgroup_ref_list); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index edc2fbc262d7..a994bb097ee5 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -65,6 +65,7 @@ struct btrfs_transaction { struct list_head switch_commits; struct list_head dirty_bgs; struct list_head io_bgs; + struct list_head dropped_roots; u64 num_dirty_bgs; /* @@ -76,6 +77,7 @@ struct btrfs_transaction { spinlock_t dirty_bgs_lock; struct list_head deleted_bgs; spinlock_t deleted_bgs_lock; + spinlock_t dropped_roots_lock; struct btrfs_delayed_ref_root delayed_refs; int aborted; int dirty_bg_run; @@ -116,6 +118,7 @@ struct btrfs_trans_handle { short aborted; short adding_csums; bool allocating_chunk; + bool can_flush_pending_bgs; bool reloc_reserved; bool sync; unsigned int type; @@ -216,5 +219,6 @@ int btrfs_transaction_blocked(struct btrfs_fs_info *info); int btrfs_transaction_in_commit(struct btrfs_fs_info *info); void btrfs_put_transaction(struct btrfs_transaction *transaction); void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info); - +void btrfs_add_dropped_root(struct btrfs_trans_handle *trans, + struct btrfs_root *root); #endif diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 2ca784a14e84..595279a8b99f 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -376,6 +376,14 @@ struct map_lookup { #define BTRFS_BALANCE_ARGS_VRANGE (1ULL << 4) #define BTRFS_BALANCE_ARGS_LIMIT (1ULL << 5) +#define BTRFS_BALANCE_ARGS_MASK \ + (BTRFS_BALANCE_ARGS_PROFILES | \ + 
BTRFS_BALANCE_ARGS_USAGE | \ + BTRFS_BALANCE_ARGS_DEVID | \ + BTRFS_BALANCE_ARGS_DRANGE | \ + BTRFS_BALANCE_ARGS_VRANGE | \ + BTRFS_BALANCE_ARGS_LIMIT) + /* * Profile changing flags. When SOFT is set we won't relocate chunk if * it already has the target profile (even though it may be diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index aa0dc2573374..afa09fce8151 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -444,6 +444,48 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp) return 0; } +/* Server has provided av pairs/target info in the type 2 challenge + * packet and we have plucked it and stored within smb session. + * We parse that blob here to find the server given timestamp + * as part of ntlmv2 authentication (or local current time as + * default in case of failure) + */ +static __le64 +find_timestamp(struct cifs_ses *ses) +{ + unsigned int attrsize; + unsigned int type; + unsigned int onesize = sizeof(struct ntlmssp2_name); + unsigned char *blobptr; + unsigned char *blobend; + struct ntlmssp2_name *attrptr; + + if (!ses->auth_key.len || !ses->auth_key.response) + return 0; + + blobptr = ses->auth_key.response; + blobend = blobptr + ses->auth_key.len; + + while (blobptr + onesize < blobend) { + attrptr = (struct ntlmssp2_name *) blobptr; + type = le16_to_cpu(attrptr->type); + if (type == NTLMSSP_AV_EOL) + break; + blobptr += 2; /* advance attr type */ + attrsize = le16_to_cpu(attrptr->length); + blobptr += 2; /* advance attr size */ + if (blobptr + attrsize > blobend) + break; + if (type == NTLMSSP_AV_TIMESTAMP) { + if (attrsize == sizeof(u64)) + return *((__le64 *)blobptr); + } + blobptr += attrsize; /* advance attr value */ + } + + return cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); +} + static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash, const struct nls_table *nls_cp) { @@ -641,6 +683,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) struct ntlmv2_resp *ntlmv2; char ntlmv2_hash[16]; unsigned char *tiblob = NULL; /* target info blob */ + __le64 rsp_timestamp; if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) { if (!ses->domainName) { @@ -659,6 +702,12 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) } } + /* Must be within 5 minutes of the server (or in range +/-2h + * in case of Mac OS X), so simply carry over server timestamp + * (as Windows 7 does) + */ + rsp_timestamp = find_timestamp(ses); + baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp); tilen = ses->auth_key.len; tiblob = ses->auth_key.response; @@ -675,8 +724,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) (ses->auth_key.response + CIFS_SESS_KEY_SIZE); ntlmv2->blob_signature = cpu_to_le32(0x00000101); ntlmv2->reserved = 0; - /* Must be within 5 minutes of the server */ - ntlmv2->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); + ntlmv2->time = rsp_timestamp; + get_random_bytes(&ntlmv2->client_chal, sizeof(ntlmv2->client_chal)); ntlmv2->reserved2 = 0; diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 6a1119e87fbb..e739950ca084 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -325,8 +325,11 @@ cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server) static void cifs_show_security(struct seq_file *s, struct cifs_ses *ses) { - if (ses->sectype == Unspecified) + if (ses->sectype == Unspecified) { + if (ses->user_name == NULL) + seq_puts(s, ",sec=none"); return; + } seq_puts(s, ",sec="); diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h 
index 27aea110e923..c3cc1609025f 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -136,5 +136,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* CONFIG_CIFS_NFSD_EXPORT */ -#define CIFS_VERSION "2.07" +#define CIFS_VERSION "2.08" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/file.c b/fs/cifs/file.c index e2a6af1508af..62203c387db4 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -3380,6 +3380,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, struct page *page, *tpage; unsigned int expected_index; int rc; + gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping); INIT_LIST_HEAD(tmplist); @@ -3392,7 +3393,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, */ __set_page_locked(page); rc = add_to_page_cache_locked(page, mapping, - page->index, GFP_KERNEL); + page->index, gfp); /* give up if we can't stick it in the cache */ if (rc) { @@ -3418,8 +3419,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, break; __set_page_locked(page); - if (add_to_page_cache_locked(page, mapping, page->index, - GFP_KERNEL)) { + if (add_to_page_cache_locked(page, mapping, page->index, gfp)) { __clear_page_locked(page); break; } diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index f621b44cb800..6b66dd5d1540 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -2034,7 +2034,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, struct tcon_link *tlink = NULL; struct cifs_tcon *tcon = NULL; struct TCP_Server_Info *server; - struct cifs_io_parms io_parms; /* * To avoid spurious oplock breaks from server, in the case of @@ -2056,18 +2055,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, rc = -ENOSYS; cifsFileInfo_put(open_file); cifs_dbg(FYI, "SetFSize for attrs rc = %d\n", rc); - if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { - unsigned int bytes_written; - - io_parms.netfid = open_file->fid.netfid; - io_parms.pid = open_file->pid; - io_parms.tcon = tcon; - io_parms.offset = 0; - io_parms.length = attrs->ia_size; - rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, - NULL, NULL, 1); - cifs_dbg(FYI, "Wrt seteof rc %d\n", rc); - } } else rc = -EINVAL; @@ -2093,28 +2080,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, else rc = -ENOSYS; cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc); - if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { - __u16 netfid; - int oplock = 0; - rc = SMBLegacyOpen(xid, tcon, full_path, FILE_OPEN, - GENERIC_WRITE, CREATE_NOT_DIR, &netfid, - &oplock, NULL, cifs_sb->local_nls, - cifs_remap(cifs_sb)); - if (rc == 0) { - unsigned int bytes_written; - - io_parms.netfid = netfid; - io_parms.pid = current->tgid; - io_parms.tcon = tcon; - io_parms.offset = 0; - io_parms.length = attrs->ia_size; - rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, NULL, - NULL, 1); - cifs_dbg(FYI, "wrt seteof rc %d\n", rc); - CIFSSMBClose(xid, tcon, netfid); - } - } if (tlink) cifs_put_tlink(tlink); diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c index c63f5227b681..28a77bf1d559 100644 --- a/fs/cifs/ioctl.c +++ b/fs/cifs/ioctl.c @@ -67,6 +67,12 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file, goto out_drop_write; } + if (src_file.file->f_op->unlocked_ioctl != cifs_ioctl) { + rc = -EBADF; + cifs_dbg(VFS, "src file seems to be from a different filesystem type\n"); + goto out_fput; + } + if ((!src_file.file->private_data) || (!dst_file->private_data)) { 
rc = -EBADF; cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n"); diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index df91bcf56d67..18da19f4f811 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -50,9 +50,13 @@ change_conf(struct TCP_Server_Info *server) break; default: server->echoes = true; - server->oplocks = true; + if (enable_oplocks) { + server->oplocks = true; + server->oplock_credits = 1; + } else + server->oplocks = false; + server->echo_credits = 1; - server->oplock_credits = 1; } server->credits -= server->echo_credits + server->oplock_credits; return 0; diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 070fb2ad85ce..597a417ba94d 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -46,6 +46,7 @@ #include "smb2status.h" #include "smb2glob.h" #include "cifspdu.h" +#include "cifs_spnego.h" /* * The following table defines the expected "StructureSize" of SMB2 requests @@ -486,19 +487,15 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) cifs_dbg(FYI, "missing security blob on negprot\n"); rc = cifs_enable_signing(server, ses->sign); -#ifdef CONFIG_SMB2_ASN1 /* BB REMOVEME when updated asn1.c ready */ if (rc) goto neg_exit; - if (blob_length) + if (blob_length) { rc = decode_negTokenInit(security_blob, blob_length, server); - if (rc == 1) - rc = 0; - else if (rc == 0) { - rc = -EIO; - goto neg_exit; + if (rc == 1) + rc = 0; + else if (rc == 0) + rc = -EIO; } -#endif - neg_exit: free_rsp_buf(resp_buftype, rsp); return rc; @@ -592,7 +589,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */ struct TCP_Server_Info *server = ses->server; u16 blob_length = 0; - char *security_blob; + struct key *spnego_key = NULL; + char *security_blob = NULL; char *ntlmssp_blob = NULL; bool use_spnego = false; /* else use raw ntlmssp */ @@ -620,7 +618,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, ses->ntlmssp->sesskey_per_smbsess = true; /* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */ - ses->sectype = RawNTLMSSP; + if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP) + ses->sectype = RawNTLMSSP; ssetup_ntlmssp_authenticate: if (phase == NtLmChallenge) @@ -649,7 +648,48 @@ ssetup_ntlmssp_authenticate: iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field and 1 for pad */ iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; - if (phase == NtLmNegotiate) { + + if (ses->sectype == Kerberos) { +#ifdef CONFIG_CIFS_UPCALL + struct cifs_spnego_msg *msg; + + spnego_key = cifs_get_spnego_key(ses); + if (IS_ERR(spnego_key)) { + rc = PTR_ERR(spnego_key); + spnego_key = NULL; + goto ssetup_exit; + } + + msg = spnego_key->payload.data; + /* + * check version field to make sure that cifs.upcall is + * sending us a response in an expected form + */ + if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) { + cifs_dbg(VFS, + "bad cifs.upcall version. 
Expected %d got %d", + CIFS_SPNEGO_UPCALL_VERSION, msg->version); + rc = -EKEYREJECTED; + goto ssetup_exit; + } + ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len, + GFP_KERNEL); + if (!ses->auth_key.response) { + cifs_dbg(VFS, + "Kerberos can't allocate (%u bytes) memory", + msg->sesskey_len); + rc = -ENOMEM; + goto ssetup_exit; + } + ses->auth_key.len = msg->sesskey_len; + blob_length = msg->secblob_len; + iov[1].iov_base = msg->data + msg->sesskey_len; + iov[1].iov_len = blob_length; +#else + rc = -EOPNOTSUPP; + goto ssetup_exit; +#endif /* CONFIG_CIFS_UPCALL */ + } else if (phase == NtLmNegotiate) { /* if not krb5 must be ntlmssp */ ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE), GFP_KERNEL); if (ntlmssp_blob == NULL) { @@ -672,6 +712,8 @@ ssetup_ntlmssp_authenticate: /* with raw NTLMSSP we don't encapsulate in SPNEGO */ security_blob = ntlmssp_blob; } + iov[1].iov_base = security_blob; + iov[1].iov_len = blob_length; } else if (phase == NtLmAuthenticate) { req->hdr.SessionId = ses->Suid; ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500, @@ -699,6 +741,8 @@ ssetup_ntlmssp_authenticate: } else { security_blob = ntlmssp_blob; } + iov[1].iov_base = security_blob; + iov[1].iov_len = blob_length; } else { cifs_dbg(VFS, "illegal ntlmssp phase\n"); rc = -EIO; @@ -710,8 +754,6 @@ ssetup_ntlmssp_authenticate: cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */ - 4 /* rfc1001 len */); req->SecurityBufferLength = cpu_to_le16(blob_length); - iov[1].iov_base = security_blob; - iov[1].iov_len = blob_length; inc_rfc1001_len(req, blob_length - 1 /* pad */); @@ -722,6 +764,7 @@ ssetup_ntlmssp_authenticate: kfree(security_blob); rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base; + ses->Suid = rsp->hdr.SessionId; if (resp_buftype != CIFS_NO_BUFFER && rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) { if (phase != NtLmNegotiate) { @@ -739,7 +782,6 @@ ssetup_ntlmssp_authenticate: /* NTLMSSP Negotiate sent now processing challenge (response) */ phase = NtLmChallenge; /* process ntlmssp challenge */ rc = 0; /* MORE_PROCESSING is not an error here but expected */ - ses->Suid = rsp->hdr.SessionId; rc = decode_ntlmssp_challenge(rsp->Buffer, le16_to_cpu(rsp->SecurityBufferLength), ses); } @@ -796,6 +838,10 @@ keygen_exit: kfree(ses->auth_key.response); ses->auth_key.response = NULL; } + if (spnego_key) { + key_invalidate(spnego_key); + key_put(spnego_key); + } kfree(ses->ntlmssp); return rc; @@ -876,6 +922,12 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, if (tcon && tcon->bad_network_name) return -ENOENT; + if ((tcon && tcon->seal) && + ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) { + cifs_dbg(VFS, "encryption requested but no server support"); + return -EOPNOTSUPP; + } + unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL); if (unc_path == NULL) return -ENOMEM; @@ -955,6 +1007,8 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0)) cifs_dbg(VFS, "DFS capability contradicts DFS flag\n"); init_copy_chunk_defaults(tcon); + if (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA) + cifs_dbg(VFS, "Encrypted shares not supported"); if (tcon->ses->server->ops->validate_negotiate) rc = tcon->ses->server->ops->validate_negotiate(xid, tcon); tcon_exit: @@ -119,7 +119,8 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter, size_t len; if (pos == max) { unsigned blkbits = inode->i_blkbits; - sector_t block = pos >> blkbits; + long page = 
pos >> PAGE_SHIFT; + sector_t block = page << (PAGE_SHIFT - blkbits); unsigned first = pos - (block << blkbits); long size; @@ -284,6 +285,7 @@ static int copy_user_bh(struct page *to, struct buffer_head *bh, static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh, struct vm_area_struct *vma, struct vm_fault *vmf) { + struct address_space *mapping = inode->i_mapping; sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9); unsigned long vaddr = (unsigned long)vmf->virtual_address; void __pmem *addr; @@ -291,6 +293,8 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh, pgoff_t size; int error; + i_mmap_lock_read(mapping); + /* * Check truncate didn't happen while we were allocating a block. * If it did, this block may or may not be still allocated to the @@ -320,6 +324,8 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh, error = vm_insert_mixed(vma, vaddr, pfn); out: + i_mmap_unlock_read(mapping); + return error; } @@ -381,17 +387,15 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, * from a read fault and we've raced with a truncate */ error = -EIO; - goto unlock; + goto unlock_page; } - } else { - i_mmap_lock_write(mapping); } error = get_block(inode, block, &bh, 0); if (!error && (bh.b_size < PAGE_SIZE)) error = -EIO; /* fs corruption? */ if (error) - goto unlock; + goto unlock_page; if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) { if (vmf->flags & FAULT_FLAG_WRITE) { @@ -402,9 +406,8 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, if (!error && (bh.b_size < PAGE_SIZE)) error = -EIO; if (error) - goto unlock; + goto unlock_page; } else { - i_mmap_unlock_write(mapping); return dax_load_hole(mapping, page, vmf); } } @@ -416,15 +419,17 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, else clear_user_highpage(new_page, vaddr); if (error) - goto unlock; + goto unlock_page; vmf->page = page; if (!page) { + i_mmap_lock_read(mapping); /* Check we didn't race with truncate */ size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT; if (vmf->pgoff >= size) { + i_mmap_unlock_read(mapping); error = -EIO; - goto unlock; + goto out; } } return VM_FAULT_LOCKED; @@ -460,8 +465,6 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE)); } - if (!page) - i_mmap_unlock_write(mapping); out: if (error == -ENOMEM) return VM_FAULT_OOM | major; @@ -470,14 +473,11 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, return VM_FAULT_SIGBUS | major; return VM_FAULT_NOPAGE | major; - unlock: + unlock_page: if (page) { unlock_page(page); page_cache_release(page); - } else { - i_mmap_unlock_write(mapping); } - goto out; } EXPORT_SYMBOL(__dax_fault); @@ -555,10 +555,10 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, block = (sector_t)pgoff << (PAGE_SHIFT - blkbits); bh.b_size = PMD_SIZE; - i_mmap_lock_write(mapping); length = get_block(inode, block, &bh, write); if (length) return VM_FAULT_SIGBUS; + i_mmap_lock_read(mapping); /* * If the filesystem isn't willing to tell us the length of a hole, @@ -568,24 +568,14 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) goto fallback; - if (buffer_unwritten(&bh) || buffer_new(&bh)) { - int i; - for (i = 0; i < PTRS_PER_PMD; i++) - clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE); - wmb_pmem(); - count_vm_event(PGMAJFAULT); - 
mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); - result |= VM_FAULT_MAJOR; - } - /* * If we allocated new storage, make sure no process has any * zero pages covering this hole */ if (buffer_new(&bh)) { - i_mmap_unlock_write(mapping); + i_mmap_unlock_read(mapping); unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0); - i_mmap_lock_write(mapping); + i_mmap_lock_read(mapping); } /* @@ -632,15 +622,25 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR)) goto fallback; + if (buffer_unwritten(&bh) || buffer_new(&bh)) { + int i; + for (i = 0; i < PTRS_PER_PMD; i++) + clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE); + wmb_pmem(); + count_vm_event(PGMAJFAULT); + mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); + result |= VM_FAULT_MAJOR; + } + result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write); } out: + i_mmap_unlock_read(mapping); + if (buffer_unwritten(&bh)) complete_unwritten(&bh, !(result & VM_FAULT_ERROR)); - i_mmap_unlock_write(mapping); - return result; fallback: diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index 47728da7702c..b46e9fc64196 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -63,7 +63,7 @@ config EXT4_FS If unsure, say N. config EXT4_USE_FOR_EXT2 - bool "Use ext4 for ext2/ext3 file systems" + bool "Use ext4 for ext2 file systems" depends on EXT4_FS depends on EXT2_FS=n default y diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index e26803fb210d..560af0437704 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -165,8 +165,8 @@ int ext4_mpage_readpages(struct address_space *mapping, if (pages) { page = list_entry(pages->prev, struct page, lru); list_del(&page->lru); - if (add_to_page_cache_lru(page, mapping, - page->index, GFP_KERNEL)) + if (add_to_page_cache_lru(page, mapping, page->index, + GFP_KERNEL & mapping_gfp_mask(mapping))) goto next_page; } diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 587ac08eabb6..29e4599f6fc1 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -778,19 +778,24 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi, struct wb_writeback_work *base_work, bool skip_if_busy) { - int next_memcg_id = 0; - struct bdi_writeback *wb; - struct wb_iter iter; + struct bdi_writeback *last_wb = NULL; + struct bdi_writeback *wb = list_entry_rcu(&bdi->wb_list, + struct bdi_writeback, bdi_node); might_sleep(); restart: rcu_read_lock(); - bdi_for_each_wb(wb, bdi, &iter, next_memcg_id) { + list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) { DEFINE_WB_COMPLETION_ONSTACK(fallback_work_done); struct wb_writeback_work fallback_work; struct wb_writeback_work *work; long nr_pages; + if (last_wb) { + wb_put(last_wb); + last_wb = NULL; + } + /* SYNC_ALL writes out I_DIRTY_TIME too */ if (!wb_has_dirty_io(wb) && (base_work->sync_mode == WB_SYNC_NONE || @@ -819,12 +824,22 @@ restart: wb_queue_work(wb, work); - next_memcg_id = wb->memcg_css->id + 1; + /* + * Pin @wb so that it stays on @bdi->wb_list. This allows + * continuing iteration from @wb after dropping and + * regrabbing rcu read lock. 
+ */ + wb_get(wb); + last_wb = wb; + rcu_read_unlock(); wb_wait_for_completion(bdi, &fallback_work_done); goto restart; } rcu_read_unlock(); + + if (last_wb) + wb_put(last_wb); } #else /* CONFIG_CGROUP_WRITEBACK */ @@ -1481,6 +1496,21 @@ static long writeback_sb_inodes(struct super_block *sb, wbc_detach_inode(&wbc); work->nr_pages -= write_chunk - wbc.nr_to_write; wrote += write_chunk - wbc.nr_to_write; + + if (need_resched()) { + /* + * We're trying to balance between building up a nice + * long list of IOs to improve our merge rate, and + * getting those IOs out quickly for anyone throttling + * in balance_dirty_pages(). cond_resched() doesn't + * unplug, so get our IOs out the door before we + * give up the CPU. + */ + blk_flush_plug(current); + cond_resched(); + } + + spin_lock(&wb->list_lock); spin_lock(&inode->i_lock); if (!(inode->i_state & I_DIRTY_ALL)) @@ -1488,7 +1518,7 @@ static long writeback_sb_inodes(struct super_block *sb, requeue_inode(inode, wb, &wbc); inode_sync_complete(inode); spin_unlock(&inode->i_lock); - cond_resched_lock(&wb->list_lock); + /* * bail out to wb_writeback() often enough to check * background threshold and other termination conditions. @@ -1842,12 +1872,11 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason) rcu_read_lock(); list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { struct bdi_writeback *wb; - struct wb_iter iter; if (!bdi_has_dirty_io(bdi)) continue; - bdi_for_each_wb(wb, bdi, &iter, 0) + list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) wb_start_writeback(wb, wb_split_bdi_pages(wb, nr_pages), false, reason); } @@ -1879,11 +1908,10 @@ static void wakeup_dirtytime_writeback(struct work_struct *w) rcu_read_lock(); list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { struct bdi_writeback *wb; - struct wb_iter iter; - bdi_for_each_wb(wb, bdi, &iter, 0) - if (!list_empty(&bdi->wb.b_dirty_time)) - wb_wakeup(&bdi->wb); + list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) + if (!list_empty(&wb->b_dirty_time)) + wb_wakeup(wb); } rcu_read_unlock(); schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); diff --git a/fs/mpage.c b/fs/mpage.c index 778a4ddef77a..a7c34274f207 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -139,7 +139,8 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block) static struct bio * do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, sector_t *last_block_in_bio, struct buffer_head *map_bh, - unsigned long *first_logical_block, get_block_t get_block) + unsigned long *first_logical_block, get_block_t get_block, + gfp_t gfp) { struct inode *inode = page->mapping->host; const unsigned blkbits = inode->i_blkbits; @@ -277,8 +278,7 @@ alloc_new: goto out; } bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), - min_t(int, nr_pages, BIO_MAX_PAGES), - GFP_KERNEL); + min_t(int, nr_pages, BIO_MAX_PAGES), gfp); if (bio == NULL) goto confused; } @@ -361,6 +361,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages, sector_t last_block_in_bio = 0; struct buffer_head map_bh; unsigned long first_logical_block = 0; + gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping); map_bh.b_state = 0; map_bh.b_size = 0; @@ -370,12 +371,13 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages, prefetchw(&page->flags); list_del(&page->lru); if (!add_to_page_cache_lru(page, mapping, - page->index, GFP_KERNEL)) { + page->index, + gfp)) { bio = do_mpage_readpage(bio, page, nr_pages - page_idx, &last_block_in_bio, &map_bh, &first_logical_block, - 
get_block); + get_block, gfp); } page_cache_release(page); } @@ -395,11 +397,12 @@ int mpage_readpage(struct page *page, get_block_t get_block) sector_t last_block_in_bio = 0; struct buffer_head map_bh; unsigned long first_logical_block = 0; + gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(page->mapping); map_bh.b_state = 0; map_bh.b_size = 0; bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio, - &map_bh, &first_logical_block, get_block); + &map_bh, &first_logical_block, get_block, gfp); if (bio) mpage_bio_submit(READ, bio); return 0; diff --git a/fs/namei.c b/fs/namei.c index 726d211db484..33e9495a3129 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1558,8 +1558,6 @@ static int lookup_fast(struct nameidata *nd, negative = d_is_negative(dentry); if (read_seqcount_retry(&dentry->d_seq, seq)) return -ECHILD; - if (negative) - return -ENOENT; /* * This sequence count validates that the parent had no @@ -1580,6 +1578,12 @@ static int lookup_fast(struct nameidata *nd, goto unlazy; } } + /* + * Note: do negative dentry check after revalidation in + * case that drops it. + */ + if (negative) + return -ENOENT; path->mnt = mnt; path->dentry = dentry; if (likely(__follow_mount_rcu(nd, path, inode, seqp))) diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 2714ef835bdd..be806ead7f4d 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -113,7 +113,8 @@ out: return status; } -static int nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid) +static int nfs_delegation_claim_opens(struct inode *inode, + const nfs4_stateid *stateid, fmode_t type) { struct nfs_inode *nfsi = NFS_I(inode); struct nfs_open_context *ctx; @@ -140,7 +141,7 @@ again: /* Block nfs4_proc_unlck */ mutex_lock(&sp->so_delegreturn_mutex); seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); - err = nfs4_open_delegation_recall(ctx, state, stateid); + err = nfs4_open_delegation_recall(ctx, state, stateid, type); if (!err) err = nfs_delegation_claim_locks(ctx, state, stateid); if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) @@ -411,7 +412,8 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation do { if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) break; - err = nfs_delegation_claim_opens(inode, &delegation->stateid); + err = nfs_delegation_claim_opens(inode, &delegation->stateid, + delegation->type); if (!issync || err != -EAGAIN) break; /* diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index a44829173e57..333063e032f0 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -54,7 +54,7 @@ void nfs_delegation_reap_unclaimed(struct nfs_client *clp); /* NFSv4 delegation-related procedures */ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync); -int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid); +int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type); int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid); bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode, fmode_t flags); diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 38678d9a5cc4..4b1d08f56aba 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -166,8 +166,11 @@ nfs_direct_select_verf(struct nfs_direct_req *dreq, struct nfs_writeverf *verfp = &dreq->verf; #ifdef CONFIG_NFS_V4_1 - if (ds_clp) { - /* pNFS is 
in use, use the DS verf */ + /* + * pNFS is in use, use the DS verf except commit_through_mds is set + * for layout segment where nbuckets is zero. + */ + if (ds_clp && dreq->ds_cinfo.nbuckets > 0) { if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets) verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf; else diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index b34f2e228601..02ec07973bc4 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -629,23 +629,18 @@ out_put: goto out; } -static void filelayout_free_fh_array(struct nfs4_filelayout_segment *fl) +static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl) { int i; - for (i = 0; i < fl->num_fh; i++) { - if (!fl->fh_array[i]) - break; - kfree(fl->fh_array[i]); + if (fl->fh_array) { + for (i = 0; i < fl->num_fh; i++) { + if (!fl->fh_array[i]) + break; + kfree(fl->fh_array[i]); + } + kfree(fl->fh_array); } - kfree(fl->fh_array); - fl->fh_array = NULL; -} - -static void -_filelayout_free_lseg(struct nfs4_filelayout_segment *fl) -{ - filelayout_free_fh_array(fl); kfree(fl); } @@ -716,21 +711,21 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo, /* Do we want to use a mempool here? */ fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags); if (!fl->fh_array[i]) - goto out_err_free; + goto out_err; p = xdr_inline_decode(&stream, 4); if (unlikely(!p)) - goto out_err_free; + goto out_err; fl->fh_array[i]->size = be32_to_cpup(p++); if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) { printk(KERN_ERR "NFS: Too big fh %d received %d\n", i, fl->fh_array[i]->size); - goto out_err_free; + goto out_err; } p = xdr_inline_decode(&stream, fl->fh_array[i]->size); if (unlikely(!p)) - goto out_err_free; + goto out_err; memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size); dprintk("DEBUG: %s: fh len %d\n", __func__, fl->fh_array[i]->size); @@ -739,8 +734,6 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo, __free_page(scratch); return 0; -out_err_free: - filelayout_free_fh_array(fl); out_err: __free_page(scratch); return -EIO; diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index d731bbf974aa..0f020e4d8421 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -175,10 +175,12 @@ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence) { struct nfs_server *server = NFS_SERVER(file_inode(filep)); struct nfs4_exception exception = { }; - int err; + loff_t err; do { err = _nfs42_proc_llseek(filep, offset, whence); + if (err >= 0) + break; if (err == -ENOTSUPP) return -EOPNOTSUPP; err = nfs4_handle_exception(server, err, &exception); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 693b903b48bd..5133bb18830e 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1127,6 +1127,21 @@ static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task) return ret; } +static bool nfs4_mode_match_open_stateid(struct nfs4_state *state, + fmode_t fmode) +{ + switch(fmode & (FMODE_READ|FMODE_WRITE)) { + case FMODE_READ|FMODE_WRITE: + return state->n_rdwr != 0; + case FMODE_WRITE: + return state->n_wronly != 0; + case FMODE_READ: + return state->n_rdonly != 0; + } + WARN_ON_ONCE(1); + return false; +} + static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode) { int ret = 0; @@ -1443,12 +1458,18 @@ nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state) if (delegation) delegation_flags = delegation->flags; rcu_read_unlock(); - if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) { + 
switch (data->o_arg.claim) { + default: + break; + case NFS4_OPEN_CLAIM_DELEGATE_CUR: + case NFS4_OPEN_CLAIM_DELEG_CUR_FH: pr_err_ratelimited("NFS: Broken NFSv4 server %s is " "returning a delegation for " "OPEN(CLAIM_DELEGATE_CUR)\n", clp->cl_hostname); - } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0) + return; + } + if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0) nfs_inode_set_delegation(state->inode, data->owner->so_cred, &data->o_res); @@ -1571,17 +1592,13 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context return opendata; } -static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res) +static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, + fmode_t fmode) { struct nfs4_state *newstate; int ret; - if ((opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR || - opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEG_CUR_FH) && - (opendata->o_arg.u.delegation_type & fmode) != fmode) - /* This mode can't have been delegated, so we must have - * a valid open_stateid to cover it - not need to reclaim. - */ + if (!nfs4_mode_match_open_stateid(opendata->state, fmode)) return 0; opendata->o_arg.open_flags = 0; opendata->o_arg.fmode = fmode; @@ -1597,14 +1614,14 @@ static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmod newstate = nfs4_opendata_to_nfs4_state(opendata); if (IS_ERR(newstate)) return PTR_ERR(newstate); + if (newstate != opendata->state) + ret = -ESTALE; nfs4_close_state(newstate, fmode); - *res = newstate; - return 0; + return ret; } static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) { - struct nfs4_state *newstate; int ret; /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */ @@ -1615,27 +1632,15 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state * clear_bit(NFS_DELEGATED_STATE, &state->flags); clear_bit(NFS_OPEN_STATE, &state->flags); smp_rmb(); - if (state->n_rdwr != 0) { - ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate); - if (ret != 0) - return ret; - if (newstate != state) - return -ESTALE; - } - if (state->n_wronly != 0) { - ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate); - if (ret != 0) - return ret; - if (newstate != state) - return -ESTALE; - } - if (state->n_rdonly != 0) { - ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate); - if (ret != 0) - return ret; - if (newstate != state) - return -ESTALE; - } + ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); + if (ret != 0) + return ret; + ret = nfs4_open_recover_helper(opendata, FMODE_WRITE); + if (ret != 0) + return ret; + ret = nfs4_open_recover_helper(opendata, FMODE_READ); + if (ret != 0) + return ret; /* * We may have performed cached opens for all three recoveries. * Check if we need to update the current stateid. 
@@ -1759,18 +1764,35 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct return err; } -int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) +int nfs4_open_delegation_recall(struct nfs_open_context *ctx, + struct nfs4_state *state, const nfs4_stateid *stateid, + fmode_t type) { struct nfs_server *server = NFS_SERVER(state->inode); struct nfs4_opendata *opendata; - int err; + int err = 0; opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_DELEG_CUR_FH); if (IS_ERR(opendata)) return PTR_ERR(opendata); nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); - err = nfs4_open_recover(opendata, state); + write_seqlock(&state->seqlock); + nfs4_stateid_copy(&state->stateid, &state->open_stateid); + write_sequnlock(&state->seqlock); + clear_bit(NFS_DELEGATED_STATE, &state->flags); + switch (type & (FMODE_READ|FMODE_WRITE)) { + case FMODE_READ|FMODE_WRITE: + case FMODE_WRITE: + err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); + if (err) + break; + err = nfs4_open_recover_helper(opendata, FMODE_WRITE); + if (err) + break; + case FMODE_READ: + err = nfs4_open_recover_helper(opendata, FMODE_READ); + } nfs4_opendata_put(opendata); return nfs4_handle_delegation_recall_error(server, state, stateid, err); } @@ -1850,6 +1872,8 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) data->rpc_done = 0; data->rpc_status = 0; data->timestamp = jiffies; + if (data->is_recover) + nfs4_set_sequence_privileged(&data->c_arg.seq_args); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); @@ -2645,6 +2669,15 @@ out: return err; } +static bool +nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task) +{ + if (inode == NULL || !nfs_have_layout(inode)) + return false; + + return pnfs_wait_on_layoutreturn(inode, task); +} + struct nfs4_closedata { struct inode *inode; struct nfs4_state *state; @@ -2763,6 +2796,11 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) goto out_no_action; } + if (nfs4_wait_on_layoutreturn(inode, task)) { + nfs_release_seqid(calldata->arg.seqid); + goto out_wait; + } + if (calldata->arg.fmode == 0) task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; if (calldata->roc) @@ -5308,6 +5346,9 @@ static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) d_data = (struct nfs4_delegreturndata *)data; + if (nfs4_wait_on_layoutreturn(d_data->inode, task)) + return; + if (d_data->roc) pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier); @@ -7800,39 +7841,46 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n", __func__, delay); rpc_delay(task, delay); - task->tk_status = 0; - rpc_restart_call_prepare(task); - goto out; /* Do not call nfs4_async_handle_error() */ + /* Do not call nfs4_async_handle_error() */ + goto out_restart; } break; case -NFS4ERR_EXPIRED: case -NFS4ERR_BAD_STATEID: spin_lock(&inode->i_lock); - lo = NFS_I(inode)->layout; - if (!lo || list_empty(&lo->plh_segs)) { + if (nfs4_stateid_match(&lgp->args.stateid, + &lgp->args.ctx->state->stateid)) { spin_unlock(&inode->i_lock); /* If the open stateid was bad, then recover it. */ state = lgp->args.ctx->state; - } else { + break; + } + lo = NFS_I(inode)->layout; + if (lo && nfs4_stateid_match(&lgp->args.stateid, + &lo->plh_stateid)) { LIST_HEAD(head); /* * Mark the bad layout state as invalid, then retry * with the current stateid. 
*/ + set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags); pnfs_mark_matching_lsegs_invalid(lo, &head, NULL); spin_unlock(&inode->i_lock); pnfs_free_lseg_list(&head); - - task->tk_status = 0; - rpc_restart_call_prepare(task); - } + } else + spin_unlock(&inode->i_lock); + goto out_restart; } if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) - rpc_restart_call_prepare(task); + goto out_restart; out: dprintk("<-- %s\n", __func__); return; +out_restart: + task->tk_status = 0; + rpc_restart_call_prepare(task); + return; out_overflow: task->tk_status = -EOVERFLOW; goto out; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index da73bc443238..d854693a15b0 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1481,7 +1481,7 @@ restart: spin_unlock(&state->state_lock); } nfs4_put_open_state(state); - clear_bit(NFS4CLNT_RECLAIM_NOGRACE, + clear_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags); spin_lock(&sp->so_lock); goto restart; @@ -1725,7 +1725,8 @@ restart: if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags)) continue; - atomic_inc(&sp->so_count); + if (!atomic_inc_not_zero(&sp->so_count)) + continue; spin_unlock(&clp->cl_lock); rcu_read_unlock(); diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h index 28df12e525ba..671cf68fe56b 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h @@ -409,7 +409,7 @@ DECLARE_EVENT_CLASS(nfs4_open_event, __entry->flags = flags; __entry->fmode = (__force unsigned int)ctx->mode; __entry->dev = ctx->dentry->d_sb->s_dev; - if (!IS_ERR(state)) + if (!IS_ERR_OR_NULL(state)) inode = state->inode; if (inode != NULL) { __entry->fileid = NFS_FILEID(inode); diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 7c5718ba625e..fe3ddd20ff89 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -508,7 +508,7 @@ size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, * for it without upsetting the slab allocator. */ if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) * - sizeof(struct page) > PAGE_SIZE) + sizeof(struct page *) > PAGE_SIZE) return 0; return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index ba1246433794..8abe27165ad0 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1104,20 +1104,15 @@ bool pnfs_roc(struct inode *ino) mark_lseg_invalid(lseg, &tmp_list); found = true; } - /* pnfs_prepare_layoutreturn() grabs lo ref and it will be put - * in pnfs_roc_release(). We don't really send a layoutreturn but - * still want others to view us like we are sending one! - * - * If pnfs_prepare_layoutreturn() fails, it means someone else is doing - * LAYOUTRETURN, so we proceed like there are no layouts to return. - * - * ROC in three conditions: + /* ROC in two conditions: * 1. there are ROC lsegs * 2. we don't send layoutreturn - * 3. no others are sending layoutreturn */ - if (found && !layoutreturn && pnfs_prepare_layoutreturn(lo)) + if (found && !layoutreturn) { + /* lo ref dropped in pnfs_roc_release() */ + pnfs_get_layout_hdr(lo); roc = true; + } out_noroc: spin_unlock(&ino->i_lock); @@ -1172,6 +1167,26 @@ void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier) spin_unlock(&ino->i_lock); } +bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task) +{ + struct nfs_inode *nfsi = NFS_I(ino); + struct pnfs_layout_hdr *lo; + bool sleep = false; + + /* we might not have grabbed lo reference. 
so need to check under + * i_lock */ + spin_lock(&ino->i_lock); + lo = nfsi->layout; + if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) + sleep = true; + spin_unlock(&ino->i_lock); + + if (sleep) + rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); + + return sleep; +} + /* * Compare two layout segments for sorting into layout cache. * We want to preferentially return RW over RO layouts, so ensure those diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 78c9351ff117..d1990e90e7a0 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -270,6 +270,7 @@ bool pnfs_roc(struct inode *ino); void pnfs_roc_release(struct inode *ino); void pnfs_roc_set_barrier(struct inode *ino, u32 barrier); void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier); +bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task); void pnfs_set_layoutcommit(struct inode *, struct pnfs_layout_segment *, loff_t); void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data); int pnfs_layoutcommit_inode(struct inode *inode, bool sync); @@ -639,6 +640,12 @@ pnfs_roc_get_barrier(struct inode *ino, u32 *barrier) { } +static inline bool +pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task) +{ + return false; +} + static inline void set_pnfs_layoutdriver(struct nfs_server *s, const struct nfs_fh *mntfh, u32 id) { diff --git a/fs/nfs/read.c b/fs/nfs/read.c index ae0ff7a11b40..01b8cc8e8cfc 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -72,6 +72,9 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio) { struct nfs_pgio_mirror *mirror; + if (pgio->pg_ops && pgio->pg_ops->pg_cleanup) + pgio->pg_ops->pg_cleanup(pgio); + pgio->pg_ops = &nfs_pgio_rw_ops; /* read path should never have more than one mirror */ diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 388f48079c43..75ab7622e0cc 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -569,19 +569,17 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, if (!nfs_pageio_add_request(pgio, req)) { nfs_redirty_request(req); ret = pgio->pg_error; - } + } else + nfs_add_stats(page_file_mapping(page)->host, + NFSIOS_WRITEPAGES, 1); out: return ret; } static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio) { - struct inode *inode = page_file_mapping(page)->host; int ret; - nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); - nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); - nfs_pageio_cond_complete(pgio, page_file_index(page)); ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE); if (ret == -EAGAIN) { @@ -597,9 +595,11 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc) { struct nfs_pageio_descriptor pgio; + struct inode *inode = page_file_mapping(page)->host; int err; - nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc), + nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); + nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false, &nfs_async_write_completion_ops); err = nfs_do_writepage(page, wbc, &pgio); nfs_pageio_complete(&pgio); @@ -1223,7 +1223,7 @@ static int nfs_can_extend_write(struct file *file, struct page *page, struct ino return 1; if (!flctx || (list_empty_careful(&flctx->flc_flock) && list_empty_careful(&flctx->flc_posix))) - return 0; + return 1; /* Check to see if there are whole file write locks */ ret = 0; @@ -1351,6 +1351,9 @@ void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio) { struct 
nfs_pgio_mirror *mirror; + if (pgio->pg_ops && pgio->pg_ops->pg_cleanup) + pgio->pg_ops->pg_cleanup(pgio); + pgio->pg_ops = &nfs_pgio_rw_ops; nfs_pageio_stop_mirroring(pgio); diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c index cdefaa331a07..c29d9421bd5e 100644 --- a/fs/nfsd/blocklayout.c +++ b/fs/nfsd/blocklayout.c @@ -56,14 +56,6 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp, u32 device_generation = 0; int error; - /* - * We do not attempt to support I/O smaller than the fs block size, - * or not aligned to it. - */ - if (args->lg_minlength < block_size) { - dprintk("pnfsd: I/O too small\n"); - goto out_layoutunavailable; - } if (seg->offset & (block_size - 1)) { dprintk("pnfsd: I/O misaligned\n"); goto out_layoutunavailable; diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 46b8b2bbc95a..ce38b4ccc9ab 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -1439,6 +1439,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data, int found, ret; int set_maybe; int dispatch_assert = 0; + int dispatched = 0; if (!dlm_grab(dlm)) return DLM_MASTER_RESP_NO; @@ -1657,16 +1658,20 @@ send_response: if (ret < 0) { mlog(ML_ERROR, "failed to dispatch assert master work\n"); response = DLM_MASTER_RESP_ERROR; + spin_unlock(&res->spinlock); dlm_lockres_put(res); - } else + } else { + dispatched = 1; __dlm_lockres_grab_inflight_worker(dlm, res); - spin_unlock(&res->spinlock); + spin_unlock(&res->spinlock); + } } else { if (res) dlm_lockres_put(res); } - dlm_put(dlm); + if (!dispatched) + dlm_put(dlm); return response; } @@ -2090,7 +2095,6 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, /* queue up work for dlm_assert_master_worker */ - dlm_grab(dlm); /* get an extra ref for the work item */ dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL); item->u.am.lockres = res; /* already have a ref */ /* can optionally ignore node numbers higher than this node */ diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index ce12e0b1a31f..58eaa5c0d387 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -1694,6 +1694,7 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data, unsigned int hash; int master = DLM_LOCK_RES_OWNER_UNKNOWN; u32 flags = DLM_ASSERT_MASTER_REQUERY; + int dispatched = 0; if (!dlm_grab(dlm)) { /* since the domain has gone away on this @@ -1719,9 +1720,11 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data, dlm_put(dlm); /* sender will take care of this and retry */ return ret; - } else + } else { + dispatched = 1; __dlm_lockres_grab_inflight_worker(dlm, res); - spin_unlock(&res->spinlock); + spin_unlock(&res->spinlock); + } } else { /* put.. 
incase we are not the master */ spin_unlock(&res->spinlock); @@ -1730,7 +1733,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data, } spin_unlock(&dlm->spinlock); - dlm_put(dlm); + if (!dispatched) + dlm_put(dlm); return master; } diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index ba1323a94924..a586467f6ff6 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c @@ -70,6 +70,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) unsigned order; void *data; int ret; + gfp_t gfp = mapping_gfp_mask(inode->i_mapping); /* make various checks */ order = get_order(newsize); @@ -84,7 +85,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) /* allocate enough contiguous pages to be able to satisfy the * request */ - pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order); + pages = alloc_pages(gfp, order); if (!pages) return -ENOMEM; @@ -108,7 +109,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) struct page *page = pages + loop; ret = add_to_page_cache_lru(page, inode->i_mapping, loop, - GFP_KERNEL); + gfp); if (ret < 0) goto add_error; diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index 96f3448b6eb4..fd65b3f1923c 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c @@ -652,11 +652,8 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode, { int err; - mutex_lock(&inode->i_mutex); err = security_inode_init_security(inode, dentry, qstr, &init_xattrs, 0); - mutex_unlock(&inode->i_mutex); - if (err) { struct ubifs_info *c = dentry->i_sb->s_fs_info; ubifs_err(c, "cannot initialize security for inode %lu, error %d", diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 634e676072cb..50311703135b 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -467,8 +467,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file) * the fault_*wqh. 
*/ spin_lock(&ctx->fault_pending_wqh.lock); - __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 0, &range); - __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, 0, &range); + __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range); + __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range); spin_unlock(&ctx->fault_pending_wqh.lock); wake_up_poll(&ctx->fd_wqh, POLLHUP); @@ -650,10 +650,10 @@ static void __wake_userfault(struct userfaultfd_ctx *ctx, spin_lock(&ctx->fault_pending_wqh.lock); /* wake all in the range and autoremove */ if (waitqueue_active(&ctx->fault_pending_wqh)) - __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 0, + __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, range); if (waitqueue_active(&ctx->fault_wqh)) - __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, 0, range); + __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, range); spin_unlock(&ctx->fault_pending_wqh.lock); } @@ -1287,8 +1287,10 @@ static struct file *userfaultfd_file_create(int flags) file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx, O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS)); - if (IS_ERR(file)) + if (IS_ERR(file)) { + mmput(ctx->mm); kmem_cache_free(userfaultfd_ctx_cachep, ctx); + } out: return file; } diff --git a/include/acpi/button.h b/include/acpi/button.h index 97eea0e4c016..1cad8b2d460c 100644 --- a/include/acpi/button.h +++ b/include/acpi/button.h @@ -3,7 +3,7 @@ #include <linux/notifier.h> -#if defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) +#if IS_ENABLED(CONFIG_ACPI_BUTTON) extern int acpi_lid_notifier_register(struct notifier_block *nb); extern int acpi_lid_notifier_unregister(struct notifier_block *nb); extern int acpi_lid_open(void); @@ -20,6 +20,6 @@ static inline int acpi_lid_open(void) { return 1; } -#endif /* defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) */ +#endif /* IS_ENABLED(CONFIG_ACPI_BUTTON) */ #endif /* ACPI_BUTTON_H */ diff --git a/include/acpi/video.h b/include/acpi/video.h index e840b294c6f5..c62392d9b52a 100644 --- a/include/acpi/video.h +++ b/include/acpi/video.h @@ -24,7 +24,7 @@ enum acpi_backlight_type { acpi_backlight_native, }; -#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE) +#if IS_ENABLED(CONFIG_ACPI_VIDEO) extern int acpi_video_register(void); extern void acpi_video_unregister(void); extern int acpi_video_get_edid(struct acpi_device *device, int type, diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index f20f407ce45d..4b4b056a6eb0 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h @@ -73,7 +73,7 @@ * Convert a physical address to a Page Frame Number and back */ #define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) -#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) +#define __pfn_to_phys(pfn) PFN_PHYS(pfn) #define page_to_pfn __page_to_pfn #define pfn_to_page __pfn_to_page diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index 83bfb87f5bf1..e2aadbc7151f 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -111,8 +111,8 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock) cpu_relax(); } -#ifndef virt_queued_spin_lock -static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock) +#ifndef virt_spin_lock +static __always_inline bool virt_spin_lock(struct qspinlock *lock) { return false; } diff --git a/include/asm-generic/word-at-a-time.h 
b/include/asm-generic/word-at-a-time.h index 94f9ea8abcae..011dde083f23 100644 --- a/include/asm-generic/word-at-a-time.h +++ b/include/asm-generic/word-at-a-time.h @@ -1,15 +1,10 @@ #ifndef _ASM_WORD_AT_A_TIME_H #define _ASM_WORD_AT_A_TIME_H -/* - * This says "generic", but it's actually big-endian only. - * Little-endian can use more efficient versions of these - * interfaces, see for example - * arch/x86/include/asm/word-at-a-time.h - * for those. - */ - #include <linux/kernel.h> +#include <asm/byteorder.h> + +#ifdef __BIG_ENDIAN struct word_at_a_time { const unsigned long high_bits, low_bits; @@ -53,4 +48,73 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct #define zero_bytemask(mask) (~1ul << __fls(mask)) #endif +#else + +/* + * The optimal byte mask counting is probably going to be something + * that is architecture-specific. If you have a reliably fast + * bit count instruction, that might be better than the multiply + * and shift, for example. + */ +struct word_at_a_time { + const unsigned long one_bits, high_bits; +}; + +#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } + +#ifdef CONFIG_64BIT + +/* + * Jan Achrenius on G+: microoptimized version of + * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56" + * that works for the bytemasks without having to + * mask them first. + */ +static inline long count_masked_bytes(unsigned long mask) +{ + return mask*0x0001020304050608ul >> 56; +} + +#else /* 32-bit case */ + +/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ +static inline long count_masked_bytes(long mask) +{ + /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ + long a = (0x0ff0001+mask) >> 23; + /* Fix the 1 for 00 case */ + return a & mask; +} + +#endif + +/* Return nonzero if it has a zero */ +static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) +{ + unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; + *bits = mask; + return mask; +} + +static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) +{ + return bits; +} + +static inline unsigned long create_zero_mask(unsigned long bits) +{ + bits = (bits - 1) & ~bits; + return bits >> 7; +} + +/* The mask we created is directly usable as a bytemask */ +#define zero_bytemask(mask) (mask) + +static inline unsigned long find_zero(unsigned long mask) +{ + return count_masked_bytes(mask); +} + +#endif /* __BIG_ENDIAN */ + #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 2a747a91fded..3febb4b9fce9 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -240,5 +240,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev); extern void drm_kms_helper_poll_disable(struct drm_device *dev); extern void drm_kms_helper_poll_enable(struct drm_device *dev); +extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev); #endif diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 499e9f625aef..0212d139a480 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -568,6 +568,10 @@ #define MODE_I2C_READ 4 #define MODE_I2C_STOP 8 +/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */ +#define DP_MST_PHYSICAL_PORT_0 0 +#define DP_MST_LOGICAL_PORT_0 8 + #define DP_LINK_STATUS_SIZE 6 bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], int lane_count); diff --git a/include/drm/drm_dp_mst_helper.h 
b/include/drm/drm_dp_mst_helper.h index 86d0b25ed054..5340099741ae 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -253,6 +253,7 @@ struct drm_dp_remote_dpcd_write { u8 *bytes; }; +#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4 struct drm_dp_remote_i2c_read { u8 num_transactions; u8 port_number; @@ -262,7 +263,7 @@ struct drm_dp_remote_i2c_read { u8 *bytes; u8 no_stop_bit; u8 i2c_transaction_delay; - } transactions[4]; + } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS]; u8 read_i2c_device_id; u8 num_bytes_read; }; @@ -374,6 +375,7 @@ struct drm_dp_mst_topology_mgr; struct drm_dp_mst_topology_cbs { /* create a connector for a port */ struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); + void (*register_connector)(struct drm_connector *connector); void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_connector *connector); void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr); diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index d901f1a47be6..4e14dac282bb 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -35,11 +35,7 @@ #define VGIC_V3_MAX_LRS 16 #define VGIC_MAX_IRQS 1024 #define VGIC_V2_MAX_CPUS 8 - -/* Sanity checks... */ -#if (KVM_MAX_VCPUS > 255) -#error Too many KVM VCPUs, the VGIC only supports up to 255 VCPUs for now -#endif +#define VGIC_V3_MAX_CPUS 255 #if (VGIC_NR_IRQS_LEGACY & 31) #error "VGIC_NR_IRQS must be a multiple of 32" diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 7235c4851460..43856d19cf4d 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -217,6 +217,7 @@ struct pci_dev; int acpi_pci_irq_enable (struct pci_dev *dev); void acpi_penalize_isa_irq(int irq, int active); +bool acpi_isa_irq_available(int irq); void acpi_penalize_sci_irq(int irq, int trigger, int polarity); void acpi_pci_irq_disable (struct pci_dev *dev); diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index a23209b43842..1b4d69f68c33 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -116,6 +116,8 @@ struct bdi_writeback { struct list_head work_list; struct delayed_work dwork; /* work item used for writeback */ + struct list_head bdi_node; /* anchored at bdi->wb_list */ + #ifdef CONFIG_CGROUP_WRITEBACK struct percpu_ref refcnt; /* used only for !root wb's */ struct fprop_local_percpu memcg_completions; @@ -150,6 +152,7 @@ struct backing_dev_info { atomic_long_t tot_write_bandwidth; struct bdi_writeback wb; /* the root writeback info for this bdi */ + struct list_head wb_list; /* list of all wbs */ #ifdef CONFIG_CGROUP_WRITEBACK struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ struct rb_root cgwb_congested_tree; /* their congested states */ diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 5a5d79ee256f..c85f74946a8b 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -13,18 +13,23 @@ #include <linux/sched.h> #include <linux/blkdev.h> #include <linux/writeback.h> +#include <linux/memcontrol.h> #include <linux/blk-cgroup.h> #include <linux/backing-dev-defs.h> #include <linux/slab.h> int __must_check bdi_init(struct backing_dev_info *bdi); -void bdi_destroy(struct backing_dev_info *bdi); +void bdi_exit(struct backing_dev_info *bdi); __printf(3, 4) int bdi_register(struct backing_dev_info *bdi, struct device *parent, const char *fmt, ...); int bdi_register_dev(struct 
backing_dev_info *bdi, dev_t dev); +void bdi_unregister(struct backing_dev_info *bdi); + int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); +void bdi_destroy(struct backing_dev_info *bdi); + void wb_start_writeback(struct bdi_writeback *wb, long nr_pages, bool range_cyclic, enum wb_reason reason); void wb_start_background_writeback(struct bdi_writeback *wb); @@ -252,13 +257,19 @@ int inode_congested(struct inode *inode, int cong_bits); * @inode: inode of interest * * cgroup writeback requires support from both the bdi and filesystem. - * Test whether @inode has both. + * Also, both memcg and iocg have to be on the default hierarchy. Test + * whether all conditions are met. + * + * Note that the test result may change dynamically on the same inode + * depending on how memcg and iocg are configured. */ static inline bool inode_cgwb_enabled(struct inode *inode) { struct backing_dev_info *bdi = inode_to_bdi(inode); - return bdi_cap_account_dirty(bdi) && + return cgroup_on_dfl(mem_cgroup_root_css->cgroup) && + cgroup_on_dfl(blkcg_root_css->cgroup) && + bdi_cap_account_dirty(bdi) && (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) && (inode->i_sb->s_iflags & SB_I_CGROUPWB); } @@ -401,61 +412,6 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) rcu_read_unlock(); } -struct wb_iter { - int start_memcg_id; - struct radix_tree_iter tree_iter; - void **slot; -}; - -static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter, - struct backing_dev_info *bdi) -{ - struct radix_tree_iter *titer = &iter->tree_iter; - - WARN_ON_ONCE(!rcu_read_lock_held()); - - if (iter->start_memcg_id >= 0) { - iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id); - iter->start_memcg_id = -1; - } else { - iter->slot = radix_tree_next_slot(iter->slot, titer, 0); - } - - if (!iter->slot) - iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0); - if (iter->slot) - return *iter->slot; - return NULL; -} - -static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter, - struct backing_dev_info *bdi, - int start_memcg_id) -{ - iter->start_memcg_id = start_memcg_id; - - if (start_memcg_id) - return __wb_iter_next(iter, bdi); - else - return &bdi->wb; -} - -/** - * bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order - * @wb_cur: cursor struct bdi_writeback pointer - * @bdi: bdi to walk wb's of - * @iter: pointer to struct wb_iter to be used as iteration buffer - * @start_memcg_id: memcg ID to start iteration from - * - * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending - * memcg ID order starting from @start_memcg_id. @iter is struct wb_iter - * to be used as temp storage during iteration. rcu_read_lock() must be - * held throughout iteration. - */ -#define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id) \ - for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id); \ - (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi)) - #else /* CONFIG_CGROUP_WRITEBACK */ static inline bool inode_cgwb_enabled(struct inode *inode) @@ -515,14 +471,6 @@ static inline void wb_blkcg_offline(struct blkcg *blkcg) { } -struct wb_iter { - int next_id; -}; - -#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \ - for ((iter)->next_id = (start_blkcg_id); \ - ({ (wb_cur) = !(iter)->next_id++ ? 
&(bdi)->wb : NULL; }); ) - static inline int inode_congested(struct inode *inode, int cong_bits) { return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 37d1602c4f7a..5e7d43ab61c0 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -145,7 +145,6 @@ enum { BLK_MQ_F_SHOULD_MERGE = 1 << 0, BLK_MQ_F_TAG_SHARED = 1 << 1, BLK_MQ_F_SG_MERGE = 1 << 2, - BLK_MQ_F_SYSFS_UP = 1 << 3, BLK_MQ_F_DEFER_ISSUE = 1 << 4, BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, BLK_MQ_F_ALLOC_POLICY_BITS = 1, @@ -215,7 +214,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); void blk_mq_cancel_requeue_work(struct request_queue *q); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_abort_requeue_list(struct request_queue *q); -void blk_mq_complete_request(struct request *rq); +void blk_mq_complete_request(struct request *rq, int error); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); @@ -224,8 +223,6 @@ void blk_mq_start_hw_queues(struct request_queue *q); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); void blk_mq_run_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); -void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, - void *priv); void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, void *priv); void blk_mq_freeze_queue(struct request_queue *q); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 38a5ff772a37..19c2e947d4d1 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -456,6 +456,8 @@ struct request_queue { struct blk_mq_tag_set *tag_set; struct list_head tag_set_list; struct bio_set *bio_split; + + bool mq_sysfs_init_done; }; #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ @@ -1368,6 +1370,26 @@ static inline bool bvec_gap_to_prev(struct request_queue *q, ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); } +static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, + struct bio *next) +{ + if (!bio_has_data(prev)) + return false; + + return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1], + next->bi_io_vec[0].bv_offset); +} + +static inline bool req_gap_back_merge(struct request *req, struct bio *bio) +{ + return bio_will_gap(req->q, req->biotail, bio); +} + +static inline bool req_gap_front_merge(struct request *req, struct bio *bio) +{ + return bio_will_gap(req->q, bio, req->bio); +} + struct work_struct; int kblockd_schedule_work(struct work_struct *work); int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); @@ -1494,6 +1516,26 @@ queue_max_integrity_segments(struct request_queue *q) return q->limits.max_integrity_segments; } +static inline bool integrity_req_gap_back_merge(struct request *req, + struct bio *next) +{ + struct bio_integrity_payload *bip = bio_integrity(req->bio); + struct bio_integrity_payload *bip_next = bio_integrity(next); + + return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], + bip_next->bip_vec[0].bv_offset); +} + +static inline bool integrity_req_gap_front_merge(struct request *req, + struct bio *bio) +{ + struct bio_integrity_payload *bip = bio_integrity(bio); + struct bio_integrity_payload *bip_next = bio_integrity(req->bio); + + return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], + bip_next->bip_vec[0].bv_offset); +} + #else /* 
CONFIG_BLK_DEV_INTEGRITY */ struct bio; @@ -1560,6 +1602,16 @@ static inline bool blk_integrity_is_initialized(struct gendisk *g) { return 0; } +static inline bool integrity_req_gap_back_merge(struct request *req, + struct bio *next) +{ + return false; +} +static inline bool integrity_req_gap_front_merge(struct request *req, + struct bio *bio) +{ + return false; +} #endif /* CONFIG_BLK_DEV_INTEGRITY */ diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index 4763ad64e832..f89b31d45cc8 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -107,6 +107,7 @@ static inline u64 ceph_sanitize_features(u64 features) CEPH_FEATURE_OSDMAP_ENC | \ CEPH_FEATURE_CRUSH_TUNABLES3 | \ CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \ + CEPH_FEATURE_MSGR_KEEPALIVE2 | \ CEPH_FEATURE_CRUSH_V4) #define CEPH_FEATURES_REQUIRED_DEFAULT \ diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 7e1252e97a30..b2371d9b51fa 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -238,6 +238,8 @@ struct ceph_connection { bool out_kvec_is_msg; /* kvec refers to out_msg */ int out_more; /* there is more data after the kvecs */ __le64 out_temp_ack; /* for writing an ack */ + struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2 + stamp */ /* message in temps */ struct ceph_msg_header in_hdr; @@ -248,7 +250,7 @@ struct ceph_connection { int in_base_pos; /* bytes read */ __le64 in_temp_ack; /* for reading an ack */ - struct timespec last_keepalive_ack; + struct timespec last_keepalive_ack; /* keepalive2 ack stamp */ struct delayed_work work; /* send|recv work */ unsigned long delay; /* current delay interval */ diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 4d8fcf2187dc..8492721b39be 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -473,31 +473,8 @@ struct cgroup_subsys { unsigned int depends_on; }; -extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem; - -/** - * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups - * @tsk: target task - * - * Called from threadgroup_change_begin() and allows cgroup operations to - * synchronize against threadgroup changes using a percpu_rw_semaphore. - */ -static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) -{ - percpu_down_read(&cgroup_threadgroup_rwsem); -} - -/** - * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups - * @tsk: target task - * - * Called from threadgroup_change_end(). Counterpart of - * cgroup_threadcgroup_change_begin(). - */ -static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) -{ - percpu_up_read(&cgroup_threadgroup_rwsem); -} +void cgroup_threadgroup_change_begin(struct task_struct *tsk); +void cgroup_threadgroup_change_end(struct task_struct *tsk); #else /* CONFIG_CGROUPS */ diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 31ce435981fe..bdcf358dfce2 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -18,15 +18,6 @@ struct clock_event_device; struct module; -/* Clock event mode commands for legacy ->set_mode(): OBSOLETE */ -enum clock_event_mode { - CLOCK_EVT_MODE_UNUSED, - CLOCK_EVT_MODE_SHUTDOWN, - CLOCK_EVT_MODE_PERIODIC, - CLOCK_EVT_MODE_ONESHOT, - CLOCK_EVT_MODE_RESUME, -}; - /* * Possible states of a clock event device. 
* @@ -86,16 +77,14 @@ enum clock_event_state { * @min_delta_ns: minimum delta value in ns * @mult: nanosecond to cycles multiplier * @shift: nanoseconds to cycles divisor (power of two) - * @mode: operating mode, relevant only to ->set_mode(), OBSOLETE * @state_use_accessors:current state of the device, assigned by the core code * @features: features * @retries: number of forced programming retries - * @set_mode: legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME. - * @set_state_periodic: switch state to periodic, if !set_mode - * @set_state_oneshot: switch state to oneshot, if !set_mode - * @set_state_oneshot_stopped: switch state to oneshot_stopped, if !set_mode - * @set_state_shutdown: switch state to shutdown, if !set_mode - * @tick_resume: resume clkevt device, if !set_mode + * @set_state_periodic: switch state to periodic + * @set_state_oneshot: switch state to oneshot + * @set_state_oneshot_stopped: switch state to oneshot_stopped + * @set_state_shutdown: switch state to shutdown + * @tick_resume: resume clkevt device * @broadcast: function to broadcast events * @min_delta_ticks: minimum delta value in ticks stored for reconfiguration * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration @@ -116,18 +105,10 @@ struct clock_event_device { u64 min_delta_ns; u32 mult; u32 shift; - enum clock_event_mode mode; enum clock_event_state state_use_accessors; unsigned int features; unsigned long retries; - /* - * State transition callback(s): Only one of the two groups should be - * defined: - * - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME. - * - set_state_{shutdown|periodic|oneshot|oneshot_stopped}(), tick_resume(). - */ - void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *); int (*set_state_periodic)(struct clock_event_device *); int (*set_state_oneshot)(struct clock_event_device *); int (*set_state_oneshot_stopped)(struct clock_event_device *); diff --git a/include/linux/cma.h b/include/linux/cma.h index f7ef093ec49a..29f9e774ab76 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -26,6 +26,6 @@ extern int __init cma_declare_contiguous(phys_addr_t base, extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, unsigned int order_per_bit, struct cma **res_cma); -extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align); +extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align); extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); #endif diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index dfaa7b3e9ae9..8efb40e61d6e 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -237,12 +237,25 @@ #define KASAN_ABI_VERSION 3 #endif +#if GCC_VERSION >= 40902 +/* + * Tell the compiler that address safety instrumentation (KASAN) + * should not be applied to that function. 
+ * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + */ +#define __no_sanitize_address __attribute__((no_sanitize_address)) +#endif + #endif /* gcc version >= 40000 specific checks */ #if !defined(__noclone) #define __noclone /* not needed */ #endif +#if !defined(__no_sanitize_address) +#define __no_sanitize_address +#endif + /* * A trick to suppress uninitialized variable warning without generating any * code diff --git a/include/linux/compiler.h b/include/linux/compiler.h index c836eb2dc44d..3d7810341b57 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -198,19 +198,45 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); #include <uapi/linux/types.h> -static __always_inline void __read_once_size(const volatile void *p, void *res, int size) +#define __READ_ONCE_SIZE \ +({ \ + switch (size) { \ + case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \ + case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \ + case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \ + case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \ + default: \ + barrier(); \ + __builtin_memcpy((void *)res, (const void *)p, size); \ + barrier(); \ + } \ +}) + +static __always_inline +void __read_once_size(const volatile void *p, void *res, int size) { - switch (size) { - case 1: *(__u8 *)res = *(volatile __u8 *)p; break; - case 2: *(__u16 *)res = *(volatile __u16 *)p; break; - case 4: *(__u32 *)res = *(volatile __u32 *)p; break; - case 8: *(__u64 *)res = *(volatile __u64 *)p; break; - default: - barrier(); - __builtin_memcpy((void *)res, (const void *)p, size); - barrier(); - } + __READ_ONCE_SIZE; +} + +#ifdef CONFIG_KASAN +/* + * This function is not 'inline' because __no_sanitize_address conflicts + * with inlining. Attempting to inline it may cause a build failure. + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + * '__maybe_unused' allows us to avoid defined-but-not-used warnings. + */ +static __no_sanitize_address __maybe_unused +void __read_once_size_nocheck(const volatile void *p, void *res, int size) +{ + __READ_ONCE_SIZE; +} +#else +static __always_inline +void __read_once_size_nocheck(const volatile void *p, void *res, int size) +{ + __READ_ONCE_SIZE; } +#endif static __always_inline void __write_once_size(volatile void *p, void *res, int size) { @@ -248,8 +274,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s * required ordering. */ -#define READ_ONCE(x) \ - ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) +#define __READ_ONCE(x, check) \ +({ \ + union { typeof(x) __val; char __c[1]; } __u; \ + if (check) \ + __read_once_size(&(x), __u.__c, sizeof(x)); \ + else \ + __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ + __u.__val; \ +}) +#define READ_ONCE(x) __READ_ONCE(x, 1) + +/* + * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need + * to hide memory access from KASAN. 
+ */ +#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) #define WRITE_ONCE(x, val) \ ({ \ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 430efcbea48e..dca22de98d94 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -127,9 +127,14 @@ struct cpufreq_policy { #define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ #ifdef CONFIG_CPU_FREQ +struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu); struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); void cpufreq_cpu_put(struct cpufreq_policy *policy); #else +static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) +{ + return NULL; +} static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) { return NULL; diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index ce447f0f1bad..68030e22af35 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -65,7 +65,10 @@ struct devfreq_dev_status { * The "flags" parameter's possible values are * explained above with "DEVFREQ_FLAG_*" macros. * @get_dev_status: The device should provide the current performance - * status to devfreq, which is used by governors. + * status to devfreq. Governors are recommended not to + * use this directly. Instead, governors are recommended + * to use devfreq_update_stats() along with + * devfreq.last_status. * @get_cur_freq: The device should provide the current frequency * at which it is operating. * @exit: An optional callback that is called when devfreq @@ -161,6 +164,7 @@ struct devfreq { struct delayed_work work; unsigned long previous_freq; + struct devfreq_dev_status last_status; void *data; /* private data for governors */ @@ -204,6 +208,19 @@ extern int devm_devfreq_register_opp_notifier(struct device *dev, extern void devm_devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq); +/** + * devfreq_update_stats() - update the last_status pointer in struct devfreq + * @df: the devfreq instance whose status needs updating + * + * Governors are recommended to use this function along with last_status, + * which allows other entities to reuse the last_status without affecting + * the values fetched later by governors. 
+ */ +static inline int devfreq_update_stats(struct devfreq *df) +{ + return df->profile->get_dev_status(df->dev.parent, &df->last_status); +} + #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) /** * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq @@ -289,6 +306,11 @@ static inline void devm_devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq) { } + +static inline int devfreq_update_stats(struct devfreq *df) +{ + return -EINVAL; +} #endif /* CONFIG_PM_DEVFREQ */ #endif /* __LINUX_DEVFREQ_H__ */ diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h index 569bbd039896..fec734df1524 100644 --- a/include/linux/dma-contiguous.h +++ b/include/linux/dma-contiguous.h @@ -111,7 +111,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size, return ret; } -struct page *dma_alloc_from_contiguous(struct device *dev, int count, +struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, unsigned int order); bool dma_release_from_contiguous(struct device *dev, struct page *pages, int count); @@ -144,7 +144,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size, } static inline -struct page *dma_alloc_from_contiguous(struct device *dev, int count, +struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, unsigned int order) { return NULL; diff --git a/include/linux/init_task.h b/include/linux/init_task.h index d0b380ee7d67..e38681f4912d 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -25,6 +25,13 @@ extern struct files_struct init_files; extern struct fs_struct init_fs; +#ifdef CONFIG_CGROUPS +#define INIT_GROUP_RWSEM(sig) \ + .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem), +#else +#define INIT_GROUP_RWSEM(sig) +#endif + #ifdef CONFIG_CPUSETS #define INIT_CPUSET_SEQ(tsk) \ .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq), @@ -57,6 +64,7 @@ extern struct fs_struct init_fs; INIT_PREV_CPUTIME(sig) \ .cred_guard_mutex = \ __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ + INIT_GROUP_RWSEM(sig) \ } extern struct nsproxy init_nsproxy; diff --git a/include/linux/iova.h b/include/linux/iova.h index 3920a19d8194..92f7177db2ce 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -68,8 +68,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) return iova >> iova_shift(iovad); } -int iommu_iova_cache_init(void); -void iommu_iova_cache_destroy(void); +int iova_cache_get(void); +void iova_cache_put(void); struct iova *alloc_iova_mem(void); void free_iova_mem(struct iova *iova); diff --git a/include/linux/irq.h b/include/linux/irq.h index 6f8b34066442..11bf09288ddb 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -110,8 +110,8 @@ enum { /* * Return value for chip->irq_set_affinity() * - * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity - * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity + * IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity + * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to * support stacked irqchips, which indicates skipping * all descendent irqchips. @@ -129,9 +129,19 @@ struct irq_domain; * struct irq_common_data - per irq data shared by all irqchips * @state_use_accessors: status information for irq chip functions. 
* Use accessor functions to deal with it + * @node: node index useful for balancing + * @handler_data: per-IRQ data for the irq_chip methods + * @affinity: IRQ affinity on SMP + * @msi_desc: MSI descriptor */ struct irq_common_data { unsigned int state_use_accessors; +#ifdef CONFIG_NUMA + unsigned int node; +#endif + void *handler_data; + struct msi_desc *msi_desc; + cpumask_var_t affinity; }; /** @@ -139,38 +149,26 @@ struct irq_common_data { * @mask: precomputed bitmask for accessing the chip registers * @irq: interrupt number * @hwirq: hardware interrupt number, local to the interrupt domain - * @node: node index useful for balancing * @common: point to data shared by all irqchips * @chip: low level interrupt hardware access * @domain: Interrupt translation domain; responsible for mapping * between hwirq number and linux irq number. * @parent_data: pointer to parent struct irq_data to support hierarchy * irq_domain - * @handler_data: per-IRQ data for the irq_chip methods * @chip_data: platform-specific per-chip private data for the chip * methods, to allow shared chip implementations - * @msi_desc: MSI descriptor - * @affinity: IRQ affinity on SMP - * - * The fields here need to overlay the ones in irq_desc until we - * cleaned up the direct references and switched everything over to - * irq_data. */ struct irq_data { u32 mask; unsigned int irq; unsigned long hwirq; - unsigned int node; struct irq_common_data *common; struct irq_chip *chip; struct irq_domain *domain; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY struct irq_data *parent_data; #endif - void *handler_data; void *chip_data; - struct msi_desc *msi_desc; - cpumask_var_t affinity; }; /* @@ -190,6 +188,7 @@ struct irq_data { * IRQD_IRQ_MASKED - Masked state of the interrupt * IRQD_IRQ_INPROGRESS - In progress state of the interrupt * IRQD_WAKEUP_ARMED - Wakeup mode armed + * IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU */ enum { IRQD_TRIGGER_MASK = 0xf, @@ -204,6 +203,7 @@ enum { IRQD_IRQ_MASKED = (1 << 17), IRQD_IRQ_INPROGRESS = (1 << 18), IRQD_WAKEUP_ARMED = (1 << 19), + IRQD_FORWARDED_TO_VCPU = (1 << 20), }; #define __irqd_to_state(d) ((d)->common->state_use_accessors) @@ -282,6 +282,20 @@ static inline bool irqd_is_wakeup_armed(struct irq_data *d) return __irqd_to_state(d) & IRQD_WAKEUP_ARMED; } +static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU; +} + +static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU; +} + +static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d) +{ + __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; +} /* * Functions for chained handlers which can be enabled/disabled by the @@ -461,14 +475,14 @@ static inline int irq_set_parent(int irq, int parent_irq) * Built-in IRQ handlers for various IRQ types, * callable via desc->handle_irq() */ -extern void handle_level_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); +extern void handle_level_irq(struct irq_desc 
*desc); +extern void handle_fasteoi_irq(struct irq_desc *desc); +extern void handle_edge_irq(struct irq_desc *desc); +extern void handle_edge_eoi_irq(struct irq_desc *desc); +extern void handle_simple_irq(struct irq_desc *desc); +extern void handle_percpu_irq(struct irq_desc *desc); +extern void handle_percpu_devid_irq(struct irq_desc *desc); +extern void handle_bad_irq(struct irq_desc *desc); extern void handle_nested_irq(unsigned int irq); extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); @@ -627,23 +641,23 @@ static inline void *irq_data_get_irq_chip_data(struct irq_data *d) static inline void *irq_get_handler_data(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); - return d ? d->handler_data : NULL; + return d ? d->common->handler_data : NULL; } static inline void *irq_data_get_irq_handler_data(struct irq_data *d) { - return d->handler_data; + return d->common->handler_data; } static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); - return d ? d->msi_desc : NULL; + return d ? d->common->msi_desc : NULL; } static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d) { - return d->msi_desc; + return d->common->msi_desc; } static inline u32 irq_get_trigger_type(unsigned int irq) @@ -652,21 +666,30 @@ static inline u32 irq_get_trigger_type(unsigned int irq) return d ? irqd_get_trigger_type(d) : 0; } -static inline int irq_data_get_node(struct irq_data *d) +static inline int irq_common_data_get_node(struct irq_common_data *d) { +#ifdef CONFIG_NUMA return d->node; +#else + return 0; +#endif +} + +static inline int irq_data_get_node(struct irq_data *d) +{ + return irq_common_data_get_node(d->common); } static inline struct cpumask *irq_get_affinity_mask(int irq) { struct irq_data *d = irq_get_irq_data(irq); - return d ? d->affinity : NULL; + return d ? d->common->affinity : NULL; } static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) { - return d->affinity; + return d->common->affinity; } unsigned int arch_dynirq_lower_bound(unsigned int from); diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 5acfa26602e1..a587a33363c7 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -98,11 +98,7 @@ extern struct irq_desc irq_desc[NR_IRQS]; static inline struct irq_desc *irq_data_to_desc(struct irq_data *data) { -#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY - return irq_to_desc(data->irq); -#else - return container_of(data, struct irq_desc, irq_data); -#endif + return container_of(data->common, struct irq_desc, irq_common_data); } static inline unsigned int irq_desc_get_irq(struct irq_desc *desc) @@ -127,23 +123,21 @@ static inline void *irq_desc_get_chip_data(struct irq_desc *desc) static inline void *irq_desc_get_handler_data(struct irq_desc *desc) { - return desc->irq_data.handler_data; + return desc->irq_common_data.handler_data; } static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) { - return desc->irq_data.msi_desc; + return desc->irq_common_data.msi_desc; } /* * Architectures call this to let the generic IRQ layer - * handle an interrupt. If the descriptor is attached to an - * irqchip-style controller then we call the ->handle_irq() handler, - * and it calls __do_IRQ() if it's attached to an irqtype-style controller. + * handle an interrupt. 
*/ -static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) +static inline void generic_handle_irq_desc(struct irq_desc *desc) { - desc->handle_irq(irq, desc); + desc->handle_irq(desc); } int generic_handle_irq(unsigned int irq); @@ -176,29 +170,6 @@ static inline int irq_has_action(unsigned int irq) return irq_desc_has_action(irq_to_desc(irq)); } -/* caller has locked the irq_desc and both params are valid */ -static inline void __irq_set_handler_locked(unsigned int irq, - irq_flow_handler_t handler) -{ - struct irq_desc *desc; - - desc = irq_to_desc(irq); - desc->handle_irq = handler; -} - -/* caller has locked the irq_desc and both params are valid */ -static inline void -__irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip, - irq_flow_handler_t handler, const char *name) -{ - struct irq_desc *desc; - - desc = irq_to_desc(irq); - irq_desc_get_irq_data(desc)->chip = chip; - desc->handle_irq = handler; - desc->name = name; -} - /** * irq_set_handler_locked - Set irq handler from a locked region * @data: Pointer to the irq_data structure which identifies the irq diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index d3ca79236fb0..f644fdb06dd6 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -161,6 +161,11 @@ enum { IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), }; +static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) +{ + return d->of_node; +} + #ifdef CONFIG_IRQ_DOMAIN struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, irq_hw_number_t hwirq_max, int direct_max, diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h index 62d543004197..661bed0ed1f3 100644 --- a/include/linux/irqhandler.h +++ b/include/linux/irqhandler.h @@ -8,7 +8,7 @@ struct irq_desc; struct irq_data; -typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc); +typedef void (*irq_flow_handler_t)(struct irq_desc *desc); typedef void (*irq_preflow_handler_t)(struct irq_data *data); #endif diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 7f653e8f6690..f1094238ab2a 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -21,8 +21,8 @@ * * DEFINE_STATIC_KEY_TRUE(key); * DEFINE_STATIC_KEY_FALSE(key); - * static_key_likely() - * statick_key_unlikely() + * static_branch_likely() + * static_branch_unlikely() * * Jump labels provide an interface to generate dynamic branches using * self-modifying code. Assuming toolchain and architecture support, if we @@ -45,12 +45,10 @@ * statement, setting the key to true requires us to patch in a jump * to the out-of-line of true branch. * - * In addtion to static_branch_{enable,disable}, we can also reference count + * In addition to static_branch_{enable,disable}, we can also reference count * the key or branch direction via static_branch_{inc,dec}. Thus, * static_branch_inc() can be thought of as a 'make more true' and - * static_branch_dec() as a 'make more false'. The inc()/dec() - * interface is meant to be used exclusively from the inc()/dec() for a given - * key. + * static_branch_dec() as a 'make more false'. * * Since this relies on modifying code, the branch modifying functions * must be considered absolute slow paths (machine wide synchronization etc.). 
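A minimal usage sketch of the static key interface named in the jump_label.h comment above (DEFINE_STATIC_KEY_FALSE, static_branch_unlikely, static_branch_inc/dec); my_feature_key and the helper functions below are hypothetical and not part of this patch:

#include <linux/jump_label.h>
#include <linux/printk.h>

/* Default-off key: the feature branch is patched in only when enabled. */
static DEFINE_STATIC_KEY_FALSE(my_feature_key);

void my_hot_path(void)
{
	/* Compiles to a straight-line no-op branch until the key is enabled. */
	if (static_branch_unlikely(&my_feature_key))
		pr_info("rare feature path taken\n");
}

void my_feature_get(void)
{
	static_branch_inc(&my_feature_key);	/* reference-counted 'make more true' */
}

void my_feature_put(void)
{
	static_branch_dec(&my_feature_key);	/* 'make more false' */
}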
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ad800e62cb7a..3e3318ddfc0e 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -242,7 +242,6 @@ struct mem_cgroup { * percpu counter. */ struct mem_cgroup_stat_cpu __percpu *stat; - spinlock_t pcp_counter_lock; #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) struct cg_proto tcp_mem; @@ -677,8 +676,9 @@ enum { struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg); struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); -void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail, - unsigned long *pdirty, unsigned long *pwriteback); +void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, + unsigned long *pheadroom, unsigned long *pdirty, + unsigned long *pwriteback); #else /* CONFIG_CGROUP_WRITEBACK */ @@ -688,7 +688,8 @@ static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) } static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, - unsigned long *pavail, + unsigned long *pfilepages, + unsigned long *pheadroom, unsigned long *pdirty, unsigned long *pwriteback) { diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 8eb3b19af2a4..250b1ff8b48d 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -402,17 +402,6 @@ struct mlx5_cmd_teardown_hca_mbox_out { u8 rsvd[8]; }; -struct mlx5_cmd_query_special_contexts_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_cmd_query_special_contexts_mbox_out { - struct mlx5_outbox_hdr hdr; - __be32 dump_fill_mkey; - __be32 resd_lkey; -}; - struct mlx5_cmd_layout { u8 type; u8 rsvd0[3]; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 27b53f9a24ad..8b6d6f2154a4 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -845,7 +845,6 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); int mlx5_register_interface(struct mlx5_interface *intf); void mlx5_unregister_interface(struct mlx5_interface *intf); int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); -int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey); struct mlx5_profile { u64 mask; diff --git a/include/linux/mm.h b/include/linux/mm.h index 91c08f6f0dc9..80001de019ba 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -905,6 +905,27 @@ static inline void set_page_links(struct page *page, enum zone_type zone, #endif } +#ifdef CONFIG_MEMCG +static inline struct mem_cgroup *page_memcg(struct page *page) +{ + return page->mem_cgroup; +} + +static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) +{ + page->mem_cgroup = memcg; +} +#else +static inline struct mem_cgroup *page_memcg(struct page *page) +{ + return NULL; +} + +static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) +{ +} +#endif + /* * Some inline functions in vmstat.h depend on page_zone() */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 88a00694eda5..2d15e3831440 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -507,6 +507,7 @@ static inline void napi_enable(struct napi_struct *n) BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); smp_mb__before_atomic(); clear_bit(NAPI_STATE_SCHED, &n->state); + clear_bit(NAPI_STATE_NPSVC, &n->state); } #ifdef CONFIG_SMP diff --git a/include/linux/phy.h b/include/linux/phy.h index 962387a192f1..4a4e3a092337 100644 --- 
a/include/linux/phy.h +++ b/include/linux/phy.h @@ -19,6 +19,7 @@ #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/mii.h> +#include <linux/module.h> #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/mod_devicetable.h> @@ -153,6 +154,7 @@ struct sk_buff; * PHYs should register using this structure */ struct mii_bus { + struct module *owner; const char *name; char id[MII_BUS_ID_SIZE]; void *priv; @@ -198,7 +200,8 @@ static inline struct mii_bus *mdiobus_alloc(void) return mdiobus_alloc_size(0); } -int mdiobus_register(struct mii_bus *bus); +int __mdiobus_register(struct mii_bus *bus, struct module *owner); +#define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE) void mdiobus_unregister(struct mii_bus *bus); void mdiobus_free(struct mii_bus *bus); struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv); @@ -742,6 +745,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, struct phy_c45_device_ids *c45_ids); struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); int phy_device_register(struct phy_device *phy); +void phy_device_remove(struct phy_device *phydev); int phy_init_hw(struct phy_device *phydev); int phy_suspend(struct phy_device *phydev); int phy_resume(struct phy_device *phydev); diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index ff476515f716..581abf848566 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -230,12 +230,11 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, struct rcu_synchronize *rs_array); #define _wait_rcu_gp(checktiny, ...) \ -do { \ - call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ - const int __n = ARRAY_SIZE(__crcu_array); \ - struct rcu_synchronize __rs_array[__n]; \ - \ - __wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \ +do { \ + call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ + struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ + __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ + __crcu_array, __rs_array); \ } while (0) #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) diff --git a/include/linux/sched.h b/include/linux/sched.h index a4ab9daa387c..b7b9501b41af 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -762,6 +762,18 @@ struct signal_struct { unsigned audit_tty_log_passwd; struct tty_audit_buf *tty_audit_buf; #endif +#ifdef CONFIG_CGROUPS + /* + * group_rwsem prevents new tasks from entering the threadgroup and + * member tasks from exiting, more specifically, setting of + * PF_EXITING. fork and exit paths are protected with this rwsem + * using threadgroup_change_begin/end(). Users which require + * threadgroup to remain stable should use threadgroup_[un]lock() + * which also takes care of exec path. Currently, cgroup is the + * only user. 
+ */ + struct rw_semaphore group_rwsem; +#endif oom_flags_t oom_flags; short oom_score_adj; /* OOM kill score adjustment */ diff --git a/include/linux/security.h b/include/linux/security.h index 79d85ddf8093..2f4c1f7aa7db 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -946,7 +946,7 @@ static inline int security_task_prctl(int option, unsigned long arg2, unsigned long arg4, unsigned long arg5) { - return cap_task_prctl(option, arg2, arg3, arg3, arg5); + return cap_task_prctl(option, arg2, arg3, arg4, arg5); } static inline void security_task_to_inode(struct task_struct *p, struct inode *inode) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2738d355cdf9..4398411236f1 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -179,6 +179,9 @@ struct nf_bridge_info { u8 bridged_dnat:1; __u16 frag_max_size; struct net_device *physindev; + + /* always valid & non-NULL from FORWARD on, for physdev match */ + struct net_device *physoutdev; union { /* prerouting: detect dnat in orig/reply direction */ __be32 ipv4_daddr; @@ -189,9 +192,6 @@ struct nf_bridge_info { * skb is out in neigh layer. */ char neigh_header[8]; - - /* always valid & non-NULL from FORWARD on, for physdev match */ - struct net_device *physoutdev; }; }; #endif @@ -2707,6 +2707,9 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb, { if (skb->ip_summed == CHECKSUM_COMPLETE) skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); + else if (skb->ip_summed == CHECKSUM_PARTIAL && + skb_checksum_start_offset(skb) < 0) + skb->ip_summed = CHECKSUM_NONE; } unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 269e8afd3e2a..6b00f18f5e6b 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -34,7 +34,7 @@ extern struct bus_type spi_bus_type; /** * struct spi_statistics - statistics for spi transfers - * @clock: lock protecting this structure + * @lock: lock protecting this structure * * @messages: number of spi-messages handled * @transfers: number of spi_transfers handled diff --git a/include/linux/string.h b/include/linux/string.h index a8d90db9c4b0..9ef7795e65e4 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -25,6 +25,9 @@ extern char * strncpy(char *,const char *, __kernel_size_t); #ifndef __HAVE_ARCH_STRLCPY size_t strlcpy(char *, const char *, size_t); #endif +#ifndef __HAVE_ARCH_STRSCPY +ssize_t __must_check strscpy(char *, const char *, size_t); +#endif #ifndef __HAVE_ARCH_STRCAT extern char * strcat(char *, const char *); #endif diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 7591788e9fbf..357e44c1a46b 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -42,6 +42,7 @@ struct sock_xprt { /* * Connection of transports */ + unsigned long sock_state; struct delayed_work connect_worker; struct sockaddr_storage srcaddr; unsigned short srcport; @@ -76,6 +77,8 @@ struct sock_xprt { */ #define TCP_RPC_REPLY (1UL << 6) +#define XPRT_SOCK_CONNECTING 1U + #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 17292fee8686..157d366e761b 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -360,7 +360,7 @@ static inline struct thermal_zone_device * thermal_zone_of_sensor_register(struct device *dev, int id, void *data, const struct thermal_zone_of_device_ops *ops) { - return NULL; + return 
ERR_PTR(-ENODEV); } static inline @@ -380,6 +380,8 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) int power_actor_get_max_power(struct thermal_cooling_device *, struct thermal_zone_device *tz, u32 *max_power); +int power_actor_get_min_power(struct thermal_cooling_device *, + struct thermal_zone_device *tz, u32 *min_power); int power_actor_set_power(struct thermal_cooling_device *, struct thermal_instance *, u32); struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, @@ -415,6 +417,10 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev, struct thermal_zone_device *tz, u32 *max_power) { return 0; } +static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev, + struct thermal_zone_device *tz, + u32 *min_power) +{ return -ENODEV; } static inline int power_actor_set_power(struct thermal_cooling_device *cdev, struct thermal_instance *tz, u32 power) { return 0; } diff --git a/include/linux/tick.h b/include/linux/tick.h index 48d901f83f92..e312219ff823 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -147,11 +147,20 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) cpumask_or(mask, mask, tick_nohz_full_mask); } +static inline int housekeeping_any_cpu(void) +{ + return cpumask_any_and(housekeeping_mask, cpu_online_mask); +} + extern void tick_nohz_full_kick(void); extern void tick_nohz_full_kick_cpu(int cpu); extern void tick_nohz_full_kick_all(void); extern void __tick_nohz_task_switch(void); #else +static inline int housekeeping_any_cpu(void) +{ + return smp_processor_id(); +} static inline bool tick_nohz_full_enabled(void) { return false; } static inline bool tick_nohz_full_cpu(int cpu) { return false; } static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { } diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index 3dd5a781da99..bfb74723f151 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h @@ -157,7 +157,7 @@ struct renesas_usbhs_driver_param { */ int pio_dma_border; /* default is 64byte */ - u32 type; + uintptr_t type; u32 enable_gpio; /* diff --git a/include/linux/wait.h b/include/linux/wait.h index d3d077228d4c..1e1bf9f963a9 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -147,8 +147,7 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old) typedef int wait_bit_action_f(struct wait_bit_key *); void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); -void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr, - void *key); +void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); @@ -180,7 +179,7 @@ wait_queue_head_t *bit_waitqueue(void *, int); #define wake_up_poll(x, m) \ __wake_up(x, TASK_NORMAL, 1, (void *) (m)) #define wake_up_locked_poll(x, m) \ - __wake_up_locked_key((x), TASK_NORMAL, 1, (void *) (m)) + __wake_up_locked_key((x), TASK_NORMAL, (void *) (m)) #define wake_up_interruptible_poll(x, m) \ __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m)) #define wake_up_interruptible_sync_poll(x, m) \ diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 
4a167b30a12f..b36d837c701e 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -63,7 +63,11 @@ struct unix_sock { #define UNIX_GC_MAYBE_CYCLE 1 struct socket_wq peer_wq; }; -#define unix_sk(__sk) ((struct unix_sock *)__sk) + +static inline struct unix_sock *unix_sk(const struct sock *sk) +{ + return (struct unix_sock *)sk; +} #define peer_wait peer_wq.wait diff --git a/include/net/flow.h b/include/net/flow.h index acd6a096250e..9b85db85f13c 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -35,6 +35,7 @@ struct flowi_common { #define FLOWI_FLAG_ANYSRC 0x01 #define FLOWI_FLAG_KNOWN_NH 0x02 #define FLOWI_FLAG_VRFSRC 0x04 +#define FLOWI_FLAG_SKIP_NH_OIF 0x08 __u32 flowic_secid; struct flowi_tunnel flowic_tun_key; }; diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 879d6e5a973b..fc1937698625 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -110,7 +110,19 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, struct inet_hashinfo *hashinfo); -void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo); +void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, + bool rearm); + +static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo) +{ + __inet_twsk_schedule(tw, timeo, false); +} + +static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo) +{ + __inet_twsk_schedule(tw, timeo, true); +} + void inet_twsk_deschedule_put(struct inet_timewait_sock *tw); void inet_twsk_purge(struct inet_hashinfo *hashinfo, diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 063d30474cf6..aaf9700fc9e5 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -275,7 +275,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info, struct mx6_config *mxc); int fib6_del(struct rt6_info *rt, struct nl_info *info); -void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info); +void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info, + unsigned int flags); void fib6_run_gc(unsigned long expires, struct net *net, bool force); diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index b8529aa1dae7..fa915fa0f703 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -32,6 +32,12 @@ struct __ip6_tnl_parm { __be32 o_key; }; +struct ip6_tnl_dst { + seqlock_t lock; + struct dst_entry __rcu *dst; + u32 cookie; +}; + /* IPv6 tunnel */ struct ip6_tnl { struct ip6_tnl __rcu *next; /* next tunnel in list */ @@ -39,8 +45,7 @@ struct ip6_tnl { struct net *net; /* netns for packet i/o */ struct __ip6_tnl_parm parms; /* tunnel configuration parameters */ struct flowi fl; /* flowi template for xmit */ - struct dst_entry *dst_cache; /* cached dst */ - u32 dst_cookie; + struct ip6_tnl_dst __percpu *dst_cache; /* cached dst */ int err_count; unsigned long err_time; @@ -60,9 +65,11 @@ struct ipv6_tlv_tnl_enc_lim { __u8 encap_limit; /* tunnel encapsulation limit */ } __packed; -struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t); +struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t); +int ip6_tnl_dst_init(struct ip6_tnl *t); +void ip6_tnl_dst_destroy(struct ip6_tnl *t); void ip6_tnl_dst_reset(struct ip6_tnl *t); -void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst); +void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst); int ip6_tnl_rcv_ctl(struct ip6_tnl *t, 
const struct in6_addr *laddr, const struct in6_addr *raddr); int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, @@ -79,7 +86,7 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, struct net_device_stats *stats = &dev->stats; int pkt_len, err; - pkt_len = skb->len; + pkt_len = skb->len - skb_inner_network_offset(skb); err = ip6_local_out_sk(sk, skb); if (net_xmit_eval(err) == 0) { diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index a37d0432bebd..727d6e9a9685 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -236,8 +236,11 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp, rcu_read_lock(); tb = fib_get_table(net, RT_TABLE_MAIN); - if (tb && !fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF)) - err = 0; + if (tb) + err = fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF); + + if (err == -EAGAIN) + err = -ENETUNREACH; rcu_read_unlock(); @@ -258,7 +261,7 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res, unsigned int flags) { struct fib_table *tb; - int err; + int err = -ENETUNREACH; flags |= FIB_LOOKUP_NOREF; if (net->ipv4.fib_has_custom_rules) @@ -268,15 +271,20 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp, res->tclassid = 0; - for (err = 0; !err; err = -ENETUNREACH) { - tb = rcu_dereference_rtnl(net->ipv4.fib_main); - if (tb && !fib_table_lookup(tb, flp, res, flags)) - break; + tb = rcu_dereference_rtnl(net->ipv4.fib_main); + if (tb) + err = fib_table_lookup(tb, flp, res, flags); + + if (!err) + goto out; + + tb = rcu_dereference_rtnl(net->ipv4.fib_default); + if (tb) + err = fib_table_lookup(tb, flp, res, flags); - tb = rcu_dereference_rtnl(net->ipv4.fib_default); - if (tb && !fib_table_lookup(tb, flp, res, flags)) - break; - } +out: + if (err == -EAGAIN) + err = -ENETUNREACH; rcu_read_unlock(); diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 9a6a3ba888e8..f6dafec9102c 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -276,6 +276,8 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, __be32 src, __be32 dst, u8 proto, u8 tos, u8 ttl, __be16 df, bool xnet); +struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, + gfp_t flags); struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum, int gso_type_mask); diff --git a/include/net/route.h b/include/net/route.h index cc61cb95f059..f46af256880c 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -255,7 +255,7 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32 flow_flags |= FLOWI_FLAG_ANYSRC; if (netif_index_is_vrf(sock_net(sk), oif)) - flow_flags |= FLOWI_FLAG_VRFSRC; + flow_flags |= FLOWI_FLAG_VRFSRC | FLOWI_FLAG_SKIP_NH_OIF; flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, protocol, flow_flags, dst, src, dport, sport); diff --git a/include/net/sock.h b/include/net/sock.h index 7aa78440559a..e23717013a4e 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -828,6 +828,14 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s if (sk_rcvqueues_full(sk, limit)) return -ENOBUFS; + /* + * If the skb was allocated from pfmemalloc reserves, only + * allow SOCK_MEMALLOC sockets to use it as this socket is + * helping free memory + */ + if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) + return -ENOMEM; + 
__sk_add_backlog(sk, skb); sk->sk_backlog.len += skb->truesize; return 0; diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h index 391dae1931c0..a0fa975cd1c1 100644 --- a/include/rdma/opa_port_info.h +++ b/include/rdma/opa_port_info.h @@ -294,8 +294,8 @@ struct opa_port_states { struct opa_port_state_info { struct opa_port_states port_states; - u16 link_width_downgrade_tx_active; - u16 link_width_downgrade_rx_active; + __be16 link_width_downgrade_tx_active; + __be16 link_width_downgrade_rx_active; }; struct opa_port_info { diff --git a/include/sound/rcar_snd.h b/include/sound/rcar_snd.h index bb7b2ebfee7b..d8e33d38da43 100644 --- a/include/sound/rcar_snd.h +++ b/include/sound/rcar_snd.h @@ -12,7 +12,6 @@ #ifndef RCAR_SND_H #define RCAR_SND_H -#include <linux/sh_clk.h> #define RSND_GEN1_SRU 0 #define RSND_GEN1_ADG 1 diff --git a/include/sound/simple_card.h b/include/sound/simple_card.h index b9b4f289fe6b..0399352f3a62 100644 --- a/include/sound/simple_card.h +++ b/include/sound/simple_card.h @@ -19,6 +19,8 @@ struct asoc_simple_dai { unsigned int sysclk; int slots; int slot_width; + unsigned int tx_slot_mask; + unsigned int rx_slot_mask; struct clk *clk; }; diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index 2df96b1384c7..212eaaf172ed 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -48,10 +48,25 @@ struct snd_compr_stream; #define SND_SOC_DAIFMT_GATED (0 << 4) /* clock is gated */ /* - * DAI hardware signal inversions. + * DAI hardware signal polarity. * * Specifies whether the DAI can also support inverted clocks for the specified * format. + * + * BCLK: + * - "normal" polarity means signal is available at rising edge of BCLK + * - "inverted" polarity means signal is available at falling edge of BCLK + * + * FSYNC "normal" polarity depends on the frame format: + * - I2S: frame consists of left then right channel data. Left channel starts + * with falling FSYNC edge, right channel starts with rising FSYNC edge. + * - Left/Right Justified: frame consists of left then right channel data. + * Left channel starts with rising FSYNC edge, right channel starts with + * falling FSYNC edge. + * - DSP A/B: Frame starts with rising FSYNC edge. + * - AC97: Frame starts with rising FSYNC edge. + * + * "Negative" FSYNC polarity is the one opposite of "normal" polarity. 
*/ #define SND_SOC_DAIFMT_NB_NF (0 << 8) /* normal bit clock + frame */ #define SND_SOC_DAIFMT_NB_IF (2 << 8) /* normal BCLK + inv FRM */ @@ -214,7 +229,7 @@ struct snd_soc_dai_driver { int (*suspend)(struct snd_soc_dai *dai); int (*resume)(struct snd_soc_dai *dai); /* compress dai */ - bool compress_dai; + int (*compress_new)(struct snd_soc_pcm_runtime *rtd, int num); /* DAI is also used for the control bus */ bool bus_control; diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 5abba037d245..7855cfe46b69 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -451,6 +451,9 @@ int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream, struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm( struct snd_kcontrol *kcontrol); +struct snd_soc_dapm_widget *snd_soc_dapm_kcontrol_widget( + struct snd_kcontrol *kcontrol); + int snd_soc_dapm_force_bias_level(struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level); diff --git a/include/sound/soc.h b/include/sound/soc.h index 884e728b09d9..904d69429087 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -86,7 +86,7 @@ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ SNDRV_CTL_ELEM_ACCESS_READWRITE, \ .tlv.p = (tlv_array),\ - .info = snd_soc_info_volsw, \ + .info = snd_soc_info_volsw_sx, \ .get = snd_soc_get_volsw_sx,\ .put = snd_soc_put_volsw_sx, \ .private_value = (unsigned long)&(struct soc_mixer_control) \ @@ -156,7 +156,7 @@ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ SNDRV_CTL_ELEM_ACCESS_READWRITE, \ .tlv.p = (tlv_array), \ - .info = snd_soc_info_volsw, \ + .info = snd_soc_info_volsw_sx, \ .get = snd_soc_get_volsw_sx, \ .put = snd_soc_put_volsw_sx, \ .private_value = (unsigned long)&(struct soc_mixer_control) \ @@ -226,6 +226,18 @@ .info = snd_soc_info_volsw, \ .get = xhandler_get, .put = xhandler_put, \ .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert, 0) } +#define SOC_SINGLE_RANGE_EXT_TLV(xname, xreg, xshift, xmin, xmax, xinvert, \ + xhandler_get, xhandler_put, tlv_array) \ +{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\ + .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\ + SNDRV_CTL_ELEM_ACCESS_READWRITE,\ + .tlv.p = (tlv_array), \ + .info = snd_soc_info_volsw_range, \ + .get = xhandler_get, .put = xhandler_put, \ + .private_value = (unsigned long)&(struct soc_mixer_control) \ + {.reg = xreg, .rreg = xreg, .shift = xshift, \ + .rshift = xshift, .min = xmin, .max = xmax, \ + .platform_max = xmax, .invert = xinvert} } #define SOC_DOUBLE_EXT_TLV(xname, xreg, shift_left, shift_right, xmax, xinvert,\ xhandler_get, xhandler_put, tlv_array) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \ @@ -440,7 +452,9 @@ int snd_soc_platform_read(struct snd_soc_platform *platform, int snd_soc_platform_write(struct snd_soc_platform *platform, unsigned int reg, unsigned int val); int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num); -int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num); +#ifdef CONFIG_SND_SOC_COMPRESS +int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num); +#endif struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card, const char *dai_link, int stream); @@ -574,6 +588,8 @@ int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); int snd_soc_info_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo); +int snd_soc_info_volsw_sx(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_info *uinfo); #define snd_soc_info_bool_ext 
snd_ctl_boolean_mono_info int snd_soc_get_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); @@ -591,7 +607,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); -int snd_soc_limit_volume(struct snd_soc_codec *codec, +int snd_soc_limit_volume(struct snd_soc_card *card, const char *name, int max); int snd_soc_bytes_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo); @@ -1601,6 +1617,8 @@ int snd_soc_of_parse_card_name(struct snd_soc_card *card, int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card, const char *propname); int snd_soc_of_parse_tdm_slot(struct device_node *np, + unsigned int *tx_mask, + unsigned int *rx_mask, unsigned int *slots, unsigned int *slot_width); void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card, diff --git a/include/sound/wm8904.h b/include/sound/wm8904.h index 898be3a8db9a..6d8f8fba3341 100644 --- a/include/sound/wm8904.h +++ b/include/sound/wm8904.h @@ -119,7 +119,7 @@ #define WM8904_MIC_REGS 2 #define WM8904_GPIO_REGS 4 #define WM8904_DRC_REGS 4 -#define WM8904_EQ_REGS 25 +#define WM8904_EQ_REGS 24 /** * DRC configurations are specified with a label and a set of register diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index ac9bf1c0e42d..5f48754dc36a 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -730,6 +730,7 @@ struct se_device { #define DF_EMULATED_VPD_UNIT_SERIAL 0x00000004 #define DF_USING_UDEV_PATH 0x00000008 #define DF_USING_ALIAS 0x00000010 +#define DF_READ_ONLY 0x00000020 /* Physical device queue depth */ u32 queue_depth; /* Used for SPC-2 reservations enforce of ISIDs */ diff --git a/include/uapi/asm-generic/signal.h b/include/uapi/asm-generic/signal.h index 9df61f1edb0f..3094618d382f 100644 --- a/include/uapi/asm-generic/signal.h +++ b/include/uapi/asm-generic/signal.h @@ -80,8 +80,10 @@ * SA_RESTORER 0x04000000 */ +#if !defined MINSIGSTKSZ || !defined SIGSTKSZ #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 +#endif #ifndef __ASSEMBLY__ typedef struct { diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 8da542a2874d..ee124009e12a 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -709,17 +709,19 @@ __SYSCALL(__NR_memfd_create, sys_memfd_create) __SYSCALL(__NR_bpf, sys_bpf) #define __NR_execveat 281 __SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat) -#define __NR_membarrier 282 +#define __NR_userfaultfd 282 +__SYSCALL(__NR_userfaultfd, sys_userfaultfd) +#define __NR_membarrier 283 __SYSCALL(__NR_membarrier, sys_membarrier) #undef __NR_syscalls -#define __NR_syscalls 283 +#define __NR_syscalls 284 /* * All syscalls below here should go away really, * these are provided for both review and as a porting * help for the C library version. -* + * * Last chance: are any of these important enough to * enable by default? 
*/ diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h index 34141a5dfe74..f8b01887a495 100644 --- a/include/uapi/linux/lwtunnel.h +++ b/include/uapi/linux/lwtunnel.h @@ -21,8 +21,6 @@ enum lwtunnel_ip_t { LWTUNNEL_IP_SRC, LWTUNNEL_IP_TTL, LWTUNNEL_IP_TOS, - LWTUNNEL_IP_SPORT, - LWTUNNEL_IP_DPORT, LWTUNNEL_IP_FLAGS, __LWTUNNEL_IP_MAX, }; @@ -36,8 +34,6 @@ enum lwtunnel_ip6_t { LWTUNNEL_IP6_SRC, LWTUNNEL_IP6_HOPLIMIT, LWTUNNEL_IP6_TC, - LWTUNNEL_IP6_SPORT, - LWTUNNEL_IP6_DPORT, LWTUNNEL_IP6_FLAGS, __LWTUNNEL_IP6_MAX, }; diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 32e07d8cbaf4..036f73bc54cd 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -323,10 +323,10 @@ enum ovs_key_attr { OVS_KEY_ATTR_MPLS, /* array of struct ovs_key_mpls. * The implementation may restrict * the accepted length of the array. */ - OVS_KEY_ATTR_CT_STATE, /* u8 bitmask of OVS_CS_F_* */ + OVS_KEY_ATTR_CT_STATE, /* u32 bitmask of OVS_CS_F_* */ OVS_KEY_ATTR_CT_ZONE, /* u16 connection tracking zone. */ OVS_KEY_ATTR_CT_MARK, /* u32 connection tracking mark */ - OVS_KEY_ATTR_CT_LABEL, /* 16-octet connection tracking label */ + OVS_KEY_ATTR_CT_LABELS, /* 16-octet connection tracking label */ #ifdef __KERNEL__ OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info */ @@ -439,9 +439,9 @@ struct ovs_key_nd { __u8 nd_tll[ETH_ALEN]; }; -#define OVS_CT_LABEL_LEN 16 -struct ovs_key_ct_label { - __u8 ct_label[OVS_CT_LABEL_LEN]; +#define OVS_CT_LABELS_LEN 16 +struct ovs_key_ct_labels { + __u8 ct_labels[OVS_CT_LABELS_LEN]; }; /* OVS_KEY_ATTR_CT_STATE flags */ @@ -449,9 +449,9 @@ struct ovs_key_ct_label { #define OVS_CS_F_ESTABLISHED 0x02 /* Part of an existing connection. */ #define OVS_CS_F_RELATED 0x04 /* Related to an established * connection. */ -#define OVS_CS_F_INVALID 0x20 /* Could not track connection. */ -#define OVS_CS_F_REPLY_DIR 0x40 /* Flow is in the reply direction. */ -#define OVS_CS_F_TRACKED 0x80 /* Conntrack has occurred. */ +#define OVS_CS_F_REPLY_DIR 0x08 /* Flow is in the reply direction. */ +#define OVS_CS_F_INVALID 0x10 /* Could not track connection. */ +#define OVS_CS_F_TRACKED 0x20 /* Conntrack has occurred. */ /** * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands. @@ -618,22 +618,24 @@ struct ovs_action_hash { /** * enum ovs_ct_attr - Attributes for %OVS_ACTION_ATTR_CT action. - * @OVS_CT_ATTR_FLAGS: u32 connection tracking flags. + * @OVS_CT_ATTR_COMMIT: If present, commits the connection to the conntrack + * table. This allows future packets for the same connection to be identified + * as 'established' or 'related'. * @OVS_CT_ATTR_ZONE: u16 connection tracking zone. * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the * mask, the corresponding bit in the value is copied to the connection * tracking mark field in the connection. - * @OVS_CT_ATTR_LABEL: %OVS_CT_LABEL_LEN value followed by %OVS_CT_LABEL_LEN + * @OVS_CT_ATTR_LABEL: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN * mask. For each bit set in the mask, the corresponding bit in the value is * copied to the connection tracking label field in the connection. * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG. */ enum ovs_ct_attr { OVS_CT_ATTR_UNSPEC, - OVS_CT_ATTR_FLAGS, /* u8 bitmask of OVS_CT_F_*. */ + OVS_CT_ATTR_COMMIT, /* No argument, commits connection. */ OVS_CT_ATTR_ZONE, /* u16 zone id. */ OVS_CT_ATTR_MARK, /* mark to associate with this connection. 
*/ - OVS_CT_ATTR_LABEL, /* label to associate with this connection. */ + OVS_CT_ATTR_LABELS, /* labels to associate with this connection. */ OVS_CT_ATTR_HELPER, /* netlink helper to assist detection of related connections. */ __OVS_CT_ATTR_MAX @@ -641,14 +643,6 @@ enum ovs_ct_attr { #define OVS_CT_ATTR_MAX (__OVS_CT_ATTR_MAX - 1) -/* - * OVS_CT_ATTR_FLAGS flags - bitmask of %OVS_CT_F_* - * @OVS_CT_F_COMMIT: Commits the flow to the conntrack table. This allows - * future packets for the same connection to be identified as 'established' - * or 'related'. - */ -#define OVS_CT_F_COMMIT 0x01 - /** * enum ovs_action_attr - Action types. * @@ -705,7 +699,7 @@ enum ovs_action_attr { * data immediately followed by a mask. * The data must be zero for the unmasked * bits. */ - OVS_ACTION_ATTR_CT, /* One nested OVS_CT_ATTR_* . */ + OVS_ACTION_ATTR_CT, /* Nested OVS_CT_ATTR_* . */ __OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted * from userspace. */ diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 702024769c74..9d8f5d10c1e5 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -160,7 +160,7 @@ struct rtattr { /* Macros to handle rtattributes */ -#define RTA_ALIGNTO 4 +#define RTA_ALIGNTO 4U #define RTA_ALIGN(len) ( ((len)+RTA_ALIGNTO-1) & ~(RTA_ALIGNTO-1) ) #define RTA_OK(rta,len) ((len) >= (int)sizeof(struct rtattr) && \ (rta)->rta_len >= sizeof(struct rtattr) && \ diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index df0e09bb7dd5..9057d7af3ae1 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -11,8 +11,6 @@ #include <linux/types.h> -#include <linux/compiler.h> - #define UFFD_API ((__u64)0xAA) /* * After implementing the respective features it will become: diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h index 9ce083960a25..f18490985fc8 100644 --- a/include/xen/interface/sched.h +++ b/include/xen/interface/sched.h @@ -107,5 +107,13 @@ struct sched_watchdog { #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ #define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */ +/* + * Domain asked to perform 'soft reset' for it. The expected behavior is to + * reset internal Xen state for the domain returning it to the point where it + * was created but leaving the domain's memory contents and vCPU contexts + * intact. This will allow the domain to start over and set up all Xen specific + * interfaces again. + */ +#define SHUTDOWN_soft_reset 5 #endif /* __XEN_PUBLIC_SCHED_H__ */ diff --git a/ipc/msg.c b/ipc/msg.c index 66c4f567eb73..1471db9a7e61 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -137,13 +137,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) return retval; } - /* ipc_addid() locks msq upon success. */ - id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); - if (id < 0) { - ipc_rcu_putref(msq, msg_rcu_free); - return id; - } - msq->q_stime = msq->q_rtime = 0; msq->q_ctime = get_seconds(); msq->q_cbytes = msq->q_qnum = 0; @@ -153,6 +146,13 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) INIT_LIST_HEAD(&msq->q_receivers); INIT_LIST_HEAD(&msq->q_senders); + /* ipc_addid() locks msq upon success. 
*/ + id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); + if (id < 0) { + ipc_rcu_putref(msq, msg_rcu_free); + return id; + } + ipc_unlock_object(&msq->q_perm); rcu_read_unlock(); diff --git a/ipc/shm.c b/ipc/shm.c index 222131e8e38f..41787276e141 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -551,12 +551,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) if (IS_ERR(file)) goto no_file; - id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); - if (id < 0) { - error = id; - goto no_id; - } - shp->shm_cprid = task_tgid_vnr(current); shp->shm_lprid = 0; shp->shm_atim = shp->shm_dtim = 0; @@ -565,6 +559,13 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) shp->shm_nattch = 0; shp->shm_file = file; shp->shm_creator = current; + + id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); + if (id < 0) { + error = id; + goto no_id; + } + list_add(&shp->shm_clist, ¤t->sysvshm.shm_clist); /* diff --git a/ipc/util.c b/ipc/util.c index be4230020a1f..0f401d94b7c6 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -237,6 +237,10 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size) rcu_read_lock(); spin_lock(&new->lock); + current_euid_egid(&euid, &egid); + new->cuid = new->uid = euid; + new->gid = new->cgid = egid; + id = idr_alloc(&ids->ipcs_idr, new, (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0, GFP_NOWAIT); @@ -249,10 +253,6 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size) ids->in_use++; - current_euid_egid(&euid, &egid); - new->cuid = new->uid = euid; - new->gid = new->cgid = egid; - if (next_id < 0) { new->seq = ids->seq++; if (ids->seq > IPCID_SEQ_MAX) diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 2cf0f79f1fc9..2c9eae6ad970 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -46,7 +46,6 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/rwsem.h> -#include <linux/percpu-rwsem.h> #include <linux/string.h> #include <linux/sort.h> #include <linux/kmod.h> @@ -104,8 +103,6 @@ static DEFINE_SPINLOCK(cgroup_idr_lock); */ static DEFINE_SPINLOCK(release_agent_path_lock); -struct percpu_rw_semaphore cgroup_threadgroup_rwsem; - #define cgroup_assert_mutex_or_rcu_locked() \ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ !lockdep_is_held(&cgroup_mutex), \ @@ -874,6 +871,48 @@ static struct css_set *find_css_set(struct css_set *old_cset, return cset; } +void cgroup_threadgroup_change_begin(struct task_struct *tsk) +{ + down_read(&tsk->signal->group_rwsem); +} + +void cgroup_threadgroup_change_end(struct task_struct *tsk) +{ + up_read(&tsk->signal->group_rwsem); +} + +/** + * threadgroup_lock - lock threadgroup + * @tsk: member task of the threadgroup to lock + * + * Lock the threadgroup @tsk belongs to. No new task is allowed to enter + * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or + * change ->group_leader/pid. This is useful for cases where the threadgroup + * needs to stay stable across blockable operations. + * + * fork and exit explicitly call threadgroup_change_{begin|end}() for + * synchronization. While held, no new task will be added to threadgroup + * and no existing live task will have its PF_EXITING set. + * + * de_thread() does threadgroup_change_{begin|end}() when a non-leader + * sub-thread becomes a new leader. 
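The ipc/msg.c and ipc/shm.c hunks above move ipc_addid() to the end of object setup, and the ipc/util.c hunk fills in the uid/gid fields before idr_alloc() makes the object findable. A minimal sketch of the same "initialize fully, then publish" pattern, with hypothetical names ("thing") standing in for the ipc objects:

	#include <linux/errno.h>
	#include <linux/idr.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct thing {
		int id;
		int value;
	};

	static DEFINE_IDR(thing_idr);
	static DEFINE_SPINLOCK(thing_lock);

	/* Nothing may observe a half-initialized object, so every field is
	 * filled in before idr_alloc() publishes it. */
	static int thing_create(int value)
	{
		struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);
		int id;

		if (!t)
			return -ENOMEM;

		t->value = value;				/* initialize first ...   */

		idr_preload(GFP_KERNEL);
		spin_lock(&thing_lock);
		id = idr_alloc(&thing_idr, t, 0, 0, GFP_NOWAIT);/* ... publish last */
		spin_unlock(&thing_lock);
		idr_preload_end();

		if (id < 0) {
			kfree(t);
			return id;
		}
		t->id = id;
		return id;
	}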
+ */ +static void threadgroup_lock(struct task_struct *tsk) +{ + down_write(&tsk->signal->group_rwsem); +} + +/** + * threadgroup_unlock - unlock threadgroup + * @tsk: member task of the threadgroup to unlock + * + * Reverse threadgroup_lock(). + */ +static inline void threadgroup_unlock(struct task_struct *tsk) +{ + up_write(&tsk->signal->group_rwsem); +} + static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root) { struct cgroup *root_cgrp = kf_root->kn->priv; @@ -2074,9 +2113,9 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, lockdep_assert_held(&css_set_rwsem); /* - * We are synchronized through cgroup_threadgroup_rwsem against - * PF_EXITING setting such that we can't race against cgroup_exit() - * changing the css_set to init_css_set and dropping the old one. + * We are synchronized through threadgroup_lock() against PF_EXITING + * setting such that we can't race against cgroup_exit() changing the + * css_set to init_css_set and dropping the old one. */ WARN_ON_ONCE(tsk->flags & PF_EXITING); old_cset = task_css_set(tsk); @@ -2133,11 +2172,10 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets) * @src_cset and add it to @preloaded_csets, which should later be cleaned * up by cgroup_migrate_finish(). * - * This function may be called without holding cgroup_threadgroup_rwsem - * even if the target is a process. Threads may be created and destroyed - * but as long as cgroup_mutex is not dropped, no new css_set can be put - * into play and the preloaded css_sets are guaranteed to cover all - * migrations. + * This function may be called without holding threadgroup_lock even if the + * target is a process. Threads may be created and destroyed but as long + * as cgroup_mutex is not dropped, no new css_set can be put into play and + * the preloaded css_sets are guaranteed to cover all migrations. */ static void cgroup_migrate_add_src(struct css_set *src_cset, struct cgroup *dst_cgrp, @@ -2240,7 +2278,7 @@ err: * @threadgroup: whether @leader points to the whole process or a single task * * Migrate a process or task denoted by @leader to @cgrp. If migrating a - * process, the caller must be holding cgroup_threadgroup_rwsem. The + * process, the caller must be holding threadgroup_lock of @leader. The * caller is also responsible for invoking cgroup_migrate_add_src() and * cgroup_migrate_prepare_dst() on the targets before invoking this * function and following up with cgroup_migrate_finish(). @@ -2368,7 +2406,7 @@ out_release_tset: * @leader: the task or the leader of the threadgroup to be attached * @threadgroup: attach the whole threadgroup? * - * Call holding cgroup_mutex and cgroup_threadgroup_rwsem. + * Call holding cgroup_mutex and threadgroup_lock of @leader. 
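With the per-process rwsem back in signal_struct, the helpers above pair a cheap read lock on the fork/exec side with a write lock around whole-group migration. A minimal sketch of that pairing, written as if it sat next to the helpers in kernel/cgroup.c (the two caller functions are hypothetical):

	#include <linux/sched.h>

	/* fork/exec side: per-threadgroup read lock around the window in
	 * which a new thread is linked into the group. */
	static void example_link_new_thread(struct task_struct *tsk)
	{
		cgroup_threadgroup_change_begin(tsk);	/* down_read(&sig->group_rwsem) */
		/* ... link the thread, copy css_set pointers, ... */
		cgroup_threadgroup_change_end(tsk);	/* up_read(&sig->group_rwsem)   */
	}

	/* cgroup attach side: the write lock excludes all of the readers for
	 * the duration of a whole-group migration. */
	static void example_migrate_group(struct task_struct *leader)
	{
		threadgroup_lock(leader);		/* down_write(&sig->group_rwsem) */
		/* ... migrate every thread of @leader ... */
		threadgroup_unlock(leader);
	}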
*/ static int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader, bool threadgroup) @@ -2460,13 +2498,14 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf, if (!cgrp) return -ENODEV; - percpu_down_write(&cgroup_threadgroup_rwsem); +retry_find_task: rcu_read_lock(); if (pid) { tsk = find_task_by_vpid(pid); if (!tsk) { + rcu_read_unlock(); ret = -ESRCH; - goto out_unlock_rcu; + goto out_unlock_cgroup; } } else { tsk = current; @@ -2482,23 +2521,37 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf, */ if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) { ret = -EINVAL; - goto out_unlock_rcu; + rcu_read_unlock(); + goto out_unlock_cgroup; } get_task_struct(tsk); rcu_read_unlock(); + threadgroup_lock(tsk); + if (threadgroup) { + if (!thread_group_leader(tsk)) { + /* + * a race with de_thread from another thread's exec() + * may strip us of our leadership, if this happens, + * there is no choice but to throw this task away and + * try again; this is + * "double-double-toil-and-trouble-check locking". + */ + threadgroup_unlock(tsk); + put_task_struct(tsk); + goto retry_find_task; + } + } + ret = cgroup_procs_write_permission(tsk, cgrp, of); if (!ret) ret = cgroup_attach_task(cgrp, tsk, threadgroup); - put_task_struct(tsk); - goto out_unlock_threadgroup; + threadgroup_unlock(tsk); -out_unlock_rcu: - rcu_read_unlock(); -out_unlock_threadgroup: - percpu_up_write(&cgroup_threadgroup_rwsem); + put_task_struct(tsk); +out_unlock_cgroup: cgroup_kn_unlock(of->kn); return ret ?: nbytes; } @@ -2643,8 +2696,6 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) lockdep_assert_held(&cgroup_mutex); - percpu_down_write(&cgroup_threadgroup_rwsem); - /* look up all csses currently attached to @cgrp's subtree */ down_read(&css_set_rwsem); css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) { @@ -2700,8 +2751,17 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) goto out_finish; last_task = task; + threadgroup_lock(task); + /* raced against de_thread() from another thread? */ + if (!thread_group_leader(task)) { + threadgroup_unlock(task); + put_task_struct(task); + continue; + } + ret = cgroup_migrate(src_cset->dfl_cgrp, task, true); + threadgroup_unlock(task); put_task_struct(task); if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret)) @@ -2711,7 +2771,6 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) out_finish: cgroup_migrate_finish(&preloaded_csets); - percpu_up_write(&cgroup_threadgroup_rwsem); return ret; } @@ -5024,7 +5083,6 @@ int __init cgroup_init(void) unsigned long key; int ssid, err; - BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem)); BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files)); BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files)); diff --git a/kernel/events/core.c b/kernel/events/core.c index f548f69c4299..b11756f9b6dc 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1243,11 +1243,7 @@ static inline void perf_event__state_init(struct perf_event *event) PERF_EVENT_STATE_INACTIVE; } -/* - * Called at perf_event creation and when events are attached/detached from a - * group. 
- */ -static void perf_event__read_size(struct perf_event *event) +static void __perf_event_read_size(struct perf_event *event, int nr_siblings) { int entry = sizeof(u64); /* value */ int size = 0; @@ -1263,7 +1259,7 @@ static void perf_event__read_size(struct perf_event *event) entry += sizeof(u64); if (event->attr.read_format & PERF_FORMAT_GROUP) { - nr += event->group_leader->nr_siblings; + nr += nr_siblings; size += sizeof(u64); } @@ -1271,14 +1267,11 @@ static void perf_event__read_size(struct perf_event *event) event->read_size = size; } -static void perf_event__header_size(struct perf_event *event) +static void __perf_event_header_size(struct perf_event *event, u64 sample_type) { struct perf_sample_data *data; - u64 sample_type = event->attr.sample_type; u16 size = 0; - perf_event__read_size(event); - if (sample_type & PERF_SAMPLE_IP) size += sizeof(data->ip); @@ -1303,6 +1296,17 @@ static void perf_event__header_size(struct perf_event *event) event->header_size = size; } +/* + * Called at perf_event creation and when events are attached/detached from a + * group. + */ +static void perf_event__header_size(struct perf_event *event) +{ + __perf_event_read_size(event, + event->group_leader->nr_siblings); + __perf_event_header_size(event, event->attr.sample_type); +} + static void perf_event__id_header_size(struct perf_event *event) { struct perf_sample_data *data; @@ -1330,6 +1334,27 @@ static void perf_event__id_header_size(struct perf_event *event) event->id_header_size = size; } +static bool perf_event_validate_size(struct perf_event *event) +{ + /* + * The values computed here will be over-written when we actually + * attach the event. + */ + __perf_event_read_size(event, event->group_leader->nr_siblings + 1); + __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); + perf_event__id_header_size(event); + + /* + * Sum the lot; should not exceed the 64k limit we have on records. + * Conservative limit to allow for callchains and other variable fields. + */ + if (event->read_size + event->header_size + + event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) + return false; + + return true; +} + static void perf_group_attach(struct perf_event *event) { struct perf_event *group_leader = event->group_leader, *pos; @@ -8297,13 +8322,35 @@ SYSCALL_DEFINE5(perf_event_open, if (move_group) { gctx = group_leader->ctx; + mutex_lock_double(&gctx->mutex, &ctx->mutex); + } else { + mutex_lock(&ctx->mutex); + } + if (!perf_event_validate_size(event)) { + err = -E2BIG; + goto err_locked; + } + + /* + * Must be under the same ctx::mutex as perf_install_in_context(), + * because we need to serialize with concurrent event creation. + */ + if (!exclusive_event_installable(event, ctx)) { + /* exclusive and group stuff are assumed mutually exclusive */ + WARN_ON_ONCE(move_group); + + err = -EBUSY; + goto err_locked; + } + + WARN_ON_ONCE(ctx->parent_ctx); + + if (move_group) { /* * See perf_event_ctx_lock() for comments on the details * of swizzling perf_event::ctx. */ - mutex_lock_double(&gctx->mutex, &ctx->mutex); - perf_remove_from_context(group_leader, false); list_for_each_entry(sibling, &group_leader->sibling_list, @@ -8311,13 +8358,7 @@ SYSCALL_DEFINE5(perf_event_open, perf_remove_from_context(sibling, false); put_ctx(gctx); } - } else { - mutex_lock(&ctx->mutex); - } - WARN_ON_ONCE(ctx->parent_ctx); - - if (move_group) { /* * Wait for everybody to stop referencing the events through * the old lists, before installing it on new lists. 
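perf_event_validate_size() above computes the sizes a sample record would have if the new event were already attached (hence nr_siblings + 1) and rejects groups whose header, read format and id headers would approach the 64k record limit, conservatively capped at 16k. A minimal user-space sketch of the same PERF_FORMAT_GROUP read-size arithmetic; the function names are hypothetical:

	#include <stdbool.h>
	#include <stdint.h>
	#include <linux/perf_event.h>

	/* Mirrors __perf_event_read_size() for the PERF_FORMAT_GROUP case:
	 * one "nr" word, the optional time fields, then one entry per member. */
	static uint64_t group_read_size(uint64_t read_format, int nr_members)
	{
		uint64_t entry = sizeof(uint64_t);		/* value            */
		uint64_t size  = sizeof(uint64_t);		/* nr               */

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			size += sizeof(uint64_t);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			size += sizeof(uint64_t);
		if (read_format & PERF_FORMAT_ID)
			entry += sizeof(uint64_t);

		return size + nr_members * entry;
	}

	/* Rough analogue of perf_event_validate_size()'s conservative cap. */
	static bool group_would_fit(uint64_t read_format, int nr_members,
				    uint64_t header_size, uint64_t id_header_size)
	{
		return group_read_size(read_format, nr_members) +
		       header_size + id_header_size +
		       sizeof(struct perf_event_header) < 16 * 1024;
	}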
@@ -8349,22 +8390,29 @@ SYSCALL_DEFINE5(perf_event_open, perf_event__state_init(group_leader); perf_install_in_context(ctx, group_leader, group_leader->cpu); get_ctx(ctx); - } - if (!exclusive_event_installable(event, ctx)) { - err = -EBUSY; - mutex_unlock(&ctx->mutex); - fput(event_file); - goto err_context; + /* + * Now that all events are installed in @ctx, nothing + * references @gctx anymore, so drop the last reference we have + * on it. + */ + put_ctx(gctx); } + /* + * Precalculate sample_data sizes; do while holding ctx::mutex such + * that we're serialized against further additions and before + * perf_install_in_context() which is the point the event is active and + * can use these values. + */ + perf_event__header_size(event); + perf_event__id_header_size(event); + perf_install_in_context(ctx, event, event->cpu); perf_unpin_context(ctx); - if (move_group) { + if (move_group) mutex_unlock(&gctx->mutex); - put_ctx(gctx); - } mutex_unlock(&ctx->mutex); put_online_cpus(); @@ -8376,12 +8424,6 @@ SYSCALL_DEFINE5(perf_event_open, mutex_unlock(¤t->perf_event_mutex); /* - * Precalculate sample_data sizes - */ - perf_event__header_size(event); - perf_event__id_header_size(event); - - /* * Drop the reference on the group_event after placing the * new event on the sibling_list. This ensures destruction * of the group leader will find the pointer to itself in @@ -8391,6 +8433,12 @@ SYSCALL_DEFINE5(perf_event_open, fd_install(event_fd, event_file); return event_fd; +err_locked: + if (move_group) + mutex_unlock(&gctx->mutex); + mutex_unlock(&ctx->mutex); +/* err_file: */ + fput(event_file); err_context: perf_unpin_context(ctx); put_ctx(ctx); diff --git a/kernel/fork.c b/kernel/fork.c index 7d5f0f118a63..2845623fb582 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1149,6 +1149,10 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) tty_audit_fork(sig); sched_autogroup_fork(sig); +#ifdef CONFIG_CGROUPS + init_rwsem(&sig->group_rwsem); +#endif + sig->oom_score_adj = current->signal->oom_score_adj; sig->oom_score_adj_min = current->signal->oom_score_adj_min; diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 6e40a9539763..e28169dd1c36 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -83,7 +83,7 @@ int irq_set_handler_data(unsigned int irq, void *data) if (!desc) return -EINVAL; - desc->irq_data.handler_data = data; + desc->irq_common_data.handler_data = data; irq_put_desc_unlock(desc, flags); return 0; } @@ -105,7 +105,7 @@ int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, if (!desc) return -EINVAL; - desc->irq_data.msi_desc = entry; + desc->irq_common_data.msi_desc = entry; if (entry && !irq_offset) entry->irq = irq_base; irq_put_desc_unlock(desc, flags); @@ -372,7 +372,6 @@ static bool irq_may_run(struct irq_desc *desc) /** * handle_simple_irq - Simple and software-decoded IRQs. - * @irq: the interrupt number * @desc: the interrupt description structure for this irq * * Simple interrupts are either sent from a demultiplexing interrupt @@ -382,8 +381,7 @@ static bool irq_may_run(struct irq_desc *desc) * Note: The caller is expected to handle the ack, clear, mask and * unmask issues if necessary. 
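The perf_event_open() reordering above takes both context mutexes up front via mutex_lock_double(), validates the size and exclusivity before modifying anything, and unwinds through the new err_locked label on failure. A short sketch of the address-ordered double-lock idiom such a helper typically uses; this mirrors the idea, not the kernel's static helper itself:

	#include <linux/kernel.h>
	#include <linux/mutex.h>

	/* Taking the two context mutexes in a globally consistent (address)
	 * order means two tasks moving events between the same pair of
	 * contexts cannot deadlock against each other. */
	static void lock_two_contexts(struct mutex *a, struct mutex *b)
	{
		if (b < a)
			swap(a, b);

		mutex_lock(a);
		mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
	}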
*/ -void -handle_simple_irq(unsigned int irq, struct irq_desc *desc) +void handle_simple_irq(struct irq_desc *desc) { raw_spin_lock(&desc->lock); @@ -425,7 +423,6 @@ static void cond_unmask_irq(struct irq_desc *desc) /** * handle_level_irq - Level type irq handler - * @irq: the interrupt number * @desc: the interrupt description structure for this irq * * Level type interrupts are active as long as the hardware line has @@ -433,8 +430,7 @@ static void cond_unmask_irq(struct irq_desc *desc) * it after the associated handler has acknowledged the device, so the * interrupt line is back to inactive. */ -void -handle_level_irq(unsigned int irq, struct irq_desc *desc) +void handle_level_irq(struct irq_desc *desc) { raw_spin_lock(&desc->lock); mask_ack_irq(desc); @@ -496,7 +492,6 @@ static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) /** * handle_fasteoi_irq - irq handler for transparent controllers - * @irq: the interrupt number * @desc: the interrupt description structure for this irq * * Only a single callback will be issued to the chip: an ->eoi() @@ -504,8 +499,7 @@ static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) * for modern forms of interrupt handlers, which handle the flow * details in hardware, transparently. */ -void -handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) +void handle_fasteoi_irq(struct irq_desc *desc) { struct irq_chip *chip = desc->irq_data.chip; @@ -546,7 +540,6 @@ EXPORT_SYMBOL_GPL(handle_fasteoi_irq); /** * handle_edge_irq - edge type IRQ handler - * @irq: the interrupt number * @desc: the interrupt description structure for this irq * * Interrupt occures on the falling and/or rising edge of a hardware @@ -560,8 +553,7 @@ EXPORT_SYMBOL_GPL(handle_fasteoi_irq); * the handler was running. If all pending interrupts are handled, the * loop is left. */ -void -handle_edge_irq(unsigned int irq, struct irq_desc *desc) +void handle_edge_irq(struct irq_desc *desc) { raw_spin_lock(&desc->lock); @@ -618,13 +610,12 @@ EXPORT_SYMBOL(handle_edge_irq); #ifdef CONFIG_IRQ_EDGE_EOI_HANDLER /** * handle_edge_eoi_irq - edge eoi type IRQ handler - * @irq: the interrupt number * @desc: the interrupt description structure for this irq * * Similar as the above handle_edge_irq, but using eoi and w/o the * mask/unmask logic. */ -void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc) +void handle_edge_eoi_irq(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); @@ -665,13 +656,11 @@ out_eoi: /** * handle_percpu_irq - Per CPU local irq handler - * @irq: the interrupt number * @desc: the interrupt description structure for this irq * * Per CPU interrupts on SMP machines without locking requirements */ -void -handle_percpu_irq(unsigned int irq, struct irq_desc *desc) +void handle_percpu_irq(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); @@ -688,7 +677,6 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc) /** * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids - * @irq: the interrupt number * @desc: the interrupt description structure for this irq * * Per CPU interrupts on SMP machines without locking requirements. 
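All of the flow handlers above drop the redundant irq argument; anything that still needs the number derives it from the descriptor, as handle_bad_irq() and handle_percpu_devid_irq() now do. A minimal sketch of a driver-side demux handler updated to the new prototype (the driver function is hypothetical):

	#include <linux/irq.h>
	#include <linux/irqdesc.h>
	#include <linux/irqchip/chained_irq.h>
	#include <linux/printk.h>

	/* New prototype: only the descriptor is passed in. */
	static void my_demux_handler(struct irq_desc *desc)
	{
		struct irq_chip *chip = irq_desc_get_chip(desc);
		unsigned int parent_irq = irq_desc_get_irq(desc);

		chained_irq_enter(chip, desc);
		pr_debug("demux on parent irq %u\n", parent_irq);
		/* ... read the mux status and generic_handle_irq() each child ... */
		chained_irq_exit(chip, desc);
	}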
Same as @@ -698,11 +686,12 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc) * contain the real device id for the cpu on which this handler is * called */ -void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc) +void handle_percpu_devid_irq(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct irqaction *action = desc->action; void *dev_id = raw_cpu_ptr(action->percpu_dev_id); + unsigned int irq = irq_desc_get_irq(desc); irqreturn_t res; kstat_incr_irqs_this_cpu(desc); @@ -796,7 +785,7 @@ irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, return; __irq_do_set_handler(desc, handle, 1, NULL); - desc->irq_data.handler_data = data; + desc->irq_common_data.handler_data = data; irq_put_desc_busunlock(desc, flags); } diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index b6eeea8a80c5..e25a83b67cce 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -22,17 +22,19 @@ /** * handle_bad_irq - handle spurious and unhandled irqs - * @irq: the interrupt number * @desc: description of the interrupt * * Handles spurious and unhandled IRQ's. It also prints a debugmessage. */ -void handle_bad_irq(unsigned int irq, struct irq_desc *desc) +void handle_bad_irq(struct irq_desc *desc) { + unsigned int irq = irq_desc_get_irq(desc); + print_irq_desc(irq, desc); kstat_incr_irqs_this_cpu(desc); ack_bad_irq(irq); } +EXPORT_SYMBOL_GPL(handle_bad_irq); /* * Special, empty irq handler: diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index eee4b385cffb..5ef0c2dbe930 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -194,7 +194,7 @@ static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc) static inline int irq_desc_get_node(struct irq_desc *desc) { - return irq_data_get_node(&desc->irq_data); + return irq_common_data_get_node(&desc->irq_common_data); } #ifdef CONFIG_PM_SLEEP diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 0a2a4b697bcb..239e2ae2c947 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -38,12 +38,13 @@ static void __init init_irq_default_affinity(void) #ifdef CONFIG_SMP static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { - if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node)) + if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity, + gfp, node)) return -ENOMEM; #ifdef CONFIG_GENERIC_PENDING_IRQ if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { - free_cpumask_var(desc->irq_data.affinity); + free_cpumask_var(desc->irq_common_data.affinity); return -ENOMEM; } #endif @@ -52,11 +53,13 @@ static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) static void desc_smp_init(struct irq_desc *desc, int node) { - desc->irq_data.node = node; - cpumask_copy(desc->irq_data.affinity, irq_default_affinity); + cpumask_copy(desc->irq_common_data.affinity, irq_default_affinity); #ifdef CONFIG_GENERIC_PENDING_IRQ cpumask_clear(desc->pending_mask); #endif +#ifdef CONFIG_NUMA + desc->irq_common_data.node = node; +#endif } #else @@ -70,12 +73,13 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node, { int cpu; + desc->irq_common_data.handler_data = NULL; + desc->irq_common_data.msi_desc = NULL; + desc->irq_data.common = &desc->irq_common_data; desc->irq_data.irq = irq; desc->irq_data.chip = &no_irq_chip; desc->irq_data.chip_data = NULL; - desc->irq_data.handler_data = NULL; - desc->irq_data.msi_desc = NULL; irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); 
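The irqdesc.c hunk above moves handler_data, msi_desc, the affinity mask and the NUMA node from irq_data into irq_common_data, which is shared across the whole irq_data hierarchy. Two illustrative accessors, assuming only the fields and helpers the hunks themselves reference:

	#include <linux/irq.h>
	#include <linux/irqdesc.h>

	/* Fields that used to hang off desc->irq_data now live in the shared
	 * irq_common_data, so hierarchical domains see one copy. */
	static void *example_handler_data(struct irq_desc *desc)
	{
		return desc->irq_common_data.handler_data;
	}

	static int example_node(struct irq_desc *desc)
	{
		return irq_common_data_get_node(&desc->irq_common_data);
	}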
irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); desc->handle_irq = handle_bad_irq; @@ -121,7 +125,7 @@ static void free_masks(struct irq_desc *desc) #ifdef CONFIG_GENERIC_PENDING_IRQ free_cpumask_var(desc->pending_mask); #endif - free_cpumask_var(desc->irq_data.affinity); + free_cpumask_var(desc->irq_common_data.affinity); } #else static inline void free_masks(struct irq_desc *desc) { } @@ -343,7 +347,7 @@ int generic_handle_irq(unsigned int irq) if (!desc) return -EINVAL; - generic_handle_irq_desc(irq, desc); + generic_handle_irq_desc(desc); return 0; } EXPORT_SYMBOL_GPL(generic_handle_irq); diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 79baaf8a7813..dc9d27c0c158 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -844,7 +844,6 @@ static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain, child->parent_data = irq_data; irq_data->irq = child->irq; irq_data->common = child->common; - irq_data->node = child->node; irq_data->domain = domain; } diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index ad1b064f94fe..f9a59f6cabd2 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -192,7 +192,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, switch (ret) { case IRQ_SET_MASK_OK: case IRQ_SET_MASK_OK_DONE: - cpumask_copy(data->affinity, mask); + cpumask_copy(desc->irq_common_data.affinity, mask); case IRQ_SET_MASK_OK_NOCOPY: irq_set_thread_affinity(desc); ret = 0; @@ -304,7 +304,7 @@ static void irq_affinity_notify(struct work_struct *work) if (irq_move_pending(&desc->irq_data)) irq_get_pending(cpumask, desc); else - cpumask_copy(cpumask, desc->irq_data.affinity); + cpumask_copy(cpumask, desc->irq_common_data.affinity); raw_spin_unlock_irqrestore(&desc->lock, flags); notify->notify(notify, cpumask); @@ -375,9 +375,9 @@ static int setup_affinity(struct irq_desc *desc, struct cpumask *mask) * one of the targets is online. */ if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { - if (cpumask_intersects(desc->irq_data.affinity, + if (cpumask_intersects(desc->irq_common_data.affinity, cpu_online_mask)) - set = desc->irq_data.affinity; + set = desc->irq_common_data.affinity; else irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); } @@ -829,8 +829,8 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) * This code is triggered unconditionally. Check the affinity * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. 
*/ - if (desc->irq_data.affinity) - cpumask_copy(mask, desc->irq_data.affinity); + if (desc->irq_common_data.affinity) + cpumask_copy(mask, desc->irq_common_data.affinity); else valid = false; raw_spin_unlock_irq(&desc->lock); diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index 7e6512b9dc1f..be9149f62eb8 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -228,11 +228,7 @@ static void msi_domain_update_chip_ops(struct msi_domain_info *info) { struct irq_chip *chip = info->chip; - BUG_ON(!chip); - if (!chip->irq_mask) - chip->irq_mask = pci_msi_mask_irq; - if (!chip->irq_unmask) - chip->irq_unmask = pci_msi_unmask_irq; + BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask); if (!chip->irq_set_affinity) chip->irq_set_affinity = msi_domain_set_affinity; } diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 0e97c142ce40..a50ddc9417ff 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -12,6 +12,7 @@ #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> +#include <linux/mutex.h> #include "internals.h" @@ -39,7 +40,7 @@ static struct proc_dir_entry *root_irq_dir; static int show_irq_affinity(int type, struct seq_file *m, void *v) { struct irq_desc *desc = irq_to_desc((long)m->private); - const struct cpumask *mask = desc->irq_data.affinity; + const struct cpumask *mask = desc->irq_common_data.affinity; #ifdef CONFIG_GENERIC_PENDING_IRQ if (irqd_is_setaffinity_pending(&desc->irq_data)) @@ -323,18 +324,29 @@ void register_handler_proc(unsigned int irq, struct irqaction *action) void register_irq_proc(unsigned int irq, struct irq_desc *desc) { + static DEFINE_MUTEX(register_lock); char name [MAX_NAMELEN]; - if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) + if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip)) return; + /* + * irq directories are registered only when a handler is + * added, not when the descriptor is created, so multiple + * tasks might try to register at the same time. + */ + mutex_lock(®ister_lock); + + if (desc->dir) + goto out_unlock; + memset(name, 0, MAX_NAMELEN); sprintf(name, "%d", irq); /* create /proc/irq/1234 */ desc->dir = proc_mkdir(name, root_irq_dir); if (!desc->dir) - return; + goto out_unlock; #ifdef CONFIG_SMP /* create /proc/irq/<irq>/smp_affinity */ @@ -355,6 +367,9 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) proc_create_data("spurious", 0444, desc->dir, &irq_spurious_proc_fops, (void *)(long)irq); + +out_unlock: + mutex_unlock(®ister_lock); } void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index dd95f44f99b2..b86886beee4f 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c @@ -38,7 +38,7 @@ static void resend_irqs(unsigned long arg) clear_bit(irq, irqs_resend); desc = irq_to_desc(irq); local_irq_disable(); - desc->handle_irq(irq, desc); + desc->handle_irq(desc); local_irq_enable(); } } diff --git a/kernel/kmod.c b/kernel/kmod.c index da98d0593de2..0277d1216f80 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -327,9 +327,13 @@ static void call_usermodehelper_exec_work(struct work_struct *work) call_usermodehelper_exec_sync(sub_info); } else { pid_t pid; - + /* + * Use CLONE_PARENT to reparent it to kthreadd; we do not + * want to pollute current->children, and we need a parent + * that always ignores SIGCHLD to ensure auto-reaping. 
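register_irq_proc() above now serializes concurrent callers with a function-local mutex and re-checks desc->dir under it, because the /proc/irq/<n> directory is created lazily when a handler is added. A minimal sketch of that idempotent-registration idiom with hypothetical names:

	#include <linux/mutex.h>
	#include <linux/proc_fs.h>

	struct lazy_dir {
		struct proc_dir_entry *dir;
	};

	/* Same shape as the hunk above: callers may race to create the
	 * directory, so re-check under a local mutex and let the losers
	 * back out quietly. */
	static void lazy_dir_register(struct lazy_dir *d, const char *name,
				      struct proc_dir_entry *parent)
	{
		static DEFINE_MUTEX(register_lock);

		mutex_lock(&register_lock);
		if (!d->dir)
			d->dir = proc_mkdir(name, parent);
		mutex_unlock(&register_lock);
	}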
+ */ pid = kernel_thread(call_usermodehelper_exec_async, sub_info, - SIGCHLD); + CLONE_PARENT | SIGCHLD); if (pid < 0) { sub_info->retval = pid; umh_complete(sub_info); diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 8acfbf773e06..4e49cc4c9952 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -3068,7 +3068,7 @@ static int __lock_is_held(struct lockdep_map *lock); static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, int trylock, int read, int check, int hardirqs_off, struct lockdep_map *nest_lock, unsigned long ip, - int references) + int references, int pin_count) { struct task_struct *curr = current; struct lock_class *class = NULL; @@ -3157,7 +3157,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, hlock->waittime_stamp = 0; hlock->holdtime_stamp = lockstat_clock(); #endif - hlock->pin_count = 0; + hlock->pin_count = pin_count; if (check && !mark_irqflags(curr, hlock)) return 0; @@ -3343,7 +3343,7 @@ found_it: hlock_class(hlock)->subclass, hlock->trylock, hlock->read, hlock->check, hlock->hardirqs_off, hlock->nest_lock, hlock->acquire_ip, - hlock->references)) + hlock->references, hlock->pin_count)) return 0; } @@ -3433,7 +3433,7 @@ found_it: hlock_class(hlock)->subclass, hlock->trylock, hlock->read, hlock->check, hlock->hardirqs_off, hlock->nest_lock, hlock->acquire_ip, - hlock->references)) + hlock->references, hlock->pin_count)) return 0; } @@ -3583,7 +3583,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, current->lockdep_recursion = 1; trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); __lock_acquire(lock, subclass, trylock, read, check, - irqs_disabled_flags(flags), nest_lock, ip, 0); + irqs_disabled_flags(flags), nest_lock, ip, 0, 0); current->lockdep_recursion = 0; raw_local_irq_restore(flags); } diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index 337c8818541d..87e9ce6a63c5 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -289,7 +289,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) if (pv_enabled()) goto queue; - if (virt_queued_spin_lock(lock)) + if (virt_spin_lock(lock)) return; /* diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 9f75f25cc5d9..775d36cc0050 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3868,6 +3868,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf) static void __init rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) { + static struct lock_class_key rcu_exp_sched_rdp_class; unsigned long flags; struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); struct rcu_node *rnp = rcu_get_root(rsp); @@ -3883,6 +3884,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) mutex_init(&rdp->exp_funnel_mutex); rcu_boot_init_nocb_percpu_data(rdp); raw_spin_unlock_irqrestore(&rnp->lock, flags); + if (rsp == &rcu_sched_state) + lockdep_set_class_and_name(&rdp->exp_funnel_mutex, + &rcu_exp_sched_rdp_class, + "rcu_data_exp_sched"); } /* diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3595403921bd..bcd214e4b4d6 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -621,18 +621,21 @@ int get_nohz_timer_target(void) int i, cpu = smp_processor_id(); struct sched_domain *sd; - if (!idle_cpu(cpu)) + if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu)) return cpu; rcu_read_lock(); for_each_domain(cpu, sd) { for_each_cpu(i, sched_domain_span(sd)) { - if (!idle_cpu(i)) { + if (!idle_cpu(i) && is_housekeeping_cpu(cpu)) { cpu = 
i; goto unlock; } } } + + if (!is_housekeeping_cpu(cpu)) + cpu = housekeeping_any_cpu(); unlock: rcu_read_unlock(); return cpu; @@ -2363,8 +2366,15 @@ void wake_up_new_task(struct task_struct *p) trace_sched_wakeup_new(p); check_preempt_curr(rq, p, WF_FORK); #ifdef CONFIG_SMP - if (p->sched_class->task_woken) + if (p->sched_class->task_woken) { + /* + * Nothing relies on rq->lock after this, so its fine to + * drop it. + */ + lockdep_unpin_lock(&rq->lock); p->sched_class->task_woken(rq, p); + lockdep_pin_lock(&rq->lock); + } #endif task_rq_unlock(rq, p, &flags); } @@ -2514,11 +2524,11 @@ static struct rq *finish_task_switch(struct task_struct *prev) * If a task dies, then it sets TASK_DEAD in tsk->state and calls * schedule one last time. The schedule call will never return, and * the scheduled task must drop that reference. - * The test for TASK_DEAD must occur while the runqueue locks are - * still held, otherwise prev could be scheduled on another cpu, die - * there before we look at prev->state, and then the reference would - * be dropped twice. - * Manfred Spraul <manfred@colorfullife.com> + * + * We must observe prev->state before clearing prev->on_cpu (in + * finish_lock_switch), otherwise a concurrent wakeup can get prev + * running on another CPU and we could rave with its RUNNING -> DEAD + * transition, resulting in a double drop. */ prev_state = prev->state; vtime_task_switch(prev); @@ -2666,13 +2676,20 @@ unsigned long nr_running(void) /* * Check if only the current task is running on the cpu. + * + * Caution: this function does not check that the caller has disabled + * preemption, thus the result might have a time-of-check-to-time-of-use + * race. The caller is responsible to use it correctly, for example: + * + * - from a non-preemptable section (of course) + * + * - from a thread that is bound to a single CPU + * + * - in a loop with very short iterations (e.g. a polling loop) */ bool single_task_running(void) { - if (cpu_rq(smp_processor_id())->nr_running == 1) - return true; - else - return false; + return raw_rq()->nr_running == 1; } EXPORT_SYMBOL(single_task_running); @@ -4924,7 +4941,15 @@ void init_idle(struct task_struct *idle, int cpu) idle->state = TASK_RUNNING; idle->se.exec_start = sched_clock(); - do_set_cpus_allowed(idle, cpumask_of(cpu)); +#ifdef CONFIG_SMP + /* + * Its possible that init_idle() gets called multiple times on a task, + * in that case do_set_cpus_allowed() will not do the right thing. + * + * And since this is boot we can forgo the serialization. + */ + set_cpus_allowed_common(idle, cpumask_of(cpu)); +#endif /* * We're having a chicken and egg problem, even though we are * holding rq->lock, the cpu isn't yet set to this cpu so the @@ -4941,7 +4966,7 @@ void init_idle(struct task_struct *idle, int cpu) rq->curr = rq->idle = idle; idle->on_rq = TASK_ON_RQ_QUEUED; -#if defined(CONFIG_SMP) +#ifdef CONFIG_SMP idle->on_cpu = 1; #endif raw_spin_unlock(&rq->lock); @@ -4956,7 +4981,7 @@ void init_idle(struct task_struct *idle, int cpu) idle->sched_class = &idle_sched_class; ftrace_graph_init_idle_task(idle, cpu); vtime_init_idle(idle, cpu); -#if defined(CONFIG_SMP) +#ifdef CONFIG_SMP sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); #endif } @@ -5178,24 +5203,47 @@ static void migrate_tasks(struct rq *dead_rq) break; /* - * Ensure rq->lock covers the entire task selection - * until the migration. + * pick_next_task assumes pinned rq->lock. 
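The new single_task_running() comment above warns that the answer is only meaningful while the caller cannot migrate between the check and the use. A minimal sketch of a call site that respects that; the polling helper is hypothetical:

	#include <linux/preempt.h>
	#include <linux/sched.h>

	/* Only consult single_task_running() while pinned to the current CPU,
	 * otherwise the result may describe some other CPU entirely. */
	static bool can_spin_a_bit_longer(void)
	{
		bool alone;

		preempt_disable();
		alone = single_task_running();
		preempt_enable();

		return alone;	/* still only a hint once preemption is back on */
	}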
*/ lockdep_pin_lock(&rq->lock); next = pick_next_task(rq, &fake_task); BUG_ON(!next); next->sched_class->put_prev_task(rq, next); + /* + * Rules for changing task_struct::cpus_allowed are holding + * both pi_lock and rq->lock, such that holding either + * stabilizes the mask. + * + * Drop rq->lock is not quite as disastrous as it usually is + * because !cpu_active at this point, which means load-balance + * will not interfere. Also, stop-machine. + */ + lockdep_unpin_lock(&rq->lock); + raw_spin_unlock(&rq->lock); + raw_spin_lock(&next->pi_lock); + raw_spin_lock(&rq->lock); + + /* + * Since we're inside stop-machine, _nothing_ should have + * changed the task, WARN if weird stuff happened, because in + * that case the above rq->lock drop is a fail too. + */ + if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) { + raw_spin_unlock(&next->pi_lock); + continue; + } + /* Find suitable destination for @next, with force if needed. */ dest_cpu = select_fallback_rq(dead_rq->cpu, next); - lockdep_unpin_lock(&rq->lock); rq = __migrate_task(rq, next, dest_cpu); if (rq != dead_rq) { raw_spin_unlock(&rq->lock); rq = dead_rq; raw_spin_lock(&rq->lock); } + raw_spin_unlock(&next->pi_lock); } rq->stop = stop; @@ -7197,9 +7245,6 @@ void __init sched_init_smp(void) alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); alloc_cpumask_var(&fallback_doms, GFP_KERNEL); - /* nohz_full won't take effect without isolating the cpus. */ - tick_nohz_full_add_cpus_to(cpu_isolated_map); - sched_init_numa(); /* diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index fc8f01083527..8b0a15e285f9 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -668,8 +668,15 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) * Queueing this task back might have overloaded rq, check if we need * to kick someone away. */ - if (has_pushable_dl_tasks(rq)) + if (has_pushable_dl_tasks(rq)) { + /* + * Nothing relies on rq->lock after this, so its safe to drop + * rq->lock. 
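migrate_tasks() above has to drop rq->lock in order to take next->pi_lock in the documented order, and therefore revalidates that the task is still queued on this runqueue after relocking. A minimal sketch of that lock-order-then-revalidate idiom with hypothetical locks and state:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct item { int home; bool queued; };

	static DEFINE_RAW_SPINLOCK(outer);	/* analogue of p->pi_lock */
	static DEFINE_RAW_SPINLOCK(inner);	/* analogue of rq->lock   */

	/* We hold @inner, but the locking rules say @outer must be taken
	 * first, so drop, reacquire in order, and revalidate whatever may
	 * have changed while nothing was held. */
	static bool relock_and_revalidate(struct item *it, int my_home)
	{
		raw_spin_unlock(&inner);
		raw_spin_lock(&outer);
		raw_spin_lock(&inner);

		if (it->home != my_home || !it->queued) {
			raw_spin_unlock(&outer);	/* keep @inner for the caller */
			return false;			/* state changed while unlocked */
		}
		return true;				/* caller drops @outer when done */
	}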
+ */ + lockdep_unpin_lock(&rq->lock); push_dl_task(rq); + lockdep_pin_lock(&rq->lock); + } #endif unlock: @@ -1066,8 +1073,9 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags) int target = find_later_rq(p); if (target != -1 && - dl_time_before(p->dl.deadline, - cpu_rq(target)->dl.earliest_dl.curr)) + (dl_time_before(p->dl.deadline, + cpu_rq(target)->dl.earliest_dl.curr) || + (cpu_rq(target)->dl.dl_nr_running == 0))) cpu = target; } rcu_read_unlock(); @@ -1417,7 +1425,8 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) later_rq = cpu_rq(cpu); - if (!dl_time_before(task->dl.deadline, + if (later_rq->dl.dl_nr_running && + !dl_time_before(task->dl.deadline, later_rq->dl.earliest_dl.curr)) { /* * Target rq has tasks of equal or earlier deadline, diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 6e2e3483b1ec..9a5e60fe721a 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2363,7 +2363,7 @@ static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq) */ tg_weight = atomic_long_read(&tg->load_avg); tg_weight -= cfs_rq->tg_load_avg_contrib; - tg_weight += cfs_rq_load_avg(cfs_rq); + tg_weight += cfs_rq->load.weight; return tg_weight; } @@ -2373,7 +2373,7 @@ static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) long tg_weight, load, shares; tg_weight = calc_tg_weight(tg, cfs_rq); - load = cfs_rq_load_avg(cfs_rq); + load = cfs_rq->load.weight; shares = (tg->shares * load); if (tg_weight) @@ -2664,13 +2664,14 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq); /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) { - int decayed; struct sched_avg *sa = &cfs_rq->avg; + int decayed, removed = 0; if (atomic_long_read(&cfs_rq->removed_load_avg)) { long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0); sa->load_avg = max_t(long, sa->load_avg - r, 0); sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0); + removed = 1; } if (atomic_long_read(&cfs_rq->removed_util_avg)) { @@ -2688,7 +2689,7 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) cfs_rq->load_last_update_time_copy = sa->last_update_time; #endif - return decayed; + return decayed || removed; } /* Update task and its cfs_rq load average */ diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 8f177c73ae19..4a2ef5a02fd3 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -57,9 +57,11 @@ static inline int cpu_idle_poll(void) rcu_idle_enter(); trace_cpu_idle_rcuidle(0, smp_processor_id()); local_irq_enable(); + stop_critical_timings(); while (!tif_need_resched() && (cpu_idle_force_poll || tick_check_broadcast_expired())) cpu_relax(); + start_critical_timings(); trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); rcu_idle_exit(); return 1; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 68cda117574c..6d2a119c7ad9 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1078,9 +1078,10 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) * After ->on_cpu is cleared, the task can be moved to a different CPU. * We must ensure this doesn't happen until the switch is completely * finished. + * + * Pairs with the control dependency and rmb in try_to_wake_up(). 
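The finish_lock_switch() comment above pairs a release store on ->on_cpu with the acquire/control dependency in try_to_wake_up(), closing the window in which a waker could see on_cpu == 0 before the previous owner's stores. A minimal sketch of that release/acquire pairing with an illustrative flag:

	#include <asm/barrier.h>
	#include <asm/processor.h>

	struct handoff {
		int payload;
		int busy;
	};

	/* Writer: everything written before the release is visible to a
	 * reader that observes busy == 0 with acquire semantics. */
	static void finish_and_release(struct handoff *h)
	{
		h->payload = 42;
		smp_store_release(&h->busy, 0);
	}

	/* Reader: the acquire pairs with the release above. */
	static int wait_and_consume(struct handoff *h)
	{
		while (smp_load_acquire(&h->busy))
			cpu_relax();

		return h->payload;	/* guaranteed to observe 42 */
	}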
*/ - smp_wmb(); - prev->on_cpu = 0; + smp_store_release(&prev->on_cpu, 0); #endif #ifdef CONFIG_DEBUG_SPINLOCK /* this is a valid case when another task releases the spinlock */ diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 272d9322bc5d..052e02672d12 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -106,10 +106,9 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr) } EXPORT_SYMBOL_GPL(__wake_up_locked); -void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr, - void *key) +void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key) { - __wake_up_common(q, mode, nr, 0, key); + __wake_up_common(q, mode, 1, 0, key); } EXPORT_SYMBOL_GPL(__wake_up_locked_key); @@ -284,7 +283,7 @@ void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, if (!list_empty(&wait->task_list)) list_del_init(&wait->task_list); else if (waitqueue_active(q)) - __wake_up_locked_key(q, mode, 1, key); + __wake_up_locked_key(q, mode, key); spin_unlock_irqrestore(&q->lock, flags); } EXPORT_SYMBOL(abort_exclusive_wait); diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 50eb107f1198..a9b76a40319e 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -97,20 +97,6 @@ EXPORT_SYMBOL_GPL(clockevent_delta2ns); static int __clockevents_switch_state(struct clock_event_device *dev, enum clock_event_state state) { - /* Transition with legacy set_mode() callback */ - if (dev->set_mode) { - /* Legacy callback doesn't support new modes */ - if (state > CLOCK_EVT_STATE_ONESHOT) - return -ENOSYS; - /* - * 'clock_event_state' and 'clock_event_mode' have 1-to-1 - * mapping until *_ONESHOT, and so a simple cast will work. - */ - dev->set_mode((enum clock_event_mode)state, dev); - dev->mode = (enum clock_event_mode)state; - return 0; - } - if (dev->features & CLOCK_EVT_FEAT_DUMMY) return 0; @@ -204,12 +190,8 @@ int clockevents_tick_resume(struct clock_event_device *dev) { int ret = 0; - if (dev->set_mode) { - dev->set_mode(CLOCK_EVT_MODE_RESUME, dev); - dev->mode = CLOCK_EVT_MODE_RESUME; - } else if (dev->tick_resume) { + if (dev->tick_resume) ret = dev->tick_resume(dev); - } return ret; } @@ -460,26 +442,6 @@ int clockevents_unbind_device(struct clock_event_device *ced, int cpu) } EXPORT_SYMBOL_GPL(clockevents_unbind_device); -/* Sanity check of state transition callbacks */ -static int clockevents_sanity_check(struct clock_event_device *dev) -{ - /* Legacy set_mode() callback */ - if (dev->set_mode) { - /* We shouldn't be supporting new modes now */ - WARN_ON(dev->set_state_periodic || dev->set_state_oneshot || - dev->set_state_shutdown || dev->tick_resume || - dev->set_state_oneshot_stopped); - - BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); - return 0; - } - - if (dev->features & CLOCK_EVT_FEAT_DUMMY) - return 0; - - return 0; -} - /** * clockevents_register_device - register a clock event device * @dev: device to register @@ -488,8 +450,6 @@ void clockevents_register_device(struct clock_event_device *dev) { unsigned long flags; - BUG_ON(clockevents_sanity_check(dev)); - /* Initialize state to DETACHED */ clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED); diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 841b72f720e8..3a38775b50c2 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -217,7 +217,7 @@ static void clocksource_watchdog(unsigned long data) continue; /* Check the deviation from the watchdog clocksource. 
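With the legacy ->set_mode() path removed above, clockevents_register_device() only accepts the per-state callbacks. A hypothetical driver sketch showing the fields a new-style clock_event_device fills in (names and the empty callbacks are illustrative only):

	#include <linux/clockchips.h>

	static int dummy_shutdown(struct clock_event_device *ced)     { return 0; }
	static int dummy_set_periodic(struct clock_event_device *ced) { return 0; }
	static int dummy_set_oneshot(struct clock_event_device *ced)  { return 0; }

	static struct clock_event_device dummy_ced = {
		.name			= "dummy",
		.features		= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
		.set_state_shutdown	= dummy_shutdown,
		.set_state_periodic	= dummy_set_periodic,
		.set_state_oneshot	= dummy_set_oneshot,
		/* .set_mode is gone; there is no legacy callback left to fill in */
	};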
*/ - if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) { + if (abs64(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) { pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n", cs->name); pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n", diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index d11c55b6ab7d..4fcd99e12aa0 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -398,7 +398,6 @@ void tick_shutdown(unsigned int cpu) * the set mode function! */ clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED); - dev->mode = CLOCK_EVT_MODE_UNUSED; clockevents_exchange_device(dev, NULL); dev->event_handler = clockevents_handle_noop; td->evtdev = NULL; diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 3319e16f31e5..7c7ec4515983 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -290,16 +290,17 @@ static int __init tick_nohz_full_setup(char *str) __setup("nohz_full=", tick_nohz_full_setup); static int tick_nohz_cpu_down_callback(struct notifier_block *nfb, - unsigned long action, - void *hcpu) + unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned long)hcpu; switch (action & ~CPU_TASKS_FROZEN) { case CPU_DOWN_PREPARE: /* - * If we handle the timekeeping duty for full dynticks CPUs, - * we can't safely shutdown that CPU. + * The boot CPU handles housekeeping duty (unbound timers, + * workqueues, timekeeping, ...) on behalf of full dynticks + * CPUs. It must remain online when nohz full is enabled. */ if (tick_nohz_full_running && tick_do_timer_cpu == cpu) return NOTIFY_BAD; @@ -370,6 +371,12 @@ void __init tick_nohz_init(void) cpu_notifier(tick_nohz_cpu_down_callback, 0); pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n", cpumask_pr_args(tick_nohz_full_mask)); + + /* + * We need at least one CPU to handle housekeeping work such + * as timekeeping, unbound timers, workqueues, ... 
+ */ + WARN_ON_ONCE(cpumask_empty(housekeeping_mask)); } #endif diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index f6ee2e6b6f5d..44d2cc0436f4 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -1251,7 +1251,7 @@ void __init timekeeping_init(void) set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec); tk_set_wall_to_mono(tk, tmp); - timekeeping_update(tk, TK_MIRROR); + timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); write_seqcount_end(&tk_core.seq); raw_spin_unlock_irqrestore(&timekeeper_lock, flags); @@ -1614,7 +1614,7 @@ static __always_inline void timekeeping_freqadjust(struct timekeeper *tk, negative = (tick_error < 0); /* Sort out the magnitude of the correction */ - tick_error = abs(tick_error); + tick_error = abs64(tick_error); for (adj = 0; tick_error > interval; adj++) tick_error >>= 1; diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 129c96033e46..f75e35b60149 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -225,7 +225,7 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu) (unsigned long long) dev->min_delta_ns); SEQ_printf(m, " mult: %u\n", dev->mult); SEQ_printf(m, " shift: %u\n", dev->shift); - SEQ_printf(m, " mode: %d\n", dev->mode); + SEQ_printf(m, " mode: %d\n", clockevent_get_state(dev)); SEQ_printf(m, " next_event: %Ld nsecs\n", (unsigned long long) ktime_to_ns(dev->next_event)); @@ -233,40 +233,34 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu) print_name_offset(m, dev->set_next_event); SEQ_printf(m, "\n"); - if (dev->set_mode) { - SEQ_printf(m, " set_mode: "); - print_name_offset(m, dev->set_mode); + if (dev->set_state_shutdown) { + SEQ_printf(m, " shutdown: "); + print_name_offset(m, dev->set_state_shutdown); SEQ_printf(m, "\n"); - } else { - if (dev->set_state_shutdown) { - SEQ_printf(m, " shutdown: "); - print_name_offset(m, dev->set_state_shutdown); - SEQ_printf(m, "\n"); - } + } - if (dev->set_state_periodic) { - SEQ_printf(m, " periodic: "); - print_name_offset(m, dev->set_state_periodic); - SEQ_printf(m, "\n"); - } + if (dev->set_state_periodic) { + SEQ_printf(m, " periodic: "); + print_name_offset(m, dev->set_state_periodic); + SEQ_printf(m, "\n"); + } - if (dev->set_state_oneshot) { - SEQ_printf(m, " oneshot: "); - print_name_offset(m, dev->set_state_oneshot); - SEQ_printf(m, "\n"); - } + if (dev->set_state_oneshot) { + SEQ_printf(m, " oneshot: "); + print_name_offset(m, dev->set_state_oneshot); + SEQ_printf(m, "\n"); + } - if (dev->set_state_oneshot_stopped) { - SEQ_printf(m, " oneshot stopped: "); - print_name_offset(m, dev->set_state_oneshot_stopped); - SEQ_printf(m, "\n"); - } + if (dev->set_state_oneshot_stopped) { + SEQ_printf(m, " oneshot stopped: "); + print_name_offset(m, dev->set_state_oneshot_stopped); + SEQ_printf(m, "\n"); + } - if (dev->tick_resume) { - SEQ_printf(m, " resume: "); - print_name_offset(m, dev->tick_resume); - SEQ_printf(m, "\n"); - } + if (dev->tick_resume) { + SEQ_printf(m, " resume: "); + print_name_offset(m, dev->tick_resume); + SEQ_printf(m, "\n"); } SEQ_printf(m, " event_handler: "); diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index b746399ab59c..8abf1ba18085 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -85,9 +85,19 @@ check_stack(unsigned long ip, unsigned long *stack) if (!object_is_on_stack(stack)) return; + /* Can't do this from NMI context (can cause deadlocks) */ + if (in_nmi()) + return; + local_irq_save(flags); 
arch_spin_lock(&max_stack_lock); + /* + * RCU may not be watching, make it see us. + * The stack trace code uses rcu_sched. + */ + rcu_irq_enter(); + /* In case another CPU set the tracer_frame on us */ if (unlikely(!frame_size)) this_size -= tracer_frame; @@ -169,6 +179,7 @@ check_stack(unsigned long ip, unsigned long *stack) } out: + rcu_irq_exit(); arch_spin_unlock(&max_stack_lock); local_irq_restore(flags); } diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ca71582fcfab..bcb14cafe007 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1458,13 +1458,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, timer_stats_timer_set_start_info(&dwork->timer); dwork->wq = wq; + /* timer isn't guaranteed to run in this cpu, record earlier */ + if (cpu == WORK_CPU_UNBOUND) + cpu = raw_smp_processor_id(); dwork->cpu = cpu; timer->expires = jiffies + delay; - if (unlikely(cpu != WORK_CPU_UNBOUND)) - add_timer_on(timer, cpu); - else - add_timer(timer); + add_timer_on(timer, cpu); } /** diff --git a/lib/Kconfig b/lib/Kconfig index 2e491ac15622..f0df318104e7 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -220,6 +220,7 @@ config ZLIB_INFLATE config ZLIB_DEFLATE tristate + select BITREVERSE config LZO_COMPRESS tristate diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index ab76b99adc85..1d1521c26302 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -197,6 +197,7 @@ config ENABLE_MUST_CHECK config FRAME_WARN int "Warn for stack frames larger than (needs gcc 4.4)" range 0 8192 + default 0 if KASAN default 1024 if !64BIT default 2048 if 64BIT help diff --git a/lib/fault-inject.c b/lib/fault-inject.c index f1cdeb024d17..6a823a53e357 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c @@ -44,7 +44,7 @@ static void fail_dump(struct fault_attr *attr) printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n" "name %pd, interval %lu, probability %lu, " "space %d, times %d\n", attr->dname, - attr->probability, attr->interval, + attr->interval, attr->probability, atomic_read(&attr->space), atomic_read(&attr->times)); if (attr->verbose > 1) diff --git a/lib/iommu-common.c b/lib/iommu-common.c index ff19f66d3f7f..b1c93e94ca7a 100644 --- a/lib/iommu-common.c +++ b/lib/iommu-common.c @@ -21,8 +21,7 @@ static DEFINE_PER_CPU(unsigned int, iommu_hash_common); static inline bool need_flush(struct iommu_map_table *iommu) { - return (iommu->lazy_flush != NULL && - (iommu->flags & IOMMU_NEED_FLUSH) != 0); + return ((iommu->flags & IOMMU_NEED_FLUSH) != 0); } static inline void set_flush(struct iommu_map_table *iommu) @@ -211,7 +210,8 @@ unsigned long iommu_tbl_range_alloc(struct device *dev, goto bail; } } - if (n < pool->hint || need_flush(iommu)) { + if (iommu->lazy_flush && + (n < pool->hint || need_flush(iommu))) { clear_flush(iommu); iommu->lazy_flush(iommu); } diff --git a/lib/rhashtable.c b/lib/rhashtable.c index cc0c69710dcf..a54ff8949f91 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -187,10 +187,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash) head = rht_dereference_bucket(new_tbl->buckets[new_hash], new_tbl, new_hash); - if (rht_is_a_nulls(head)) - INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash); - else - RCU_INIT_POINTER(entry->next, head); + RCU_INIT_POINTER(entry->next, head); rcu_assign_pointer(new_tbl->buckets[new_hash], entry); spin_unlock(new_bucket_lock); diff --git a/lib/string.c b/lib/string.c index 13d1e84ddb80..84775ba873b9 100644 --- a/lib/string.c +++ b/lib/string.c @@ -27,6 +27,10 @@ #include 
<linux/bug.h> #include <linux/errno.h> +#include <asm/byteorder.h> +#include <asm/word-at-a-time.h> +#include <asm/page.h> + #ifndef __HAVE_ARCH_STRNCASECMP /** * strncasecmp - Case insensitive, length-limited string comparison @@ -146,6 +150,91 @@ size_t strlcpy(char *dest, const char *src, size_t size) EXPORT_SYMBOL(strlcpy); #endif +#ifndef __HAVE_ARCH_STRSCPY +/** + * strscpy - Copy a C-string into a sized buffer + * @dest: Where to copy the string to + * @src: Where to copy the string from + * @count: Size of destination buffer + * + * Copy the string, or as much of it as fits, into the dest buffer. + * The routine returns the number of characters copied (not including + * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough. + * The behavior is undefined if the string buffers overlap. + * The destination buffer is always NUL terminated, unless it's zero-sized. + * + * Preferred to strlcpy() since the API doesn't require reading memory + * from the src string beyond the specified "count" bytes, and since + * the return value is easier to error-check than strlcpy()'s. + * In addition, the implementation is robust to the string changing out + * from underneath it, unlike the current strlcpy() implementation. + * + * Preferred to strncpy() since it always returns a valid string, and + * doesn't unnecessarily force the tail of the destination buffer to be + * zeroed. If the zeroing is desired, it's likely cleaner to use strscpy() + * with an overflow test, then just memset() the tail of the dest buffer. + */ +ssize_t strscpy(char *dest, const char *src, size_t count) +{ + const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; + size_t max = count; + long res = 0; + + if (count == 0) + return -E2BIG; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + /* + * If src is unaligned, don't cross a page boundary, + * since we don't know if the next page is mapped. + */ + if ((long)src & (sizeof(long) - 1)) { + size_t limit = PAGE_SIZE - ((long)src & (PAGE_SIZE - 1)); + if (limit < max) + max = limit; + } +#else + /* If src or dest is unaligned, don't do word-at-a-time. */ + if (((long) dest | (long) src) & (sizeof(long) - 1)) + max = 0; +#endif + + while (max >= sizeof(unsigned long)) { + unsigned long c, data; + + c = *(unsigned long *)(src+res); + if (has_zero(c, &data, &constants)) { + data = prep_zero_mask(c, data, &constants); + data = create_zero_mask(data); + *(unsigned long *)(dest+res) = c & zero_bytemask(data); + return res + find_zero(data); + } + *(unsigned long *)(dest+res) = c; + res += sizeof(unsigned long); + count -= sizeof(unsigned long); + max -= sizeof(unsigned long); + } + + while (count) { + char c; + + c = src[res]; + dest[res] = c; + if (!c) + return res; + res++; + count--; + } + + /* Hit buffer length without finding a NUL; force NUL-termination. */ + if (res) + dest[res-1] = '\0'; + + return -E2BIG; +} +EXPORT_SYMBOL(strscpy); +#endif + #ifndef __HAVE_ARCH_STRCAT /** * strcat - Append one %NUL-terminated string to another diff --git a/lib/string_helpers.c b/lib/string_helpers.c index 54036ce2e2dd..5939f63d90cd 100644 --- a/lib/string_helpers.c +++ b/lib/string_helpers.c @@ -59,7 +59,11 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units, } exp = divisor[units] / (u32)blk_size; - if (size >= exp) { + /* + * size must be strictly greater than exp here to ensure that remainder + * is greater than divisor[units] coming out of the if below. 
+ */ + if (size > exp) { remainder = do_div(size, divisor[units]); remainder *= blk_size; i++; diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 2df8ddcb0ca0..619984fc07ec 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -480,6 +480,10 @@ static void cgwb_release_workfn(struct work_struct *work) release_work); struct backing_dev_info *bdi = wb->bdi; + spin_lock_irq(&cgwb_lock); + list_del_rcu(&wb->bdi_node); + spin_unlock_irq(&cgwb_lock); + wb_shutdown(wb); css_put(wb->memcg_css); @@ -575,6 +579,7 @@ static int cgwb_create(struct backing_dev_info *bdi, ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb); if (!ret) { atomic_inc(&bdi->usage_cnt); + list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list); list_add(&wb->memcg_node, memcg_cgwb_list); list_add(&wb->blkcg_node, blkcg_cgwb_list); css_get(memcg_css); @@ -676,7 +681,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi) static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { struct radix_tree_iter iter; - struct bdi_writeback_congested *congested, *congested_n; + struct rb_node *rbn; void **slot; WARN_ON(test_bit(WB_registered, &bdi->wb.state)); @@ -686,9 +691,11 @@ static void cgwb_bdi_destroy(struct backing_dev_info *bdi) radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0) cgwb_kill(*slot); - rbtree_postorder_for_each_entry_safe(congested, congested_n, - &bdi->cgwb_congested_tree, rb_node) { - rb_erase(&congested->rb_node, &bdi->cgwb_congested_tree); + while ((rbn = rb_first(&bdi->cgwb_congested_tree))) { + struct bdi_writeback_congested *congested = + rb_entry(rbn, struct bdi_writeback_congested, rb_node); + + rb_erase(rbn, &bdi->cgwb_congested_tree); congested->bdi = NULL; /* mark @congested unlinked */ } @@ -764,15 +771,22 @@ static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { } int bdi_init(struct backing_dev_info *bdi) { + int ret; + bdi->dev = NULL; bdi->min_ratio = 0; bdi->max_ratio = 100; bdi->max_prop_frac = FPROP_FRAC_BASE; INIT_LIST_HEAD(&bdi->bdi_list); + INIT_LIST_HEAD(&bdi->wb_list); init_waitqueue_head(&bdi->wb_waitq); - return cgwb_bdi_init(bdi); + ret = cgwb_bdi_init(bdi); + + list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list); + + return ret; } EXPORT_SYMBOL(bdi_init); @@ -823,7 +837,7 @@ static void bdi_remove_from_list(struct backing_dev_info *bdi) synchronize_rcu_expedited(); } -void bdi_destroy(struct backing_dev_info *bdi) +void bdi_unregister(struct backing_dev_info *bdi) { /* make sure nobody finds us on the bdi_list anymore */ bdi_remove_from_list(bdi); @@ -835,9 +849,19 @@ void bdi_destroy(struct backing_dev_info *bdi) device_unregister(bdi->dev); bdi->dev = NULL; } +} +void bdi_exit(struct backing_dev_info *bdi) +{ + WARN_ON_ONCE(bdi->dev); wb_exit(&bdi->wb); } + +void bdi_destroy(struct backing_dev_info *bdi) +{ + bdi_unregister(bdi); + bdi_exit(bdi); +} EXPORT_SYMBOL(bdi_destroy); /* @@ -361,7 +361,7 @@ err: * This function allocates part of contiguous memory on specific * contiguous memory area. 
*/ -struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align) +struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align) { unsigned long mask, offset, pfn, start = 0; unsigned long bitmap_maxno, bitmap_no, bitmap_count; @@ -371,7 +371,7 @@ struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align) if (!cma || !cma->count) return NULL; - pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma, + pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma, count, align); if (!count) diff --git a/mm/dmapool.c b/mm/dmapool.c index 71a8998cd03a..312a716fa14c 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -394,7 +394,7 @@ static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) list_for_each_entry(page, &pool->page_list, page_list) { if (dma < page->dma) continue; - if (dma < (page->dma + pool->allocation)) + if ((dma - page->dma) < pool->allocation) return page; } return NULL; diff --git a/mm/filemap.c b/mm/filemap.c index 72940fb38666..327910c2400c 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2473,6 +2473,26 @@ ssize_t generic_perform_write(struct file *file, iov_iter_count(i)); again: + /* + * Bring in the user page that we will copy from _first_. + * Otherwise there's a nasty deadlock on copying from the + * same page as we're writing to, without it being marked + * up-to-date. + * + * Not only is this an optimisation, but it is also required + * to check that the address is actually valid, when atomic + * usercopies are used, below. + */ + if (unlikely(iov_iter_fault_in_readable(i, bytes))) { + status = -EFAULT; + break; + } + + if (fatal_signal_pending(current)) { + status = -EINTR; + break; + } + status = a_ops->write_begin(file, mapping, pos, bytes, flags, &page, &fsdata); if (unlikely(status < 0)) @@ -2480,17 +2500,8 @@ again: if (mapping_writably_mapped(mapping)) flush_dcache_page(page); - /* - * 'page' is now locked. If we are trying to copy from a - * mapping of 'page' in userspace, the copy might fault and - * would need PageUptodate() to complete. But, page can not be - * made Uptodate without acquiring the page lock, which we hold. - * Deadlock. Avoid with pagefault_disable(). Fix up below with - * iov_iter_fault_in_readable(). - */ - pagefault_disable(); + copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); - pagefault_enable(); flush_dcache_page(page); status = a_ops->write_end(file, mapping, pos, bytes, copied, @@ -2513,24 +2524,12 @@ again: */ bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, iov_iter_single_seg_count(i)); - /* - * This is the fallback to recover if the copy from - * userspace above faults. - */ - if (unlikely(iov_iter_fault_in_readable(i, bytes))) { - status = -EFAULT; - break; - } goto again; } pos += copied; written += copied; balance_dirty_pages_ratelimited(mapping); - if (fatal_signal_pending(current)) { - status = -EINTR; - break; - } } while (iov_iter_count(i)); return written ? 
written : status; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 4b06b8db9df2..bbac913f96bc 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2206,7 +2206,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++, address += PAGE_SIZE) { pte_t pteval = *_pte; - if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { + if (pte_none(pteval) || (pte_present(pteval) && + is_zero_pfn(pte_pfn(pteval)))) { if (!userfaultfd_armed(vma) && ++none_or_zero <= khugepaged_max_ptes_none) continue; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 999fb0aef8f1..9cc773483624 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3202,6 +3202,14 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, continue; /* + * Shared VMAs have their own reserves and do not affect + * MAP_PRIVATE accounting but it is possible that a shared + * VMA is using the same page so check and skip such VMAs. + */ + if (iter_vma->vm_flags & VM_MAYSHARE) + continue; + + /* * Unmap the page from other VMAs without their own reserves. * They get marked to be SIGKILLed if they fault in these * areas. This is because a future no-page fault on this VMA diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 7b28e9cdf1c7..8da211411b57 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -135,12 +135,11 @@ static __always_inline bool memory_is_poisoned_16(unsigned long addr) if (unlikely(*shadow_addr)) { u16 shadow_first_bytes = *(u16 *)shadow_addr; - s8 last_byte = (addr + 15) & KASAN_SHADOW_MASK; if (unlikely(shadow_first_bytes)) return true; - if (likely(!last_byte)) + if (likely(IS_ALIGNED(addr, 8))) return false; return memory_is_poisoned_1(addr + 15); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 6ddaeba34e09..c57c4423c688 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -644,12 +644,14 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) } /* + * Return page count for single (non recursive) @memcg. + * * Implementation Note: reading percpu statistics for memcg. * * Both of vmstat[] and percpu_counter has threshold and do periodic * synchronization to implement "quick" read. There are trade-off between * reading cost and precision of value. Then, we may have a chance to implement - * a periodic synchronizion of counter in memcg's counter. + * a periodic synchronization of counter in memcg's counter. * * But this _read() function is used for user interface now. The user accounts * memory usage by memory cgroup and he _always_ requires exact value because @@ -659,17 +661,24 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) * * If there are kernel internal actions which can make use of some not-exact * value, and reading all cpu value can be performance bottleneck in some - * common workload, threashold and synchonization as vmstat[] should be + * common workload, threshold and synchronization as vmstat[] should be * implemented. */ -static long mem_cgroup_read_stat(struct mem_cgroup *memcg, - enum mem_cgroup_stat_index idx) +static unsigned long +mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) { long val = 0; int cpu; + /* Per-cpu values can be negative, use a signed accumulator */ for_each_possible_cpu(cpu) val += per_cpu(memcg->stat->count[idx], cpu); + /* + * Summing races with updates, so val may be negative. Avoid exposing + * transient negative values. 
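/*
 * The mm/dmapool.c pool_find_page() change further above replaces
 * "dma < page->dma + pool->allocation" with "(dma - page->dma) < pool->allocation":
 * once dma >= page->dma is known, the subtraction cannot wrap, whereas the
 * addition could overflow near the top of the DMA address space. A stand-alone
 * sketch of the same range check (helper name invented):
 */
#include <linux/types.h>

static bool dma_addr_in_block(dma_addr_t dma, dma_addr_t base, size_t size)
{
	return dma >= base && (dma - base) < size;
}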
+ */ + if (val < 0) + val = 0; return val; } @@ -1254,7 +1263,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) continue; - pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i], + pr_cont(" %s:%luKB", mem_cgroup_stat_names[i], K(mem_cgroup_read_stat(iter, i))); } @@ -2819,14 +2828,11 @@ static unsigned long tree_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) { struct mem_cgroup *iter; - long val = 0; + unsigned long val = 0; - /* Per-cpu values can be negative, use a signed accumulator */ for_each_mem_cgroup_tree(iter, memcg) val += mem_cgroup_read_stat(iter, idx); - if (val < 0) /* race ? */ - val = 0; return val; } @@ -3169,7 +3175,7 @@ static int memcg_stat_show(struct seq_file *m, void *v) for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) continue; - seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i], + seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i], mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); } @@ -3194,13 +3200,13 @@ static int memcg_stat_show(struct seq_file *m, void *v) (u64)memsw * PAGE_SIZE); for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { - long long val = 0; + unsigned long long val = 0; if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) continue; for_each_mem_cgroup_tree(mi, memcg) val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE; - seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val); + seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val); } for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { @@ -3381,6 +3387,7 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, ret = page_counter_memparse(args, "-1", &threshold); if (ret) return ret; + threshold <<= PAGE_SHIFT; mutex_lock(&memcg->thresholds_lock); @@ -3734,44 +3741,43 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) /** * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg * @wb: bdi_writeback in question - * @pavail: out parameter for number of available pages + * @pfilepages: out parameter for number of file pages + * @pheadroom: out parameter for number of allocatable pages according to memcg * @pdirty: out parameter for number of dirty pages * @pwriteback: out parameter for number of pages under writeback * - * Determine the numbers of available, dirty, and writeback pages in @wb's - * memcg. Dirty and writeback are self-explanatory. Available is a bit - * more involved. + * Determine the numbers of file, headroom, dirty, and writeback pages in + * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom + * is a bit more involved. * - * A memcg's headroom is "min(max, high) - used". The available memory is - * calculated as the lowest headroom of itself and the ancestors plus the - * number of pages already being used for file pages. Note that this - * doesn't consider the actual amount of available memory in the system. - * The caller should further cap *@pavail accordingly. + * A memcg's headroom is "min(max, high) - used". In the hierarchy, the + * headroom is calculated as the lowest headroom of itself and the + * ancestors. Note that this doesn't consider the actual amount of + * available memory in the system. The caller should further cap + * *@pheadroom accordingly. 
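/*
 * Minimal sketch of the accumulate-then-clamp idiom from the
 * mem_cgroup_read_stat() change above: per-cpu deltas are summed in a signed
 * accumulator, and a transient negative total (a race with concurrent updates)
 * is reported as zero rather than as a huge unsigned value. The counter layout
 * here is hypothetical.
 */
#include <linux/percpu.h>
#include <linux/cpumask.h>

static unsigned long percpu_stat_sum(long __percpu *counter)
{
	long val = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		val += *per_cpu_ptr(counter, cpu);

	return val < 0 ? 0 : val;
}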
*/ -void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail, - unsigned long *pdirty, unsigned long *pwriteback) +void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, + unsigned long *pheadroom, unsigned long *pdirty, + unsigned long *pwriteback) { struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); struct mem_cgroup *parent; - unsigned long head_room = PAGE_COUNTER_MAX; - unsigned long file_pages; *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY); /* this should eventually include NR_UNSTABLE_NFS */ *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); + *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | + (1 << LRU_ACTIVE_FILE)); + *pheadroom = PAGE_COUNTER_MAX; - file_pages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | - (1 << LRU_ACTIVE_FILE)); while ((parent = parent_mem_cgroup(memcg))) { unsigned long ceiling = min(memcg->memory.limit, memcg->high); unsigned long used = page_counter_read(&memcg->memory); - head_room = min(head_room, ceiling - min(ceiling, used)); + *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); memcg = parent; } - - *pavail = file_pages + head_room; } #else /* CONFIG_CGROUP_WRITEBACK */ @@ -4179,7 +4185,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void) if (memcg_wb_domain_init(memcg, GFP_KERNEL)) goto out_free_stat; - spin_lock_init(&memcg->pcp_counter_lock); return memcg; out_free_stat: diff --git a/mm/memory.c b/mm/memory.c index 9cb27470fee9..deb679c31f2a 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2426,6 +2426,8 @@ void unmap_mapping_range(struct address_space *mapping, if (details.last_index < details.first_index) details.last_index = ULONG_MAX; + + /* DAX uses i_mmap_lock to serialise file truncate vs page fault */ i_mmap_lock_write(mapping); if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap))) unmap_mapping_range_tree(&mapping->i_mmap, &details); diff --git a/mm/migrate.c b/mm/migrate.c index c3cb566af3e2..842ecd7aaf7f 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -740,6 +740,15 @@ static int move_to_new_page(struct page *newpage, struct page *page, if (PageSwapBacked(page)) SetPageSwapBacked(newpage); + /* + * Indirectly called below, migrate_page_copy() copies PG_dirty and thus + * needs newpage's memcg set to transfer memcg dirty page accounting. + * So perform memcg migration in two steps: + * 1. set newpage->mem_cgroup (here) + * 2. 
clear page->mem_cgroup (below) + */ + set_page_memcg(newpage, page_memcg(page)); + mapping = page_mapping(page); if (!mapping) rc = migrate_page(mapping, newpage, page, mode); @@ -756,9 +765,10 @@ static int move_to_new_page(struct page *newpage, struct page *page, rc = fallback_migrate_page(mapping, newpage, page, mode); if (rc != MIGRATEPAGE_SUCCESS) { + set_page_memcg(newpage, NULL); newpage->mapping = NULL; } else { - mem_cgroup_migrate(page, newpage, false); + set_page_memcg(page, NULL); if (page_was_mapped) remove_migration_ptes(page, newpage); page->mapping = NULL; @@ -1075,7 +1085,7 @@ out: if (rc != MIGRATEPAGE_SUCCESS && put_new_page) put_new_page(new_hpage, private); else - put_page(new_hpage); + putback_active_hugepage(new_hpage); if (result) { if (rc) diff --git a/mm/mmap.c b/mm/mmap.c index 971dd2cb77d2..79bcc9f92e48 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -612,8 +612,6 @@ static unsigned long count_vma_pages_range(struct mm_struct *mm, void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, struct rb_node **rb_link, struct rb_node *rb_parent) { - WARN_ONCE(vma->vm_file && !vma->vm_ops, "missing vma->vm_ops"); - /* Update tracking information for the gap following the new vma. */ if (vma->vm_next) vma_gap_update(vma->vm_next); @@ -1492,13 +1490,14 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) int vma_wants_writenotify(struct vm_area_struct *vma) { vm_flags_t vm_flags = vma->vm_flags; + const struct vm_operations_struct *vm_ops = vma->vm_ops; /* If it was private or non-writable, the write bit is already clear */ if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) return 0; /* The backer wishes to know when pages are first written to? */ - if (vma->vm_ops && vma->vm_ops->page_mkwrite) + if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite)) return 1; /* The open routine did something to the protections that pgprot_modify @@ -1638,12 +1637,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr, */ WARN_ON_ONCE(addr != vma->vm_start); - /* All file mapping must have ->vm_ops set */ - if (!vma->vm_ops) { - static const struct vm_operations_struct dummy_ops = {}; - vma->vm_ops = &dummy_ops; - } - addr = vma->vm_start; vm_flags = vma->vm_flags; } else if (vm_flags & VM_SHARED) { diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 0a931cdd4f6b..2c90357c34ea 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -145,9 +145,6 @@ struct dirty_throttle_control { unsigned long pos_ratio; }; -#define DTC_INIT_COMMON(__wb) .wb = (__wb), \ - .wb_completions = &(__wb)->completions - /* * Length of period for aging writeout fractions of bdis. This is an * arbitrarily chosen number. 
The longer the period, the slower fractions will @@ -157,12 +154,16 @@ struct dirty_throttle_control { #ifdef CONFIG_CGROUP_WRITEBACK -#define GDTC_INIT(__wb) .dom = &global_wb_domain, \ - DTC_INIT_COMMON(__wb) +#define GDTC_INIT(__wb) .wb = (__wb), \ + .dom = &global_wb_domain, \ + .wb_completions = &(__wb)->completions + #define GDTC_INIT_NO_WB .dom = &global_wb_domain -#define MDTC_INIT(__wb, __gdtc) .dom = mem_cgroup_wb_domain(__wb), \ - .gdtc = __gdtc, \ - DTC_INIT_COMMON(__wb) + +#define MDTC_INIT(__wb, __gdtc) .wb = (__wb), \ + .dom = mem_cgroup_wb_domain(__wb), \ + .wb_completions = &(__wb)->memcg_completions, \ + .gdtc = __gdtc static bool mdtc_valid(struct dirty_throttle_control *dtc) { @@ -213,7 +214,8 @@ static void wb_min_max_ratio(struct bdi_writeback *wb, #else /* CONFIG_CGROUP_WRITEBACK */ -#define GDTC_INIT(__wb) DTC_INIT_COMMON(__wb) +#define GDTC_INIT(__wb) .wb = (__wb), \ + .wb_completions = &(__wb)->completions #define GDTC_INIT_NO_WB #define MDTC_INIT(__wb, __gdtc) @@ -682,13 +684,19 @@ static unsigned long hard_dirty_limit(struct wb_domain *dom, return max(thresh, dom->dirty_limit); } -/* memory available to a memcg domain is capped by system-wide clean memory */ -static void mdtc_cap_avail(struct dirty_throttle_control *mdtc) +/* + * Memory which can be further allocated to a memcg domain is capped by + * system-wide clean memory excluding the amount being used in the domain. + */ +static void mdtc_calc_avail(struct dirty_throttle_control *mdtc, + unsigned long filepages, unsigned long headroom) { struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc); - unsigned long clean = gdtc->avail - min(gdtc->avail, gdtc->dirty); + unsigned long clean = filepages - min(filepages, mdtc->dirty); + unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty); + unsigned long other_clean = global_clean - min(global_clean, clean); - mdtc->avail = min(mdtc->avail, clean); + mdtc->avail = filepages + min(headroom, other_clean); } /** @@ -1562,16 +1570,16 @@ static void balance_dirty_pages(struct address_space *mapping, } if (mdtc) { - unsigned long writeback; + unsigned long filepages, headroom, writeback; /* * If @wb belongs to !root memcg, repeat the same * basic calculations for the memcg domain. 
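/*
 * Worked example for mdtc_calc_avail() above (numbers invented): with
 * filepages = 200, mdtc->dirty = 50, gdtc->avail = 1000, gdtc->dirty = 300
 * and headroom = 400:
 *   clean        = 200  - min(200, 50)    = 150
 *   global_clean = 1000 - min(1000, 300)  = 700
 *   other_clean  = 700  - min(700, 150)   = 550
 *   mdtc->avail  = 200  + min(400, 550)   = 600
 * i.e. the memcg may dirty its own file pages plus whatever clean memory the
 * rest of the system can still spare, capped by the memcg's headroom.
 */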
*/ - mem_cgroup_wb_stats(wb, &mdtc->avail, &mdtc->dirty, - &writeback); - mdtc_cap_avail(mdtc); + mem_cgroup_wb_stats(wb, &filepages, &headroom, + &mdtc->dirty, &writeback); mdtc->dirty += writeback; + mdtc_calc_avail(mdtc, filepages, headroom); domain_dirty_limits(mdtc); @@ -1893,10 +1901,11 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb) return true; if (mdtc) { - unsigned long writeback; + unsigned long filepages, headroom, writeback; - mem_cgroup_wb_stats(wb, &mdtc->avail, &mdtc->dirty, &writeback); - mdtc_cap_avail(mdtc); + mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty, + &writeback); + mdtc_calc_avail(mdtc, filepages, headroom); domain_dirty_limits(mdtc); /* ditto, ignore writeback */ if (mdtc->dirty > mdtc->bg_thresh) @@ -1956,7 +1965,6 @@ void laptop_mode_timer_fn(unsigned long data) int nr_pages = global_page_state(NR_FILE_DIRTY) + global_page_state(NR_UNSTABLE_NFS); struct bdi_writeback *wb; - struct wb_iter iter; /* * We want to write everything out, not just down to the dirty @@ -1965,10 +1973,12 @@ void laptop_mode_timer_fn(unsigned long data) if (!bdi_has_dirty_io(&q->backing_dev_info)) return; - bdi_for_each_wb(wb, &q->backing_dev_info, &iter, 0) + rcu_read_lock(); + list_for_each_entry_rcu(wb, &q->backing_dev_info.wb_list, bdi_node) if (wb_has_dirty_io(wb)) wb_start_writeback(wb, nr_pages, true, WB_REASON_LAPTOP_TIMER); + rcu_read_unlock(); } /* diff --git a/mm/readahead.c b/mm/readahead.c index 60cd846a9a44..24682f6f4cfd 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -89,8 +89,8 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages, while (!list_empty(pages)) { page = list_to_page(pages); list_del(&page->lru); - if (add_to_page_cache_lru(page, mapping, - page->index, GFP_KERNEL)) { + if (add_to_page_cache_lru(page, mapping, page->index, + GFP_KERNEL & mapping_gfp_mask(mapping))) { read_cache_pages_invalidate_page(mapping, page); continue; } @@ -127,8 +127,8 @@ static int read_pages(struct address_space *mapping, struct file *filp, for (page_idx = 0; page_idx < nr_pages; page_idx++) { struct page *page = list_to_page(pages); list_del(&page->lru); - if (!add_to_page_cache_lru(page, mapping, - page->index, GFP_KERNEL)) { + if (!add_to_page_cache_lru(page, mapping, page->index, + GFP_KERNEL & mapping_gfp_mask(mapping))) { mapping->a_ops->readpage(filp, page); } page_cache_release(page); diff --git a/mm/slab.c b/mm/slab.c index c77ebe6cc87c..4fcc5dd8d5a6 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2190,9 +2190,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) size += BYTES_PER_WORD; } #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) - if (size >= kmalloc_size(INDEX_NODE + 1) - && cachep->object_size > cache_line_size() - && ALIGN(size, cachep->align) < PAGE_SIZE) { + /* + * To activate debug pagealloc, off-slab management is necessary + * requirement. In early phase of initialization, small sized slab + * doesn't get initialized so it would not be possible. So, we need + * to check size >= 256. It guarantees that all necessary small + * sized slab is initialized in current slab initialization sequence. 
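/*
 * The bdi->wb_list introduced above follows the standard RCU list pattern:
 * writers add and remove entries with the _rcu list helpers under cgwb_lock,
 * readers walk the list under rcu_read_lock(). The two halves, side by side
 * (condensed from the mm/backing-dev.c and mm/page-writeback.c hunks above):
 */

/* writer side, cf. cgwb_create() / cgwb_release_workfn() */
spin_lock_irq(&cgwb_lock);
list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);	/* or list_del_rcu() */
spin_unlock_irq(&cgwb_lock);

/* reader side, cf. laptop_mode_timer_fn() */
rcu_read_lock();
list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
	if (wb_has_dirty_io(wb))
		wb_start_writeback(wb, nr_pages, true, WB_REASON_LAPTOP_TIMER);
rcu_read_unlock();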
+ */ + if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) && + size >= 256 && cachep->object_size > cache_line_size() && + ALIGN(size, cachep->align) < PAGE_SIZE) { cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align); size = PAGE_SIZE; } diff --git a/mm/vmscan.c b/mm/vmscan.c index 2d978b28a410..7f63a9381f71 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -175,7 +175,7 @@ static bool sane_reclaim(struct scan_control *sc) if (!memcg) return true; #ifdef CONFIG_CGROUP_WRITEBACK - if (memcg->css.cgroup) + if (cgroup_on_dfl(memcg->css.cgroup)) return true; #endif return false; diff --git a/mm/vmstat.c b/mm/vmstat.c index 4f5cd974e11a..fbf14485a049 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1363,15 +1363,16 @@ static cpumask_var_t cpu_stat_off; static void vmstat_update(struct work_struct *w) { - if (refresh_cpu_vm_stats()) + if (refresh_cpu_vm_stats()) { /* * Counters were updated so we expect more updates * to occur in the future. Keep on running the * update worker thread. */ - schedule_delayed_work(this_cpu_ptr(&vmstat_work), + schedule_delayed_work_on(smp_processor_id(), + this_cpu_ptr(&vmstat_work), round_jiffies_relative(sysctl_stat_interval)); - else { + } else { /* * We did not update any counters so the app may be in * a mode where it does not cause counter updates. diff --git a/net/atm/clip.c b/net/atm/clip.c index 17e55dfecbe2..e07f551a863c 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -317,6 +317,9 @@ static int clip_constructor(struct neighbour *neigh) static int clip_encap(struct atm_vcc *vcc, int mode) { + if (!CLIP_VCC(vcc)) + return -EBADFD; + CLIP_VCC(vcc)->encap = mode; return 0; } diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index b4548c739a64..2dda439c8cb8 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -91,10 +91,50 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn) * autoconnect action, remove them completely. If they are, just unmark * them as waiting for connection, by clearing explicit_connect field. */ - if (params->auto_connect == HCI_AUTO_CONN_EXPLICIT) + params->explicit_connect = false; + + list_del_init(¶ms->action); + + switch (params->auto_connect) { + case HCI_AUTO_CONN_EXPLICIT: hci_conn_params_del(conn->hdev, bdaddr, bdaddr_type); - else - params->explicit_connect = false; + /* return instead of break to avoid duplicate scan update */ + return; + case HCI_AUTO_CONN_DIRECT: + case HCI_AUTO_CONN_ALWAYS: + list_add(¶ms->action, &conn->hdev->pend_le_conns); + break; + case HCI_AUTO_CONN_REPORT: + list_add(¶ms->action, &conn->hdev->pend_le_reports); + break; + default: + break; + } + + hci_update_background_scan(conn->hdev); +} + +static void hci_conn_cleanup(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + + if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags)) + hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type); + + hci_chan_list_flush(conn); + + hci_conn_hash_del(hdev, conn); + + if (hdev->notify) + hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); + + hci_conn_del_sysfs(conn); + + debugfs_remove_recursive(conn->debugfs); + + hci_dev_put(hdev); + + hci_conn_put(conn); } /* This function requires the caller holds hdev->lock */ @@ -102,8 +142,13 @@ static void hci_connect_le_scan_remove(struct hci_conn *conn) { hci_connect_le_scan_cleanup(conn); - hci_conn_hash_del(conn->hdev, conn); - hci_update_background_scan(conn->hdev); + /* We can't call hci_conn_del here since that would deadlock + * with trying to call cancel_delayed_work_sync(&conn->disc_work). 
+ * Instead, call just hci_conn_cleanup() which contains the bare + * minimum cleanup operations needed for a connection in this + * state. + */ + hci_conn_cleanup(conn); } static void hci_acl_create_connection(struct hci_conn *conn) @@ -581,27 +626,17 @@ int hci_conn_del(struct hci_conn *conn) } } - hci_chan_list_flush(conn); - if (conn->amp_mgr) amp_mgr_put(conn->amp_mgr); - hci_conn_hash_del(hdev, conn); - if (hdev->notify) - hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); - skb_queue_purge(&conn->data_q); - hci_conn_del_sysfs(conn); - - debugfs_remove_recursive(conn->debugfs); - - if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags)) - hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type); - - hci_dev_put(hdev); - - hci_conn_put(conn); + /* Remove the connection from the list and cleanup its remaining + * state. This is a separate function since for some cases like + * BT_CONNECT_SCAN we *only* want the cleanup part without the + * rest of hci_conn_del. + */ + hci_conn_cleanup(conn); return 0; } @@ -973,15 +1008,23 @@ static int hci_explicit_conn_params_set(struct hci_request *req, if (is_connected(hdev, addr, addr_type)) return -EISCONN; - params = hci_conn_params_add(hdev, addr, addr_type); - if (!params) - return -EIO; + params = hci_conn_params_lookup(hdev, addr, addr_type); + if (!params) { + params = hci_conn_params_add(hdev, addr, addr_type); + if (!params) + return -ENOMEM; - /* If we created new params, or existing params were marked as disabled, - * mark them to be used just once to connect. - */ - if (params->auto_connect == HCI_AUTO_CONN_DISABLED) { + /* If we created new params, mark them to be deleted in + * hci_connect_le_scan_cleanup. It's different case than + * existing disabled params, those will stay after cleanup. + */ params->auto_connect = HCI_AUTO_CONN_EXPLICIT; + } + + /* We're trying to connect, so make sure params are at pend_le_conns */ + if (params->auto_connect == HCI_AUTO_CONN_DISABLED || + params->auto_connect == HCI_AUTO_CONN_REPORT || + params->auto_connect == HCI_AUTO_CONN_EXPLICIT) { list_del_init(¶ms->action); list_add(¶ms->action, &hdev->pend_le_conns); } diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index adcbc74c2432..e837539452fb 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -2861,13 +2861,6 @@ struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev, return param; } - list_for_each_entry(param, &hdev->pend_le_reports, action) { - if (bacmp(¶m->addr, addr) == 0 && - param->addr_type == addr_type && - param->explicit_connect) - return param; - } - return NULL; } diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 186041866315..bc31099d3b5b 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -55,7 +55,12 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) wake_up_bit(&hdev->flags, HCI_INQUIRY); hci_dev_lock(hdev); - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + /* Set discovery state to stopped if we're not doing LE active + * scanning. + */ + if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || + hdev->le_scan_type != LE_SCAN_ACTIVE) + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); hci_dev_unlock(hdev); hci_conn_check_pending(hdev); @@ -4648,8 +4653,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, /* If we're not connectable only connect devices that we have in * our pend_le_conns list. 
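/*
 * Generic illustration of the self-deadlock that the hci_conn_cleanup() split
 * above works around: a handler running from a delayed work item must not call
 * a teardown path that does cancel_delayed_work_sync() on that same work,
 * because the cancel waits for the running handler to return. The structure
 * and names below are hypothetical.
 */
#include <linux/workqueue.h>

struct conn {
	struct delayed_work disc_work;
	/* ... */
};

static void conn_bare_cleanup(struct conn *c)
{
	/* unlink and drop references; deliberately no cancel_delayed_work_sync() */
}

static void conn_teardown(struct conn *c)
{
	cancel_delayed_work_sync(&c->disc_work);	/* waits for a running handler */
	conn_bare_cleanup(c);
}

static void conn_disc_work_fn(struct work_struct *work)
{
	struct conn *c = container_of(work, struct conn, disc_work.work);

	/* calling conn_teardown(c) from here would wait on ourselves: deadlock */
	conn_bare_cleanup(c);
}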
*/ - params = hci_explicit_connect_lookup(hdev, addr, addr_type); - + params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, + addr_type); if (!params) return NULL; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index ccaf5a436d8f..c4fe2fee753f 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -3545,6 +3545,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, auth_type); } else { u8 addr_type; + struct hci_conn_params *p; /* Convert from L2CAP channel address type to HCI address type */ @@ -3562,7 +3563,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, * If connection parameters already exist, then they * will be kept and this function does nothing. */ - hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type); + p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type); + + if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT) + p->auto_connect = HCI_AUTO_CONN_DISABLED; conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type, sec_level, @@ -6117,14 +6121,21 @@ static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr, __hci_update_background_scan(req); break; case HCI_AUTO_CONN_REPORT: - list_add(¶ms->action, &hdev->pend_le_reports); + if (params->explicit_connect) + list_add(¶ms->action, &hdev->pend_le_conns); + else + list_add(¶ms->action, &hdev->pend_le_reports); __hci_update_background_scan(req); break; case HCI_AUTO_CONN_DIRECT: case HCI_AUTO_CONN_ALWAYS: if (!is_connected(hdev, addr, addr_type)) { list_add(¶ms->action, &hdev->pend_le_conns); - __hci_update_background_scan(req); + /* If we are in scan phase of connecting, we were + * already added to pend_le_conns and scanning. + */ + if (params->auto_connect != HCI_AUTO_CONN_EXPLICIT) + __hci_update_background_scan(req); } break; } @@ -6379,7 +6390,8 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev, goto unlock; } - if (params->auto_connect == HCI_AUTO_CONN_DISABLED) { + if (params->auto_connect == HCI_AUTO_CONN_DISABLED || + params->auto_connect == HCI_AUTO_CONN_EXPLICIT) { err = cmd->cmd_complete(cmd, MGMT_STATUS_INVALID_PARAMS); mgmt_pending_remove(cmd); @@ -6415,6 +6427,10 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev, if (p->auto_connect == HCI_AUTO_CONN_DISABLED) continue; device_removed(sk, hdev, &p->addr, p->addr_type); + if (p->explicit_connect) { + p->auto_connect = HCI_AUTO_CONN_EXPLICIT; + continue; + } list_del(&p->action); list_del(&p->list); kfree(p); diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index ad82324f710f..0510a577a7b5 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -2311,12 +2311,6 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) if (!conn) return 1; - chan = conn->smp; - if (!chan) { - BT_ERR("SMP security requested but not available"); - return 1; - } - if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) return 1; @@ -2330,6 +2324,12 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) return 0; + chan = conn->smp; + if (!chan) { + BT_ERR("SMP security requested but not available"); + return 1; + } + l2cap_chan_lock(chan); /* If SMP is already in progress ignore this request */ diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 66efdc21f548..480b3de1a0e3 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1006,7 +1006,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br, ih = igmpv3_report_hdr(skb); num = 
ntohs(ih->ngrec); - len = sizeof(*ih); + len = skb_transport_offset(skb) + sizeof(*ih); for (i = 0; i < num; i++) { len += sizeof(*grec); @@ -1067,7 +1067,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br, icmp6h = icmp6_hdr(skb); num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); - len = sizeof(*icmp6h); + len = skb_transport_offset(skb) + sizeof(*icmp6h); for (i = 0; i < num; i++) { __be16 *nsrcs, _nsrcs; diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 525f454f7531..b9b0e3b5da49 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1353,11 +1353,12 @@ static void prepare_write_keepalive(struct ceph_connection *con) dout("prepare_write_keepalive %p\n", con); con_out_kvec_reset(con); if (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2) { - struct timespec ts = CURRENT_TIME; - struct ceph_timespec ceph_ts; - ceph_encode_timespec(&ceph_ts, &ts); + struct timespec now = CURRENT_TIME; + con_out_kvec_add(con, sizeof(tag_keepalive2), &tag_keepalive2); - con_out_kvec_add(con, sizeof(ceph_ts), &ceph_ts); + ceph_encode_timespec(&con->out_temp_keepalive2, &now); + con_out_kvec_add(con, sizeof(con->out_temp_keepalive2), + &con->out_temp_keepalive2); } else { con_out_kvec_add(con, sizeof(tag_keepalive), &tag_keepalive); } diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 80b94e37c94a..f79ccac6699f 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -285,6 +285,7 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req, switch (op->op) { case CEPH_OSD_OP_READ: case CEPH_OSD_OP_WRITE: + case CEPH_OSD_OP_WRITEFULL: ceph_osd_data_release(&op->extent.osd_data); break; case CEPH_OSD_OP_CALL: @@ -485,13 +486,14 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req, size_t payload_len = 0; BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE && - opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE); + opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO && + opcode != CEPH_OSD_OP_TRUNCATE); op->extent.offset = offset; op->extent.length = length; op->extent.truncate_size = truncate_size; op->extent.truncate_seq = truncate_seq; - if (opcode == CEPH_OSD_OP_WRITE) + if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL) payload_len += length; op->payload_len = payload_len; @@ -670,9 +672,11 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, break; case CEPH_OSD_OP_READ: case CEPH_OSD_OP_WRITE: + case CEPH_OSD_OP_WRITEFULL: case CEPH_OSD_OP_ZERO: case CEPH_OSD_OP_TRUNCATE: - if (src->op == CEPH_OSD_OP_WRITE) + if (src->op == CEPH_OSD_OP_WRITE || + src->op == CEPH_OSD_OP_WRITEFULL) request_data_len = src->extent.length; dst->extent.offset = cpu_to_le64(src->extent.offset); dst->extent.length = cpu_to_le64(src->extent.length); @@ -681,7 +685,8 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, dst->extent.truncate_seq = cpu_to_le32(src->extent.truncate_seq); osd_data = &src->extent.osd_data; - if (src->op == CEPH_OSD_OP_WRITE) + if (src->op == CEPH_OSD_OP_WRITE || + src->op == CEPH_OSD_OP_WRITEFULL) ceph_osdc_msg_data_add(req->r_request, osd_data); else ceph_osdc_msg_data_add(req->r_reply, osd_data); diff --git a/net/core/dev.c b/net/core/dev.c index 877c84834d81..6bb6470f5b7b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4713,6 +4713,8 @@ void napi_disable(struct napi_struct *n) while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) msleep(1); + while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state)) + msleep(1); hrtimer_cancel(&n->timer); diff --git 
a/net/core/ethtool.c b/net/core/ethtool.c index b495ab1797fa..29edf74846fc 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -1284,7 +1284,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr) gstrings.len = ret; - data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); + data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER); if (!data) return -ENOMEM; diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index bf77e3639ce0..365de66436ac 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -631,15 +631,17 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb, { int idx = 0; struct fib_rule *rule; + int err = 0; rcu_read_lock(); list_for_each_entry_rcu(rule, &ops->rules_list, list) { if (idx < cb->args[1]) goto skip; - if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, RTM_NEWRULE, - NLM_F_MULTI, ops) < 0) + err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, RTM_NEWRULE, + NLM_F_MULTI, ops); + if (err) break; skip: idx++; @@ -648,7 +650,7 @@ skip: cb->args[1] = idx; rules_ops_put(ops); - return skb->len; + return err; } static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb) @@ -664,7 +666,9 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb) if (ops == NULL) return -EAFNOSUPPORT; - return dump_rules(skb, cb, ops); + dump_rules(skb, cb, ops); + + return skb->len; } rcu_read_lock(); diff --git a/net/core/filter.c b/net/core/filter.c index 13079f03902e..bb18c3680001 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -478,9 +478,9 @@ do_pass: bpf_src = BPF_X; } else { insn->dst_reg = BPF_REG_A; - insn->src_reg = BPF_REG_X; insn->imm = fp->k; bpf_src = BPF_SRC(fp->code); + insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0; } /* Common case where 'jump_false' is next insn. */ @@ -1415,6 +1415,7 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5) return dev_forward_skb(dev, skb2); skb2->dev = dev; + skb_sender_cpu_clear(skb2); return dev_queue_xmit(skb2); } @@ -1854,9 +1855,13 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, goto out; /* We're copying the filter that has been originally attached, - * so no conversion/decode needed anymore. + * so no conversion/decode needed anymore. eBPF programs that + * have no original program cannot be dumped through this. */ + ret = -EACCES; fprog = filter->prog->orig_prog; + if (!fprog) + goto out; ret = fprog->len; if (!len) diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index b279077c3089..830f8a7c1cb1 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -31,7 +31,6 @@ static const char fmt_hex[] = "%#x\n"; static const char fmt_long_hex[] = "%#lx\n"; static const char fmt_dec[] = "%d\n"; -static const char fmt_udec[] = "%u\n"; static const char fmt_ulong[] = "%lu\n"; static const char fmt_u64[] = "%llu\n"; @@ -202,7 +201,7 @@ static ssize_t speed_show(struct device *dev, if (netif_running(netdev)) { struct ethtool_cmd cmd; if (!__ethtool_get_settings(netdev, &cmd)) - ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd)); + ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd)); } rtnl_unlock(); return ret; @@ -1481,6 +1480,15 @@ static int of_dev_node_match(struct device *dev, const void *data) return ret == 0 ? 
dev->of_node == data : ret; } +/* + * of_find_net_device_by_node - lookup the net device for the device node + * @np: OF device node + * + * Looks up the net_device structure corresponding with the device node. + * If successful, returns a pointer to the net_device with the embedded + * struct device refcount incremented by one, or NULL on failure. The + * refcount must be dropped when done with the net_device. + */ struct net_device *of_find_net_device_by_node(struct device_node *np) { struct device *dev; diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 6aa3db8dfc3b..8bdada242a7d 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -142,7 +142,7 @@ static void queue_process(struct work_struct *work) */ static int poll_one_napi(struct napi_struct *napi, int budget) { - int work; + int work = 0; /* net_rx_action's ->poll() invocations and our's are * synchronized by this test which is only made while @@ -151,7 +151,12 @@ static int poll_one_napi(struct napi_struct *napi, int budget) if (!test_bit(NAPI_STATE_SCHED, &napi->state)) return budget; - set_bit(NAPI_STATE_NPSVC, &napi->state); + /* If we set this bit but see that it has already been set, + * that indicates that napi has been disabled and we need + * to abort this operation + */ + if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state)) + goto out; work = napi->poll(napi, budget); WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll); @@ -159,6 +164,7 @@ static int poll_one_napi(struct napi_struct *napi, int budget) clear_bit(NAPI_STATE_NPSVC, &napi->state); +out: return budget - work; } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index a466821d1441..0ec48403ed68 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -3047,6 +3047,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) u32 portid = NETLINK_CB(cb->skb).portid; u32 seq = cb->nlh->nlmsg_seq; u32 filter_mask = 0; + int err; if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) { struct nlattr *extfilt; @@ -3067,20 +3068,25 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) struct net_device *br_dev = netdev_master_upper_dev_get(dev); if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { - if (idx >= cb->args[0] && - br_dev->netdev_ops->ndo_bridge_getlink( - skb, portid, seq, dev, filter_mask, - NLM_F_MULTI) < 0) - break; + if (idx >= cb->args[0]) { + err = br_dev->netdev_ops->ndo_bridge_getlink( + skb, portid, seq, dev, + filter_mask, NLM_F_MULTI); + if (err < 0 && err != -EOPNOTSUPP) + break; + } idx++; } if (ops->ndo_bridge_getlink) { - if (idx >= cb->args[0] && - ops->ndo_bridge_getlink(skb, portid, seq, dev, - filter_mask, - NLM_F_MULTI) < 0) - break; + if (idx >= cb->args[0]) { + err = ops->ndo_bridge_getlink(skb, portid, + seq, dev, + filter_mask, + NLM_F_MULTI); + if (err < 0 && err != -EOPNOTSUPP) + break; + } idx++; } } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index dad4dd37e2aa..fab4599ba8b2 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2958,11 +2958,12 @@ EXPORT_SYMBOL_GPL(skb_append_pagefrags); */ unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) { + unsigned char *data = skb->data; + BUG_ON(len > skb->len); - skb->len -= len; - BUG_ON(skb->len < skb->data_len); - skb_postpull_rcsum(skb, skb->data, len); - return skb->data += len; + __skb_pull(skb, len); + skb_postpull_rcsum(skb, data, len); + return skb->data; } EXPORT_SYMBOL_GPL(skb_pull_rcsum); diff --git a/net/core/sock.c b/net/core/sock.c index 
ca2984afe16e..3307c02244d3 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2740,10 +2740,8 @@ static void req_prot_cleanup(struct request_sock_ops *rsk_prot) return; kfree(rsk_prot->slab_name); rsk_prot->slab_name = NULL; - if (rsk_prot->slab) { - kmem_cache_destroy(rsk_prot->slab); - rsk_prot->slab = NULL; - } + kmem_cache_destroy(rsk_prot->slab); + rsk_prot->slab = NULL; } static int req_prot_init(const struct proto *prot) @@ -2828,10 +2826,8 @@ void proto_unregister(struct proto *prot) list_del(&prot->node); mutex_unlock(&proto_list_mutex); - if (prot->slab != NULL) { - kmem_cache_destroy(prot->slab); - prot->slab = NULL; - } + kmem_cache_destroy(prot->slab); + prot->slab = NULL; req_prot_cleanup(prot->rsk_prot); diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c index bd9e718c2a20..3de0d0362d7f 100644 --- a/net/dccp/ackvec.c +++ b/net/dccp/ackvec.c @@ -398,12 +398,8 @@ out_err: void dccp_ackvec_exit(void) { - if (dccp_ackvec_slab != NULL) { - kmem_cache_destroy(dccp_ackvec_slab); - dccp_ackvec_slab = NULL; - } - if (dccp_ackvec_record_slab != NULL) { - kmem_cache_destroy(dccp_ackvec_record_slab); - dccp_ackvec_record_slab = NULL; - } + kmem_cache_destroy(dccp_ackvec_slab); + dccp_ackvec_slab = NULL; + kmem_cache_destroy(dccp_ackvec_record_slab); + dccp_ackvec_record_slab = NULL; } diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c index 83498975165f..90f77d08cc37 100644 --- a/net/dccp/ccid.c +++ b/net/dccp/ccid.c @@ -95,8 +95,7 @@ static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_f static void ccid_kmem_cache_destroy(struct kmem_cache *slab) { - if (slab != NULL) - kmem_cache_destroy(slab); + kmem_cache_destroy(slab); } static int __init ccid_activate(struct ccid_operations *ccid_ops) diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 30addee2dd03..838f524cf11a 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -48,8 +48,6 @@ void dccp_time_wait(struct sock *sk, int state, int timeo) tw->tw_ipv6only = sk->sk_ipv6only; } #endif - /* Linkage updates. */ - __inet_twsk_hashdance(tw, sk, &dccp_hashinfo); /* Get the TIME_WAIT timeout firing. */ if (timeo < rto) @@ -60,6 +58,8 @@ void dccp_time_wait(struct sock *sk, int state, int timeo) timeo = DCCP_TIMEWAIT_LEN; inet_twsk_schedule(tw, timeo); + /* Linkage updates. */ + __inet_twsk_hashdance(tw, sk, &dccp_hashinfo); inet_twsk_put(tw); } else { /* Sorry, if we're out of memory, just CLOSE this diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 76e3800765f8..adb5325f4934 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -22,6 +22,7 @@ #include <linux/of_platform.h> #include <linux/of_net.h> #include <linux/sysfs.h> +#include <linux/phy_fixed.h> #include "dsa_priv.h" char dsa_driver_version[] = "0.1"; @@ -305,7 +306,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) if (ret < 0) goto out; - ds->slave_mii_bus = mdiobus_alloc(); + ds->slave_mii_bus = devm_mdiobus_alloc(parent); if (ds->slave_mii_bus == NULL) { ret = -ENOMEM; goto out; @@ -314,7 +315,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) ret = mdiobus_register(ds->slave_mii_bus); if (ret < 0) - goto out_free; + goto out; /* @@ -367,10 +368,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) return ret; -out_free: - mdiobus_free(ds->slave_mii_bus); out: - kfree(ds); return ret; } @@ -400,7 +398,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, /* * Allocate and initialise switch state. 
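/*
 * Sketch of the managed-allocation pattern the net/dsa/dsa.c changes move to
 * (devm_mdiobus_alloc() above, devm_kzalloc() in the following hunks):
 * allocations tied to the probing device are released automatically on probe
 * failure or unbind, so the explicit kfree()/mdiobus_free() error paths can go
 * away. Hypothetical driver, for illustration only.
 */
#include <linux/platform_device.h>
#include <linux/phy.h>

struct foo_priv {
	struct mii_bus *mii_bus;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->mii_bus = devm_mdiobus_alloc(&pdev->dev);
	if (!priv->mii_bus)
		return -ENOMEM;

	/* no kfree()/mdiobus_free() needed on error or unbind */
	return 0;
}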
*/ - ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL); + ds = devm_kzalloc(parent, sizeof(*ds) + drv->priv_size, GFP_KERNEL); if (ds == NULL) return ERR_PTR(-ENOMEM); @@ -420,10 +418,47 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, static void dsa_switch_destroy(struct dsa_switch *ds) { + struct device_node *port_dn; + struct phy_device *phydev; + struct dsa_chip_data *cd = ds->pd; + int port; + #ifdef CONFIG_NET_DSA_HWMON if (ds->hwmon_dev) hwmon_device_unregister(ds->hwmon_dev); #endif + + /* Disable configuration of the CPU and DSA ports */ + for (port = 0; port < DSA_MAX_PORTS; port++) { + if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))) + continue; + + port_dn = cd->port_dn[port]; + if (of_phy_is_fixed_link(port_dn)) { + phydev = of_phy_find_device(port_dn); + if (phydev) { + int addr = phydev->addr; + + phy_device_free(phydev); + of_node_put(port_dn); + fixed_phy_del(addr); + } + } + } + + /* Destroy network devices for physical switch ports. */ + for (port = 0; port < DSA_MAX_PORTS; port++) { + if (!(ds->phys_port_mask & (1 << port))) + continue; + + if (!ds->ports[port]) + continue; + + unregister_netdev(ds->ports[port]); + free_netdev(ds->ports[port]); + } + + mdiobus_unregister(ds->slave_mii_bus); } #ifdef CONFIG_PM_SLEEP @@ -634,6 +669,10 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd) port_index++; } kfree(pd->chip[i].rtable); + + /* Drop our reference to the MDIO bus device */ + if (pd->chip[i].host_dev) + put_device(pd->chip[i].host_dev); } kfree(pd->chip); } @@ -661,16 +700,22 @@ static int dsa_of_probe(struct device *dev) return -EPROBE_DEFER; ethernet = of_parse_phandle(np, "dsa,ethernet", 0); - if (!ethernet) - return -EINVAL; + if (!ethernet) { + ret = -EINVAL; + goto out_put_mdio; + } ethernet_dev = of_find_net_device_by_node(ethernet); - if (!ethernet_dev) - return -EPROBE_DEFER; + if (!ethernet_dev) { + ret = -EPROBE_DEFER; + goto out_put_mdio; + } pd = kzalloc(sizeof(*pd), GFP_KERNEL); - if (!pd) - return -ENOMEM; + if (!pd) { + ret = -ENOMEM; + goto out_put_ethernet; + } dev->platform_data = pd; pd->of_netdev = ethernet_dev; @@ -691,7 +736,9 @@ static int dsa_of_probe(struct device *dev) cd = &pd->chip[chip_index]; cd->of_node = child; - cd->host_dev = &mdio_bus->dev; + + /* When assigning the host device, increment its refcount */ + cd->host_dev = get_device(&mdio_bus->dev); sw_addr = of_get_property(child, "reg", NULL); if (!sw_addr) @@ -711,6 +758,12 @@ static int dsa_of_probe(struct device *dev) ret = -EPROBE_DEFER; goto out_free_chip; } + + /* Drop the mdio_bus device ref, replacing the host + * device with the mdio_bus_switch device, keeping + * the refcount from of_mdio_find_bus() above. 
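/*
 * The dsa_of_probe()/dsa_of_remove() changes above and just below are about
 * keeping device references balanced: lookup helpers such as
 * of_find_net_device_by_node() and of_mdio_find_bus() return their result with
 * a reference held, so every stored copy of the pointer needs a matching
 * put_device() when it is dropped. Condensed fragment of that pattern:
 */
ethernet_dev = of_find_net_device_by_node(ethernet);	/* takes a reference */
if (!ethernet_dev)
	return -EPROBE_DEFER;
pd->of_netdev = ethernet_dev;				/* keep it while bound */

/* ... and in the remove path: */
put_device(&pd->of_netdev->dev);			/* drop it exactly once */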
+ */ + put_device(cd->host_dev); cd->host_dev = &mdio_bus_switch->dev; } @@ -744,6 +797,10 @@ static int dsa_of_probe(struct device *dev) } } + /* The individual chips hold their own refcount on the mdio bus, + * so drop ours */ + put_device(&mdio_bus->dev); + return 0; out_free_chip: @@ -751,6 +808,10 @@ out_free_chip: out_free: kfree(pd); dev->platform_data = NULL; +out_put_ethernet: + put_device(ðernet_dev->dev); +out_put_mdio: + put_device(&mdio_bus->dev); return ret; } @@ -762,6 +823,7 @@ static void dsa_of_remove(struct device *dev) return; dsa_of_free_platform_data(pd); + put_device(&pd->of_netdev->dev); kfree(pd); } #else @@ -775,10 +837,11 @@ static inline void dsa_of_remove(struct device *dev) } #endif -static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev, - struct device *parent, struct dsa_platform_data *pd) +static int dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev, + struct device *parent, struct dsa_platform_data *pd) { int i; + unsigned configured = 0; dst->pd = pd; dst->master_netdev = dev; @@ -798,9 +861,17 @@ static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev, dst->ds[i] = ds; if (ds->drv->poll_link != NULL) dst->link_poll_needed = 1; + + ++configured; } /* + * If no switch was found, exit cleanly + */ + if (!configured) + return -EPROBE_DEFER; + + /* * If we use a tagging format that doesn't have an ethertype * field, make sure that all packets from this point on get * sent to the tag format's receive function. @@ -816,6 +887,8 @@ static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev, dst->link_poll_timer.expires = round_jiffies(jiffies + HZ); add_timer(&dst->link_poll_timer); } + + return 0; } static int dsa_probe(struct platform_device *pdev) @@ -856,7 +929,7 @@ static int dsa_probe(struct platform_device *pdev) goto out; } - dst = kzalloc(sizeof(*dst), GFP_KERNEL); + dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL); if (dst == NULL) { dev_put(dev); ret = -ENOMEM; @@ -865,7 +938,9 @@ static int dsa_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dst); - dsa_setup_dst(dst, dev, &pdev->dev, pd); + ret = dsa_setup_dst(dst, dev, &pdev->dev, pd); + if (ret) + goto out; return 0; @@ -887,7 +962,7 @@ static void dsa_remove_dst(struct dsa_switch_tree *dst) for (i = 0; i < dst->pd->nr_chips; i++) { struct dsa_switch *ds = dst->ds[i]; - if (ds != NULL) + if (ds) dsa_switch_destroy(ds); } } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index cce97385f743..7d91f4612ac0 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -458,12 +458,17 @@ static int dsa_slave_stp_update(struct net_device *dev, u8 state) static int dsa_slave_port_attr_set(struct net_device *dev, struct switchdev_attr *attr) { - int ret = 0; + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + int ret; switch (attr->id) { case SWITCHDEV_ATTR_PORT_STP_STATE: - if (attr->trans == SWITCHDEV_TRANS_COMMIT) - ret = dsa_slave_stp_update(dev, attr->u.stp_state); + if (attr->trans == SWITCHDEV_TRANS_PREPARE) + ret = ds->drv->port_stp_update ? 
0 : -EOPNOTSUPP; + else + ret = ds->drv->port_stp_update(ds, p->port, + attr->u.stp_state); break; default: ret = -EOPNOTSUPP; diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index d25efc93d8f1..b6ca0890d018 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c @@ -78,7 +78,7 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev, trailer = skb_tail_pointer(skb) - 4; if (trailer[0] != 0x80 || (trailer[1] & 0xf8) != 0x00 || - (trailer[3] & 0xef) != 0x00 || trailer[3] != 0x00) + (trailer[2] & 0xef) != 0x00 || trailer[3] != 0x00) goto out_drop; source_port = trailer[1] & 7; diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 30409b75e925..0c9c3482e419 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -113,6 +113,8 @@ #include <net/arp.h> #include <net/ax25.h> #include <net/netrom.h> +#include <net/dst_metadata.h> +#include <net/ip_tunnels.h> #include <linux/uaccess.h> @@ -296,7 +298,8 @@ static void arp_send_dst(int type, int ptype, __be32 dest_ip, struct net_device *dev, __be32 src_ip, const unsigned char *dest_hw, const unsigned char *src_hw, - const unsigned char *target_hw, struct sk_buff *oskb) + const unsigned char *target_hw, + struct dst_entry *dst) { struct sk_buff *skb; @@ -309,9 +312,7 @@ static void arp_send_dst(int type, int ptype, __be32 dest_ip, if (!skb) return; - if (oskb) - skb_dst_copy(skb, oskb); - + skb_dst_set(skb, dst_clone(dst)); arp_xmit(skb); } @@ -333,6 +334,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb) __be32 target = *(__be32 *)neigh->primary_key; int probes = atomic_read(&neigh->probes); struct in_device *in_dev; + struct dst_entry *dst = NULL; rcu_read_lock(); in_dev = __in_dev_get_rcu(dev); @@ -381,9 +383,10 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb) } } + if (skb && !(dev->priv_flags & IFF_XMIT_DST_RELEASE)) + dst = skb_dst(skb); arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr, - dst_hw, dev->dev_addr, NULL, - dev->priv_flags & IFF_XMIT_DST_RELEASE ? NULL : skb); + dst_hw, dev->dev_addr, NULL, dst); } static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip) @@ -649,6 +652,7 @@ static int arp_process(struct sock *sk, struct sk_buff *skb) int addr_type; struct neighbour *n; struct net *net = dev_net(dev); + struct dst_entry *reply_dst = NULL; bool is_garp = false; /* arp_rcv below verifies the ARP header and verifies the device @@ -749,13 +753,18 @@ static int arp_process(struct sock *sk, struct sk_buff *skb) * cache. 
*/ + if (arp->ar_op == htons(ARPOP_REQUEST) && skb_metadata_dst(skb)) + reply_dst = (struct dst_entry *) + iptunnel_metadata_reply(skb_metadata_dst(skb), + GFP_ATOMIC); + /* Special case: IPv4 duplicate address detection packet (RFC2131) */ if (sip == 0) { if (arp->ar_op == htons(ARPOP_REQUEST) && inet_addr_type_dev_table(net, dev, tip) == RTN_LOCAL && !arp_ignore(in_dev, sip, tip)) - arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha, - dev->dev_addr, sha); + arp_send_dst(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, + sha, dev->dev_addr, sha, reply_dst); goto out; } @@ -774,9 +783,10 @@ static int arp_process(struct sock *sk, struct sk_buff *skb) if (!dont_send) { n = neigh_event_ns(&arp_tbl, sha, &sip, dev); if (n) { - arp_send(ARPOP_REPLY, ETH_P_ARP, sip, - dev, tip, sha, dev->dev_addr, - sha); + arp_send_dst(ARPOP_REPLY, ETH_P_ARP, + sip, dev, tip, sha, + dev->dev_addr, sha, + reply_dst); neigh_release(n); } } @@ -794,13 +804,14 @@ static int arp_process(struct sock *sk, struct sk_buff *skb) if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED || skb->pkt_type == PACKET_HOST || NEIGH_VAR(in_dev->arp_parms, PROXY_DELAY) == 0) { - arp_send(ARPOP_REPLY, ETH_P_ARP, sip, - dev, tip, sha, dev->dev_addr, - sha); + arp_send_dst(ARPOP_REPLY, ETH_P_ARP, + sip, dev, tip, sha, + dev->dev_addr, sha, + reply_dst); } else { pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb); - return 0; + goto out_free_dst; } goto out; } @@ -854,6 +865,8 @@ static int arp_process(struct sock *sk, struct sk_buff *skb) out: consume_skb(skb); +out_free_dst: + dst_release(reply_dst); return 0; } diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 6fcbd215cdbc..690bcbc59f26 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -340,6 +340,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, fl4.flowi4_tos = tos; fl4.flowi4_scope = RT_SCOPE_UNIVERSE; fl4.flowi4_tun_key.tun_id = 0; + fl4.flowi4_flags = 0; no_addr = idev->ifa_list == NULL; diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 26d6ffb6d23c..6c2af797f2f9 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1426,7 +1426,7 @@ found: nh->nh_flags & RTNH_F_LINKDOWN && !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE)) continue; - if (!(flp->flowi4_flags & FLOWI_FLAG_VRFSRC)) { + if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) { if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif) continue; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 79fe05befcae..e5eb8ac4089d 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -427,7 +427,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) fl4.flowi4_mark = mark; fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); fl4.flowi4_proto = IPPROTO_ICMP; - fl4.flowi4_oif = vrf_master_ifindex(skb->dev) ? : skb->dev->ifindex; + fl4.flowi4_oif = vrf_master_ifindex(skb->dev); security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) @@ -461,7 +461,7 @@ static struct rtable *icmp_route_lookup(struct net *net, fl4->flowi4_proto = IPPROTO_ICMP; fl4->fl4_icmp_type = type; fl4->fl4_icmp_code = code; - fl4->flowi4_oif = vrf_master_ifindex(skb_in->dev) ? 
: skb_in->dev->ifindex; + fl4->flowi4_oif = vrf_master_ifindex(skb_in->dev); security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); rt = __ip_route_output_key(net, fl4); diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 134957159c27..61b45a17fc73 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -577,21 +577,22 @@ EXPORT_SYMBOL(inet_rtx_syn_ack); static bool reqsk_queue_unlink(struct request_sock_queue *queue, struct request_sock *req) { - struct listen_sock *lopt = queue->listen_opt; struct request_sock **prev; + struct listen_sock *lopt; bool found = false; spin_lock(&queue->syn_wait_lock); - - for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL; - prev = &(*prev)->dl_next) { - if (*prev == req) { - *prev = req->dl_next; - found = true; - break; + lopt = queue->listen_opt; + if (lopt) { + for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL; + prev = &(*prev)->dl_next) { + if (*prev == req) { + *prev = req->dl_next; + found = true; + break; + } } } - spin_unlock(&queue->syn_wait_lock); if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer)) reqsk_put(req); @@ -685,20 +686,20 @@ void reqsk_queue_hash_req(struct request_sock_queue *queue, req->num_timeout = 0; req->sk = NULL; + setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req); + mod_timer_pinned(&req->rsk_timer, jiffies + timeout); + req->rsk_hash = hash; + /* before letting lookups find us, make sure all req fields * are committed to memory and refcnt initialized. */ smp_wmb(); atomic_set(&req->rsk_refcnt, 2); - setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req); - req->rsk_hash = hash; spin_lock(&queue->syn_wait_lock); req->dl_next = lopt->syn_table[hash]; lopt->syn_table[hash] = req; spin_unlock(&queue->syn_wait_lock); - - mod_timer_pinned(&req->rsk_timer, jiffies + timeout); } EXPORT_SYMBOL(reqsk_queue_hash_req); diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index ae22cc24fbe8..c67f9bd7699c 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -123,13 +123,15 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, /* * Step 2: Hash TW into tcp ehash chain. * Notes : - * - tw_refcnt is set to 3 because : + * - tw_refcnt is set to 4 because : * - We have one reference from bhash chain. * - We have one reference from ehash chain. + * - We have one reference from timer. + * - One reference for ourself (our caller will release it). * We can use atomic_set() because prior spin_lock()/spin_unlock() * committed into memory all tw fields. 
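The tw_refcnt comment above boils down to: fully initialise the object, then publish it with its reference count pre-set to the number of holders that already exist (bhash chain, ehash chain, timer, caller) instead of incrementing once per holder. A userspace sketch of that counting scheme, with illustrative names and C11 atomics standing in for the kernel primitives:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct tw_like {
	atomic_int refcnt;
	int state;
};

enum { REF_BHASH = 1, REF_EHASH = 1, REF_TIMER = 1, REF_CALLER = 1 };

static struct tw_like *tw_create(void)
{
	struct tw_like *tw = calloc(1, sizeof(*tw));

	if (!tw)
		return NULL;
	tw->state = 42;			/* fully initialise first ...        */
	atomic_store(&tw->refcnt,	/* ... then set all references once */
		     REF_BHASH + REF_EHASH + REF_TIMER + REF_CALLER);
	return tw;
}

static void tw_put(struct tw_like *tw)
{
	if (atomic_fetch_sub(&tw->refcnt, 1) == 1)
		free(tw);
}

int main(void)
{
	struct tw_like *tw = tw_create();
	int i;

	for (i = 0; i < 4; i++)		/* each holder drops its reference */
		tw_put(tw);
	return 0;
}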
*/ - atomic_set(&tw->tw_refcnt, 1 + 1 + 1); + atomic_set(&tw->tw_refcnt, 4); inet_twsk_add_node_rcu(tw, &ehead->chain); /* Step 3: Remove SK from hash chain */ @@ -217,7 +219,7 @@ void inet_twsk_deschedule_put(struct inet_timewait_sock *tw) } EXPORT_SYMBOL(inet_twsk_deschedule_put); -void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo) +void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm) { /* timeout := RTO * 3.5 * @@ -245,12 +247,14 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo) */ tw->tw_kill = timeo <= 4*HZ; - if (!mod_timer_pinned(&tw->tw_timer, jiffies + timeo)) { - atomic_inc(&tw->tw_refcnt); + if (!rearm) { + BUG_ON(mod_timer_pinned(&tw->tw_timer, jiffies + timeo)); atomic_inc(&tw->tw_dr->tw_count); + } else { + mod_timer_pending(&tw->tw_timer, jiffies + timeo); } } -EXPORT_SYMBOL_GPL(inet_twsk_schedule); +EXPORT_SYMBOL_GPL(__inet_twsk_schedule); void inet_twsk_purge(struct inet_hashinfo *hashinfo, struct inet_timewait_death_row *twdr, int family) diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 29ed6c5a5185..84dce6a92f93 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -46,12 +46,13 @@ #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/rtnetlink.h> +#include <net/dst_metadata.h> int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, __be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl, __be16 df, bool xnet) { - int pkt_len = skb->len; + int pkt_len = skb->len - skb_inner_network_offset(skb); struct iphdr *iph; int err; @@ -119,6 +120,33 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto) } EXPORT_SYMBOL_GPL(iptunnel_pull_header); +struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, + gfp_t flags) +{ + struct metadata_dst *res; + struct ip_tunnel_info *dst, *src; + + if (!md || md->u.tun_info.mode & IP_TUNNEL_INFO_TX) + return NULL; + + res = metadata_dst_alloc(0, flags); + if (!res) + return NULL; + + dst = &res->u.tun_info; + src = &md->u.tun_info; + dst->key.tun_id = src->key.tun_id; + if (src->mode & IP_TUNNEL_INFO_IPV6) + memcpy(&dst->key.u.ipv6.dst, &src->key.u.ipv6.src, + sizeof(struct in6_addr)); + else + dst->key.u.ipv4.dst = src->key.u.ipv4.src; + dst->mode = src->mode | IP_TUNNEL_INFO_TX; + + return res; +} +EXPORT_SYMBOL_GPL(iptunnel_metadata_reply); + struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool csum_help, int gso_type_mask) @@ -198,8 +226,6 @@ static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = { [LWTUNNEL_IP_SRC] = { .type = NLA_U32 }, [LWTUNNEL_IP_TTL] = { .type = NLA_U8 }, [LWTUNNEL_IP_TOS] = { .type = NLA_U8 }, - [LWTUNNEL_IP_SPORT] = { .type = NLA_U16 }, - [LWTUNNEL_IP_DPORT] = { .type = NLA_U16 }, [LWTUNNEL_IP_FLAGS] = { .type = NLA_U16 }, }; @@ -239,12 +265,6 @@ static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr, if (tb[LWTUNNEL_IP_TOS]) tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]); - if (tb[LWTUNNEL_IP_SPORT]) - tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP_SPORT]); - - if (tb[LWTUNNEL_IP_DPORT]) - tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP_DPORT]); - if (tb[LWTUNNEL_IP_FLAGS]) tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP_FLAGS]); @@ -266,8 +286,6 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb, nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) || nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) || nla_put_u8(skb, LWTUNNEL_IP_TTL, 
tun_info->key.ttl) || - nla_put_u16(skb, LWTUNNEL_IP_SPORT, tun_info->key.tp_src) || - nla_put_u16(skb, LWTUNNEL_IP_DPORT, tun_info->key.tp_dst) || nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags)) return -ENOMEM; @@ -281,8 +299,6 @@ static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate) + nla_total_size(4) /* LWTUNNEL_IP_SRC */ + nla_total_size(1) /* LWTUNNEL_IP_TOS */ + nla_total_size(1) /* LWTUNNEL_IP_TTL */ - + nla_total_size(2) /* LWTUNNEL_IP_SPORT */ - + nla_total_size(2) /* LWTUNNEL_IP_DPORT */ + nla_total_size(2); /* LWTUNNEL_IP_FLAGS */ } @@ -305,8 +321,6 @@ static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = { [LWTUNNEL_IP6_SRC] = { .len = sizeof(struct in6_addr) }, [LWTUNNEL_IP6_HOPLIMIT] = { .type = NLA_U8 }, [LWTUNNEL_IP6_TC] = { .type = NLA_U8 }, - [LWTUNNEL_IP6_SPORT] = { .type = NLA_U16 }, - [LWTUNNEL_IP6_DPORT] = { .type = NLA_U16 }, [LWTUNNEL_IP6_FLAGS] = { .type = NLA_U16 }, }; @@ -346,12 +360,6 @@ static int ip6_tun_build_state(struct net_device *dev, struct nlattr *attr, if (tb[LWTUNNEL_IP6_TC]) tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]); - if (tb[LWTUNNEL_IP6_SPORT]) - tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP6_SPORT]); - - if (tb[LWTUNNEL_IP6_DPORT]) - tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP6_DPORT]); - if (tb[LWTUNNEL_IP6_FLAGS]) tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP6_FLAGS]); @@ -373,8 +381,6 @@ static int ip6_tun_fill_encap_info(struct sk_buff *skb, nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) || nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) || nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) || - nla_put_u16(skb, LWTUNNEL_IP6_SPORT, tun_info->key.tp_src) || - nla_put_u16(skb, LWTUNNEL_IP6_DPORT, tun_info->key.tp_dst) || nla_put_u16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags)) return -ENOMEM; @@ -388,8 +394,6 @@ static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate) + nla_total_size(16) /* LWTUNNEL_IP6_SRC */ + nla_total_size(1) /* LWTUNNEL_IP6_HOPLIMIT */ + nla_total_size(1) /* LWTUNNEL_IP6_TC */ - + nla_total_size(2) /* LWTUNNEL_IP6_SPORT */ - + nla_total_size(2) /* LWTUNNEL_IP6_DPORT */ + nla_total_size(2); /* LWTUNNEL_IP6_FLAGS */ } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 5f4a5565ad8b..c81deb85acb4 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1737,6 +1737,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, fl4.flowi4_mark = skb->mark; fl4.flowi4_tos = tos; fl4.flowi4_scope = RT_SCOPE_UNIVERSE; + fl4.flowi4_flags = 0; fl4.daddr = daddr; fl4.saddr = saddr; err = fib_lookup(net, &fl4, &res, 0); @@ -2045,6 +2046,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) struct fib_result res; struct rtable *rth; int orig_oif; + int err = -ENETUNREACH; res.tclassid = 0; res.fi = NULL; @@ -2153,7 +2155,8 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) goto make_route; } - if (fib_lookup(net, fl4, &res, 0)) { + err = fib_lookup(net, fl4, &res, 0); + if (err) { res.fi = NULL; res.table = NULL; if (fl4->flowi4_oif) { @@ -2181,7 +2184,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) res.type = RTN_UNICAST; goto make_route; } - rth = ERR_PTR(-ENETUNREACH); + rth = ERR_PTR(err); goto out; } diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index c6ded6b2a79f..448c2615fece 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -154,14 +154,20 @@ static void bictcp_init(struct sock *sk) 
static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event) { if (event == CA_EVENT_TX_START) { - s32 delta = tcp_time_stamp - tcp_sk(sk)->lsndtime; struct bictcp *ca = inet_csk_ca(sk); + u32 now = tcp_time_stamp; + s32 delta; + + delta = now - tcp_sk(sk)->lsndtime; /* We were application limited (idle) for a while. * Shift epoch_start to keep cwnd growth to cubic curve. */ - if (ca->epoch_start && delta > 0) + if (ca->epoch_start && delta > 0) { ca->epoch_start += delta; + if (after(ca->epoch_start, now)) + ca->epoch_start = now; + } return; } } diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 6d8795b066ac..def765911ff8 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -162,9 +162,9 @@ kill_with_rst: if (tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp && tcp_tw_remember_stamp(tw)) - inet_twsk_schedule(tw, tw->tw_timeout); + inet_twsk_reschedule(tw, tw->tw_timeout); else - inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN); + inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); return TCP_TW_ACK; } @@ -201,7 +201,7 @@ kill: return TCP_TW_SUCCESS; } } - inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN); + inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); if (tmp_opt.saw_tstamp) { tcptw->tw_ts_recent = tmp_opt.rcv_tsval; @@ -251,7 +251,7 @@ kill: * Do not reschedule in the last case. */ if (paws_reject || th->ack) - inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN); + inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); return tcp_timewait_check_oow_rate_limit( tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT); @@ -322,9 +322,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) } while (0); #endif - /* Linkage updates. */ - __inet_twsk_hashdance(tw, sk, &tcp_hashinfo); - /* Get the TIME_WAIT timeout firing. */ if (timeo < rto) timeo = rto; @@ -338,6 +335,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) } inet_twsk_schedule(tw, timeo); + /* Linkage updates. */ + __inet_twsk_hashdance(tw, sk, &tcp_hashinfo); inet_twsk_put(tw); } else { /* Sorry, if we're out of memory, just CLOSE this diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index f9a8a12b62ee..1100ffe4a722 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2897,6 +2897,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) skb_reserve(skb, MAX_TCP_HEADER); tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), TCPHDR_ACK | TCPHDR_RST); + skb_mstamp_get(&skb->skb_mstamp); /* Send it off. 
*/ if (tcp_transmit_skb(sk, skb, 0, priority)) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index c0a15e7f359f..f7d1d5e19e95 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1024,7 +1024,8 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (netif_index_is_vrf(net, ipc.oif)) { flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, sk->sk_protocol, - (flow_flags | FLOWI_FLAG_VRFSRC), + (flow_flags | FLOWI_FLAG_VRFSRC | + FLOWI_FLAG_SKIP_NH_OIF), faddr, saddr, dport, inet->inet_sport); diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index bb919b28619f..c10a9ee68433 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -33,6 +33,8 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4, if (saddr) fl4->saddr = saddr->a4; + fl4->flowi4_flags = FLOWI_FLAG_SKIP_NH_OIF; + rt = __ip_route_output_key(net, fl4); if (!IS_ERR(rt)) return &rt->dst; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 030fefdc9aed..36b85bd05ac8 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3119,6 +3119,8 @@ static void addrconf_gre_config(struct net_device *dev) } addrconf_addr_gen(idev, true); + if (dev->flags & IFF_POINTOPOINT) + addrconf_add_mroute(dev); } #endif @@ -5127,13 +5129,12 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) rt = addrconf_get_prefix_route(&ifp->peer_addr, 128, ifp->idev->dev, 0, 0); - if (rt && ip6_del_rt(rt)) - dst_free(&rt->dst); + if (rt) + ip6_del_rt(rt); } dst_hold(&ifp->rt->dst); - if (ip6_del_rt(ifp->rt)) - dst_free(&ifp->rt->dst); + ip6_del_rt(ifp->rt); rt_genid_bump_ipv6(net); break; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 418d9823692b..7d2e0023c72d 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -155,6 +155,11 @@ static void node_free(struct fib6_node *fn) kmem_cache_free(fib6_node_kmem, fn); } +static void rt6_rcu_free(struct rt6_info *rt) +{ + call_rcu(&rt->dst.rcu_head, dst_rcu_free); +} + static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt) { int cpu; @@ -169,7 +174,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt) ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu); pcpu_rt = *ppcpu_rt; if (pcpu_rt) { - dst_free(&pcpu_rt->dst); + rt6_rcu_free(pcpu_rt); *ppcpu_rt = NULL; } } @@ -181,7 +186,7 @@ static void rt6_release(struct rt6_info *rt) { if (atomic_dec_and_test(&rt->rt6i_ref)) { rt6_free_pcpu(rt); - dst_free(&rt->dst); + rt6_rcu_free(rt); } } @@ -846,7 +851,7 @@ add: *ins = rt; rt->rt6i_node = fn; atomic_inc(&rt->rt6i_ref); - inet6_rt_notify(RTM_NEWROUTE, rt, info); + inet6_rt_notify(RTM_NEWROUTE, rt, info, 0); info->nl_net->ipv6.rt6_stats->fib_rt_entries++; if (!(fn->fn_flags & RTN_RTINFO)) { @@ -872,7 +877,7 @@ add: rt->rt6i_node = fn; rt->dst.rt6_next = iter->dst.rt6_next; atomic_inc(&rt->rt6i_ref); - inet6_rt_notify(RTM_NEWROUTE, rt, info); + inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE); if (!(fn->fn_flags & RTN_RTINFO)) { info->nl_net->ipv6.rt6_stats->fib_route_nodes++; fn->fn_flags |= RTN_RTINFO; @@ -933,6 +938,10 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, int replace_required = 0; int sernum = fib6_new_sernum(info->nl_net); + if (WARN_ON_ONCE((rt->dst.flags & DST_NOCACHE) && + !atomic_read(&rt->dst.__refcnt))) + return -EINVAL; + if (info->nlh) { if (!(info->nlh->nlmsg_flags & NLM_F_CREATE)) allow_create = 0; @@ -1025,6 +1034,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, 
fib6_start_gc(info->nl_net, rt); if (!(rt->rt6i_flags & RTF_CACHE)) fib6_prune_clones(info->nl_net, pn); + rt->dst.flags &= ~DST_NOCACHE; } out: @@ -1049,7 +1059,8 @@ out: atomic_inc(&pn->leaf->rt6i_ref); } #endif - dst_free(&rt->dst); + if (!(rt->dst.flags & DST_NOCACHE)) + dst_free(&rt->dst); } return err; @@ -1060,7 +1071,8 @@ out: st_failure: if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) fib6_repair_tree(info->nl_net, fn); - dst_free(&rt->dst); + if (!(rt->dst.flags & DST_NOCACHE)) + dst_free(&rt->dst); return err; #endif } @@ -1410,7 +1422,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, fib6_purge_rt(rt, fn, net); - inet6_rt_notify(RTM_DELROUTE, rt, info); + inet6_rt_notify(RTM_DELROUTE, rt, info, 0); rt6_release(rt); } diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 4038c694ec03..3c7b9310b33f 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -404,13 +404,13 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, struct ipv6_tlv_tnl_enc_lim *tel; __u32 mtu; case ICMPV6_DEST_UNREACH: - net_warn_ratelimited("%s: Path to destination invalid or inactive!\n", - t->parms.name); + net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n", + t->parms.name); break; case ICMPV6_TIME_EXCEED: if (code == ICMPV6_EXC_HOPLIMIT) { - net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", - t->parms.name); + net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", + t->parms.name); } break; case ICMPV6_PARAMPROB: @@ -421,12 +421,12 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, if (teli && teli == be32_to_cpu(info) - 2) { tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; if (tel->encap_limit == 0) { - net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", - t->parms.name); + net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", + t->parms.name); } } else { - net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n", - t->parms.name); + net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n", + t->parms.name); } break; case ICMPV6_PKT_TOOBIG: @@ -634,20 +634,20 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, } if (!fl6->flowi6_mark) - dst = ip6_tnl_dst_check(tunnel); + dst = ip6_tnl_dst_get(tunnel); if (!dst) { - ndst = ip6_route_output(net, NULL, fl6); + dst = ip6_route_output(net, NULL, fl6); - if (ndst->error) + if (dst->error) goto tx_err_link_failure; - ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0); - if (IS_ERR(ndst)) { - err = PTR_ERR(ndst); - ndst = NULL; + dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0); + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + dst = NULL; goto tx_err_link_failure; } - dst = ndst; + ndst = dst; } tdev = dst->dev; @@ -702,12 +702,9 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, skb = new_skb; } - if (fl6->flowi6_mark) { - skb_dst_set(skb, dst); - ndst = NULL; - } else { - skb_dst_set_noref(skb, dst); - } + if (!fl6->flowi6_mark && ndst) + ip6_tnl_dst_set(tunnel, ndst); + skb_dst_set(skb, dst); proto = NEXTHDR_GRE; if (encap_limit >= 0) { @@ -762,14 +759,12 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, skb_set_inner_protocol(skb, protocol); ip6tunnel_xmit(NULL, skb, dev); - if (ndst) - ip6_tnl_dst_store(tunnel, ndst); return 0; tx_err_link_failure: stats->tx_carrier_errors++; dst_link_failure(skb); tx_err_dst_release: - dst_release(ndst); + dst_release(dst); return err; } @@ 
-1223,6 +1218,9 @@ static const struct net_device_ops ip6gre_netdev_ops = { static void ip6gre_dev_free(struct net_device *dev) { + struct ip6_tnl *t = netdev_priv(dev); + + ip6_tnl_dst_destroy(t); free_percpu(dev->tstats); free_netdev(dev); } @@ -1245,9 +1243,10 @@ static void ip6gre_tunnel_setup(struct net_device *dev) netif_keep_dst(dev); } -static int ip6gre_tunnel_init(struct net_device *dev) +static int ip6gre_tunnel_init_common(struct net_device *dev) { struct ip6_tnl *tunnel; + int ret; tunnel = netdev_priv(dev); @@ -1255,16 +1254,37 @@ static int ip6gre_tunnel_init(struct net_device *dev) tunnel->net = dev_net(dev); strcpy(tunnel->parms.name, dev->name); + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!dev->tstats) + return -ENOMEM; + + ret = ip6_tnl_dst_init(tunnel); + if (ret) { + free_percpu(dev->tstats); + dev->tstats = NULL; + return ret; + } + + return 0; +} + +static int ip6gre_tunnel_init(struct net_device *dev) +{ + struct ip6_tnl *tunnel; + int ret; + + ret = ip6gre_tunnel_init_common(dev); + if (ret) + return ret; + + tunnel = netdev_priv(dev); + memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr)); memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr)); if (ipv6_addr_any(&tunnel->parms.raddr)) dev->header_ops = &ip6gre_header_ops; - dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); - if (!dev->tstats) - return -ENOMEM; - return 0; } @@ -1460,19 +1480,16 @@ static void ip6gre_netlink_parms(struct nlattr *data[], static int ip6gre_tap_init(struct net_device *dev) { struct ip6_tnl *tunnel; + int ret; - tunnel = netdev_priv(dev); + ret = ip6gre_tunnel_init_common(dev); + if (ret) + return ret; - tunnel->dev = dev; - tunnel->net = dev_net(dev); - strcpy(tunnel->parms.name, dev->name); + tunnel = netdev_priv(dev); ip6gre_tnl_link_config(tunnel, 1); - dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); - if (!dev->tstats) - return -ENOMEM; - return 0; } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 26ea47930740..61d403ee1031 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -376,6 +376,9 @@ int ip6_forward(struct sk_buff *skb) if (skb->pkt_type != PACKET_HOST) goto drop; + if (unlikely(skb->sk)) + goto drop; + if (skb_warn_if_lro(skb)) goto drop; @@ -586,20 +589,22 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb, frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr, &ipv6_hdr(skb)->saddr); + hroom = LL_RESERVED_SPACE(rt->dst.dev); if (skb_has_frag_list(skb)) { int first_len = skb_pagelen(skb); struct sk_buff *frag2; if (first_len - hlen > mtu || ((first_len - hlen) & 7) || - skb_cloned(skb)) + skb_cloned(skb) || + skb_headroom(skb) < (hroom + sizeof(struct frag_hdr))) goto slow_path; skb_walk_frags(skb, frag) { /* Correct geometry. */ if (frag->len > mtu || ((frag->len & 7) && frag->next) || - skb_headroom(frag) < hlen) + skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr))) goto slow_path_clean; /* Partially cloned skb? 
*/ @@ -616,8 +621,6 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb, err = 0; offset = 0; - frag = skb_shinfo(skb)->frag_list; - skb_frag_list_init(skb); /* BUILD HEADER */ *prevhdr = NEXTHDR_FRAGMENT; @@ -625,8 +628,11 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb, if (!tmp_hdr) { IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); - return -ENOMEM; + err = -ENOMEM; + goto fail; } + frag = skb_shinfo(skb)->frag_list; + skb_frag_list_init(skb); __skb_pull(skb, hlen); fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr)); @@ -723,7 +729,6 @@ slow_path: */ *prevhdr = NEXTHDR_FRAGMENT; - hroom = LL_RESERVED_SPACE(rt->dst.dev); troom = rt->dst.dev->needed_tailroom; /* diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index b0ab420612bc..eabffbb89795 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -126,36 +126,92 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev) * Locking : hash tables are protected by RCU and RTNL */ -struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t) +static void ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst, + struct dst_entry *dst) { - struct dst_entry *dst = t->dst_cache; + write_seqlock_bh(&idst->lock); + dst_release(rcu_dereference_protected( + idst->dst, + lockdep_is_held(&idst->lock.lock))); + if (dst) { + dst_hold(dst); + idst->cookie = rt6_get_cookie((struct rt6_info *)dst); + } else { + idst->cookie = 0; + } + rcu_assign_pointer(idst->dst, dst); + write_sequnlock_bh(&idst->lock); +} + +struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t) +{ + struct ip6_tnl_dst *idst; + struct dst_entry *dst; + unsigned int seq; + u32 cookie; - if (dst && dst->obsolete && - !dst->ops->check(dst, t->dst_cookie)) { - t->dst_cache = NULL; + idst = raw_cpu_ptr(t->dst_cache); + + rcu_read_lock(); + do { + seq = read_seqbegin(&idst->lock); + dst = rcu_dereference(idst->dst); + cookie = idst->cookie; + } while (read_seqretry(&idst->lock, seq)); + + if (dst && !atomic_inc_not_zero(&dst->__refcnt)) + dst = NULL; + rcu_read_unlock(); + + if (dst && dst->obsolete && !dst->ops->check(dst, cookie)) { + ip6_tnl_per_cpu_dst_set(idst, NULL); dst_release(dst); - return NULL; + dst = NULL; } - return dst; } -EXPORT_SYMBOL_GPL(ip6_tnl_dst_check); +EXPORT_SYMBOL_GPL(ip6_tnl_dst_get); void ip6_tnl_dst_reset(struct ip6_tnl *t) { - dst_release(t->dst_cache); - t->dst_cache = NULL; + int i; + + for_each_possible_cpu(i) + ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), NULL); } EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset); -void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst) +void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst) +{ + ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), dst); + +} +EXPORT_SYMBOL_GPL(ip6_tnl_dst_set); + +void ip6_tnl_dst_destroy(struct ip6_tnl *t) { - struct rt6_info *rt = (struct rt6_info *) dst; - t->dst_cookie = rt6_get_cookie(rt); - dst_release(t->dst_cache); - t->dst_cache = dst; + if (!t->dst_cache) + return; + + ip6_tnl_dst_reset(t); + free_percpu(t->dst_cache); } -EXPORT_SYMBOL_GPL(ip6_tnl_dst_store); +EXPORT_SYMBOL_GPL(ip6_tnl_dst_destroy); + +int ip6_tnl_dst_init(struct ip6_tnl *t) +{ + int i; + + t->dst_cache = alloc_percpu(struct ip6_tnl_dst); + if (!t->dst_cache) + return -ENOMEM; + + for_each_possible_cpu(i) + seqlock_init(&per_cpu_ptr(t->dst_cache, i)->lock); + + return 0; +} +EXPORT_SYMBOL_GPL(ip6_tnl_dst_init); /** * ip6_tnl_lookup - fetch tunnel matching the end-point addresses @@ -271,6 +327,9 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, 
struct ip6_tnl *t) static void ip6_dev_free(struct net_device *dev) { + struct ip6_tnl *t = netdev_priv(dev); + + ip6_tnl_dst_destroy(t); free_percpu(dev->tstats); free_netdev(dev); } @@ -510,14 +569,14 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, struct ipv6_tlv_tnl_enc_lim *tel; __u32 mtu; case ICMPV6_DEST_UNREACH: - net_warn_ratelimited("%s: Path to destination invalid or inactive!\n", - t->parms.name); + net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n", + t->parms.name); rel_msg = 1; break; case ICMPV6_TIME_EXCEED: if ((*code) == ICMPV6_EXC_HOPLIMIT) { - net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", - t->parms.name); + net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", + t->parms.name); rel_msg = 1; } break; @@ -529,13 +588,13 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, if (teli && teli == *info - 2) { tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; if (tel->encap_limit == 0) { - net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", - t->parms.name); + net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", + t->parms.name); rel_msg = 1; } } else { - net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n", - t->parms.name); + net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n", + t->parms.name); } break; case ICMPV6_PKT_TOOBIG: @@ -1010,23 +1069,23 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); neigh_release(neigh); } else if (!fl6->flowi6_mark) - dst = ip6_tnl_dst_check(t); + dst = ip6_tnl_dst_get(t); if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) goto tx_err_link_failure; if (!dst) { - ndst = ip6_route_output(net, NULL, fl6); + dst = ip6_route_output(net, NULL, fl6); - if (ndst->error) + if (dst->error) goto tx_err_link_failure; - ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0); - if (IS_ERR(ndst)) { - err = PTR_ERR(ndst); - ndst = NULL; + dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0); + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + dst = NULL; goto tx_err_link_failure; } - dst = ndst; + ndst = dst; } tdev = dst->dev; @@ -1072,12 +1131,11 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, consume_skb(skb); skb = new_skb; } - if (fl6->flowi6_mark) { - skb_dst_set(skb, dst); - ndst = NULL; - } else { - skb_dst_set_noref(skb, dst); - } + + if (!fl6->flowi6_mark && ndst) + ip6_tnl_dst_set(t, ndst); + skb_dst_set(skb, dst); + skb->transport_header = skb->network_header; proto = fl6->flowi6_proto; @@ -1101,14 +1159,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, ipv6h->saddr = fl6->saddr; ipv6h->daddr = fl6->daddr; ip6tunnel_xmit(NULL, skb, dev); - if (ndst) - ip6_tnl_dst_store(t, ndst); return 0; tx_err_link_failure: stats->tx_carrier_errors++; dst_link_failure(skb); tx_err_dst_release: - dst_release(ndst); + dst_release(dst); return err; } @@ -1573,12 +1629,21 @@ static inline int ip6_tnl_dev_init_gen(struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); + int ret; t->dev = dev; t->net = dev_net(dev); dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; + + ret = ip6_tnl_dst_init(t); + if (ret) { + free_percpu(dev->tstats); + dev->tstats = NULL; + return ret; + } + return 0; } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 53617d715188..968f31c01f89 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c 
@@ -142,6 +142,9 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev) struct net_device *loopback_dev = net->loopback_dev; int cpu; + if (dev == loopback_dev) + return; + for_each_possible_cpu(cpu) { struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu); struct rt6_info *rt; @@ -151,14 +154,12 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev) struct inet6_dev *rt_idev = rt->rt6i_idev; struct net_device *rt_dev = rt->dst.dev; - if (rt_idev && (rt_idev->dev == dev || !dev) && - rt_idev->dev != loopback_dev) { + if (rt_idev->dev == dev) { rt->rt6i_idev = in6_dev_get(loopback_dev); in6_dev_put(rt_idev); } - if (rt_dev && (rt_dev == dev || !dev) && - rt_dev != loopback_dev) { + if (rt_dev == dev) { rt->dst.dev = loopback_dev; dev_hold(rt->dst.dev); dev_put(rt_dev); @@ -247,12 +248,6 @@ static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk, { } -static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst, - unsigned long old) -{ - return NULL; -} - static struct dst_ops ip6_dst_blackhole_ops = { .family = AF_INET6, .destroy = ip6_dst_destroy, @@ -261,7 +256,7 @@ static struct dst_ops ip6_dst_blackhole_ops = { .default_advmss = ip6_default_advmss, .update_pmtu = ip6_rt_blackhole_update_pmtu, .redirect = ip6_rt_blackhole_redirect, - .cow_metrics = ip6_rt_blackhole_cow_metrics, + .cow_metrics = dst_cow_metrics_generic, .neigh_lookup = ip6_neigh_lookup, }; @@ -318,6 +313,15 @@ static const struct rt6_info ip6_blk_hole_entry_template = { #endif +static void rt6_info_init(struct rt6_info *rt) +{ + struct dst_entry *dst = &rt->dst; + + memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); + INIT_LIST_HEAD(&rt->rt6i_siblings); + INIT_LIST_HEAD(&rt->rt6i_uncached); +} + /* allocate dst with ip6_dst_ops */ static struct rt6_info *__ip6_dst_alloc(struct net *net, struct net_device *dev, @@ -326,13 +330,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net, struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0, DST_OBSOLETE_FORCE_CHK, flags); - if (rt) { - struct dst_entry *dst = &rt->dst; + if (rt) + rt6_info_init(rt); - memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); - INIT_LIST_HEAD(&rt->rt6i_siblings); - INIT_LIST_HEAD(&rt->rt6i_uncached); - } return rt; } @@ -1193,7 +1193,8 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk, fl6->flowi6_iif = LOOPBACK_IFINDEX; - if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr)) + if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) || + fl6->flowi6_oif) flags |= RT6_LOOKUP_F_IFACE; if (!ipv6_addr_any(&fl6->saddr)) @@ -1212,24 +1213,20 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0); if (rt) { - new = &rt->dst; - - memset(new + 1, 0, sizeof(*rt) - sizeof(*new)); + rt6_info_init(rt); + new = &rt->dst; new->__use = 1; new->input = dst_discard; new->output = dst_discard_sk; - if (dst_metrics_read_only(&ort->dst)) - new->_metrics = ort->dst._metrics; - else - dst_copy_metrics(new, &ort->dst); + dst_copy_metrics(new, &ort->dst); rt->rt6i_idev = ort->rt6i_idev; if (rt->rt6i_idev) in6_dev_hold(rt->rt6i_idev); rt->rt6i_gateway = ort->rt6i_gateway; - rt->rt6i_flags = ort->rt6i_flags; + rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU; rt->rt6i_metric = 0; memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key)); @@ -1322,8 +1319,7 @@ static void ip6_link_failure(struct sk_buff *skb) if (rt) { if 
(rt->rt6i_flags & RTF_CACHE) { dst_hold(&rt->dst); - if (ip6_del_rt(rt)) - dst_free(&rt->dst); + ip6_del_rt(rt); } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) { rt->rt6i_node->fn_sernum = -1; } @@ -1886,9 +1882,11 @@ int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret) rt->dst.input = ip6_pkt_prohibit; break; case RTN_THROW: + case RTN_UNREACHABLE: default: rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN - : -ENETUNREACH; + : (cfg->fc_type == RTN_UNREACHABLE) + ? -EHOSTUNREACH : -ENETUNREACH; rt->dst.output = ip6_pkt_discard_out; rt->dst.input = ip6_pkt_discard; break; @@ -2028,7 +2026,8 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info) struct fib6_table *table; struct net *net = dev_net(rt->dst.dev); - if (rt == net->ipv6.ip6_null_entry) { + if (rt == net->ipv6.ip6_null_entry || + rt->dst.flags & DST_NOCACHE) { err = -ENOENT; goto out; } @@ -2515,6 +2514,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, rt->rt6i_dst.addr = *addr; rt->rt6i_dst.plen = 128; rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); + rt->dst.flags |= DST_NOCACHE; atomic_set(&rt->dst.__refcnt, 1); @@ -2618,7 +2618,8 @@ void rt6_ifdown(struct net *net, struct net_device *dev) fib6_clean_all(net, fib6_ifdown, &adn); icmp6_clean_all(fib6_ifdown, &adn); - rt6_uncached_list_flush_dev(net, dev); + if (dev) + rt6_uncached_list_flush_dev(net, dev); } struct rt6_mtu_change_arg { @@ -3303,7 +3304,8 @@ errout: return err; } -void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info) +void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info, + unsigned int nlm_flags) { struct sk_buff *skb; struct net *net = info->nl_net; @@ -3318,7 +3320,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info) goto errout; err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, - event, info->portid, seq, 0, 0, 0); + event, info->portid, seq, 0, 0, nlm_flags); if (err < 0) { /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 30caa289c5db..5cedfda4b241 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -37,6 +37,7 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif, memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_oif = oif; + fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF; memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr)); if (saddr) memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr)); diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index f6b090df3930..afca2eb4dfa7 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1319,7 +1319,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work) tunnel = container_of(work, struct l2tp_tunnel, del_work); sk = l2tp_tunnel_sock_lookup(tunnel); if (!sk) - return; + goto out; sock = sk->sk_socket; @@ -1341,6 +1341,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work) } l2tp_tunnel_sock_put(sk); +out: + l2tp_tunnel_dec_refcount(tunnel); } /* Create a socket for the tunnel, if one isn't set up by @@ -1636,8 +1638,13 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create); */ int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) { + l2tp_tunnel_inc_refcount(tunnel); l2tp_tunnel_closeall(tunnel); - return (false == queue_work(l2tp_wq, &tunnel->del_work)); + if (false == queue_work(l2tp_wq, &tunnel->del_work)) { + l2tp_tunnel_dec_refcount(tunnel); + return 1; + } + return 0; } EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); diff --git a/net/mac80211/cfg.c 
b/net/mac80211/cfg.c index 17b1fe961c5d..7a77a1470f25 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2474,6 +2474,7 @@ static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy, bss_conf->cqm_rssi_thold = rssi_thold; bss_conf->cqm_rssi_hyst = rssi_hyst; + sdata->u.mgd.last_cqm_event_signal = 0; /* tell the driver upon association, unless already associated */ if (sdata->u.mgd.associated && @@ -2518,15 +2519,17 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, continue; for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) { - if (~sdata->rc_rateidx_mcs_mask[i][j]) + if (~sdata->rc_rateidx_mcs_mask[i][j]) { sdata->rc_has_mcs_mask[i] = true; + break; + } + } - if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) + for (j = 0; j < NL80211_VHT_NSS_MAX; j++) { + if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) { sdata->rc_has_vht_mcs_mask[i] = true; - - if (sdata->rc_has_mcs_mask[i] && - sdata->rc_has_vht_mcs_mask[i]) break; + } } } diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index ced6bf3be8d6..1560c8482bcb 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -149,7 +149,7 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf, for (i = 0; i < NUM_IEEE80211_HW_FLAGS; i++) { if (test_bit(i, local->hw.flags)) - pos += scnprintf(pos, end - pos, "%s", + pos += scnprintf(pos, end - pos, "%s\n", hw_flag_names[i]); } diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 8ba583243509..3ed7ddfbf8e8 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -101,6 +101,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, * when it wakes up for the next time. */ set_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT); + ieee80211_clear_fast_xmit(sta); /* * This code races in the following way: diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 84e0e8c7fb23..7892eb8ed4c8 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1218,8 +1218,10 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, if (!tx->sta) info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; - else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) + else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) { info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; + ieee80211_check_fast_xmit(tx->sta); + } info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT; @@ -2451,7 +2453,8 @@ void ieee80211_check_fast_xmit(struct sta_info *sta) if (test_sta_flag(sta, WLAN_STA_PS_STA) || test_sta_flag(sta, WLAN_STA_PS_DRIVER) || - test_sta_flag(sta, WLAN_STA_PS_DELIVER)) + test_sta_flag(sta, WLAN_STA_PS_DELIVER) || + test_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT)) goto out; if (sdata->noack_map) diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 675d12c69e32..a5d41dfa9f05 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -107,12 +107,17 @@ EXPORT_SYMBOL(nf_log_register); void nf_log_unregister(struct nf_logger *logger) { + const struct nf_logger *log; int i; mutex_lock(&nf_log_mutex); - for (i = 0; i < NFPROTO_NUMPROTO; i++) - RCU_INIT_POINTER(loggers[i][logger->type], NULL); + for (i = 0; i < NFPROTO_NUMPROTO; i++) { + log = nft_log_dereference(loggers[i][logger->type]); + if (log == logger) + RCU_INIT_POINTER(loggers[i][logger->type], NULL); + } mutex_unlock(&nf_log_mutex); + synchronize_rcu(); } EXPORT_SYMBOL(nf_log_unregister); diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 66def315eb56..9c8fab00164b 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -619,6 +619,13 
@@ struct nft_xt { static struct nft_expr_type nft_match_type; +static bool nft_match_cmp(const struct xt_match *match, + const char *name, u32 rev, u32 family) +{ + return strcmp(match->name, name) == 0 && match->revision == rev && + (match->family == NFPROTO_UNSPEC || match->family == family); +} + static const struct nft_expr_ops * nft_match_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) @@ -626,7 +633,7 @@ nft_match_select_ops(const struct nft_ctx *ctx, struct nft_xt *nft_match; struct xt_match *match; char *mt_name; - __u32 rev, family; + u32 rev, family; if (tb[NFTA_MATCH_NAME] == NULL || tb[NFTA_MATCH_REV] == NULL || @@ -641,8 +648,7 @@ nft_match_select_ops(const struct nft_ctx *ctx, list_for_each_entry(nft_match, &nft_match_list, head) { struct xt_match *match = nft_match->ops.data; - if (strcmp(match->name, mt_name) == 0 && - match->revision == rev && match->family == family) { + if (nft_match_cmp(match, mt_name, rev, family)) { if (!try_module_get(match->me)) return ERR_PTR(-ENOENT); @@ -693,6 +699,13 @@ static LIST_HEAD(nft_target_list); static struct nft_expr_type nft_target_type; +static bool nft_target_cmp(const struct xt_target *tg, + const char *name, u32 rev, u32 family) +{ + return strcmp(tg->name, name) == 0 && tg->revision == rev && + (tg->family == NFPROTO_UNSPEC || tg->family == family); +} + static const struct nft_expr_ops * nft_target_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) @@ -700,7 +713,7 @@ nft_target_select_ops(const struct nft_ctx *ctx, struct nft_xt *nft_target; struct xt_target *target; char *tg_name; - __u32 rev, family; + u32 rev, family; if (tb[NFTA_TARGET_NAME] == NULL || tb[NFTA_TARGET_REV] == NULL || @@ -715,8 +728,7 @@ nft_target_select_ops(const struct nft_ctx *ctx, list_for_each_entry(nft_target, &nft_target_list, head) { struct xt_target *target = nft_target->ops.data; - if (strcmp(target->name, tg_name) == 0 && - target->revision == rev && target->family == family) { + if (nft_target_cmp(target, tg_name, rev, family)) { if (!try_module_get(target->me)) return ERR_PTR(-ENOENT); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 7f86d3b55060..0a49a8c7c564 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -125,6 +125,24 @@ static inline u32 netlink_group_mask(u32 group) return group ? 
1 << (group - 1) : 0; } +static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb, + gfp_t gfp_mask) +{ + unsigned int len = skb_end_offset(skb); + struct sk_buff *new; + + new = alloc_skb(len, gfp_mask); + if (new == NULL) + return NULL; + + NETLINK_CB(new).portid = NETLINK_CB(skb).portid; + NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group; + NETLINK_CB(new).creds = NETLINK_CB(skb).creds; + + memcpy(skb_put(new, len), skb->data, len); + return new; +} + int netlink_add_tap(struct netlink_tap *nt) { if (unlikely(nt->dev->type != ARPHRD_NETLINK)) @@ -206,7 +224,11 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb, int ret = -ENOMEM; dev_hold(dev); - nskb = skb_clone(skb, GFP_ATOMIC); + + if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head)) + nskb = netlink_to_full_skb(skb, GFP_ATOMIC); + else + nskb = skb_clone(skb, GFP_ATOMIC); if (nskb) { nskb->dev = dev; nskb->protocol = htons((u16) sk->sk_protocol); @@ -279,11 +301,6 @@ static void netlink_rcv_wake(struct sock *sk) } #ifdef CONFIG_NETLINK_MMAP -static bool netlink_skb_is_mmaped(const struct sk_buff *skb) -{ - return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED; -} - static bool netlink_rx_is_mmaped(struct sock *sk) { return nlk_sk(sk)->rx_ring.pg_vec != NULL; @@ -846,7 +863,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb) } #else /* CONFIG_NETLINK_MMAP */ -#define netlink_skb_is_mmaped(skb) false #define netlink_rx_is_mmaped(sk) false #define netlink_tx_is_mmaped(sk) false #define netlink_mmap sock_no_mmap @@ -1094,8 +1110,8 @@ static int netlink_insert(struct sock *sk, u32 portid) lock_sock(sk); - err = -EBUSY; - if (nlk_sk(sk)->portid) + err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY; + if (nlk_sk(sk)->bound) goto err; err = -ENOMEM; @@ -1115,10 +1131,14 @@ static int netlink_insert(struct sock *sk, u32 portid) err = -EOVERFLOW; if (err == -EEXIST) err = -EADDRINUSE; - nlk_sk(sk)->portid = 0; sock_put(sk); + goto err; } + /* We need to ensure that the socket is hashed and visible. */ + smp_wmb(); + nlk_sk(sk)->bound = portid; + err: release_sock(sk); return err; @@ -1503,6 +1523,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; int err; long unsigned int groups = nladdr->nl_groups; + bool bound; if (addr_len < sizeof(struct sockaddr_nl)) return -EINVAL; @@ -1519,9 +1540,14 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, return err; } - if (nlk->portid) + bound = nlk->bound; + if (bound) { + /* Ensure nlk->portid is up-to-date. */ + smp_rmb(); + if (nladdr->nl_pid != nlk->portid) return -EINVAL; + } if (nlk->netlink_bind && groups) { int group; @@ -1537,7 +1563,10 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, } } - if (!nlk->portid) { + /* No need for barriers here as we return to user-space without + * using any of the bound attributes. + */ + if (!bound) { err = nladdr->nl_pid ? netlink_insert(sk, nladdr->nl_pid) : netlink_autobind(sock); @@ -1585,7 +1614,10 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr, !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) return -EPERM; - if (!nlk->portid) + /* No need for barriers here as we return to user-space without + * using any of the bound attributes. 
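The nlk->bound changes above are a publish/observe ordering fix: netlink_insert() makes the socket hashed and its portid visible first and only then sets bound behind an smp_wmb(), while readers that observe bound issue an smp_rmb() before trusting portid. A stand-alone C11 sketch of the same ordering, using release/acquire atomics in place of the kernel barriers (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct nl_sock_like {
	uint32_t portid;	/* written before 'bound' is set */
	atomic_bool bound;	/* plays the role of nlk->bound  */
};

/* Writer: fill in portid, then set the flag with release semantics
 * (the analogue of smp_wmb() before the plain store in the patch). */
static void publish(struct nl_sock_like *s, uint32_t portid)
{
	s->portid = portid;
	atomic_store_explicit(&s->bound, true, memory_order_release);
}

/* Reader: only trust portid after seeing 'bound' with acquire semantics
 * (the analogue of smp_rmb() after reading nlk->bound). */
static bool observe(struct nl_sock_like *s, uint32_t *portid)
{
	if (!atomic_load_explicit(&s->bound, memory_order_acquire))
		return false;
	*portid = s->portid;
	return true;
}

int main(void)
{
	struct nl_sock_like s = { .portid = 0 };
	uint32_t portid;

	atomic_init(&s.bound, false);
	publish(&s, 4242);
	if (observe(&s, &portid))
		printf("bound to portid %u\n", (unsigned)portid);
	return 0;
}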
+ */ + if (!nlk->bound) err = netlink_autobind(sock); if (err == 0) { @@ -2426,10 +2458,13 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) dst_group = nlk->dst_group; } - if (!nlk->portid) { + if (!nlk->bound) { err = netlink_autobind(sock); if (err) goto out; + } else { + /* Ensure nlk is hashed and visible. */ + smp_rmb(); } /* It's a really convoluted way for userland to ask for mmaped @@ -2750,6 +2785,7 @@ static int netlink_dump(struct sock *sk) struct sk_buff *skb = NULL; struct nlmsghdr *nlh; int len, err = -ENOBUFS; + int alloc_min_size; int alloc_size; mutex_lock(nlk->cb_mutex); @@ -2758,9 +2794,6 @@ static int netlink_dump(struct sock *sk) goto errout_skb; } - cb = &nlk->cb; - alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE); - if (!netlink_rx_is_mmaped(sk) && atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) goto errout_skb; @@ -2770,23 +2803,35 @@ static int netlink_dump(struct sock *sk) * to reduce number of system calls on dump operations, if user * ever provided a big enough buffer. */ - if (alloc_size < nlk->max_recvmsg_len) { - skb = netlink_alloc_skb(sk, - nlk->max_recvmsg_len, - nlk->portid, + cb = &nlk->cb; + alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE); + + if (alloc_min_size < nlk->max_recvmsg_len) { + alloc_size = nlk->max_recvmsg_len; + skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); - /* available room should be exact amount to avoid MSG_TRUNC */ - if (skb) - skb_reserve(skb, skb_tailroom(skb) - - nlk->max_recvmsg_len); } - if (!skb) + if (!skb) { + alloc_size = alloc_min_size; skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, GFP_KERNEL); + } if (!skb) goto errout_skb; + + /* Trim skb to allocated size. User is expected to provide buffer as + * large as max(min_dump_alloc, 16KiB (mac_recvmsg_len capped at + * netlink_recvmsg())). dump will pack as many smaller messages as + * could fit within the allocated skb. skb is typically allocated + * with larger space than required (could be as much as near 2x the + * requested size with align to next power of 2 approach). Allowing + * dump to use the excess space makes it difficult for a user to have a + * reasonable static buffer based on the expected largest dump of a + * single netdev. The outcome is MSG_TRUNC error. 
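The skb_reserve() call that follows the comment above works because reserving (current tailroom - alloc_size) bytes leaves exactly alloc_size bytes of tailroom for the dump, however much the allocator over-provisioned. A toy illustration of that arithmetic with a plain struct (not the sk_buff API):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct toy_buf {
	size_t capacity;	/* what the allocator actually gave us */
	size_t reserved;	/* headroom we gave away               */
	size_t used;		/* bytes written so far                */
};

static size_t tailroom(const struct toy_buf *b)
{
	return b->capacity - b->reserved - b->used;
}

static void reserve(struct toy_buf *b, size_t len)
{
	assert(len <= tailroom(b));
	b->reserved += len;
}

int main(void)
{
	struct toy_buf b = { .capacity = 32768 };	/* rounded-up allocation  */
	size_t wanted = 20000;				/* size the dump asked for */

	reserve(&b, tailroom(&b) - wanted);		/* the skb_reserve() trick */
	printf("tailroom now %zu (== wanted)\n", tailroom(&b));
	return 0;
}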
+ */ + skb_reserve(skb, skb_tailroom(skb) - alloc_size); netlink_skb_set_owner_r(skb, sk); len = cb->dump(skb, cb); diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h index 89008405d6b4..14437d9b1965 100644 --- a/net/netlink/af_netlink.h +++ b/net/netlink/af_netlink.h @@ -35,6 +35,7 @@ struct netlink_sock { unsigned long state; size_t max_recvmsg_len; wait_queue_head_t wait; + bool bound; bool cb_running; struct netlink_callback cb; struct mutex *cb_mutex; @@ -59,6 +60,15 @@ static inline struct netlink_sock *nlk_sk(struct sock *sk) return container_of(sk, struct netlink_sock, sk); } +static inline bool netlink_skb_is_mmaped(const struct sk_buff *skb) +{ +#ifdef CONFIG_NETLINK_MMAP + return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED; +#else + return false; +#endif /* CONFIG_NETLINK_MMAP */ +} + struct netlink_table { struct rhashtable hash; struct hlist_head mc_list; diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig index 2a071f470d57..d143aa9f6654 100644 --- a/net/openvswitch/Kconfig +++ b/net/openvswitch/Kconfig @@ -5,7 +5,8 @@ config OPENVSWITCH tristate "Open vSwitch" depends on INET - depends on (!NF_CONNTRACK || NF_CONNTRACK) + depends on !NF_CONNTRACK || \ + (NF_CONNTRACK && (!NF_DEFRAG_IPV6 || NF_DEFRAG_IPV6)) select LIBCRC32C select MPLS select NET_MPLS_GSO diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 315f5330b6e5..c6a39bf2c3b9 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -684,7 +684,7 @@ static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru, { if (skb_network_offset(skb) > MAX_L2_LEN) { OVS_NLERR(1, "L2 header too long to fragment"); - return; + goto err; } if (ethertype == htons(ETH_P_IP)) { @@ -708,8 +708,7 @@ static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru, struct rt6_info ovs_rt; if (!v6ops) { - kfree_skb(skb); - return; + goto err; } prepare_frag(vport, skb); @@ -728,8 +727,12 @@ static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru, WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.", ovs_vport_name(vport), ntohs(ethertype), mru, vport->dev->mtu); - kfree_skb(skb); + goto err; } + + return; +err: + kfree_skb(skb); } static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port, @@ -968,7 +971,7 @@ static int execute_masked_set_action(struct sk_buff *skb, case OVS_KEY_ATTR_CT_STATE: case OVS_KEY_ATTR_CT_ZONE: case OVS_KEY_ATTR_CT_MARK: - case OVS_KEY_ATTR_CT_LABEL: + case OVS_KEY_ATTR_CT_LABELS: err = -EINVAL; break; } @@ -1099,6 +1102,12 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, break; case OVS_ACTION_ATTR_CT: + if (!is_flow_key_valid(key)) { + err = ovs_flow_key_update(skb, key); + if (err) + return err; + } + err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key, nla_data(a)); diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index e8e524ad8a01..80bf702715bb 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -37,9 +37,9 @@ struct md_mark { }; /* Metadata label for masked write to conntrack label. */ -struct md_label { - struct ovs_key_ct_label value; - struct ovs_key_ct_label mask; +struct md_labels { + struct ovs_key_ct_labels value; + struct ovs_key_ct_labels mask; }; /* Conntrack action context for execution. 
*/ @@ -47,10 +47,10 @@ struct ovs_conntrack_info { struct nf_conntrack_helper *helper; struct nf_conntrack_zone zone; struct nf_conn *ct; - u32 flags; + u8 commit : 1; u16 family; struct md_mark mark; - struct md_label label; + struct md_labels labels; }; static u16 key_to_nfproto(const struct sw_flow_key *key) @@ -109,21 +109,21 @@ static u32 ovs_ct_get_mark(const struct nf_conn *ct) #endif } -static void ovs_ct_get_label(const struct nf_conn *ct, - struct ovs_key_ct_label *label) +static void ovs_ct_get_labels(const struct nf_conn *ct, + struct ovs_key_ct_labels *labels) { struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL; if (cl) { size_t len = cl->words * sizeof(long); - if (len > OVS_CT_LABEL_LEN) - len = OVS_CT_LABEL_LEN; - else if (len < OVS_CT_LABEL_LEN) - memset(label, 0, OVS_CT_LABEL_LEN); - memcpy(label, cl->bits, len); + if (len > OVS_CT_LABELS_LEN) + len = OVS_CT_LABELS_LEN; + else if (len < OVS_CT_LABELS_LEN) + memset(labels, 0, OVS_CT_LABELS_LEN); + memcpy(labels, cl->bits, len); } else { - memset(label, 0, OVS_CT_LABEL_LEN); + memset(labels, 0, OVS_CT_LABELS_LEN); } } @@ -134,7 +134,7 @@ static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state, key->ct.state = state; key->ct.zone = zone->id; key->ct.mark = ovs_ct_get_mark(ct); - ovs_ct_get_label(ct, &key->ct.label); + ovs_ct_get_labels(ct, &key->ct.labels); } /* Update 'key' based on skb->nfct. If 'post_ct' is true, then OVS has @@ -167,7 +167,7 @@ void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key) int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb) { - if (nla_put_u8(skb, OVS_KEY_ATTR_CT_STATE, key->ct.state)) + if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, key->ct.state)) return -EMSGSIZE; if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) && @@ -179,8 +179,8 @@ int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb) return -EMSGSIZE; if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && - nla_put(skb, OVS_KEY_ATTR_CT_LABEL, sizeof(key->ct.label), - &key->ct.label)) + nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(key->ct.labels), + &key->ct.labels)) return -EMSGSIZE; return 0; @@ -213,9 +213,9 @@ static int ovs_ct_set_mark(struct sk_buff *skb, struct sw_flow_key *key, #endif } -static int ovs_ct_set_label(struct sk_buff *skb, struct sw_flow_key *key, - const struct ovs_key_ct_label *label, - const struct ovs_key_ct_label *mask) +static int ovs_ct_set_labels(struct sk_buff *skb, struct sw_flow_key *key, + const struct ovs_key_ct_labels *labels, + const struct ovs_key_ct_labels *mask) { enum ip_conntrack_info ctinfo; struct nf_conn_labels *cl; @@ -235,15 +235,15 @@ static int ovs_ct_set_label(struct sk_buff *skb, struct sw_flow_key *key, nf_ct_labels_ext_add(ct); cl = nf_ct_labels_find(ct); } - if (!cl || cl->words * sizeof(long) < OVS_CT_LABEL_LEN) + if (!cl || cl->words * sizeof(long) < OVS_CT_LABELS_LEN) return -ENOSPC; - err = nf_connlabels_replace(ct, (u32 *)label, (u32 *)mask, - OVS_CT_LABEL_LEN / sizeof(u32)); + err = nf_connlabels_replace(ct, (u32 *)labels, (u32 *)mask, + OVS_CT_LABELS_LEN / sizeof(u32)); if (err) return err; - ovs_ct_get_label(ct, &key->ct.label); + ovs_ct_get_labels(ct, &key->ct.labels); return 0; } @@ -275,13 +275,15 @@ static int ovs_ct_helper(struct sk_buff *skb, u16 proto) case NFPROTO_IPV6: { u8 nexthdr = ipv6_hdr(skb)->nexthdr; __be16 frag_off; + int ofs; - protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), - &nexthdr, &frag_off); - if (protoff < 0 || (frag_off & htons(~0x7)) != 0) { + ofs = ipv6_skip_exthdr(skb, sizeof(struct 
ipv6hdr), &nexthdr, + &frag_off); + if (ofs < 0 || (frag_off & htons(~0x7)) != 0) { pr_debug("proto header not found\n"); return NF_ACCEPT; } + protoff = ofs; break; } default: @@ -463,12 +465,12 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key, return 0; } -static bool label_nonzero(const struct ovs_key_ct_label *label) +static bool labels_nonzero(const struct ovs_key_ct_labels *labels) { size_t i; - for (i = 0; i < sizeof(*label); i++) - if (label->ct_label[i]) + for (i = 0; i < sizeof(*labels); i++) + if (labels->ct_labels[i]) return true; return false; @@ -491,7 +493,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb, return err; } - if (info->flags & OVS_CT_F_COMMIT) + if (info->commit) err = ovs_ct_commit(net, key, info, skb); else err = ovs_ct_lookup(net, key, info, skb); @@ -504,9 +506,9 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb, if (err) goto err; } - if (label_nonzero(&info->label.mask)) - err = ovs_ct_set_label(skb, key, &info->label.value, - &info->label.mask); + if (labels_nonzero(&info->labels.mask)) + err = ovs_ct_set_labels(skb, key, &info->labels.value, + &info->labels.mask); err: skb_push(skb, nh_ofs); return err; @@ -537,14 +539,13 @@ static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name, } static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = { - [OVS_CT_ATTR_FLAGS] = { .minlen = sizeof(u32), - .maxlen = sizeof(u32) }, + [OVS_CT_ATTR_COMMIT] = { .minlen = 0, .maxlen = 0 }, [OVS_CT_ATTR_ZONE] = { .minlen = sizeof(u16), .maxlen = sizeof(u16) }, [OVS_CT_ATTR_MARK] = { .minlen = sizeof(struct md_mark), .maxlen = sizeof(struct md_mark) }, - [OVS_CT_ATTR_LABEL] = { .minlen = sizeof(struct md_label), - .maxlen = sizeof(struct md_label) }, + [OVS_CT_ATTR_LABELS] = { .minlen = sizeof(struct md_labels), + .maxlen = sizeof(struct md_labels) }, [OVS_CT_ATTR_HELPER] = { .minlen = 1, .maxlen = NF_CT_HELPER_NAME_LEN } }; @@ -574,8 +575,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, } switch (type) { - case OVS_CT_ATTR_FLAGS: - info->flags = nla_get_u32(a); + case OVS_CT_ATTR_COMMIT: + info->commit = true; break; #ifdef CONFIG_NF_CONNTRACK_ZONES case OVS_CT_ATTR_ZONE: @@ -591,10 +592,10 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, } #endif #ifdef CONFIG_NF_CONNTRACK_LABELS - case OVS_CT_ATTR_LABEL: { - struct md_label *label = nla_data(a); + case OVS_CT_ATTR_LABELS: { + struct md_labels *labels = nla_data(a); - info->label = *label; + info->labels = *labels; break; } #endif @@ -631,7 +632,7 @@ bool ovs_ct_verify(struct net *net, enum ovs_key_attr attr) attr == OVS_KEY_ATTR_CT_MARK) return true; if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && - attr == OVS_KEY_ATTR_CT_LABEL) { + attr == OVS_KEY_ATTR_CT_LABELS) { struct ovs_net *ovs_net = net_generic(net, ovs_net_id); return ovs_net->xt_label; @@ -699,7 +700,7 @@ int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info, if (!start) return -EMSGSIZE; - if (nla_put_u32(skb, OVS_CT_ATTR_FLAGS, ct_info->flags)) + if (ct_info->commit && nla_put_flag(skb, OVS_CT_ATTR_COMMIT)) return -EMSGSIZE; if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) && nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id)) @@ -709,8 +710,8 @@ int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info, &ct_info->mark)) return -EMSGSIZE; if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && - nla_put(skb, OVS_CT_ATTR_LABEL, sizeof(ct_info->label), - &ct_info->label)) + nla_put(skb, OVS_CT_ATTR_LABELS, 
sizeof(ct_info->labels), + &ct_info->labels)) return -EMSGSIZE; if (ct_info->helper) { if (nla_put_string(skb, OVS_CT_ATTR_HELPER, @@ -735,7 +736,7 @@ void ovs_ct_free_action(const struct nlattr *a) void ovs_ct_init(struct net *net) { - unsigned int n_bits = sizeof(struct ovs_key_ct_label) * BITS_PER_BYTE; + unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE; struct ovs_net *ovs_net = net_generic(net, ovs_net_id); if (nf_connlabels_get(net, n_bits)) { diff --git a/net/openvswitch/conntrack.h b/net/openvswitch/conntrack.h index 43f5dd7a5577..da8714942c95 100644 --- a/net/openvswitch/conntrack.h +++ b/net/openvswitch/conntrack.h @@ -34,6 +34,13 @@ int ovs_ct_execute(struct net *, struct sk_buff *, struct sw_flow_key *, void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key); int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb); void ovs_ct_free_action(const struct nlattr *a); + +static inline bool ovs_ct_state_supported(u32 state) +{ + return !(state & ~(OVS_CS_F_NEW | OVS_CS_F_ESTABLISHED | + OVS_CS_F_RELATED | OVS_CS_F_REPLY_DIR | + OVS_CS_F_INVALID | OVS_CS_F_TRACKED)); +} #else #include <linux/errno.h> @@ -46,6 +53,11 @@ static inline bool ovs_ct_verify(struct net *net, int attr) return false; } +static inline bool ovs_ct_state_supported(u32 state) +{ + return false; +} + static inline int ovs_ct_copy_action(struct net *net, const struct nlattr *nla, const struct sw_flow_key *key, struct sw_flow_actions **acts, bool log) @@ -72,7 +84,7 @@ static inline void ovs_ct_fill_key(const struct sk_buff *skb, key->ct.state = 0; key->ct.zone = 0; key->ct.mark = 0; - memset(&key->ct.label, 0, sizeof(key->ct.label)); + memset(&key->ct.labels, 0, sizeof(key->ct.labels)); } static inline int ovs_ct_put_key(const struct sw_flow_key *key, diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 6fbd2decb19e..b816ff871528 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -952,7 +952,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) if (error) goto err_kfree_flow; - ovs_flow_mask_key(&new_flow->key, &key, &mask); + ovs_flow_mask_key(&new_flow->key, &key, true, &mask); /* Extract flow identifier. */ error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID], @@ -1080,7 +1080,7 @@ static struct sw_flow_actions *get_flow_actions(struct net *net, struct sw_flow_key masked_key; int error; - ovs_flow_mask_key(&masked_key, key, mask); + ovs_flow_mask_key(&masked_key, key, true, mask); error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log); if (error) { OVS_NLERR(log, diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index fe527d2dd4b7..8cfa15a08668 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h @@ -116,7 +116,7 @@ struct sw_flow_key { u16 zone; u32 mark; u8 state; - struct ovs_key_ct_label label; + struct ovs_key_ct_labels labels; } ct; } __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. 
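The ovs_ct_state_supported() helper added in the conntrack.h hunk above is a plain whitelist test: any ct_state bit outside the set the datapath understands invalidates the whole flow key (as used later in metadata_from_nlattrs()). A minimal standalone sketch of the same pattern; the CS_F_* values below are illustrative placeholders, not the real OVS_CS_F_* UAPI constants:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the OVS_CS_F_* flags. */
    #define CS_F_NEW         (1u << 0)
    #define CS_F_ESTABLISHED (1u << 1)
    #define CS_F_RELATED     (1u << 2)
    #define CS_F_REPLY_DIR   (1u << 3)
    #define CS_F_INVALID     (1u << 4)
    #define CS_F_TRACKED     (1u << 5)

    #define CS_SUPPORTED_MASK (CS_F_NEW | CS_F_ESTABLISHED | CS_F_RELATED | \
                               CS_F_REPLY_DIR | CS_F_INVALID | CS_F_TRACKED)

    /* Reject any state word that sets a bit we do not understand. */
    static bool ct_state_supported(uint32_t state)
    {
        return !(state & ~CS_SUPPORTED_MASK);
    }

    int main(void)
    {
        printf("%d\n", ct_state_supported(CS_F_NEW | CS_F_TRACKED)); /* 1 */
        printf("%d\n", ct_state_supported(1u << 31));                /* 0 */
        return 0;
    }
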
*/ diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index c92d6a262bc5..171a691f1c32 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -57,6 +57,7 @@ struct ovs_len_tbl { }; #define OVS_ATTR_NESTED -1 +#define OVS_ATTR_VARIABLE -2 static void update_range(struct sw_flow_match *match, size_t offset, size_t size, bool is_mask) @@ -290,10 +291,10 @@ size_t ovs_key_attr_size(void) + nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */ + nla_total_size(4) /* OVS_KEY_ATTR_DP_HASH */ + nla_total_size(4) /* OVS_KEY_ATTR_RECIRC_ID */ - + nla_total_size(1) /* OVS_KEY_ATTR_CT_STATE */ + + nla_total_size(4) /* OVS_KEY_ATTR_CT_STATE */ + nla_total_size(2) /* OVS_KEY_ATTR_CT_ZONE */ + nla_total_size(4) /* OVS_KEY_ATTR_CT_MARK */ - + nla_total_size(16) /* OVS_KEY_ATTR_CT_LABEL */ + + nla_total_size(16) /* OVS_KEY_ATTR_CT_LABELS */ + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */ + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ + nla_total_size(4) /* OVS_KEY_ATTR_VLAN */ @@ -304,6 +305,10 @@ size_t ovs_key_attr_size(void) + nla_total_size(28); /* OVS_KEY_ATTR_ND */ } +static const struct ovs_len_tbl ovs_vxlan_ext_key_lens[OVS_VXLAN_EXT_MAX + 1] = { + [OVS_VXLAN_EXT_GBP] = { .len = sizeof(u32) }, +}; + static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = { [OVS_TUNNEL_KEY_ATTR_ID] = { .len = sizeof(u64) }, [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = sizeof(u32) }, @@ -315,8 +320,9 @@ static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] [OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = sizeof(u16) }, [OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = sizeof(u16) }, [OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 }, - [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = OVS_ATTR_NESTED }, - [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = OVS_ATTR_NESTED }, + [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = OVS_ATTR_VARIABLE }, + [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = OVS_ATTR_NESTED, + .next = ovs_vxlan_ext_key_lens }, }; /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. 
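The length table above now carries two sentinels, OVS_ATTR_NESTED (-1) and OVS_ATTR_VARIABLE (-2), beside exact byte counts, and the check_attr_len() helper introduced in the next hunk accepts an attribute when its length matches exactly or when the table entry is one of the sentinels. A standalone model of that acceptance rule, with illustrative names and plain int sentinels:

    #include <stdbool.h>
    #include <stdio.h>

    /* Sentinels mirroring OVS_ATTR_NESTED / OVS_ATTR_VARIABLE. */
    #define ATTR_NESTED   (-1)
    #define ATTR_VARIABLE (-2)

    /* Accept exact-length attributes, or anything when the expected
     * length is a nested/variable sentinel. */
    static bool check_attr_len(unsigned int attr_len, int expected_len)
    {
        return (int)attr_len == expected_len ||
               expected_len == ATTR_NESTED ||
               expected_len == ATTR_VARIABLE;
    }

    int main(void)
    {
        printf("%d\n", check_attr_len(4, 4));             /* 1: exact match  */
        printf("%d\n", check_attr_len(7, 4));             /* 0: wrong length */
        printf("%d\n", check_attr_len(7, ATTR_VARIABLE)); /* 1: variable     */
        return 0;
    }
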
*/ @@ -343,12 +349,19 @@ static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { [OVS_KEY_ATTR_TUNNEL] = { .len = OVS_ATTR_NESTED, .next = ovs_tunnel_key_lens, }, [OVS_KEY_ATTR_MPLS] = { .len = sizeof(struct ovs_key_mpls) }, - [OVS_KEY_ATTR_CT_STATE] = { .len = sizeof(u8) }, + [OVS_KEY_ATTR_CT_STATE] = { .len = sizeof(u32) }, [OVS_KEY_ATTR_CT_ZONE] = { .len = sizeof(u16) }, [OVS_KEY_ATTR_CT_MARK] = { .len = sizeof(u32) }, - [OVS_KEY_ATTR_CT_LABEL] = { .len = sizeof(struct ovs_key_ct_label) }, + [OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) }, }; +static bool check_attr_len(unsigned int attr_len, unsigned int expected_len) +{ + return expected_len == attr_len || + expected_len == OVS_ATTR_NESTED || + expected_len == OVS_ATTR_VARIABLE; +} + static bool is_all_zero(const u8 *fp, size_t size) { int i; @@ -388,7 +401,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr, } expected_len = ovs_key_lens[type].len; - if (nla_len(nla) != expected_len && expected_len != OVS_ATTR_NESTED) { + if (!check_attr_len(nla_len(nla), expected_len)) { OVS_NLERR(log, "Key %d has unexpected len %d expected %d", type, nla_len(nla), expected_len); return -EINVAL; @@ -473,29 +486,50 @@ static int genev_tun_opt_from_nlattr(const struct nlattr *a, return 0; } -static const struct nla_policy vxlan_opt_policy[OVS_VXLAN_EXT_MAX + 1] = { - [OVS_VXLAN_EXT_GBP] = { .type = NLA_U32 }, -}; - -static int vxlan_tun_opt_from_nlattr(const struct nlattr *a, +static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr, struct sw_flow_match *match, bool is_mask, bool log) { - struct nlattr *tb[OVS_VXLAN_EXT_MAX+1]; + struct nlattr *a; + int rem; unsigned long opt_key_offset; struct vxlan_metadata opts; - int err; BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts)); - err = nla_parse_nested(tb, OVS_VXLAN_EXT_MAX, a, vxlan_opt_policy); - if (err < 0) - return err; - memset(&opts, 0, sizeof(opts)); + nla_for_each_nested(a, attr, rem) { + int type = nla_type(a); + + if (type > OVS_VXLAN_EXT_MAX) { + OVS_NLERR(log, "VXLAN extension %d out of range max %d", + type, OVS_VXLAN_EXT_MAX); + return -EINVAL; + } + + if (!check_attr_len(nla_len(a), + ovs_vxlan_ext_key_lens[type].len)) { + OVS_NLERR(log, "VXLAN extension %d has unexpected len %d expected %d", + type, nla_len(a), + ovs_vxlan_ext_key_lens[type].len); + return -EINVAL; + } - if (tb[OVS_VXLAN_EXT_GBP]) - opts.gbp = nla_get_u32(tb[OVS_VXLAN_EXT_GBP]); + switch (type) { + case OVS_VXLAN_EXT_GBP: + opts.gbp = nla_get_u32(a); + break; + default: + OVS_NLERR(log, "Unknown VXLAN extension attribute %d", + type); + return -EINVAL; + } + } + if (rem) { + OVS_NLERR(log, "VXLAN extension message has %d unknown bytes.", + rem); + return -EINVAL; + } if (!is_mask) SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false); @@ -528,8 +562,8 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr, return -EINVAL; } - if (ovs_tunnel_key_lens[type].len != nla_len(a) && - ovs_tunnel_key_lens[type].len != OVS_ATTR_NESTED) { + if (!check_attr_len(nla_len(a), + ovs_tunnel_key_lens[type].len)) { OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d", type, nla_len(a), ovs_tunnel_key_lens[type].len); return -EINVAL; @@ -780,7 +814,13 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match, if (*attrs & (1 << OVS_KEY_ATTR_CT_STATE) && ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) { - u8 ct_state = nla_get_u8(a[OVS_KEY_ATTR_CT_STATE]); + u32 ct_state = nla_get_u32(a[OVS_KEY_ATTR_CT_STATE]); + + if (!is_mask && 
!ovs_ct_state_supported(ct_state)) { + OVS_NLERR(log, "ct_state flags %08x unsupported", + ct_state); + return -EINVAL; + } SW_FLOW_KEY_PUT(match, ct.state, ct_state, is_mask); *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE); @@ -799,14 +839,14 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match, SW_FLOW_KEY_PUT(match, ct.mark, mark, is_mask); *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_MARK); } - if (*attrs & (1 << OVS_KEY_ATTR_CT_LABEL) && - ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABEL)) { - const struct ovs_key_ct_label *cl; + if (*attrs & (1 << OVS_KEY_ATTR_CT_LABELS) && + ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABELS)) { + const struct ovs_key_ct_labels *cl; - cl = nla_data(a[OVS_KEY_ATTR_CT_LABEL]); - SW_FLOW_KEY_MEMCPY(match, ct.label, cl->ct_label, + cl = nla_data(a[OVS_KEY_ATTR_CT_LABELS]); + SW_FLOW_KEY_MEMCPY(match, ct.labels, cl->ct_labels, sizeof(*cl), is_mask); - *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABEL); + *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABELS); } return 0; } @@ -1052,10 +1092,13 @@ static void nlattr_set(struct nlattr *attr, u8 val, /* The nlattr stream should already have been validated */ nla_for_each_nested(nla, attr, rem) { - if (tbl && tbl[nla_type(nla)].len == OVS_ATTR_NESTED) - nlattr_set(nla, val, tbl[nla_type(nla)].next); - else + if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) { + if (tbl[nla_type(nla)].next) + tbl = tbl[nla_type(nla)].next; + nlattr_set(nla, val, tbl); + } else { memset(nla_data(nla), val, nla_len(nla)); + } } } @@ -1922,8 +1965,7 @@ static int validate_set(const struct nlattr *a, key_len /= 2; if (key_type > OVS_KEY_ATTR_MAX || - (ovs_key_lens[key_type].len != key_len && - ovs_key_lens[key_type].len != OVS_ATTR_NESTED)) + !check_attr_len(key_len, ovs_key_lens[key_type].len)) return -EINVAL; if (masked && !validate_masked(nla_data(ovs_key), key_len)) @@ -1937,7 +1979,7 @@ static int validate_set(const struct nlattr *a, case OVS_KEY_ATTR_PRIORITY: case OVS_KEY_ATTR_SKB_MARK: case OVS_KEY_ATTR_CT_MARK: - case OVS_KEY_ATTR_CT_LABEL: + case OVS_KEY_ATTR_CT_LABELS: case OVS_KEY_ATTR_ETHERNET: break; diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index d22d8e948d0f..c7f74aab34b9 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c @@ -57,20 +57,21 @@ static u16 range_n_bytes(const struct sw_flow_key_range *range) } void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, - const struct sw_flow_mask *mask) + bool full, const struct sw_flow_mask *mask) { - const long *m = (const long *)((const u8 *)&mask->key + - mask->range.start); - const long *s = (const long *)((const u8 *)src + - mask->range.start); - long *d = (long *)((u8 *)dst + mask->range.start); + int start = full ? 0 : mask->range.start; + int len = full ? sizeof *dst : range_n_bytes(&mask->range); + const long *m = (const long *)((const u8 *)&mask->key + start); + const long *s = (const long *)((const u8 *)src + start); + long *d = (long *)((u8 *)dst + start); int i; - /* The memory outside of the 'mask->range' are not set since - * further operations on 'dst' only uses contents within - * 'mask->range'. + /* If 'full' is true then all of 'dst' is fully initialized. Otherwise, + * if 'full' is false the memory outside of the 'mask->range' is left + * uninitialized. This can be used as an optimization when further + * operations on 'dst' only use contents within 'mask->range'. 
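The comment above describes the new 'full' switch to ovs_flow_mask_key(): lookups keep masking only the bytes inside mask->range, while callers that later read the whole key ask for a fully initialized destination. The operation itself is a word-wise AND of key and mask over the chosen span; a small standalone model using plain arrays and word indices instead of struct sw_flow_key and byte offsets:

    #include <stdio.h>
    #include <string.h>

    #define KEY_WORDS 8   /* stand-in for sizeof(struct sw_flow_key)/sizeof(long) */

    /* Mask 'src' into 'dst'. If 'full' is false, only words in
     * [start, end) are written and the rest of 'dst' stays untouched. */
    static void mask_key(unsigned long *dst, const unsigned long *src,
                         const unsigned long *mask, int start, int end, int full)
    {
        int lo = full ? 0 : start;
        int hi = full ? KEY_WORDS : end;

        for (int i = lo; i < hi; i++)
            dst[i] = src[i] & mask[i];
    }

    int main(void)
    {
        unsigned long key[KEY_WORDS], msk[KEY_WORDS], out[KEY_WORDS];

        memset(key, 0xff, sizeof(key));
        memset(msk, 0, sizeof(msk));
        msk[2] = msk[3] = ~0UL;           /* "mask->range" covers words 2..3 */
        memset(out, 0, sizeof(out));

        mask_key(out, key, msk, 2, 4, 0); /* lookup-style: partial masking */
        printf("%lx %lx\n", out[0], out[2]);
        return 0;
    }
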
*/ - for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long)) + for (i = 0; i < len; i += sizeof(long)) *d++ = *s++ & *m++; } @@ -92,7 +93,8 @@ struct sw_flow *ovs_flow_alloc(void) /* Initialize the default stat node. */ stats = kmem_cache_alloc_node(flow_stats_cache, - GFP_KERNEL | __GFP_ZERO, 0); + GFP_KERNEL | __GFP_ZERO, + node_online(0) ? 0 : NUMA_NO_NODE); if (!stats) goto err; @@ -475,7 +477,7 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti, u32 hash; struct sw_flow_key masked_key; - ovs_flow_mask_key(&masked_key, unmasked, mask); + ovs_flow_mask_key(&masked_key, unmasked, false, mask); hash = flow_hash(&masked_key, &mask->range); head = find_bucket(ti, hash); hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) { diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h index 616eda10d955..2dd9900f533d 100644 --- a/net/openvswitch/flow_table.h +++ b/net/openvswitch/flow_table.h @@ -86,5 +86,5 @@ struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *, bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *); void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, - const struct sw_flow_mask *mask); + bool full, const struct sw_flow_mask *mask); #endif /* flow_table.h */ diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index dc81dc619aa2..12a36ac21eda 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -280,35 +280,19 @@ void ovs_vport_del(struct vport *vport) */ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats) { - struct net_device *dev = vport->dev; - int i; - - memset(stats, 0, sizeof(*stats)); - stats->rx_errors = dev->stats.rx_errors; - stats->tx_errors = dev->stats.tx_errors; - stats->tx_dropped = dev->stats.tx_dropped; - stats->rx_dropped = dev->stats.rx_dropped; - - stats->rx_dropped += atomic_long_read(&dev->rx_dropped); - stats->tx_dropped += atomic_long_read(&dev->tx_dropped); - - for_each_possible_cpu(i) { - const struct pcpu_sw_netstats *percpu_stats; - struct pcpu_sw_netstats local_stats; - unsigned int start; - - percpu_stats = per_cpu_ptr(dev->tstats, i); - - do { - start = u64_stats_fetch_begin_irq(&percpu_stats->syncp); - local_stats = *percpu_stats; - } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start)); - - stats->rx_bytes += local_stats.rx_bytes; - stats->rx_packets += local_stats.rx_packets; - stats->tx_bytes += local_stats.tx_bytes; - stats->tx_packets += local_stats.tx_packets; - } + const struct rtnl_link_stats64 *dev_stats; + struct rtnl_link_stats64 temp; + + dev_stats = dev_get_stats(vport->dev, &temp); + stats->rx_errors = dev_stats->rx_errors; + stats->tx_errors = dev_stats->tx_errors; + stats->tx_dropped = dev_stats->tx_dropped; + stats->rx_dropped = dev_stats->rx_dropped; + + stats->rx_bytes = dev_stats->rx_bytes; + stats->rx_packets = dev_stats->rx_packets; + stats->tx_bytes = dev_stats->tx_bytes; + stats->tx_packets = dev_stats->tx_packets; } /** @@ -460,6 +444,15 @@ int ovs_vport_receive(struct vport *vport, struct sk_buff *skb, OVS_CB(skb)->input_vport = vport; OVS_CB(skb)->mru = 0; + if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) { + u32 mark; + + mark = skb->mark; + skb_scrub_packet(skb, true); + skb->mark = mark; + tun_info = NULL; + } + /* Extract flow from 'skb' into 'key'. 
*/ error = ovs_flow_key_extract(tun_info, skb, &key); if (unlikely(error)) { diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 7b8e39a22387..aa4b15c35884 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -230,6 +230,8 @@ struct packet_skb_cb { } sa; }; +#define vio_le() virtio_legacy_is_little_endian() + #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc)) @@ -2680,15 +2682,15 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) goto out_unlock; if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && - (__virtio16_to_cpu(false, vnet_hdr.csum_start) + - __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2 > - __virtio16_to_cpu(false, vnet_hdr.hdr_len))) - vnet_hdr.hdr_len = __cpu_to_virtio16(false, - __virtio16_to_cpu(false, vnet_hdr.csum_start) + - __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2); + (__virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) + + __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2 > + __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len))) + vnet_hdr.hdr_len = __cpu_to_virtio16(vio_le(), + __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) + + __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2); err = -EINVAL; - if (__virtio16_to_cpu(false, vnet_hdr.hdr_len) > len) + if (__virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len) > len) goto out_unlock; if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { @@ -2731,7 +2733,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, - __virtio16_to_cpu(false, vnet_hdr.hdr_len), + __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len), msg->msg_flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out_unlock; @@ -2778,8 +2780,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) if (po->has_vnet_hdr) { if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { - u16 s = __virtio16_to_cpu(false, vnet_hdr.csum_start); - u16 o = __virtio16_to_cpu(false, vnet_hdr.csum_offset); + u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start); + u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset); if (!skb_partial_csum_set(skb, s, o)) { err = -EINVAL; goto out_free; @@ -2787,7 +2789,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) } skb_shinfo(skb)->gso_size = - __virtio16_to_cpu(false, vnet_hdr.gso_size); + __virtio16_to_cpu(vio_le(), vnet_hdr.gso_size); skb_shinfo(skb)->gso_type = gso_type; /* Header must be checked, and gso_segs computed. */ @@ -3161,9 +3163,9 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, /* This is a hint as to how much should be linear. 
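The af_packet hunk above replaces the hard-coded 'false' in every __virtio16_to_cpu()/__cpu_to_virtio16() call with vio_le(), i.e. virtio_legacy_is_little_endian(), so the virtio_net header fields are converted according to the legacy virtio byte order for the running host instead of a forced one. The sanity check itself is simple arithmetic on the converted values; a trimmed standalone model, where vio16_to_cpu() is an illustrative stand-in (assuming a little-endian legacy layout) and struct vnet_hdr is a cut-down virtio_net_hdr:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for __virtio16_to_cpu(vio_le(), x); this
     * model simply assumes a little-endian host and legacy layout. */
    static uint16_t vio16_to_cpu(uint16_t v) { return v; }

    struct vnet_hdr {            /* trimmed model of virtio_net_hdr */
        uint16_t hdr_len;
        uint16_t csum_start;
        uint16_t csum_offset;
    };

    /* The check packet_snd() performs: the checksum field must lie
     * inside the advertised header length, else hdr_len is bumped
     * (the real code converts the result back with __cpu_to_virtio16). */
    static void fixup_hdr_len(struct vnet_hdr *h)
    {
        uint16_t need = vio16_to_cpu(h->csum_start) +
                        vio16_to_cpu(h->csum_offset) + 2;

        if (need > vio16_to_cpu(h->hdr_len))
            h->hdr_len = need;
    }

    int main(void)
    {
        struct vnet_hdr h = { 10, 14, 16 };
        fixup_hdr_len(&h);
        printf("%u\n", vio16_to_cpu(h.hdr_len)); /* 32 */
        return 0;
    }
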
*/ vnet_hdr.hdr_len = - __cpu_to_virtio16(false, skb_headlen(skb)); + __cpu_to_virtio16(vio_le(), skb_headlen(skb)); vnet_hdr.gso_size = - __cpu_to_virtio16(false, sinfo->gso_size); + __cpu_to_virtio16(vio_le(), sinfo->gso_size); if (sinfo->gso_type & SKB_GSO_TCPV4) vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (sinfo->gso_type & SKB_GSO_TCPV6) @@ -3181,9 +3183,9 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (skb->ip_summed == CHECKSUM_PARTIAL) { vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; - vnet_hdr.csum_start = __cpu_to_virtio16(false, + vnet_hdr.csum_start = __cpu_to_virtio16(vio_le(), skb_checksum_start_offset(skb)); - vnet_hdr.csum_offset = __cpu_to_virtio16(false, + vnet_hdr.csum_offset = __cpu_to_virtio16(vio_le(), skb->csum_offset); } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID; diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 2d1be4a760fd..32fcdecdb9e2 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -31,13 +31,17 @@ #define MIRRED_TAB_MASK 7 static LIST_HEAD(mirred_list); +static DEFINE_SPINLOCK(mirred_list_lock); static void tcf_mirred_release(struct tc_action *a, int bind) { struct tcf_mirred *m = to_mirred(a); struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1); + /* We could be called either in a RCU callback or with RTNL lock held. */ + spin_lock_bh(&mirred_list_lock); list_del(&m->tcfm_list); + spin_unlock_bh(&mirred_list_lock); if (dev) dev_put(dev); } @@ -103,10 +107,10 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, } else { if (bind) return 0; - if (!ovr) { - tcf_hash_release(a, bind); + + tcf_hash_release(a, bind); + if (!ovr) return -EEXIST; - } } m = to_mirred(a); @@ -123,7 +127,9 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, } if (ret == ACT_P_CREATED) { + spin_lock_bh(&mirred_list_lock); list_add(&m->tcfm_list, &mirred_list); + spin_unlock_bh(&mirred_list_lock); tcf_hash_insert(a); } @@ -173,6 +179,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, skb2->skb_iif = skb->dev->ifindex; skb2->dev = dev; + skb_sender_cpu_clear(skb2); err = dev_queue_xmit(skb2); if (err) { @@ -221,7 +228,8 @@ static int mirred_device_event(struct notifier_block *unused, struct tcf_mirred *m; ASSERT_RTNL(); - if (event == NETDEV_UNREGISTER) + if (event == NETDEV_UNREGISTER) { + spin_lock_bh(&mirred_list_lock); list_for_each_entry(m, &mirred_list, tcfm_list) { if (rcu_access_pointer(m->tcfm_dev) == dev) { dev_put(dev); @@ -231,6 +239,8 @@ static int mirred_device_event(struct notifier_block *unused, RCU_INIT_POINTER(m->tcfm_dev, NULL); } } + spin_unlock_bh(&mirred_list_lock); + } return NOTIFY_DONE; } diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index 715e01e5910a..f23a3b68bba6 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -33,7 +33,6 @@ struct fw_head { u32 mask; - bool mask_set; struct fw_filter __rcu *ht[HTSIZE]; struct rcu_head rcu; }; @@ -84,7 +83,7 @@ static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp, } } } else { - /* old method */ + /* Old method: classify the packet using its skb mark. 
*/ if (id && (TC_H_MAJ(id) == 0 || !(TC_H_MAJ(id ^ tp->q->handle)))) { res->classid = id; @@ -114,14 +113,9 @@ static unsigned long fw_get(struct tcf_proto *tp, u32 handle) static int fw_init(struct tcf_proto *tp) { - struct fw_head *head; - - head = kzalloc(sizeof(struct fw_head), GFP_KERNEL); - if (head == NULL) - return -ENOBUFS; - - head->mask_set = false; - rcu_assign_pointer(tp->root, head); + /* We don't allocate fw_head here, because in the old method + * we don't need it at all. + */ return 0; } @@ -252,7 +246,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, int err; if (!opt) - return handle ? -EINVAL : 0; + return handle ? -EINVAL : 0; /* Succeed if it is old method. */ err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy); if (err < 0) @@ -302,11 +296,17 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, if (!handle) return -EINVAL; - if (!head->mask_set) { - head->mask = 0xFFFFFFFF; + if (!head) { + u32 mask = 0xFFFFFFFF; if (tb[TCA_FW_MASK]) - head->mask = nla_get_u32(tb[TCA_FW_MASK]); - head->mask_set = true; + mask = nla_get_u32(tb[TCA_FW_MASK]); + + head = kzalloc(sizeof(*head), GFP_KERNEL); + if (!head) + return -ENOBUFS; + head->mask = mask; + + rcu_assign_pointer(tp->root, head); } f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL); diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index 9d15cb6b8cb1..86b04e31e60b 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -368,6 +368,15 @@ static unsigned int hhf_drop(struct Qdisc *sch) return bucket - q->buckets; } +static unsigned int hhf_qdisc_drop(struct Qdisc *sch) +{ + unsigned int prev_backlog; + + prev_backlog = sch->qstats.backlog; + hhf_drop(sch); + return prev_backlog - sch->qstats.backlog; +} + static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct hhf_sched_data *q = qdisc_priv(sch); @@ -696,7 +705,7 @@ static struct Qdisc_ops hhf_qdisc_ops __read_mostly = { .enqueue = hhf_enqueue, .dequeue = hhf_dequeue, .peek = qdisc_peek_dequeued, - .drop = hhf_drop, + .drop = hhf_qdisc_drop, .init = hhf_init, .reset = hhf_reset, .destroy = hhf_destroy, diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 197c3f59ecbf..b00f1f9611d6 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1208,20 +1208,22 @@ void sctp_assoc_update(struct sctp_association *asoc, * within this document. * * Our basic strategy is to round-robin transports in priorities - * according to sctp_state_prio_map[] e.g., if no such + * according to sctp_trans_score() e.g., if no such * transport with state SCTP_ACTIVE exists, round-robin through * SCTP_UNKNOWN, etc. You get the picture. 
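The comment above describes the selection policy: transports are compared by sctp_trans_score(), with ACTIVE scoring highest and INACTIVE lowest, and sctp_trans_elect_tie() deciding equal scores. A standalone sketch of that election step; the state names and the recency tie-break here are illustrative simplifications, not the kernel structures:

    #include <stdio.h>

    enum trans_state { T_INACTIVE, T_PF, T_UNKNOWN, T_ACTIVE };

    struct transport {
        enum trans_state state;
        unsigned int last_time_heard;   /* used only for the tie-break */
    };

    static int trans_score(const struct transport *t)
    {
        switch (t->state) {
        case T_ACTIVE:  return 3;  /* best case */
        case T_UNKNOWN: return 2;
        case T_PF:      return 1;
        default:        return 0;  /* worst case */
        }
    }

    /* Elect the better of two transports: higher score wins, ties fall
     * back to whichever was heard from most recently (simplified). */
    static const struct transport *trans_elect(const struct transport *a,
                                               const struct transport *b)
    {
        int sa = trans_score(a), sb = trans_score(b);

        if (sa != sb)
            return sa > sb ? a : b;
        return a->last_time_heard >= b->last_time_heard ? a : b;
    }

    int main(void)
    {
        struct transport t1 = { T_PF, 100 }, t2 = { T_ACTIVE, 50 };
        printf("%s\n", trans_elect(&t1, &t2) == &t2 ? "t2" : "t1"); /* t2 */
        return 0;
    }
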
*/ -static const u8 sctp_trans_state_to_prio_map[] = { - [SCTP_ACTIVE] = 3, /* best case */ - [SCTP_UNKNOWN] = 2, - [SCTP_PF] = 1, - [SCTP_INACTIVE] = 0, /* worst case */ -}; - static u8 sctp_trans_score(const struct sctp_transport *trans) { - return sctp_trans_state_to_prio_map[trans->state]; + switch (trans->state) { + case SCTP_ACTIVE: + return 3; /* best case */ + case SCTP_UNKNOWN: + return 2; + case SCTP_PF: + return 1; + default: /* case SCTP_INACTIVE */ + return 0; /* worst case */ + } } static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1, diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index b7143337e4fa..3d9ea9a48289 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1186,7 +1186,7 @@ static void sctp_v4_del_protocol(void) unregister_inetaddr_notifier(&sctp_inetaddr_notifier); } -static int __net_init sctp_net_init(struct net *net) +static int __net_init sctp_defaults_init(struct net *net) { int status; @@ -1279,12 +1279,6 @@ static int __net_init sctp_net_init(struct net *net) sctp_dbg_objcnt_init(net); - /* Initialize the control inode/socket for handling OOTB packets. */ - if ((status = sctp_ctl_sock_init(net))) { - pr_err("Failed to initialize the SCTP control sock\n"); - goto err_ctl_sock_init; - } - /* Initialize the local address list. */ INIT_LIST_HEAD(&net->sctp.local_addr_list); spin_lock_init(&net->sctp.local_addr_lock); @@ -1300,9 +1294,6 @@ static int __net_init sctp_net_init(struct net *net) return 0; -err_ctl_sock_init: - sctp_dbg_objcnt_exit(net); - sctp_proc_exit(net); err_init_proc: cleanup_sctp_mibs(net); err_init_mibs: @@ -1311,15 +1302,12 @@ err_sysctl_register: return status; } -static void __net_exit sctp_net_exit(struct net *net) +static void __net_exit sctp_defaults_exit(struct net *net) { /* Free the local address list */ sctp_free_addr_wq(net); sctp_free_local_addr_list(net); - /* Free the control endpoint. */ - inet_ctl_sock_destroy(net->sctp.ctl_sock); - sctp_dbg_objcnt_exit(net); sctp_proc_exit(net); @@ -1327,9 +1315,32 @@ static void __net_exit sctp_net_exit(struct net *net) sctp_sysctl_net_unregister(net); } -static struct pernet_operations sctp_net_ops = { - .init = sctp_net_init, - .exit = sctp_net_exit, +static struct pernet_operations sctp_defaults_ops = { + .init = sctp_defaults_init, + .exit = sctp_defaults_exit, +}; + +static int __net_init sctp_ctrlsock_init(struct net *net) +{ + int status; + + /* Initialize the control inode/socket for handling OOTB packets. */ + status = sctp_ctl_sock_init(net); + if (status) + pr_err("Failed to initialize the SCTP control sock\n"); + + return status; +} + +static void __net_init sctp_ctrlsock_exit(struct net *net) +{ + /* Free the control endpoint. */ + inet_ctl_sock_destroy(net->sctp.ctl_sock); +} + +static struct pernet_operations sctp_ctrlsock_ops = { + .init = sctp_ctrlsock_init, + .exit = sctp_ctrlsock_exit, }; /* Initialize the universe into something sensible. 
*/ @@ -1462,8 +1473,11 @@ static __init int sctp_init(void) sctp_v4_pf_init(); sctp_v6_pf_init(); - status = sctp_v4_protosw_init(); + status = register_pernet_subsys(&sctp_defaults_ops); + if (status) + goto err_register_defaults; + status = sctp_v4_protosw_init(); if (status) goto err_protosw_init; @@ -1471,9 +1485,9 @@ static __init int sctp_init(void) if (status) goto err_v6_protosw_init; - status = register_pernet_subsys(&sctp_net_ops); + status = register_pernet_subsys(&sctp_ctrlsock_ops); if (status) - goto err_register_pernet_subsys; + goto err_register_ctrlsock; status = sctp_v4_add_protocol(); if (status) @@ -1489,12 +1503,14 @@ out: err_v6_add_protocol: sctp_v4_del_protocol(); err_add_protocol: - unregister_pernet_subsys(&sctp_net_ops); -err_register_pernet_subsys: + unregister_pernet_subsys(&sctp_ctrlsock_ops); +err_register_ctrlsock: sctp_v6_protosw_exit(); err_v6_protosw_init: sctp_v4_protosw_exit(); err_protosw_init: + unregister_pernet_subsys(&sctp_defaults_ops); +err_register_defaults: sctp_v4_pf_exit(); sctp_v6_pf_exit(); sctp_sysctl_unregister(); @@ -1527,12 +1543,14 @@ static __exit void sctp_exit(void) sctp_v6_del_protocol(); sctp_v4_del_protocol(); - unregister_pernet_subsys(&sctp_net_ops); + unregister_pernet_subsys(&sctp_ctrlsock_ops); /* Free protosw registrations */ sctp_v6_protosw_exit(); sctp_v4_protosw_exit(); + unregister_pernet_subsys(&sctp_defaults_ops); + /* Unregister with socket layer. */ sctp_v6_pf_exit(); sctp_v4_pf_exit(); diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 35df1266bf07..6098d4c42fa9 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -244,12 +244,13 @@ void sctp_generate_t3_rtx_event(unsigned long peer) int error; struct sctp_transport *transport = (struct sctp_transport *) peer; struct sctp_association *asoc = transport->asoc; - struct net *net = sock_net(asoc->base.sk); + struct sock *sk = asoc->base.sk; + struct net *net = sock_net(sk); /* Check whether a task is in the sock. */ - bh_lock_sock(asoc->base.sk); - if (sock_owned_by_user(asoc->base.sk)) { + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { pr_debug("%s: sock is busy\n", __func__); /* Try again later. 
*/ @@ -272,10 +273,10 @@ void sctp_generate_t3_rtx_event(unsigned long peer) transport, GFP_ATOMIC); if (error) - asoc->base.sk->sk_err = -error; + sk->sk_err = -error; out_unlock: - bh_unlock_sock(asoc->base.sk); + bh_unlock_sock(sk); sctp_transport_put(transport); } @@ -285,11 +286,12 @@ out_unlock: static void sctp_generate_timeout_event(struct sctp_association *asoc, sctp_event_timeout_t timeout_type) { - struct net *net = sock_net(asoc->base.sk); + struct sock *sk = asoc->base.sk; + struct net *net = sock_net(sk); int error = 0; - bh_lock_sock(asoc->base.sk); - if (sock_owned_by_user(asoc->base.sk)) { + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { pr_debug("%s: sock is busy: timer %d\n", __func__, timeout_type); @@ -312,10 +314,10 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc, (void *)timeout_type, GFP_ATOMIC); if (error) - asoc->base.sk->sk_err = -error; + sk->sk_err = -error; out_unlock: - bh_unlock_sock(asoc->base.sk); + bh_unlock_sock(sk); sctp_association_put(asoc); } @@ -365,10 +367,11 @@ void sctp_generate_heartbeat_event(unsigned long data) int error = 0; struct sctp_transport *transport = (struct sctp_transport *) data; struct sctp_association *asoc = transport->asoc; - struct net *net = sock_net(asoc->base.sk); + struct sock *sk = asoc->base.sk; + struct net *net = sock_net(sk); - bh_lock_sock(asoc->base.sk); - if (sock_owned_by_user(asoc->base.sk)) { + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { pr_debug("%s: sock is busy\n", __func__); /* Try again later. */ @@ -388,11 +391,11 @@ void sctp_generate_heartbeat_event(unsigned long data) asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); - if (error) - asoc->base.sk->sk_err = -error; + if (error) + sk->sk_err = -error; out_unlock: - bh_unlock_sock(asoc->base.sk); + bh_unlock_sock(sk); sctp_transport_put(transport); } @@ -403,10 +406,11 @@ void sctp_generate_proto_unreach_event(unsigned long data) { struct sctp_transport *transport = (struct sctp_transport *) data; struct sctp_association *asoc = transport->asoc; - struct net *net = sock_net(asoc->base.sk); + struct sock *sk = asoc->base.sk; + struct net *net = sock_net(sk); - bh_lock_sock(asoc->base.sk); - if (sock_owned_by_user(asoc->base.sk)) { + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { pr_debug("%s: sock is busy\n", __func__); /* Try again later. 
*/ @@ -427,7 +431,7 @@ void sctp_generate_proto_unreach_event(unsigned long data) asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); out_unlock: - bh_unlock_sock(asoc->base.sk); + bh_unlock_sock(sk); sctp_association_put(asoc); } diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index b140c092d226..f14f24ee9983 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -297,7 +297,7 @@ static int rpc_complete_task(struct rpc_task *task) clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); ret = atomic_dec_and_test(&task->tk_count); if (waitqueue_active(wq)) - __wake_up_locked_key(wq, TASK_NORMAL, 1, &k); + __wake_up_locked_key(wq, TASK_NORMAL, &k); spin_unlock_irqrestore(&wq->lock, flags); return ret; } @@ -1092,14 +1092,10 @@ void rpc_destroy_mempool(void) { rpciod_stop(); - if (rpc_buffer_mempool) - mempool_destroy(rpc_buffer_mempool); - if (rpc_task_mempool) - mempool_destroy(rpc_task_mempool); - if (rpc_task_slabp) - kmem_cache_destroy(rpc_task_slabp); - if (rpc_buffer_slabp) - kmem_cache_destroy(rpc_buffer_slabp); + mempool_destroy(rpc_buffer_mempool); + mempool_destroy(rpc_task_mempool); + kmem_cache_destroy(rpc_task_slabp); + kmem_cache_destroy(rpc_buffer_slabp); rpc_destroy_wait_queue(&delay_queue); } diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index ab5dd621ae0c..2e98f4a243e5 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -614,6 +614,7 @@ static void xprt_autoclose(struct work_struct *work) clear_bit(XPRT_CLOSE_WAIT, &xprt->state); xprt->ops->close(xprt); xprt_release_write(xprt, NULL); + wake_up_bit(&xprt->state, XPRT_LOCKED); } /** @@ -723,6 +724,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie) xprt->ops->release_xprt(xprt, NULL); out: spin_unlock_bh(&xprt->transport_lock); + wake_up_bit(&xprt->state, XPRT_LOCKED); } /** @@ -1394,6 +1396,10 @@ out: static void xprt_destroy(struct rpc_xprt *xprt) { dprintk("RPC: destroying transport %p\n", xprt); + + /* Exclude transport connect/disconnect handlers */ + wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE); + del_timer_sync(&xprt->timer); rpc_xprt_debugfs_unregister(xprt); diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c index cb25c89da623..f1e8dafbd507 100644 --- a/net/sunrpc/xprtrdma/fmr_ops.c +++ b/net/sunrpc/xprtrdma/fmr_ops.c @@ -39,25 +39,6 @@ static int fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, struct rpcrdma_create_data_internal *cdata) { - struct ib_device_attr *devattr = &ia->ri_devattr; - struct ib_mr *mr; - - /* Obtain an lkey to use for the regbufs, which are - * protected from remote access. - */ - if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) { - ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; - } else { - mr = ib_get_dma_mr(ia->ri_pd, IB_ACCESS_LOCAL_WRITE); - if (IS_ERR(mr)) { - pr_err("%s: ib_get_dma_mr for failed with %lX\n", - __func__, PTR_ERR(mr)); - return -ENOMEM; - } - ia->ri_dma_lkey = ia->ri_dma_mr->lkey; - ia->ri_dma_mr = mr; - } - return 0; } diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index d6653f5d0830..5318951b3b53 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -189,11 +189,6 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, struct ib_device_attr *devattr = &ia->ri_devattr; int depth, delta; - /* Obtain an lkey to use for the regbufs, which are - * protected from remote access. 
- */ - ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; - ia->ri_max_frmr_depth = min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, devattr->max_fast_reg_page_list_len); diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c index 72cf8b15bbb4..617b76f22154 100644 --- a/net/sunrpc/xprtrdma/physical_ops.c +++ b/net/sunrpc/xprtrdma/physical_ops.c @@ -23,7 +23,6 @@ static int physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, struct rpcrdma_create_data_internal *cdata) { - struct ib_device_attr *devattr = &ia->ri_devattr; struct ib_mr *mr; /* Obtain an rkey to use for RPC data payloads. @@ -37,15 +36,8 @@ physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, __func__, PTR_ERR(mr)); return -ENOMEM; } - ia->ri_dma_mr = mr; - - /* Obtain an lkey to use for regbufs. - */ - if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) - ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; - else - ia->ri_dma_lkey = ia->ri_dma_mr->lkey; + ia->ri_dma_mr = mr; return 0; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index cb5174284074..f0c3ff67ca98 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -136,7 +136,8 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt, ctxt->direction = DMA_FROM_DEVICE; ctxt->read_hdr = head; pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd); - read = min_t(int, pages_needed << PAGE_SHIFT, rs_length); + read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset, + rs_length); for (pno = 0; pno < pages_needed; pno++) { int len = min_t(int, rs_length, PAGE_SIZE - pg_off); @@ -235,7 +236,8 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt, ctxt->direction = DMA_FROM_DEVICE; ctxt->frmr = frmr; pages_needed = min_t(int, pages_needed, xprt->sc_frmr_pg_list_len); - read = min_t(int, pages_needed << PAGE_SHIFT, rs_length); + read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset, + rs_length); frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]); frmr->direction = DMA_FROM_DEVICE; @@ -531,7 +533,7 @@ static int rdma_read_complete(struct svc_rqst *rqstp, rqstp->rq_arg.page_base = head->arg.page_base; /* rq_respages starts after the last arg page */ - rqstp->rq_respages = &rqstp->rq_arg.pages[page_no]; + rqstp->rq_respages = &rqstp->rq_pages[page_no]; rqstp->rq_next_page = rqstp->rq_respages + 1; /* Rebuild rq_arg head and tail. 
*/ diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 64443eb754ad..41e452bc580c 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -270,8 +270,8 @@ xprt_rdma_destroy(struct rpc_xprt *xprt) xprt_clear_connected(xprt); - rpcrdma_buffer_destroy(&r_xprt->rx_buf); rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia); + rpcrdma_buffer_destroy(&r_xprt->rx_buf); rpcrdma_ia_close(&r_xprt->rx_ia); xprt_rdma_free_addresses(xprt); diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 682996779970..5502d4dade74 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -543,11 +543,8 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) } if (memreg == RPCRDMA_FRMR) { - /* Requires both frmr reg and local dma lkey */ - if (((devattr->device_cap_flags & - (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) != - (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) || - (devattr->max_fast_reg_page_list_len == 0)) { + if (!(devattr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) || + (devattr->max_fast_reg_page_list_len == 0)) { dprintk("RPC: %s: FRMR registration " "not supported by HCA\n", __func__); memreg = RPCRDMA_MTHCAFMR; @@ -557,6 +554,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) if (!ia->ri_device->alloc_fmr) { dprintk("RPC: %s: MTHCAFMR registration " "not supported by HCA\n", __func__); + rc = -EINVAL; goto out3; } } @@ -755,19 +753,22 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia) cancel_delayed_work_sync(&ep->rep_connect_worker); - if (ia->ri_id->qp) { + if (ia->ri_id->qp) rpcrdma_ep_disconnect(ep, ia); + + rpcrdma_clean_cq(ep->rep_attr.recv_cq); + rpcrdma_clean_cq(ep->rep_attr.send_cq); + + if (ia->ri_id->qp) { rdma_destroy_qp(ia->ri_id); ia->ri_id->qp = NULL; } - rpcrdma_clean_cq(ep->rep_attr.recv_cq); rc = ib_destroy_cq(ep->rep_attr.recv_cq); if (rc) dprintk("RPC: %s: ib_destroy_cq returned %i\n", __func__, rc); - rpcrdma_clean_cq(ep->rep_attr.send_cq); rc = ib_destroy_cq(ep->rep_attr.send_cq); if (rc) dprintk("RPC: %s: ib_destroy_cq returned %i\n", @@ -1252,7 +1253,7 @@ rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags) goto out_free; iov->length = size; - iov->lkey = ia->ri_dma_lkey; + iov->lkey = ia->ri_pd->local_dma_lkey; rb->rg_size = size; rb->rg_owner = NULL; return rb; diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 02512221b8bc..c09414e6f91b 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -65,7 +65,6 @@ struct rpcrdma_ia { struct rdma_cm_id *ri_id; struct ib_pd *ri_pd; struct ib_mr *ri_dma_mr; - u32 ri_dma_lkey; struct completion ri_done; int ri_async_rc; unsigned int ri_max_frmr_depth; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 7be90bc1a7c2..1a85e0ed0b48 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -777,7 +777,6 @@ static void xs_sock_mark_closed(struct rpc_xprt *xprt) xs_sock_reset_connection_flags(xprt); /* Mark transport as closed and wake up all pending tasks */ xprt_disconnect_done(xprt); - xprt_force_disconnect(xprt); } /** @@ -881,8 +880,11 @@ static void xs_xprt_free(struct rpc_xprt *xprt) */ static void xs_destroy(struct rpc_xprt *xprt) { + struct sock_xprt *transport = container_of(xprt, + struct sock_xprt, xprt); dprintk("RPC: xs_destroy xprt %p\n", xprt); + cancel_delayed_work_sync(&transport->connect_worker); xs_close(xprt); 
xs_xprt_free(xprt); module_put(THIS_MODULE); @@ -1435,6 +1437,7 @@ out: static void xs_tcp_state_change(struct sock *sk) { struct rpc_xprt *xprt; + struct sock_xprt *transport; read_lock_bh(&sk->sk_callback_lock); if (!(xprt = xprt_from_sock(sk))) @@ -1446,13 +1449,12 @@ static void xs_tcp_state_change(struct sock *sk) sock_flag(sk, SOCK_ZAPPED), sk->sk_shutdown); + transport = container_of(xprt, struct sock_xprt, xprt); trace_rpc_socket_state_change(xprt, sk->sk_socket); switch (sk->sk_state) { case TCP_ESTABLISHED: spin_lock(&xprt->transport_lock); if (!xprt_test_and_set_connected(xprt)) { - struct sock_xprt *transport = container_of(xprt, - struct sock_xprt, xprt); /* Reset TCP record info */ transport->tcp_offset = 0; @@ -1461,6 +1463,8 @@ static void xs_tcp_state_change(struct sock *sk) transport->tcp_flags = TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID; xprt->connect_cookie++; + clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); + xprt_clear_connecting(xprt); xprt_wake_pending_tasks(xprt, -EAGAIN); } @@ -1496,6 +1500,9 @@ static void xs_tcp_state_change(struct sock *sk) smp_mb__after_atomic(); break; case TCP_CLOSE: + if (test_and_clear_bit(XPRT_SOCK_CONNECTING, + &transport->sock_state)) + xprt_clear_connecting(xprt); xs_sock_mark_closed(xprt); } out: @@ -2179,6 +2186,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) /* Tell the socket layer to start connecting... */ xprt->stat.connect_count++; xprt->stat.connect_start = jiffies; + set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); switch (ret) { case 0: @@ -2240,7 +2248,6 @@ static void xs_tcp_setup_socket(struct work_struct *work) case -EINPROGRESS: case -EALREADY: xprt_unlock_connect(xprt, transport); - xprt_clear_connecting(xprt); return; case -EINVAL: /* Happens, for instance, if the user specified a link diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index fda38f830a10..77f5d17e2612 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -16,6 +16,7 @@ #include <linux/notifier.h> #include <linux/netdevice.h> #include <linux/if_bridge.h> +#include <linux/if_vlan.h> #include <net/ip_fib.h> #include <net/switchdev.h> @@ -634,6 +635,8 @@ static int switchdev_port_br_afspec(struct net_device *dev, if (nla_len(attr) != sizeof(struct bridge_vlan_info)) return -EINVAL; vinfo = nla_data(attr); + if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK) + return -EINVAL; vlan->flags = vinfo->flags; if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) { if (vlan->vid_begin) diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 562c926a51cc..c5ac436235e0 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -539,6 +539,7 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err) *err = -TIPC_ERR_NO_NAME; if (skb_linearize(skb)) return false; + msg = buf_msg(skb); if (msg_reroute_cnt(msg)) return false; dnode = addr_domain(net, msg_lookup_scope(msg)); diff --git a/net/tipc/msg.h b/net/tipc/msg.h index a82c5848d4bc..5351a3f97e8e 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -357,7 +357,7 @@ static inline u32 msg_importance(struct tipc_msg *m) if (likely((usr <= TIPC_CRITICAL_IMPORTANCE) && !msg_errcode(m))) return usr; if ((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER)) - return msg_bits(m, 5, 13, 0x7); + return msg_bits(m, 9, 0, 0x7); return TIPC_SYSTEM_IMPORTANCE; } @@ -366,7 +366,7 @@ static inline void msg_set_importance(struct tipc_msg *m, u32 i) int usr = msg_user(m); if (likely((usr 
== MSG_FRAGMENTER) || (usr == MSG_BUNDLER))) - msg_set_bits(m, 5, 13, 0x7, i); + msg_set_bits(m, 9, 0, 0x7, i); else if (i < TIPC_SYSTEM_IMPORTANCE) msg_set_user(m, i); else diff --git a/net/tipc/node.c b/net/tipc/node.c index 703875fd6cde..2c32a83037a3 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -1116,7 +1116,7 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb, } /* Ignore duplicate packets */ - if (less(oseqno, rcv_nxt)) + if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt)) return true; /* Initiate or update failover mode if applicable */ @@ -1146,8 +1146,8 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb, if (!pl || !tipc_link_is_up(pl)) return true; - /* Initiate or update synch mode if applicable */ - if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) { + /* Initiate synch mode if applicable */ + if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) { syncpt = iseqno + exp_pkts - 1; if (!tipc_link_is_up(l)) { tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 03ee4d359f6a..94f658235fb4 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2064,6 +2064,11 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state) goto out; } + if (flags & MSG_PEEK) + skip = sk_peek_offset(sk, flags); + else + skip = 0; + do { int chunk; struct sk_buff *skb, *last; @@ -2112,7 +2117,6 @@ unlock: break; } - skip = sk_peek_offset(sk, flags); while (skip >= unix_skb_len(skb)) { skip -= unix_skb_len(skb); last = skb; @@ -2181,6 +2185,17 @@ unlock: sk_peek_offset_fwd(sk, chunk); + if (UNIXCB(skb).fp) + break; + + skip = 0; + last = skb; + last_len = skb->len; + unix_state_lock(sk); + skb = skb_peek_next(skb, &sk->sk_receive_queue); + if (skb) + goto again; + unix_state_unlock(sk); break; } } while (size); diff --git a/samples/kprobes/jprobe_example.c b/samples/kprobes/jprobe_example.c index 9119ac6a8270..c285a3b8a9f1 100644 --- a/samples/kprobes/jprobe_example.c +++ b/samples/kprobes/jprobe_example.c @@ -1,13 +1,13 @@ /* * Here's a sample kernel module showing the use of jprobes to dump - * the arguments of do_fork(). + * the arguments of _do_fork(). * * For more information on theory of operation of jprobes, see * Documentation/kprobes.txt * * Build and insert the kernel module as done in the kprobe example. * You will see the trace data in /var/log/messages and on the - * console whenever do_fork() is invoked to create a new process. + * console whenever _do_fork() is invoked to create a new process. * (Some messages may be suppressed if syslogd is configured to * eliminate duplicate messages.) */ @@ -17,13 +17,13 @@ #include <linux/kprobes.h> /* - * Jumper probe for do_fork. + * Jumper probe for _do_fork. * Mirror principle enables access to arguments of the probed routine * from the probe handler. 
*/ -/* Proxy routine having the same arguments as actual do_fork() routine */ -static long jdo_fork(unsigned long clone_flags, unsigned long stack_start, +/* Proxy routine having the same arguments as actual _do_fork() routine */ +static long j_do_fork(unsigned long clone_flags, unsigned long stack_start, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { @@ -36,9 +36,9 @@ static long jdo_fork(unsigned long clone_flags, unsigned long stack_start, } static struct jprobe my_jprobe = { - .entry = jdo_fork, + .entry = j_do_fork, .kp = { - .symbol_name = "do_fork", + .symbol_name = "_do_fork", }, }; diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c index 366db1a9fb65..727eb21c9c56 100644 --- a/samples/kprobes/kprobe_example.c +++ b/samples/kprobes/kprobe_example.c @@ -1,13 +1,13 @@ /* * NOTE: This example is works on x86 and powerpc. * Here's a sample kernel module showing the use of kprobes to dump a - * stack trace and selected registers when do_fork() is called. + * stack trace and selected registers when _do_fork() is called. * * For more information on theory of operation of kprobes, see * Documentation/kprobes.txt * * You will see the trace data in /var/log/messages and on the console - * whenever do_fork() is invoked to create a new process. + * whenever _do_fork() is invoked to create a new process. */ #include <linux/kernel.h> @@ -16,7 +16,7 @@ /* For each probe you need to allocate a kprobe structure */ static struct kprobe kp = { - .symbol_name = "do_fork", + .symbol_name = "_do_fork", }; /* kprobe pre_handler: called just before the probed instruction is executed */ diff --git a/samples/kprobes/kretprobe_example.c b/samples/kprobes/kretprobe_example.c index 1041b6731598..ebb1d1aed547 100644 --- a/samples/kprobes/kretprobe_example.c +++ b/samples/kprobes/kretprobe_example.c @@ -7,7 +7,7 @@ * * usage: insmod kretprobe_example.ko func=<func_name> * - * If no func_name is specified, do_fork is instrumented + * If no func_name is specified, _do_fork is instrumented * * For more information on theory of operation of kretprobes, see * Documentation/kprobes.txt @@ -25,7 +25,7 @@ #include <linux/limits.h> #include <linux/sched.h> -static char func_name[NAME_MAX] = "do_fork"; +static char func_name[NAME_MAX] = "_do_fork"; module_param_string(func, func_name, NAME_MAX, S_IRUGO); MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the" " function's execution time"); diff --git a/scripts/extract-cert.c b/scripts/extract-cert.c index 10d23ca9f617..b071bf476fea 100644 --- a/scripts/extract-cert.c +++ b/scripts/extract-cert.c @@ -1,15 +1,15 @@ /* Extract X.509 certificate in DER form from PKCS#11 or PEM. * - * Copyright © 2014 Red Hat, Inc. All Rights Reserved. - * Copyright © 2015 Intel Corporation. + * Copyright © 2014-2015 Red Hat, Inc. All Rights Reserved. + * Copyright © 2015 Intel Corporation. * * Authors: David Howells <dhowells@redhat.com> * David Woodhouse <dwmw2@infradead.org> * * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the licence, or (at your option) any later version. 
*/ #define _GNU_SOURCE #include <stdio.h> @@ -17,13 +17,9 @@ #include <stdint.h> #include <stdbool.h> #include <string.h> -#include <getopt.h> #include <err.h> -#include <arpa/inet.h> #include <openssl/bio.h> -#include <openssl/evp.h> #include <openssl/pem.h> -#include <openssl/pkcs7.h> #include <openssl/err.h> #include <openssl/engine.h> diff --git a/scripts/package/builddeb b/scripts/package/builddeb index 0cd46e129920..b967e4f9fed2 100755 --- a/scripts/package/builddeb +++ b/scripts/package/builddeb @@ -115,7 +115,7 @@ esac BUILD_DEBUG="$(grep -s '^CONFIG_DEBUG_INFO=y' $KCONFIG_CONFIG || true)" # Setup the directory structure -rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir" +rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir" $objtree/debian/files mkdir -m 755 -p "$tmpdir/DEBIAN" mkdir -p "$tmpdir/lib" "$tmpdir/boot" mkdir -p "$fwdir/lib/firmware/$version/" @@ -408,7 +408,7 @@ binary-arch: \$(MAKE) KDEB_SOURCENAME=${sourcename} KDEB_PKGVERSION=${packageversion} bindeb-pkg clean: - rm -rf debian/*tmp + rm -rf debian/*tmp debian/files mv debian/ debian.backup # debian/ might be cleaned away \$(MAKE) clean mv debian.backup debian diff --git a/scripts/sign-file.c b/scripts/sign-file.c index 058bba3103e2..250a7a645033 100755 --- a/scripts/sign-file.c +++ b/scripts/sign-file.c @@ -1,12 +1,15 @@ /* Sign a module file using the given key. * - * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) + * Copyright © 2014-2015 Red Hat, Inc. All Rights Reserved. + * Copyright © 2015 Intel Corporation. + * + * Authors: David Howells <dhowells@redhat.com> + * David Woodhouse <dwmw2@infradead.org> * * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the licence, or (at your option) any later version. */ #define _GNU_SOURCE #include <stdio.h> @@ -17,13 +20,34 @@ #include <getopt.h> #include <err.h> #include <arpa/inet.h> +#include <openssl/opensslv.h> #include <openssl/bio.h> #include <openssl/evp.h> #include <openssl/pem.h> -#include <openssl/cms.h> #include <openssl/err.h> #include <openssl/engine.h> +/* + * Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to + * assume that it's not available and its header file is missing and that we + * should use PKCS#7 instead. Switching to the older PKCS#7 format restricts + * the options we have on specifying the X.509 certificate we want. + * + * Further, older versions of OpenSSL don't support manually adding signers to + * the PKCS#7 message so have to accept that we get a certificate included in + * the signature message. Nor do such older versions of OpenSSL support + * signing with anything other than SHA1 - so we're stuck with that if such is + * the case. 
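The comment above explains the fallback added to sign-file: with OpenSSL older than 1.0.0 the CMS API (and its header) may be absent, so the tool drops to PKCS#7, loses the key-id option and is limited to SHA1. The shape of that compile-time selection, reduced to a tiny runnable skeleton; FAKE_OPENSSL_VERSION_NUMBER and the two stub functions are illustrative, the real code tests OPENSSL_VERSION_NUMBER and calls the CMS_*/PKCS7_* APIs shown in the hunks below:

    #include <stdio.h>

    /* Pretend version macro; the real check uses OPENSSL_VERSION_NUMBER. */
    #define FAKE_OPENSSL_VERSION_NUMBER 0x0090800fL

    #if FAKE_OPENSSL_VERSION_NUMBER < 0x10000000L
    #define USE_PKCS7
    #endif

    void sign_with_cms(void)   { puts("signing with CMS");    }
    void sign_with_pkcs7(void) { puts("signing with PKCS#7"); }

    int main(void)
    {
    #ifndef USE_PKCS7
        sign_with_cms();
    #else
        sign_with_pkcs7();    /* old OpenSSL: PKCS#7, SHA1 only */
    #endif
        return 0;
    }
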
+ */ +#if OPENSSL_VERSION_NUMBER < 0x10000000L +#define USE_PKCS7 +#endif +#ifndef USE_PKCS7 +#include <openssl/cms.h> +#else +#include <openssl/pkcs7.h> +#endif + struct module_signature { uint8_t algo; /* Public-key crypto algorithm [0] */ uint8_t hash; /* Digest algorithm [0] */ @@ -107,30 +131,42 @@ int main(int argc, char **argv) struct module_signature sig_info = { .id_type = PKEY_ID_PKCS7 }; char *hash_algo = NULL; char *private_key_name, *x509_name, *module_name, *dest_name; - bool save_cms = false, replace_orig; + bool save_sig = false, replace_orig; bool sign_only = false; unsigned char buf[4096]; - unsigned long module_size, cms_size; - unsigned int use_keyid = 0, use_signed_attrs = CMS_NOATTR; + unsigned long module_size, sig_size; + unsigned int use_signed_attrs; const EVP_MD *digest_algo; EVP_PKEY *private_key; +#ifndef USE_PKCS7 CMS_ContentInfo *cms; + unsigned int use_keyid = 0; +#else + PKCS7 *pkcs7; +#endif X509 *x509; BIO *b, *bd = NULL, *bm; int opt, n; - OpenSSL_add_all_algorithms(); ERR_load_crypto_strings(); ERR_clear_error(); key_pass = getenv("KBUILD_SIGN_PIN"); +#ifndef USE_PKCS7 + use_signed_attrs = CMS_NOATTR; +#else + use_signed_attrs = PKCS7_NOATTR; +#endif + do { opt = getopt(argc, argv, "dpk"); switch (opt) { - case 'p': save_cms = true; break; - case 'd': sign_only = true; save_cms = true; break; + case 'p': save_sig = true; break; + case 'd': sign_only = true; save_sig = true; break; +#ifndef USE_PKCS7 case 'k': use_keyid = CMS_USE_KEYID; break; +#endif case -1: break; default: format(); } @@ -154,6 +190,14 @@ int main(int argc, char **argv) replace_orig = true; } +#ifdef USE_PKCS7 + if (strcmp(hash_algo, "sha1") != 0) { + fprintf(stderr, "sign-file: %s only supports SHA1 signing\n", + OPENSSL_VERSION_TEXT); + exit(3); + } +#endif + /* Read the private key and the X.509 cert the PKCS#7 message * will point to. */ @@ -210,7 +254,8 @@ int main(int argc, char **argv) bm = BIO_new_file(module_name, "rb"); ERR(!bm, "%s", module_name); - /* Load the CMS message from the digest buffer. */ +#ifndef USE_PKCS7 + /* Load the signature message from the digest buffer. 
*/ cms = CMS_sign(NULL, NULL, NULL, NULL, CMS_NOCERTS | CMS_PARTIAL | CMS_BINARY | CMS_DETACHED | CMS_STREAM); ERR(!cms, "CMS_sign"); @@ -218,17 +263,31 @@ int main(int argc, char **argv) ERR(!CMS_add1_signer(cms, x509, private_key, digest_algo, CMS_NOCERTS | CMS_BINARY | CMS_NOSMIMECAP | use_keyid | use_signed_attrs), - "CMS_sign_add_signer"); + "CMS_add1_signer"); ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0, "CMS_final"); - if (save_cms) { - char *cms_name; +#else + pkcs7 = PKCS7_sign(x509, private_key, NULL, bm, + PKCS7_NOCERTS | PKCS7_BINARY | + PKCS7_DETACHED | use_signed_attrs); + ERR(!pkcs7, "PKCS7_sign"); +#endif + + if (save_sig) { + char *sig_file_name; - ERR(asprintf(&cms_name, "%s.p7s", module_name) < 0, "asprintf"); - b = BIO_new_file(cms_name, "wb"); - ERR(!b, "%s", cms_name); - ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0, "%s", cms_name); + ERR(asprintf(&sig_file_name, "%s.p7s", module_name) < 0, + "asprintf"); + b = BIO_new_file(sig_file_name, "wb"); + ERR(!b, "%s", sig_file_name); +#ifndef USE_PKCS7 + ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0, + "%s", sig_file_name); +#else + ERR(i2d_PKCS7_bio(b, pkcs7) < 0, + "%s", sig_file_name); +#endif BIO_free(b); } @@ -244,9 +303,13 @@ int main(int argc, char **argv) ERR(n < 0, "%s", module_name); module_size = BIO_number_written(bd); +#ifndef USE_PKCS7 ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name); - cms_size = BIO_number_written(bd) - module_size; - sig_info.sig_len = htonl(cms_size); +#else + ERR(i2d_PKCS7_bio(bd, pkcs7) < 0, "%s", dest_name); +#endif + sig_size = BIO_number_written(bd) - module_size; + sig_info.sig_len = htonl(sig_size); ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name); ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, "%s", dest_name); diff --git a/security/device_cgroup.c b/security/device_cgroup.c index 73455089feef..03c1652c9a1f 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c @@ -401,7 +401,7 @@ static bool verify_new_ex(struct dev_cgroup *dev_cgroup, bool match = false; RCU_LOCKDEP_WARN(!rcu_read_lock_held() && - lockdep_is_held(&devcgroup_mutex), + !lockdep_is_held(&devcgroup_mutex), "device_cgroup:verify_new_ex called without proper synchronization"); if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) { diff --git a/security/keys/gc.c b/security/keys/gc.c index c7952375ac53..addf060399e0 100644 --- a/security/keys/gc.c +++ b/security/keys/gc.c @@ -134,6 +134,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys) kdebug("- %u", key->serial); key_check(key); + /* Throw away the key data if the key is instantiated */ + if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) && + !test_bit(KEY_FLAG_NEGATIVE, &key->flags) && + key->type->destroy) + key->type->destroy(key); + security_key_free(key); /* deal with the user's key tracking and quota */ @@ -148,10 +154,6 @@ static noinline void key_gc_unused_keys(struct list_head *keys) if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) atomic_dec(&key->user->nikeys); - /* now throw away the key memory */ - if (key->type->destroy) - key->type->destroy(key); - key_user_put(key->user); kfree(key->description); diff --git a/security/keys/request_key.c b/security/keys/request_key.c index 486ef6fa393b..0d6253124278 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c @@ -440,6 +440,9 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx, kenter(""); + if (ctx->index_key.type == &key_type_keyring) + return ERR_PTR(-EPERM); + user = 
key_user_lookup(current_fsuid()); if (!user) return ERR_PTR(-ENOMEM); diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig index 885683a3b0bd..e0406211716b 100644 --- a/sound/arm/Kconfig +++ b/sound/arm/Kconfig @@ -9,6 +9,14 @@ menuconfig SND_ARM Drivers that are implemented on ASoC can be found in "ALSA for SoC audio support" section. +config SND_PXA2XX_LIB + tristate + select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97 + select SND_DMAENGINE_PCM + +config SND_PXA2XX_LIB_AC97 + bool + if SND_ARM config SND_ARMAACI @@ -21,13 +29,6 @@ config SND_PXA2XX_PCM tristate select SND_PCM -config SND_PXA2XX_LIB - tristate - select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97 - -config SND_PXA2XX_LIB_AC97 - bool - config SND_PXA2XX_AC97 tristate "AC97 driver for the Intel PXA2xx chip" depends on ARCH_PXA diff --git a/sound/hda/ext/hdac_ext_bus.c b/sound/hda/ext/hdac_ext_bus.c index 4449d1a99089..2433f7c81472 100644 --- a/sound/hda/ext/hdac_ext_bus.c +++ b/sound/hda/ext/hdac_ext_bus.c @@ -19,6 +19,7 @@ #include <linux/module.h> #include <linux/slab.h> +#include <linux/io.h> #include <sound/hdaudio_ext.h> MODULE_DESCRIPTION("HDA extended core"); diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 37f43a1b34ef..a249d5486889 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -3367,10 +3367,8 @@ int snd_hda_codec_build_pcms(struct hda_codec *codec) int dev, err; err = snd_hda_codec_parse_pcms(codec); - if (err < 0) { - snd_hda_codec_reset(codec); + if (err < 0) return err; - } /* attach a new PCM streams */ list_for_each_entry(cpcm, &codec->pcm_list_head, list) { diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c index 477742cb70a2..58c0aad37284 100644 --- a/sound/pci/hda/hda_tegra.c +++ b/sound/pci/hda/hda_tegra.c @@ -73,6 +73,7 @@ struct hda_tegra { struct clk *hda2codec_2x_clk; struct clk *hda2hdmi_clk; void __iomem *regs; + struct work_struct probe_work; }; #ifdef CONFIG_PM @@ -294,7 +295,9 @@ static int hda_tegra_dev_disconnect(struct snd_device *device) static int hda_tegra_dev_free(struct snd_device *device) { struct azx *chip = device->device_data; + struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip); + cancel_work_sync(&hda->probe_work); if (azx_bus(chip)->chip_init) { azx_stop_all_streams(chip); azx_stop_chip(chip); @@ -426,6 +429,9 @@ static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev) /* * constructor */ + +static void hda_tegra_probe_work(struct work_struct *work); + static int hda_tegra_create(struct snd_card *card, unsigned int driver_caps, struct hda_tegra *hda) @@ -452,6 +458,8 @@ static int hda_tegra_create(struct snd_card *card, chip->single_cmd = false; chip->snoop = true; + INIT_WORK(&hda->probe_work, hda_tegra_probe_work); + err = azx_bus_init(chip, NULL, &hda_tegra_io_ops); if (err < 0) return err; @@ -499,6 +507,21 @@ static int hda_tegra_probe(struct platform_device *pdev) card->private_data = chip; dev_set_drvdata(&pdev->dev, card); + schedule_work(&hda->probe_work); + + return 0; + +out_free: + snd_card_free(card); + return err; +} + +static void hda_tegra_probe_work(struct work_struct *work) +{ + struct hda_tegra *hda = container_of(work, struct hda_tegra, probe_work); + struct azx *chip = &hda->chip; + struct platform_device *pdev = to_platform_device(hda->dev); + int err; err = hda_tegra_first_init(chip, pdev); if (err < 0) @@ -520,11 +543,8 @@ static int hda_tegra_probe(struct platform_device *pdev) chip->running = 1; snd_hda_set_power_save(&chip->bus, power_save * 1000); - return 0; - 
-out_free: - snd_card_free(card); - return err; + out_free: + return; /* no error return from async probe */ } static int hda_tegra_remove(struct platform_device *pdev) diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 584a0343ab0c..85813de26da8 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -633,6 +633,7 @@ static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = { SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11), SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6), SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6), + SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11), {} /* terminator */ }; diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index ca03c40609fc..2f0ec7c45fc7 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -819,6 +819,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410), SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410), SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD), + SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index a75b5611d1e4..16b8dcba5c12 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -4188,6 +4188,24 @@ static void alc_fixup_disable_aamix(struct hda_codec *codec, } } +/* fixup for Thinkpad docks: add dock pins, avoid HP parser fixup */ +static void alc_fixup_tpt440_dock(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + static const struct hda_pintbl pincfgs[] = { + { 0x16, 0x21211010 }, /* dock headphone */ + { 0x19, 0x21a11010 }, /* dock mic */ + { } + }; + struct alc_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; + codec->power_save_node = 0; /* avoid click noises */ + snd_hda_apply_pincfgs(codec, pincfgs); + } +} + static void alc_shutup_dell_xps13(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; @@ -4562,7 +4580,6 @@ enum { ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC, ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, ALC292_FIXUP_TPT440_DOCK, - ALC292_FIXUP_TPT440_DOCK2, ALC283_FIXUP_BXBT2807_MIC, ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED, ALC282_FIXUP_ASPIRE_V5_PINS, @@ -5029,17 +5046,7 @@ static const struct hda_fixup alc269_fixups[] = { }, [ALC292_FIXUP_TPT440_DOCK] = { .type = HDA_FIXUP_FUNC, - .v.func = alc269_fixup_pincfg_no_hp_to_lineout, - .chained = true, - .chain_id = ALC292_FIXUP_TPT440_DOCK2 - }, - [ALC292_FIXUP_TPT440_DOCK2] = { - .type = HDA_FIXUP_PINS, - .v.pins = (const struct hda_pintbl[]) { - { 0x16, 0x21211010 }, /* dock headphone */ - { 0x19, 0x21a11010 }, /* dock mic */ - { } - }, + .v.func = alc_fixup_tpt440_dock, .chained = true, .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST }, @@ -5299,6 +5306,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x17aa, 0x2223, 
"ThinkPad T550", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 9d947aef2c8b..def5cc8dff02 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -4520,7 +4520,11 @@ static int patch_stac92hd73xx(struct hda_codec *codec) return err; spec = codec->spec; - codec->power_save_node = 1; + /* enable power_save_node only for new 92HD89xx chips, as it causes + * click noises on old 92HD73xx chips. + */ + if ((codec->core.vendor_id & 0xfffffff0) != 0x111d7670) + codec->power_save_node = 1; spec->linear_tone_beep = 0; spec->gen.mixer_nid = 0x1d; spec->have_spdif_mux = 1; diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig index 225bfda414e9..7ff7d88e46dd 100644 --- a/sound/soc/Kconfig +++ b/sound/soc/Kconfig @@ -9,7 +9,6 @@ menuconfig SND_SOC select SND_JACK if INPUT=y || INPUT=SND select REGMAP_I2C if I2C select REGMAP_SPI if SPI_MASTER - select SND_COMPRESS_OFFLOAD ---help--- If you want ASoC support, you should say Y here and also to the @@ -30,6 +29,10 @@ config SND_SOC_GENERIC_DMAENGINE_PCM bool select SND_DMAENGINE_PCM +config SND_SOC_COMPRESS + bool + select SND_COMPRESS_OFFLOAD + config SND_SOC_TOPOLOGY bool @@ -58,6 +61,7 @@ source "sound/soc/sh/Kconfig" source "sound/soc/sirf/Kconfig" source "sound/soc/spear/Kconfig" source "sound/soc/sti/Kconfig" +source "sound/soc/sunxi/Kconfig" source "sound/soc/tegra/Kconfig" source "sound/soc/txx9/Kconfig" source "sound/soc/ux500/Kconfig" diff --git a/sound/soc/Makefile b/sound/soc/Makefile index 134aca150a50..8eb06db32fa0 100644 --- a/sound/soc/Makefile +++ b/sound/soc/Makefile @@ -1,5 +1,6 @@ snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o -snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o soc-ops.o +snd-soc-core-objs += soc-pcm.o soc-io.o soc-devres.o soc-ops.o +snd-soc-core-$(CONFIG_SND_SOC_COMPRESS) += soc-compress.o ifneq ($(CONFIG_SND_SOC_TOPOLOGY),) snd-soc-core-objs += soc-topology.o @@ -40,6 +41,7 @@ obj-$(CONFIG_SND_SOC) += sh/ obj-$(CONFIG_SND_SOC) += sirf/ obj-$(CONFIG_SND_SOC) += spear/ obj-$(CONFIG_SND_SOC) += sti/ +obj-$(CONFIG_SND_SOC) += sunxi/ obj-$(CONFIG_SND_SOC) += tegra/ obj-$(CONFIG_SND_SOC) += txx9/ obj-$(CONFIG_SND_SOC) += ux500/ diff --git a/sound/soc/atmel/atmel_wm8904.c b/sound/soc/atmel/atmel_wm8904.c index aa354e1c6ff7..1933bcd46cca 100644 --- a/sound/soc/atmel/atmel_wm8904.c +++ b/sound/soc/atmel/atmel_wm8904.c @@ -176,6 +176,7 @@ static const struct of_device_id atmel_asoc_wm8904_dt_ids[] = { { .compatible = "atmel,asoc-wm8904", }, { } }; +MODULE_DEVICE_TABLE(of, atmel_asoc_wm8904_dt_ids); #endif static struct platform_driver atmel_asoc_wm8904_driver = { diff --git a/sound/soc/au1x/db1000.c b/sound/soc/au1x/db1000.c index 452f404abfd2..e97c32798e98 100644 --- a/sound/soc/au1x/db1000.c +++ b/sound/soc/au1x/db1000.c @@ -38,14 +38,7 @@ static int db1000_audio_probe(struct platform_device *pdev) { struct snd_soc_card *card = &db1000_ac97; card->dev = &pdev->dev; - return snd_soc_register_card(card); -} - -static int db1000_audio_remove(struct platform_device *pdev) -{ - struct snd_soc_card *card = platform_get_drvdata(pdev); - snd_soc_unregister_card(card); - return 0; + return devm_snd_soc_register_card(&pdev->dev, card); } static struct platform_driver db1000_audio_driver = 
{ @@ -54,7 +47,6 @@ static struct platform_driver db1000_audio_driver = { .pm = &snd_soc_pm_ops, }, .probe = db1000_audio_probe, - .remove = db1000_audio_remove, }; module_platform_driver(db1000_audio_driver); diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c index 58c3164802b8..5c73061d912a 100644 --- a/sound/soc/au1x/db1200.c +++ b/sound/soc/au1x/db1200.c @@ -129,6 +129,8 @@ static struct snd_soc_dai_link db1300_i2s_dai = { .cpu_dai_name = "au1xpsc_i2s.2", .platform_name = "au1xpsc-pcm.2", .codec_name = "wm8731.0-001b", + .dai_fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF | + SND_SOC_DAIFMT_CBM_CFM, .ops = &db1200_i2s_wm8731_ops, }; @@ -146,6 +148,8 @@ static struct snd_soc_dai_link db1550_i2s_dai = { .cpu_dai_name = "au1xpsc_i2s.3", .platform_name = "au1xpsc-pcm.3", .codec_name = "wm8731.0-001b", + .dai_fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF | + SND_SOC_DAIFMT_CBM_CFM, .ops = &db1200_i2s_wm8731_ops, }; @@ -174,14 +178,7 @@ static int db1200_audio_probe(struct platform_device *pdev) card = db1200_cards[pid->driver_data]; card->dev = &pdev->dev; - return snd_soc_register_card(card); -} - -static int db1200_audio_remove(struct platform_device *pdev) -{ - struct snd_soc_card *card = platform_get_drvdata(pdev); - snd_soc_unregister_card(card); - return 0; + return devm_snd_soc_register_card(&pdev->dev, card); } static struct platform_driver db1200_audio_driver = { @@ -191,7 +188,6 @@ static struct platform_driver db1200_audio_driver = { }, .id_table = db1200_pids, .probe = db1200_audio_probe, - .remove = db1200_audio_remove, }; module_platform_driver(db1200_audio_driver); diff --git a/sound/soc/au1x/psc-i2s.c b/sound/soc/au1x/psc-i2s.c index 38e853add96e..0bf9d62b91a0 100644 --- a/sound/soc/au1x/psc-i2s.c +++ b/sound/soc/au1x/psc-i2s.c @@ -296,7 +296,6 @@ static int au1xpsc_i2s_drvprobe(struct platform_device *pdev) { struct resource *iores, *dmares; unsigned long sel; - int ret; struct au1xpsc_audio_data *wd; wd = devm_kzalloc(&pdev->dev, sizeof(struct au1xpsc_audio_data), diff --git a/sound/soc/blackfin/bf5xx-ad1836.c b/sound/soc/blackfin/bf5xx-ad1836.c index 5bf1501e5e3c..864df2616e10 100644 --- a/sound/soc/blackfin/bf5xx-ad1836.c +++ b/sound/soc/blackfin/bf5xx-ad1836.c @@ -87,27 +87,18 @@ static int bf5xx_ad1836_driver_probe(struct platform_device *pdev) card->dev = &pdev->dev; platform_set_drvdata(pdev, card); - ret = snd_soc_register_card(card); + ret = devm_snd_soc_register_card(&pdev->dev, card); if (ret) dev_err(&pdev->dev, "Failed to register card\n"); return ret; } -static int bf5xx_ad1836_driver_remove(struct platform_device *pdev) -{ - struct snd_soc_card *card = platform_get_drvdata(pdev); - - snd_soc_unregister_card(card); - return 0; -} - static struct platform_driver bf5xx_ad1836_driver = { .driver = { .name = "bfin-snd-ad1836", .pm = &snd_soc_pm_ops, }, .probe = bf5xx_ad1836_driver_probe, - .remove = bf5xx_ad1836_driver_remove, }; module_platform_driver(bf5xx_ad1836_driver); diff --git a/sound/soc/blackfin/bfin-eval-adau1373.c b/sound/soc/blackfin/bfin-eval-adau1373.c index 523baf5820d7..72ac78988426 100644 --- a/sound/soc/blackfin/bfin-eval-adau1373.c +++ b/sound/soc/blackfin/bfin-eval-adau1373.c @@ -154,16 +154,7 @@ static int bfin_eval_adau1373_probe(struct platform_device *pdev) card->dev = &pdev->dev; - return snd_soc_register_card(&bfin_eval_adau1373); -} - -static int bfin_eval_adau1373_remove(struct platform_device *pdev) -{ - struct snd_soc_card *card = platform_get_drvdata(pdev); - - snd_soc_unregister_card(card); - - return 0; + 
return devm_snd_soc_register_card(&pdev->dev, &bfin_eval_adau1373); } static struct platform_driver bfin_eval_adau1373_driver = { @@ -172,7 +163,6 @@ static struct platform_driver bfin_eval_adau1373_driver = { .pm = &snd_soc_pm_ops, }, .probe = bfin_eval_adau1373_probe, - .remove = bfin_eval_adau1373_remove, }; module_platform_driver(bfin_eval_adau1373_driver); diff --git a/sound/soc/blackfin/bfin-eval-adau1701.c b/sound/soc/blackfin/bfin-eval-adau1701.c index f9e926dfd4ef..5c67f72cf9a9 100644 --- a/sound/soc/blackfin/bfin-eval-adau1701.c +++ b/sound/soc/blackfin/bfin-eval-adau1701.c @@ -94,16 +94,7 @@ static int bfin_eval_adau1701_probe(struct platform_device *pdev) card->dev = &pdev->dev; - return snd_soc_register_card(&bfin_eval_adau1701); -} - -static int bfin_eval_adau1701_remove(struct platform_device *pdev) -{ - struct snd_soc_card *card = platform_get_drvdata(pdev); - - snd_soc_unregister_card(card); - - return 0; + return devm_snd_soc_register_card(&pdev->dev, &bfin_eval_adau1701); } static struct platform_driver bfin_eval_adau1701_driver = { @@ -112,7 +103,6 @@ static struct platform_driver bfin_eval_adau1701_driver = { .pm = &snd_soc_pm_ops, }, .probe = bfin_eval_adau1701_probe, - .remove = bfin_eval_adau1701_remove, }; module_platform_driver(bfin_eval_adau1701_driver); diff --git a/sound/soc/blackfin/bfin-eval-adav80x.c b/sound/soc/blackfin/bfin-eval-adav80x.c index 27eee66afdb2..1037477d10b2 100644 --- a/sound/soc/blackfin/bfin-eval-adav80x.c +++ b/sound/soc/blackfin/bfin-eval-adav80x.c @@ -119,16 +119,7 @@ static int bfin_eval_adav80x_probe(struct platform_device *pdev) card->dev = &pdev->dev; - return snd_soc_register_card(&bfin_eval_adav80x); -} - -static int bfin_eval_adav80x_remove(struct platform_device *pdev) -{ - struct snd_soc_card *card = platform_get_drvdata(pdev); - - snd_soc_unregister_card(card); - - return 0; + return devm_snd_soc_register_card(&pdev->dev, &bfin_eval_adav80x); } static const struct platform_device_id bfin_eval_adav80x_ids[] = { @@ -144,7 +135,6 @@ static struct platform_driver bfin_eval_adav80x_driver = { .pm = &snd_soc_pm_ops, }, .probe = bfin_eval_adav80x_probe, - .remove = bfin_eval_adav80x_remove, .id_table = bfin_eval_adav80x_ids, }; diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 0c9733ecd17f..70e5a75901aa 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -36,6 +36,7 @@ config SND_SOC_ALL_CODECS select SND_SOC_AK4104 if SPI_MASTER select SND_SOC_AK4535 if I2C select SND_SOC_AK4554 + select SND_SOC_AK4613 if I2C select SND_SOC_AK4641 if I2C select SND_SOC_AK4642 if I2C select SND_SOC_AK4671 if I2C @@ -79,7 +80,6 @@ config SND_SOC_ALL_CODECS select SND_SOC_MAX9877 if I2C select SND_SOC_MC13783 if MFD_MC13XXX select SND_SOC_ML26124 if I2C - select SND_SOC_HDMI_CODEC select SND_SOC_PCM1681 if I2C select SND_SOC_PCM1792A if SPI_MASTER select SND_SOC_PCM3008 @@ -319,6 +319,10 @@ config SND_SOC_AK4535 config SND_SOC_AK4554 tristate "AKM AK4554 CODEC" +config SND_SOC_AK4613 + tristate "AKM AK4613 CODEC" + depends on I2C + config SND_SOC_AK4641 tristate @@ -442,9 +446,6 @@ config SND_SOC_BT_SCO config SND_SOC_DMIC tristate -config SND_SOC_HDMI_CODEC - tristate "HDMI stub CODEC" - config SND_SOC_ES8328 tristate "Everest Semi ES8328 CODEC" diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile index 4a32077954ae..be1491acb6ae 100644 --- a/sound/soc/codecs/Makefile +++ b/sound/soc/codecs/Makefile @@ -26,6 +26,7 @@ snd-soc-ads117x-objs := ads117x.o snd-soc-ak4104-objs := ak4104.o 
snd-soc-ak4535-objs := ak4535.o snd-soc-ak4554-objs := ak4554.o +snd-soc-ak4613-objs := ak4613.o snd-soc-ak4641-objs := ak4641.o snd-soc-ak4642-objs := ak4642.o snd-soc-ak4671-objs := ak4671.o @@ -72,7 +73,6 @@ snd-soc-max98925-objs := max98925.o snd-soc-max9850-objs := max9850.o snd-soc-mc13783-objs := mc13783.o snd-soc-ml26124-objs := ml26124.o -snd-soc-hdmi-codec-objs := hdmi.o snd-soc-pcm1681-objs := pcm1681.o snd-soc-pcm1792a-codec-objs := pcm1792a.o snd-soc-pcm3008-objs := pcm3008.o @@ -216,6 +216,7 @@ obj-$(CONFIG_SND_SOC_ADS117X) += snd-soc-ads117x.o obj-$(CONFIG_SND_SOC_AK4104) += snd-soc-ak4104.o obj-$(CONFIG_SND_SOC_AK4535) += snd-soc-ak4535.o obj-$(CONFIG_SND_SOC_AK4554) += snd-soc-ak4554.o +obj-$(CONFIG_SND_SOC_AK4613) += snd-soc-ak4613.o obj-$(CONFIG_SND_SOC_AK4641) += snd-soc-ak4641.o obj-$(CONFIG_SND_SOC_AK4642) += snd-soc-ak4642.o obj-$(CONFIG_SND_SOC_AK4671) += snd-soc-ak4671.o @@ -264,7 +265,6 @@ obj-$(CONFIG_SND_SOC_MAX98925) += snd-soc-max98925.o obj-$(CONFIG_SND_SOC_MAX9850) += snd-soc-max9850.o obj-$(CONFIG_SND_SOC_MC13783) += snd-soc-mc13783.o obj-$(CONFIG_SND_SOC_ML26124) += snd-soc-ml26124.o -obj-$(CONFIG_SND_SOC_HDMI_CODEC) += snd-soc-hdmi-codec.o obj-$(CONFIG_SND_SOC_PCM1681) += snd-soc-pcm1681.o obj-$(CONFIG_SND_SOC_PCM1792A) += snd-soc-pcm1792a-codec.o obj-$(CONFIG_SND_SOC_PCM3008) += snd-soc-pcm3008.o diff --git a/sound/soc/codecs/ak4613.c b/sound/soc/codecs/ak4613.c new file mode 100644 index 000000000000..07a266460ec3 --- /dev/null +++ b/sound/soc/codecs/ak4613.c @@ -0,0 +1,497 @@ +/* + * ak4613.c -- Asahi Kasei ALSA Soc Audio driver + * + * Copyright (C) 2015 Renesas Electronics Corporation + * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> + * + * Based on ak4642.c by Kuninori Morimoto + * Based on wm8731.c by Richard Purdie + * Based on ak4535.c by Richard Purdie + * Based on wm8753.c by Liam Girdwood + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/clk.h> +#include <linux/i2c.h> +#include <linux/slab.h> +#include <linux/of_device.h> +#include <linux/module.h> +#include <linux/regmap.h> +#include <sound/soc.h> +#include <sound/pcm_params.h> +#include <sound/tlv.h> + +#define PW_MGMT1 0x00 /* Power Management 1 */ +#define PW_MGMT2 0x01 /* Power Management 2 */ +#define PW_MGMT3 0x02 /* Power Management 3 */ +#define CTRL1 0x03 /* Control 1 */ +#define CTRL2 0x04 /* Control 2 */ +#define DEMP1 0x05 /* De-emphasis1 */ +#define DEMP2 0x06 /* De-emphasis2 */ +#define OFD 0x07 /* Overflow Detect */ +#define ZRD 0x08 /* Zero Detect */ +#define ICTRL 0x09 /* Input Control */ +#define OCTRL 0x0a /* Output Control */ +#define LOUT1 0x0b /* LOUT1 Volume Control */ +#define ROUT1 0x0c /* ROUT1 Volume Control */ +#define LOUT2 0x0d /* LOUT2 Volume Control */ +#define ROUT2 0x0e /* ROUT2 Volume Control */ +#define LOUT3 0x0f /* LOUT3 Volume Control */ +#define ROUT3 0x10 /* ROUT3 Volume Control */ +#define LOUT4 0x11 /* LOUT4 Volume Control */ +#define ROUT4 0x12 /* ROUT4 Volume Control */ +#define LOUT5 0x13 /* LOUT5 Volume Control */ +#define ROUT5 0x14 /* ROUT5 Volume Control */ +#define LOUT6 0x15 /* LOUT6 Volume Control */ +#define ROUT6 0x16 /* ROUT6 Volume Control */ + +/* PW_MGMT1 */ +#define RSTN BIT(0) +#define PMDAC BIT(1) +#define PMADC BIT(2) +#define PMVR BIT(3) + +/* PW_MGMT2 */ +#define PMAD_ALL 0x7 + +/* PW_MGMT3 */ +#define PMDA_ALL 0x3f + +/* CTRL1 */ +#define DIF0 BIT(3) +#define DIF1 BIT(4) +#define DIF2 BIT(5) +#define TDM0 BIT(6) +#define TDM1 BIT(7) +#define NO_FMT (0xff) +#define FMT_MASK (0xf8) + +/* CTRL2 */ +#define DFS_NORMAL_SPEED (0 << 2) +#define DFS_DOUBLE_SPEED (1 << 2) +#define DFS_QUAD_SPEED (2 << 2) + +struct ak4613_priv { + struct mutex lock; + + unsigned int fmt; + u8 fmt_ctrl; + int cnt; +}; + +struct ak4613_formats { + unsigned int width; + unsigned int fmt; +}; + +struct ak4613_interface { + struct ak4613_formats capture; + struct ak4613_formats playback; +}; + +/* + * Playback Volume + * + * max : 0x00 : 0 dB + * ( 0.5 dB step ) + * min : 0xFE : -127.0 dB + * mute: 0xFF + */ +static const DECLARE_TLV_DB_SCALE(out_tlv, -12750, 50, 1); + +static const struct snd_kcontrol_new ak4613_snd_controls[] = { + SOC_DOUBLE_R_TLV("Digital Playback Volume1", LOUT1, ROUT1, + 0, 0xFF, 1, out_tlv), + SOC_DOUBLE_R_TLV("Digital Playback Volume2", LOUT2, ROUT2, + 0, 0xFF, 1, out_tlv), + SOC_DOUBLE_R_TLV("Digital Playback Volume3", LOUT3, ROUT3, + 0, 0xFF, 1, out_tlv), + SOC_DOUBLE_R_TLV("Digital Playback Volume4", LOUT4, ROUT4, + 0, 0xFF, 1, out_tlv), + SOC_DOUBLE_R_TLV("Digital Playback Volume5", LOUT5, ROUT5, + 0, 0xFF, 1, out_tlv), + SOC_DOUBLE_R_TLV("Digital Playback Volume6", LOUT6, ROUT6, + 0, 0xFF, 1, out_tlv), +}; + +static const struct reg_default ak4613_reg[] = { + { 0x0, 0x0f }, { 0x1, 0x07 }, { 0x2, 0x3f }, { 0x3, 0x20 }, + { 0x4, 0x20 }, { 0x5, 0x55 }, { 0x6, 0x05 }, { 0x7, 0x07 }, + { 0x8, 0x0f }, { 0x9, 0x07 }, { 0xa, 0x3f }, { 0xb, 0x00 }, + { 0xc, 0x00 }, { 0xd, 0x00 }, { 0xe, 0x00 }, { 0xf, 0x00 }, + { 0x10, 0x00 }, { 0x11, 0x00 }, { 0x12, 0x00 }, { 0x13, 0x00 }, + { 0x14, 0x00 }, { 0x15, 0x00 }, { 0x16, 0x00 }, +}; + +#define AUDIO_IFACE_IDX_TO_VAL(i) (i << 3) +#define AUDIO_IFACE(b, fmt) { b, SND_SOC_DAIFMT_##fmt } +static const struct ak4613_interface ak4613_iface[] = { + /* capture */ /* playback */ + [0] = { AUDIO_IFACE(24, LEFT_J), AUDIO_IFACE(16, RIGHT_J) }, + [1] = { AUDIO_IFACE(24, LEFT_J), AUDIO_IFACE(20, RIGHT_J) }, + [2] = { AUDIO_IFACE(24, LEFT_J), AUDIO_IFACE(24, 
RIGHT_J) }, + [3] = { AUDIO_IFACE(24, LEFT_J), AUDIO_IFACE(24, LEFT_J) }, + [4] = { AUDIO_IFACE(24, I2S), AUDIO_IFACE(24, I2S) }, +}; + +static const struct regmap_config ak4613_regmap_cfg = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x16, + .reg_defaults = ak4613_reg, + .num_reg_defaults = ARRAY_SIZE(ak4613_reg), +}; + +static const struct of_device_id ak4613_of_match[] = { + { .compatible = "asahi-kasei,ak4613", .data = &ak4613_regmap_cfg }, + {}, +}; +MODULE_DEVICE_TABLE(of, ak4613_of_match); + +static const struct i2c_device_id ak4613_i2c_id[] = { + { "ak4613", (kernel_ulong_t)&ak4613_regmap_cfg }, + { } +}; +MODULE_DEVICE_TABLE(i2c, ak4613_i2c_id); + +static const struct snd_soc_dapm_widget ak4613_dapm_widgets[] = { + + /* Outputs */ + SND_SOC_DAPM_OUTPUT("LOUT1"), + SND_SOC_DAPM_OUTPUT("LOUT2"), + SND_SOC_DAPM_OUTPUT("LOUT3"), + SND_SOC_DAPM_OUTPUT("LOUT4"), + SND_SOC_DAPM_OUTPUT("LOUT5"), + SND_SOC_DAPM_OUTPUT("LOUT6"), + + SND_SOC_DAPM_OUTPUT("ROUT1"), + SND_SOC_DAPM_OUTPUT("ROUT2"), + SND_SOC_DAPM_OUTPUT("ROUT3"), + SND_SOC_DAPM_OUTPUT("ROUT4"), + SND_SOC_DAPM_OUTPUT("ROUT5"), + SND_SOC_DAPM_OUTPUT("ROUT6"), + + /* Inputs */ + SND_SOC_DAPM_INPUT("LIN1"), + SND_SOC_DAPM_INPUT("LIN2"), + + SND_SOC_DAPM_INPUT("RIN1"), + SND_SOC_DAPM_INPUT("RIN2"), + + /* DAC */ + SND_SOC_DAPM_DAC("DAC1", NULL, PW_MGMT3, 0, 0), + SND_SOC_DAPM_DAC("DAC2", NULL, PW_MGMT3, 1, 0), + SND_SOC_DAPM_DAC("DAC3", NULL, PW_MGMT3, 2, 0), + SND_SOC_DAPM_DAC("DAC4", NULL, PW_MGMT3, 3, 0), + SND_SOC_DAPM_DAC("DAC5", NULL, PW_MGMT3, 4, 0), + SND_SOC_DAPM_DAC("DAC6", NULL, PW_MGMT3, 5, 0), + + /* ADC */ + SND_SOC_DAPM_ADC("ADC1", NULL, PW_MGMT2, 0, 0), + SND_SOC_DAPM_ADC("ADC2", NULL, PW_MGMT2, 1, 0), +}; + +static const struct snd_soc_dapm_route ak4613_intercon[] = { + {"LOUT1", NULL, "DAC1"}, + {"LOUT2", NULL, "DAC2"}, + {"LOUT3", NULL, "DAC3"}, + {"LOUT4", NULL, "DAC4"}, + {"LOUT5", NULL, "DAC5"}, + {"LOUT6", NULL, "DAC6"}, + + {"ROUT1", NULL, "DAC1"}, + {"ROUT2", NULL, "DAC2"}, + {"ROUT3", NULL, "DAC3"}, + {"ROUT4", NULL, "DAC4"}, + {"ROUT5", NULL, "DAC5"}, + {"ROUT6", NULL, "DAC6"}, + + {"DAC1", NULL, "Playback"}, + {"DAC2", NULL, "Playback"}, + {"DAC3", NULL, "Playback"}, + {"DAC4", NULL, "Playback"}, + {"DAC5", NULL, "Playback"}, + {"DAC6", NULL, "Playback"}, + + {"Capture", NULL, "ADC1"}, + {"Capture", NULL, "ADC2"}, + + {"ADC1", NULL, "LIN1"}, + {"ADC2", NULL, "LIN2"}, + + {"ADC1", NULL, "RIN1"}, + {"ADC2", NULL, "RIN2"}, +}; + +static void ak4613_dai_shutdown(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct snd_soc_codec *codec = dai->codec; + struct ak4613_priv *priv = snd_soc_codec_get_drvdata(codec); + struct device *dev = codec->dev; + + mutex_lock(&priv->lock); + priv->cnt--; + if (priv->cnt < 0) { + dev_err(dev, "unexpected counter error\n"); + priv->cnt = 0; + } + if (!priv->cnt) + priv->fmt_ctrl = NO_FMT; + mutex_unlock(&priv->lock); +} + +static int ak4613_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) +{ + struct snd_soc_codec *codec = dai->codec; + struct ak4613_priv *priv = snd_soc_codec_get_drvdata(codec); + + fmt &= SND_SOC_DAIFMT_FORMAT_MASK; + + switch (fmt) { + case SND_SOC_DAIFMT_RIGHT_J: + case SND_SOC_DAIFMT_LEFT_J: + case SND_SOC_DAIFMT_I2S: + priv->fmt = fmt; + + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ak4613_dai_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct snd_soc_codec *codec = dai->codec; + struct ak4613_priv *priv = 
snd_soc_codec_get_drvdata(codec); + const struct ak4613_formats *fmts; + struct device *dev = codec->dev; + unsigned int width = params_width(params); + unsigned int fmt = priv->fmt; + unsigned int rate; + int is_play = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; + int i, ret; + u8 fmt_ctrl, ctrl2; + + rate = params_rate(params); + switch (rate) { + case 32000: + case 44100: + case 48000: + ctrl2 = DFS_NORMAL_SPEED; + break; + case 88200: + case 96000: + ctrl2 = DFS_DOUBLE_SPEED; + break; + case 176400: + case 192000: + ctrl2 = DFS_QUAD_SPEED; + break; + default: + return -EINVAL; + } + + /* + * FIXME + * + * It doesn't support TDM at this point + */ + fmt_ctrl = NO_FMT; + for (i = 0; i < ARRAY_SIZE(ak4613_iface); i++) { + fmts = (is_play) ? &ak4613_iface[i].playback : + &ak4613_iface[i].capture; + + if (fmts->fmt != fmt) + continue; + + if (fmt == SND_SOC_DAIFMT_RIGHT_J) { + if (fmts->width != width) + continue; + } else { + if (fmts->width < width) + continue; + } + + fmt_ctrl = AUDIO_IFACE_IDX_TO_VAL(i); + break; + } + + ret = -EINVAL; + if (fmt_ctrl == NO_FMT) + goto hw_params_end; + + mutex_lock(&priv->lock); + if ((priv->fmt_ctrl == NO_FMT) || + (priv->fmt_ctrl == fmt_ctrl)) { + priv->fmt_ctrl = fmt_ctrl; + priv->cnt++; + ret = 0; + } + mutex_unlock(&priv->lock); + + if (ret < 0) + goto hw_params_end; + + snd_soc_update_bits(codec, CTRL1, FMT_MASK, fmt_ctrl); + snd_soc_write(codec, CTRL2, ctrl2); + +hw_params_end: + if (ret < 0) + dev_warn(dev, "unsupported data width/format combination\n"); + + return ret; +} + +static int ak4613_set_bias_level(struct snd_soc_codec *codec, + enum snd_soc_bias_level level) +{ + u8 mgmt1 = 0; + + switch (level) { + case SND_SOC_BIAS_ON: + mgmt1 |= RSTN; + /* fall through */ + case SND_SOC_BIAS_PREPARE: + mgmt1 |= PMADC | PMDAC; + /* fall through */ + case SND_SOC_BIAS_STANDBY: + mgmt1 |= PMVR; + /* fall through */ + case SND_SOC_BIAS_OFF: + default: + break; + } + + snd_soc_write(codec, PW_MGMT1, mgmt1); + + return 0; +} + +static const struct snd_soc_dai_ops ak4613_dai_ops = { + .shutdown = ak4613_dai_shutdown, + .set_fmt = ak4613_dai_set_fmt, + .hw_params = ak4613_dai_hw_params, +}; + +#define AK4613_PCM_RATE (SNDRV_PCM_RATE_32000 |\ + SNDRV_PCM_RATE_44100 |\ + SNDRV_PCM_RATE_48000 |\ + SNDRV_PCM_RATE_64000 |\ + SNDRV_PCM_RATE_88200 |\ + SNDRV_PCM_RATE_96000 |\ + SNDRV_PCM_RATE_176400 |\ + SNDRV_PCM_RATE_192000) +#define AK4613_PCM_FMTBIT (SNDRV_PCM_FMTBIT_S16_LE |\ + SNDRV_PCM_FMTBIT_S24_LE) + +static struct snd_soc_dai_driver ak4613_dai = { + .name = "ak4613-hifi", + .playback = { + .stream_name = "Playback", + .channels_min = 2, + .channels_max = 2, + .rates = AK4613_PCM_RATE, + .formats = AK4613_PCM_FMTBIT, + }, + .capture = { + .stream_name = "Capture", + .channels_min = 2, + .channels_max = 2, + .rates = AK4613_PCM_RATE, + .formats = AK4613_PCM_FMTBIT, + }, + .ops = &ak4613_dai_ops, + .symmetric_rates = 1, +}; + +static int ak4613_resume(struct snd_soc_codec *codec) +{ + struct regmap *regmap = dev_get_regmap(codec->dev, NULL); + + regcache_mark_dirty(regmap); + return regcache_sync(regmap); +} + +static struct snd_soc_codec_driver soc_codec_dev_ak4613 = { + .resume = ak4613_resume, + .set_bias_level = ak4613_set_bias_level, + .controls = ak4613_snd_controls, + .num_controls = ARRAY_SIZE(ak4613_snd_controls), + .dapm_widgets = ak4613_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(ak4613_dapm_widgets), + .dapm_routes = ak4613_intercon, + .num_dapm_routes = ARRAY_SIZE(ak4613_intercon), +}; + +static int ak4613_i2c_probe(struct i2c_client 
*i2c, + const struct i2c_device_id *id) +{ + struct device *dev = &i2c->dev; + struct device_node *np = dev->of_node; + const struct regmap_config *regmap_cfg; + struct regmap *regmap; + struct ak4613_priv *priv; + + regmap_cfg = NULL; + if (np) { + const struct of_device_id *of_id; + + of_id = of_match_device(ak4613_of_match, dev); + if (of_id) + regmap_cfg = of_id->data; + } else { + regmap_cfg = (const struct regmap_config *)id->driver_data; + } + + if (!regmap_cfg) + return -EINVAL; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->fmt_ctrl = NO_FMT; + priv->cnt = 0; + + mutex_init(&priv->lock); + + i2c_set_clientdata(i2c, priv); + + regmap = devm_regmap_init_i2c(i2c, regmap_cfg); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + return snd_soc_register_codec(dev, &soc_codec_dev_ak4613, + &ak4613_dai, 1); +} + +static int ak4613_i2c_remove(struct i2c_client *client) +{ + snd_soc_unregister_codec(&client->dev); + return 0; +} + +static struct i2c_driver ak4613_i2c_driver = { + .driver = { + .name = "ak4613-codec", + .owner = THIS_MODULE, + .of_match_table = ak4613_of_match, + }, + .probe = ak4613_i2c_probe, + .remove = ak4613_i2c_remove, + .id_table = ak4613_i2c_id, +}; + +module_i2c_driver(ak4613_i2c_driver); + +MODULE_DESCRIPTION("Soc AK4613 driver"); +MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c index 4a90143d0e90..cda27c22812a 100644 --- a/sound/soc/codecs/ak4642.c +++ b/sound/soc/codecs/ak4642.c @@ -23,6 +23,8 @@ * AK4648 is tested. */ +#include <linux/clk.h> +#include <linux/clk-provider.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/slab.h> @@ -128,11 +130,8 @@ #define I2S (3 << 0) /* MD_CTL2 */ -#define FS0 (1 << 0) -#define FS1 (1 << 1) -#define FS2 (1 << 2) -#define FS3 (1 << 5) -#define FS_MASK (FS0 | FS1 | FS2 | FS3) +#define FSs(val) (((val & 0x7) << 0) | ((val & 0x8) << 2)) +#define PSs(val) ((val & 0x3) << 6) /* MD_CTL3 */ #define BST1 (1 << 3) @@ -147,6 +146,7 @@ struct ak4642_drvdata { struct ak4642_priv { const struct ak4642_drvdata *drvdata; + struct clk *mcko; }; /* @@ -430,56 +430,56 @@ static int ak4642_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) return 0; } +static int ak4642_set_mcko(struct snd_soc_codec *codec, + u32 frequency) +{ + u32 fs_list[] = { + [0] = 8000, + [1] = 12000, + [2] = 16000, + [3] = 24000, + [4] = 7350, + [5] = 11025, + [6] = 14700, + [7] = 22050, + [10] = 32000, + [11] = 48000, + [14] = 29400, + [15] = 44100, + }; + u32 ps_list[] = { + [0] = 256, + [1] = 128, + [2] = 64, + [3] = 32 + }; + int ps, fs; + + for (ps = 0; ps < ARRAY_SIZE(ps_list); ps++) { + for (fs = 0; fs < ARRAY_SIZE(fs_list); fs++) { + if (frequency == ps_list[ps] * fs_list[fs]) { + snd_soc_write(codec, MD_CTL2, + PSs(ps) | FSs(fs)); + return 0; + } + } + } + + return 0; +} + static int ak4642_dai_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; - u8 rate; + struct ak4642_priv *priv = snd_soc_codec_get_drvdata(codec); + u32 rate = clk_get_rate(priv->mcko); - switch (params_rate(params)) { - case 7350: - rate = FS2; - break; - case 8000: - rate = 0; - break; - case 11025: - rate = FS2 | FS0; - break; - case 12000: - rate = FS0; - break; - case 14700: - rate = FS2 | FS1; - break; - case 16000: - rate = FS1; - break; - case 22050: - rate = FS2 | FS1 | FS0; - break; - case 24000: - 
rate = FS1 | FS0; - break; - case 29400: - rate = FS3 | FS2 | FS1; - break; - case 32000: - rate = FS3 | FS1; - break; - case 44100: - rate = FS3 | FS2 | FS1 | FS0; - break; - case 48000: - rate = FS3 | FS1 | FS0; - break; - default: - return -EINVAL; - } - snd_soc_update_bits(codec, MD_CTL2, FS_MASK, rate); + if (!rate) + rate = params_rate(params) * 256; - return 0; + return ak4642_set_mcko(codec, rate); } static int ak4642_set_bias_level(struct snd_soc_codec *codec, @@ -532,7 +532,18 @@ static int ak4642_resume(struct snd_soc_codec *codec) return 0; } +static int ak4642_probe(struct snd_soc_codec *codec) +{ + struct ak4642_priv *priv = snd_soc_codec_get_drvdata(codec); + + if (priv->mcko) + ak4642_set_mcko(codec, clk_get_rate(priv->mcko)); + + return 0; +} + static struct snd_soc_codec_driver soc_codec_dev_ak4642 = { + .probe = ak4642_probe, .resume = ak4642_resume, .set_bias_level = ak4642_set_bias_level, .controls = ak4642_snd_controls, @@ -580,19 +591,54 @@ static const struct ak4642_drvdata ak4648_drvdata = { .extended_frequencies = 1, }; +#ifdef CONFIG_COMMON_CLK +static struct clk *ak4642_of_parse_mcko(struct device *dev) +{ + struct device_node *np = dev->of_node; + struct clk *clk; + const char *clk_name = np->name; + const char *parent_clk_name = NULL; + u32 rate; + + if (of_property_read_u32(np, "clock-frequency", &rate)) + return NULL; + + if (of_property_read_bool(np, "clocks")) + parent_clk_name = of_clk_get_parent_name(np, 0); + + of_property_read_string(np, "clock-output-names", &clk_name); + + clk = clk_register_fixed_rate(dev, clk_name, parent_clk_name, + (parent_clk_name) ? 0 : CLK_IS_ROOT, + rate); + if (!IS_ERR(clk)) + of_clk_add_provider(np, of_clk_src_simple_get, clk); + + return clk; +} +#else +#define ak4642_of_parse_mcko(d) 0 +#endif + static const struct of_device_id ak4642_of_match[]; static int ak4642_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { - struct device_node *np = i2c->dev.of_node; + struct device *dev = &i2c->dev; + struct device_node *np = dev->of_node; const struct ak4642_drvdata *drvdata = NULL; struct regmap *regmap; struct ak4642_priv *priv; + struct clk *mcko = NULL; if (np) { const struct of_device_id *of_id; - of_id = of_match_device(ak4642_of_match, &i2c->dev); + mcko = ak4642_of_parse_mcko(dev); + if (IS_ERR(mcko)) + mcko = NULL; + + of_id = of_match_device(ak4642_of_match, dev); if (of_id) drvdata = of_id->data; } else { @@ -600,15 +646,16 @@ static int ak4642_i2c_probe(struct i2c_client *i2c, } if (!drvdata) { - dev_err(&i2c->dev, "Unknown device type\n"); + dev_err(dev, "Unknown device type\n"); return -EINVAL; } - priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL); + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->drvdata = drvdata; + priv->mcko = mcko; i2c_set_clientdata(i2c, priv); @@ -616,7 +663,7 @@ static int ak4642_i2c_probe(struct i2c_client *i2c, if (IS_ERR(regmap)) return PTR_ERR(regmap); - return snd_soc_register_codec(&i2c->dev, + return snd_soc_register_codec(dev, &soc_codec_dev_ak4642, &ak4642_dai, 1); } diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c index 8a2221ab3d10..ac21b85ff75f 100644 --- a/sound/soc/codecs/arizona.c +++ b/sound/soc/codecs/arizona.c @@ -147,6 +147,8 @@ static int arizona_spk_ev(struct snd_soc_dapm_widget *w, 0x4f5, 0x0da); } break; + default: + break; } return 0; @@ -689,6 +691,15 @@ static void arizona_in_set_vu(struct snd_soc_codec *codec, int ena) ARIZONA_IN_VU, val); } +bool arizona_input_analog(struct 
snd_soc_codec *codec, int shift) +{ + unsigned int reg = ARIZONA_IN1L_CONTROL + ((shift / 2) * 8); + unsigned int val = snd_soc_read(codec, reg); + + return !(val & ARIZONA_IN1_MODE_MASK); +} +EXPORT_SYMBOL_GPL(arizona_input_analog); + int arizona_in_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { @@ -725,6 +736,9 @@ int arizona_in_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, reg = snd_soc_read(codec, ARIZONA_INPUT_ENABLES); if (reg == 0) arizona_in_set_vu(codec, 0); + break; + default: + break; } return 0; @@ -806,6 +820,8 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w, break; } break; + default: + break; } return 0; diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h index ada0a418ff4b..7b68d05a0939 100644 --- a/sound/soc/codecs/arizona.h +++ b/sound/soc/codecs/arizona.h @@ -294,4 +294,6 @@ extern int arizona_init_dai(struct arizona_priv *priv, int dai); int arizona_set_output_mode(struct snd_soc_codec *codec, int output, bool diff); +extern bool arizona_input_analog(struct snd_soc_codec *codec, int shift); + #endif diff --git a/sound/soc/codecs/hdmi.c b/sound/soc/codecs/hdmi.c deleted file mode 100644 index bd42ad34e004..000000000000 --- a/sound/soc/codecs/hdmi.c +++ /dev/null @@ -1,109 +0,0 @@ -/* - * ALSA SoC codec driver for HDMI audio codecs. - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ - * Author: Ricardo Neri <ricardo.neri@ti.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - */ -#include <linux/module.h> -#include <sound/soc.h> -#include <linux/of.h> -#include <linux/of_device.h> - -#define DRV_NAME "hdmi-audio-codec" - -static const struct snd_soc_dapm_widget hdmi_widgets[] = { - SND_SOC_DAPM_INPUT("RX"), - SND_SOC_DAPM_OUTPUT("TX"), -}; - -static const struct snd_soc_dapm_route hdmi_routes[] = { - { "Capture", NULL, "RX" }, - { "TX", NULL, "Playback" }, -}; - -static struct snd_soc_dai_driver hdmi_codec_dai = { - .name = "hdmi-hifi", - .playback = { - .stream_name = "Playback", - .channels_min = 2, - .channels_max = 8, - .rates = SNDRV_PCM_RATE_32000 | - SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | - SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | - SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000, - .formats = SNDRV_PCM_FMTBIT_S16_LE | - SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE, - .sig_bits = 24, - }, - .capture = { - .stream_name = "Capture", - .channels_min = 2, - .channels_max = 2, - .rates = SNDRV_PCM_RATE_32000 | - SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | - SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | - SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000, - .formats = SNDRV_PCM_FMTBIT_S16_LE | - SNDRV_PCM_FMTBIT_S24_LE, - }, - -}; - -#ifdef CONFIG_OF -static const struct of_device_id hdmi_audio_codec_ids[] = { - { .compatible = "linux,hdmi-audio", }, - { } -}; -MODULE_DEVICE_TABLE(of, hdmi_audio_codec_ids); -#endif - -static struct snd_soc_codec_driver hdmi_codec = { - .dapm_widgets = hdmi_widgets, - .num_dapm_widgets = ARRAY_SIZE(hdmi_widgets), - .dapm_routes = hdmi_routes, - .num_dapm_routes = ARRAY_SIZE(hdmi_routes), - .ignore_pmdown_time = true, -}; - -static int hdmi_codec_probe(struct platform_device *pdev) -{ - return snd_soc_register_codec(&pdev->dev, &hdmi_codec, - &hdmi_codec_dai, 1); -} - -static int hdmi_codec_remove(struct platform_device *pdev) -{ - snd_soc_unregister_codec(&pdev->dev); - return 0; -} - -static struct platform_driver hdmi_codec_driver = { - .driver = { - .name = DRV_NAME, - .of_match_table = of_match_ptr(hdmi_audio_codec_ids), - }, - - .probe = hdmi_codec_probe, - .remove = hdmi_codec_remove, -}; - -module_platform_driver(hdmi_codec_driver); - -MODULE_AUTHOR("Ricardo Neri <ricardo.neri@ti.com>"); -MODULE_DESCRIPTION("ASoC generic HDMI codec driver"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:" DRV_NAME); diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c index 3c2f0f8d6266..f823eb502367 100644 --- a/sound/soc/codecs/rt298.c +++ b/sound/soc/codecs/rt298.c @@ -50,24 +50,24 @@ struct rt298_priv { }; static struct reg_default rt298_index_def[] = { - { 0x01, 0xaaaa }, - { 0x02, 0x8aaa }, + { 0x01, 0xa5a8 }, + { 0x02, 0x8e95 }, { 0x03, 0x0002 }, - { 0x04, 0xaf01 }, - { 0x08, 0x000d }, - { 0x09, 0xd810 }, - { 0x0a, 0x0120 }, + { 0x04, 0xaf67 }, + { 0x08, 0x200f }, + { 0x09, 0xd010 }, + { 0x0a, 0x0100 }, { 0x0b, 0x0000 }, { 0x0d, 0x2800 }, - { 0x0f, 0x0000 }, - { 0x19, 0x0a17 }, + { 0x0f, 0x0022 }, + { 0x19, 0x0217 }, { 0x20, 0x0020 }, { 0x33, 0x0208 }, { 0x46, 0x0300 }, - { 0x49, 0x0004 }, - { 0x4f, 0x50e9 }, - { 0x50, 0x2000 }, - { 0x63, 0x2902 }, + { 0x49, 0x4004 }, + { 0x4f, 0x50c9 }, + { 0x50, 0x3000 }, + { 0x63, 0x1b02 }, { 0x67, 0x1111 }, { 0x68, 0x1016 }, { 0x69, 0x273f }, @@ -1214,7 +1214,7 @@ static int rt298_i2c_probe(struct i2c_client *i2c, mdelay(10); if 
(!rt298->pdata.gpio2_en) - regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0x4000); + regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0x40); else regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0); diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c index f046bb83fd97..28132375e427 100644 --- a/sound/soc/codecs/rt5645.c +++ b/sound/soc/codecs/rt5645.c @@ -619,11 +619,11 @@ static const struct snd_kcontrol_new rt5645_snd_controls[] = { RT5645_L_VOL_SFT + 1, RT5645_R_VOL_SFT + 1, 63, 0, adc_vol_tlv), /* ADC Boost Volume Control */ - SOC_DOUBLE_TLV("STO1 ADC Boost Gain", RT5645_ADC_BST_VOL1, + SOC_DOUBLE_TLV("ADC Boost Capture Volume", RT5645_ADC_BST_VOL1, RT5645_STO1_ADC_L_BST_SFT, RT5645_STO1_ADC_R_BST_SFT, 3, 0, adc_bst_tlv), - SOC_DOUBLE_TLV("STO2 ADC Boost Gain", RT5645_ADC_BST_VOL1, - RT5645_STO2_ADC_L_BST_SFT, RT5645_STO2_ADC_R_BST_SFT, 3, 0, + SOC_DOUBLE_TLV("Mono ADC Boost Capture Volume", RT5645_ADC_BST_VOL2, + RT5645_MONO_ADC_L_BST_SFT, RT5645_MONO_ADC_R_BST_SFT, 3, 0, adc_bst_tlv), /* I2S2 function select */ @@ -849,14 +849,14 @@ static const struct snd_kcontrol_new rt5645_mono_adc_r_mix[] = { static const struct snd_kcontrol_new rt5645_dac_l_mix[] = { SOC_DAPM_SINGLE("Stereo ADC Switch", RT5645_AD_DA_MIXER, RT5645_M_ADCMIX_L_SFT, 1, 1), - SOC_DAPM_SINGLE("DAC1 Switch", RT5645_AD_DA_MIXER, + SOC_DAPM_SINGLE_AUTODISABLE("DAC1 Switch", RT5645_AD_DA_MIXER, RT5645_M_DAC1_L_SFT, 1, 1), }; static const struct snd_kcontrol_new rt5645_dac_r_mix[] = { SOC_DAPM_SINGLE("Stereo ADC Switch", RT5645_AD_DA_MIXER, RT5645_M_ADCMIX_R_SFT, 1, 1), - SOC_DAPM_SINGLE("DAC1 Switch", RT5645_AD_DA_MIXER, + SOC_DAPM_SINGLE_AUTODISABLE("DAC1 Switch", RT5645_AD_DA_MIXER, RT5645_M_DAC1_R_SFT, 1, 1), }; @@ -1498,7 +1498,7 @@ static void hp_amp_power(struct snd_soc_codec *codec, int on) regmap_write(rt5645->regmap, RT5645_PR_BASE + RT5645_MAMP_INT_REG2, 0xfc00); snd_soc_write(codec, RT5645_DEPOP_M2, 0x1140); - mdelay(5); + msleep(40); rt5645->hp_on = true; } else { /* depop parameters */ @@ -2958,6 +2958,9 @@ static int rt5645_jack_detect(struct snd_soc_codec *codec, int jack_insert) } else { /* jack out */ rt5645->jack_type = 0; + regmap_update_bits(rt5645->regmap, RT5645_HP_VOL, + RT5645_L_MUTE | RT5645_R_MUTE, + RT5645_L_MUTE | RT5645_R_MUTE); regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2, RT5645_CBJ_MN_JD, RT5645_CBJ_MN_JD); regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1, diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h index 7cea2826279d..093e46d559fb 100644 --- a/sound/soc/codecs/rt5645.h +++ b/sound/soc/codecs/rt5645.h @@ -39,8 +39,8 @@ #define RT5645_STO1_ADC_DIG_VOL 0x1c #define RT5645_MONO_ADC_DIG_VOL 0x1d #define RT5645_ADC_BST_VOL1 0x1e -/* Mixer - D-D */ #define RT5645_ADC_BST_VOL2 0x20 +/* Mixer - D-D */ #define RT5645_STO1_ADC_MIXER 0x27 #define RT5645_MONO_ADC_MIXER 0x28 #define RT5645_AD_DA_MIXER 0x29 @@ -315,12 +315,14 @@ #define RT5645_STO1_ADC_R_BST_SFT 12 #define RT5645_STO1_ADC_COMP_MASK (0x3 << 10) #define RT5645_STO1_ADC_COMP_SFT 10 -#define RT5645_STO2_ADC_L_BST_MASK (0x3 << 8) -#define RT5645_STO2_ADC_L_BST_SFT 8 -#define RT5645_STO2_ADC_R_BST_MASK (0x3 << 6) -#define RT5645_STO2_ADC_R_BST_SFT 6 -#define RT5645_STO2_ADC_COMP_MASK (0x3 << 4) -#define RT5645_STO2_ADC_COMP_SFT 4 + +/* ADC Boost Volume Control (0x20) */ +#define RT5645_MONO_ADC_L_BST_MASK (0x3 << 14) +#define RT5645_MONO_ADC_L_BST_SFT 14 +#define RT5645_MONO_ADC_R_BST_MASK (0x3 << 12) +#define RT5645_MONO_ADC_R_BST_SFT 12 +#define RT5645_MONO_ADC_COMP_MASK (0x3 << 
10) +#define RT5645_MONO_ADC_COMP_SFT 10 /* Stereo2 ADC Mixer Control (0x26) */ #define RT5645_STO2_ADC_SRC_MASK (0x1 << 15) diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index bfda25ef0dd4..f540f82b1f27 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -1376,8 +1376,8 @@ static int sgtl5000_probe(struct snd_soc_codec *codec) sgtl5000->micbias_resistor << SGTL5000_BIAS_R_SHIFT); snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL, - SGTL5000_BIAS_R_MASK, - sgtl5000->micbias_voltage << SGTL5000_BIAS_R_SHIFT); + SGTL5000_BIAS_VOLT_MASK, + sgtl5000->micbias_voltage << SGTL5000_BIAS_VOLT_SHIFT); /* * disable DAP * TODO: @@ -1549,7 +1549,7 @@ static int sgtl5000_i2c_probe(struct i2c_client *client, else { sgtl5000->micbias_voltage = 0; dev_err(&client->dev, - "Unsuitable MicBias resistor\n"); + "Unsuitable MicBias voltage\n"); } } else { sgtl5000->micbias_voltage = 0; diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c index e3a0bca28bcf..cc1d3981fa4b 100644 --- a/sound/soc/codecs/tas2552.c +++ b/sound/soc/codecs/tas2552.c @@ -549,7 +549,7 @@ static struct snd_soc_dai_driver tas2552_dai[] = { /* * DAC digital volumes. From -7 to 24 dB in 1 dB steps */ -static DECLARE_TLV_DB_SCALE(dac_tlv, -7, 100, 0); +static DECLARE_TLV_DB_SCALE(dac_tlv, -700, 100, 0); static const char * const tas2552_din_source_select[] = { "Muted", diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c index 1a82b19b2644..a564759845f9 100644 --- a/sound/soc/codecs/tlv320aic3x.c +++ b/sound/soc/codecs/tlv320aic3x.c @@ -80,6 +80,7 @@ struct aic3x_priv { unsigned int sysclk; unsigned int dai_fmt; unsigned int tdm_delay; + unsigned int slot_width; struct list_head list; int master; int gpio_reset; @@ -1025,10 +1026,14 @@ static int aic3x_hw_params(struct snd_pcm_substream *substream, u8 data, j, r, p, pll_q, pll_p = 1, pll_r = 1, pll_j = 1; u16 d, pll_d = 1; int clk; + int width = aic3x->slot_width; + + if (!width) + width = params_width(params); /* select data word length */ data = snd_soc_read(codec, AIC3X_ASD_INTF_CTRLB) & (~(0x3 << 4)); - switch (params_width(params)) { + switch (width) { case 16: break; case 20: @@ -1170,12 +1175,16 @@ static int aic3x_prepare(struct snd_pcm_substream *substream, struct snd_soc_codec *codec = dai->codec; struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec); int delay = 0; + int width = aic3x->slot_width; + + if (!width) + width = substream->runtime->sample_bits; /* TDM slot selection only valid in DSP_A/_B mode */ if (aic3x->dai_fmt == SND_SOC_DAIFMT_DSP_A) - delay += (aic3x->tdm_delay + 1); + delay += (aic3x->tdm_delay*width + 1); else if (aic3x->dai_fmt == SND_SOC_DAIFMT_DSP_B) - delay += aic3x->tdm_delay; + delay += aic3x->tdm_delay*width; /* Configure data delay */ snd_soc_write(codec, AIC3X_ASD_INTF_CTRLC, delay); @@ -1296,7 +1305,20 @@ static int aic3x_set_dai_tdm_slot(struct snd_soc_dai *codec_dai, return -EINVAL; } - aic3x->tdm_delay = lsb * slot_width; + switch (slot_width) { + case 16: + case 20: + case 24: + case 32: + break; + default: + dev_err(codec->dev, "Unsupported slot width %d\n", slot_width); + return -EINVAL; + } + + + aic3x->tdm_delay = lsb; + aic3x->slot_width = slot_width; /* DOUT in high-impedance on inactive bit clocks */ snd_soc_update_bits(codec, AIC3X_ASD_INTF_CTRLA, @@ -1509,14 +1531,17 @@ static int aic3x_init(struct snd_soc_codec *codec) snd_soc_write(codec, PGAL_2_LLOPM_VOL, DEFAULT_VOL); snd_soc_write(codec, PGAR_2_RLOPM_VOL, DEFAULT_VOL); - /* Line2 to HP 
Bypass default volume, disconnect from Output Mixer */ - snd_soc_write(codec, LINE2L_2_HPLOUT_VOL, DEFAULT_VOL); - snd_soc_write(codec, LINE2R_2_HPROUT_VOL, DEFAULT_VOL); - snd_soc_write(codec, LINE2L_2_HPLCOM_VOL, DEFAULT_VOL); - snd_soc_write(codec, LINE2R_2_HPRCOM_VOL, DEFAULT_VOL); - /* Line2 Line Out default volume, disconnect from Output Mixer */ - snd_soc_write(codec, LINE2L_2_LLOPM_VOL, DEFAULT_VOL); - snd_soc_write(codec, LINE2R_2_RLOPM_VOL, DEFAULT_VOL); + /* On tlv320aic3104, these registers are reserved and must not be written */ + if (aic3x->model != AIC3X_MODEL_3104) { + /* Line2 to HP Bypass default volume, disconnect from Output Mixer */ + snd_soc_write(codec, LINE2L_2_HPLOUT_VOL, DEFAULT_VOL); + snd_soc_write(codec, LINE2R_2_HPROUT_VOL, DEFAULT_VOL); + snd_soc_write(codec, LINE2L_2_HPLCOM_VOL, DEFAULT_VOL); + snd_soc_write(codec, LINE2R_2_HPRCOM_VOL, DEFAULT_VOL); + /* Line2 Line Out default volume, disconnect from Output Mixer */ + snd_soc_write(codec, LINE2L_2_LLOPM_VOL, DEFAULT_VOL); + snd_soc_write(codec, LINE2R_2_RLOPM_VOL, DEFAULT_VOL); + } switch (aic3x->model) { case AIC3X_MODEL_3X: diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c index f2c6ad4b8fde..581ec1502228 100644 --- a/sound/soc/codecs/wm0010.c +++ b/sound/soc/codecs/wm0010.c @@ -577,7 +577,6 @@ static int wm0010_boot(struct snd_soc_codec *codec) struct wm0010_priv *wm0010 = snd_soc_codec_get_drvdata(codec); unsigned long flags; int ret; - const struct firmware *fw; struct spi_message m; struct spi_transfer t; struct dfw_pllrec pll_rec; @@ -623,14 +622,6 @@ static int wm0010_boot(struct snd_soc_codec *codec) wm0010->state = WM0010_OUT_OF_RESET; spin_unlock_irqrestore(&wm0010->irq_lock, flags); - /* First the bootloader */ - ret = request_firmware(&fw, "wm0010_stage2.bin", codec->dev); - if (ret != 0) { - dev_err(codec->dev, "Failed to request stage2 loader: %d\n", - ret); - goto abort; - } - if (!wait_for_completion_timeout(&wm0010->boot_completion, msecs_to_jiffies(20))) dev_err(codec->dev, "Failed to get interrupt from DSP\n"); @@ -673,7 +664,7 @@ static int wm0010_boot(struct snd_soc_codec *codec) img_swap = kzalloc(len, GFP_KERNEL | GFP_DMA); if (!img_swap) - goto abort; + goto abort_out; /* We need to re-order for 0010 */ byte_swap_64((u64 *)&pll_rec, img_swap, len); @@ -688,16 +679,16 @@ static int wm0010_boot(struct snd_soc_codec *codec) spi_message_add_tail(&t, &m); ret = spi_sync(spi, &m); - if (ret != 0) { + if (ret) { dev_err(codec->dev, "First PLL write failed: %d\n", ret); - goto abort; + goto abort_swap; } /* Use a second send of the message to get the return status */ ret = spi_sync(spi, &m); - if (ret != 0) { + if (ret) { dev_err(codec->dev, "Second PLL write failed: %d\n", ret); - goto abort; + goto abort_swap; } p = (u32 *)out; @@ -730,6 +721,10 @@ static int wm0010_boot(struct snd_soc_codec *codec) return 0; +abort_swap: + kfree(img_swap); +abort_out: + kfree(out); abort: /* Put the chip back into reset */ wm0010_halt(codec); diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c index 9756578fc752..c04c0bc6f58a 100644 --- a/sound/soc/codecs/wm5110.c +++ b/sound/soc/codecs/wm5110.c @@ -38,6 +38,12 @@ struct wm5110_priv { struct arizona_priv core; struct arizona_fll fll[2]; + + unsigned int in_value; + int in_pre_pending; + int in_post_pending; + + unsigned int in_pga_cache[6]; }; static const struct wm_adsp_region wm5110_dsp1_regions[] = { @@ -428,6 +434,127 @@ err: return ret; } +static int wm5110_in_pga_get(struct snd_kcontrol *kcontrol, + struct 
snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); + struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec); + struct snd_soc_card *card = dapm->card; + int ret; + + /* + * PGA Volume is also used as part of the enable sequence, so + * usage of it should be avoided whilst that is running. + */ + mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); + + ret = snd_soc_get_volsw_range(kcontrol, ucontrol); + + mutex_unlock(&card->dapm_mutex); + + return ret; +} + +static int wm5110_in_pga_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); + struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec); + struct snd_soc_card *card = dapm->card; + int ret; + + /* + * PGA Volume is also used as part of the enable sequence, so + * usage of it should be avoided whilst that is running. + */ + mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); + + ret = snd_soc_put_volsw_range(kcontrol, ucontrol); + + mutex_unlock(&card->dapm_mutex); + + return ret; +} + +static int wm5110_in_analog_ev(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); + struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec); + struct wm5110_priv *wm5110 = snd_soc_codec_get_drvdata(codec); + struct arizona *arizona = priv->arizona; + unsigned int reg, mask; + struct reg_sequence analog_seq[] = { + { 0x80, 0x3 }, + { 0x35d, 0 }, + { 0x80, 0x0 }, + }; + + reg = ARIZONA_IN1L_CONTROL + ((w->shift ^ 0x1) * 4); + mask = ARIZONA_IN1L_PGA_VOL_MASK; + + switch (event) { + case SND_SOC_DAPM_WILL_PMU: + wm5110->in_value |= 0x3 << ((w->shift ^ 0x1) * 2); + wm5110->in_pre_pending++; + wm5110->in_post_pending++; + return 0; + case SND_SOC_DAPM_PRE_PMU: + wm5110->in_pga_cache[w->shift] = snd_soc_read(codec, reg); + + snd_soc_update_bits(codec, reg, mask, + 0x40 << ARIZONA_IN1L_PGA_VOL_SHIFT); + + wm5110->in_pre_pending--; + if (wm5110->in_pre_pending == 0) { + analog_seq[1].def = wm5110->in_value; + regmap_multi_reg_write_bypassed(arizona->regmap, + analog_seq, + ARRAY_SIZE(analog_seq)); + + msleep(55); + + wm5110->in_value = 0; + } + + break; + case SND_SOC_DAPM_POST_PMU: + snd_soc_update_bits(codec, reg, mask, + wm5110->in_pga_cache[w->shift]); + + wm5110->in_post_pending--; + if (wm5110->in_post_pending == 0) + regmap_multi_reg_write_bypassed(arizona->regmap, + analog_seq, + ARRAY_SIZE(analog_seq)); + break; + default: + break; + } + + return 0; +} + +static int wm5110_in_ev(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); + struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec); + struct arizona *arizona = priv->arizona; + + switch (arizona->rev) { + case 0 ... 
4: + if (arizona_input_analog(codec, w->shift)) + wm5110_in_analog_ev(w, kcontrol, event); + + break; + default: + break; + } + + return arizona_in_ev(w, kcontrol, event); +} + static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0); static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0); static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0); @@ -454,18 +581,24 @@ SOC_ENUM("IN2 OSR", arizona_in_dmic_osr[1]), SOC_ENUM("IN3 OSR", arizona_in_dmic_osr[2]), SOC_ENUM("IN4 OSR", arizona_in_dmic_osr[3]), -SOC_SINGLE_RANGE_TLV("IN1L Volume", ARIZONA_IN1L_CONTROL, - ARIZONA_IN1L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv), -SOC_SINGLE_RANGE_TLV("IN1R Volume", ARIZONA_IN1R_CONTROL, - ARIZONA_IN1R_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv), -SOC_SINGLE_RANGE_TLV("IN2L Volume", ARIZONA_IN2L_CONTROL, - ARIZONA_IN2L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv), -SOC_SINGLE_RANGE_TLV("IN2R Volume", ARIZONA_IN2R_CONTROL, - ARIZONA_IN2R_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv), -SOC_SINGLE_RANGE_TLV("IN3L Volume", ARIZONA_IN3L_CONTROL, - ARIZONA_IN3L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv), -SOC_SINGLE_RANGE_TLV("IN3R Volume", ARIZONA_IN3R_CONTROL, - ARIZONA_IN3R_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv), +SOC_SINGLE_RANGE_EXT_TLV("IN1L Volume", ARIZONA_IN1L_CONTROL, + ARIZONA_IN1L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, + wm5110_in_pga_get, wm5110_in_pga_put, ana_tlv), +SOC_SINGLE_RANGE_EXT_TLV("IN1R Volume", ARIZONA_IN1R_CONTROL, + ARIZONA_IN1R_PGA_VOL_SHIFT, 0x40, 0x5f, 0, + wm5110_in_pga_get, wm5110_in_pga_put, ana_tlv), +SOC_SINGLE_RANGE_EXT_TLV("IN2L Volume", ARIZONA_IN2L_CONTROL, + ARIZONA_IN2L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, + wm5110_in_pga_get, wm5110_in_pga_put, ana_tlv), +SOC_SINGLE_RANGE_EXT_TLV("IN2R Volume", ARIZONA_IN2R_CONTROL, + ARIZONA_IN2R_PGA_VOL_SHIFT, 0x40, 0x5f, 0, + wm5110_in_pga_get, wm5110_in_pga_put, ana_tlv), +SOC_SINGLE_RANGE_EXT_TLV("IN3L Volume", ARIZONA_IN3L_CONTROL, + ARIZONA_IN3L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, + wm5110_in_pga_get, wm5110_in_pga_put, ana_tlv), +SOC_SINGLE_RANGE_EXT_TLV("IN3R Volume", ARIZONA_IN3R_CONTROL, + ARIZONA_IN3R_PGA_VOL_SHIFT, 0x40, 0x5f, 0, + wm5110_in_pga_get, wm5110_in_pga_put, ana_tlv), SOC_ENUM("IN HPF Cutoff Frequency", arizona_in_hpf_cut_enum), @@ -896,29 +1029,35 @@ SND_SOC_DAPM_OUTPUT("DRC1 Signal Activity"), SND_SOC_DAPM_OUTPUT("DRC2 Signal Activity"), SND_SOC_DAPM_PGA_E("IN1L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1L_ENA_SHIFT, - 0, NULL, 0, arizona_in_ev, + 0, NULL, 0, wm5110_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | + SND_SOC_DAPM_WILL_PMU), SND_SOC_DAPM_PGA_E("IN1R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1R_ENA_SHIFT, - 0, NULL, 0, arizona_in_ev, + 0, NULL, 0, wm5110_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | + SND_SOC_DAPM_WILL_PMU), SND_SOC_DAPM_PGA_E("IN2L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN2L_ENA_SHIFT, - 0, NULL, 0, arizona_in_ev, + 0, NULL, 0, wm5110_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | + SND_SOC_DAPM_WILL_PMU), SND_SOC_DAPM_PGA_E("IN2R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN2R_ENA_SHIFT, - 0, NULL, 0, arizona_in_ev, + 0, NULL, 0, wm5110_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | + SND_SOC_DAPM_WILL_PMU), SND_SOC_DAPM_PGA_E("IN3L PGA", 
ARIZONA_INPUT_ENABLES, ARIZONA_IN3L_ENA_SHIFT, - 0, NULL, 0, arizona_in_ev, + 0, NULL, 0, wm5110_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | + SND_SOC_DAPM_WILL_PMU), SND_SOC_DAPM_PGA_E("IN3R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN3R_ENA_SHIFT, - 0, NULL, 0, arizona_in_ev, + 0, NULL, 0, wm5110_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | + SND_SOC_DAPM_WILL_PMU), SND_SOC_DAPM_PGA_E("IN4L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN4L_ENA_SHIFT, 0, NULL, 0, arizona_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c index e3b7d0c57411..dbd88408861a 100644 --- a/sound/soc/codecs/wm8960.c +++ b/sound/soc/codecs/wm8960.c @@ -211,28 +211,38 @@ static int wm8960_put_deemph(struct snd_kcontrol *kcontrol, return wm8960_set_deemph(codec); } -static const DECLARE_TLV_DB_SCALE(adc_tlv, -9700, 50, 0); -static const DECLARE_TLV_DB_SCALE(dac_tlv, -12700, 50, 1); +static const DECLARE_TLV_DB_SCALE(adc_tlv, -9750, 50, 1); +static const DECLARE_TLV_DB_SCALE(inpga_tlv, -1725, 75, 0); +static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1); static const DECLARE_TLV_DB_SCALE(bypass_tlv, -2100, 300, 0); static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1); -static const DECLARE_TLV_DB_SCALE(boost_tlv, -1200, 300, 1); +static const DECLARE_TLV_DB_SCALE(lineinboost_tlv, -1500, 300, 1); +static const unsigned int micboost_tlv[] = { + TLV_DB_RANGE_HEAD(2), + 0, 1, TLV_DB_SCALE_ITEM(0, 1300, 0), + 2, 3, TLV_DB_SCALE_ITEM(2000, 900, 0), +}; static const struct snd_kcontrol_new wm8960_snd_controls[] = { SOC_DOUBLE_R_TLV("Capture Volume", WM8960_LINVOL, WM8960_RINVOL, - 0, 63, 0, adc_tlv), + 0, 63, 0, inpga_tlv), SOC_DOUBLE_R("Capture Volume ZC Switch", WM8960_LINVOL, WM8960_RINVOL, 6, 1, 0), SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL, 7, 1, 0), SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume", - WM8960_INBMIX1, 4, 7, 0, boost_tlv), + WM8960_INBMIX1, 4, 7, 0, lineinboost_tlv), SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT2 Volume", - WM8960_INBMIX1, 1, 7, 0, boost_tlv), + WM8960_INBMIX1, 1, 7, 0, lineinboost_tlv), SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT3 Volume", - WM8960_INBMIX2, 4, 7, 0, boost_tlv), + WM8960_INBMIX2, 4, 7, 0, lineinboost_tlv), SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT2 Volume", - WM8960_INBMIX2, 1, 7, 0, boost_tlv), + WM8960_INBMIX2, 1, 7, 0, lineinboost_tlv), +SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT1 Volume", + WM8960_RINPATH, 4, 3, 0, micboost_tlv), +SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT1 Volume", + WM8960_LINPATH, 4, 3, 0, micboost_tlv), SOC_DOUBLE_R_TLV("Playback Volume", WM8960_LDAC, WM8960_RDAC, 0, 255, 0, dac_tlv), diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index b4eb975da981..39ebd7bf4f53 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c @@ -2944,7 +2944,8 @@ static int wm8962_mute(struct snd_soc_dai *dai, int mute) WM8962_DAC_MUTE, val); } -#define WM8962_RATES SNDRV_PCM_RATE_8000_96000 +#define WM8962_RATES (SNDRV_PCM_RATE_8000_48000 |\ + SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000) #define WM8962_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) @@ -3759,7 +3760,7 @@ static int wm8962_i2c_probe(struct i2c_client *i2c, ret = 
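
To see what the new WM8960 TLV tables above amount to in decibels, here is a small standalone decode, assuming the usual ALSA TLV conventions (values in centi-dB, mute flag means register value 0 is mute). The figures are read off the macros in the hunk, not taken from a datasheet.

    #include <stdio.h>

    /* DECLARE_TLV_DB_SCALE(lineinboost_tlv, -1500, 300, 1): 0 mutes,
     * then -15 dB .. +6 dB in 3 dB steps. */
    static int lineinboost_cdb(unsigned int reg)
    {
            if (reg == 0)
                    return -9999;                  /* sentinel for "mute" */
            return -1500 + (int)reg * 300;         /* reg 7 -> +600 (= +6 dB) */
    }

    /* micboost_tlv is a two-range table: regs 0..1 step 13 dB from 0 dB,
     * regs 2..3 step 9 dB from +20 dB. */
    static int micboost_cdb(unsigned int reg)
    {
            if (reg <= 1)
                    return (int)reg * 1300;        /* 0 dB, +13 dB */
            return 2000 + ((int)reg - 2) * 900;    /* +20 dB, +29 dB */
    }

    int main(void)
    {
            unsigned int reg;

            for (reg = 0; reg <= 3; reg++)
                    printf("LINPUT1 boost reg %u -> %d cdB\n", reg, micboost_cdb(reg));
            for (reg = 0; reg <= 7; reg++)
                    printf("LINPUT2/3 boost reg %u -> %d cdB\n", reg, lineinboost_cdb(reg));
            return 0;
    }
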
snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm8962, &wm8962_dai, 1); if (ret < 0) - goto err_enable; + goto err_pm_runtime; regcache_cache_only(wm8962->regmap, true); @@ -3768,6 +3769,8 @@ static int wm8962_i2c_probe(struct i2c_client *i2c, return 0; +err_pm_runtime: + pm_runtime_disable(&i2c->dev); err_enable: regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies); err: @@ -3777,6 +3780,7 @@ err: static int wm8962_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); + pm_runtime_disable(&client->dev); return 0; } @@ -3804,6 +3808,8 @@ static int wm8962_runtime_resume(struct device *dev) wm8962_reset(wm8962); + regcache_mark_dirty(wm8962->regmap); + /* SYSCLK defaults to on; make sure it is off so we can safely * write to registers if the device is declocked. */ diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c index add6bb99661d..4495a40a9468 100644 --- a/sound/soc/davinci/davinci-mcasp.c +++ b/sound/soc/davinci/davinci-mcasp.c @@ -80,12 +80,13 @@ struct davinci_mcasp { /* McASP specific data */ int tdm_slots; + u32 tdm_mask[2]; + int slot_width; u8 op_mode; u8 num_serializer; u8 *serial_dir; u8 version; u8 bclk_div; - u16 bclk_lrclk_ratio; int streams; u32 irq_request[2]; int dma_request[2]; @@ -556,8 +557,21 @@ static int __davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id, mcasp->bclk_div = div; break; - case 2: /* BCLK/LRCLK ratio */ - mcasp->bclk_lrclk_ratio = div; + case 2: /* + * BCLK/LRCLK ratio descries how many bit-clock cycles + * fit into one frame. The clock ratio is given for a + * full period of data (for I2S format both left and + * right channels), so it has to be divided by number + * of tdm-slots (for I2S - divided by 2). + * Instead of storing this ratio, we calculate a new + * tdm_slot width by dividing the the ratio by the + * number of configured tdm slots. 
+ */ + mcasp->slot_width = div / mcasp->tdm_slots; + if (div % mcasp->tdm_slots) + dev_warn(mcasp->dev, + "%s(): BCLK/LRCLK %d is not divisible by %d tdm slots", + __func__, div, mcasp->tdm_slots); break; default: @@ -596,12 +610,92 @@ static int davinci_mcasp_set_sysclk(struct snd_soc_dai *dai, int clk_id, return 0; } +/* All serializers must have equal number of channels */ +static int davinci_mcasp_ch_constraint(struct davinci_mcasp *mcasp, int stream, + int serializers) +{ + struct snd_pcm_hw_constraint_list *cl = &mcasp->chconstr[stream]; + unsigned int *list = (unsigned int *) cl->list; + int slots = mcasp->tdm_slots; + int i, count = 0; + + if (mcasp->tdm_mask[stream]) + slots = hweight32(mcasp->tdm_mask[stream]); + + for (i = 2; i <= slots; i++) + list[count++] = i; + + for (i = 2; i <= serializers; i++) + list[count++] = i*slots; + + cl->count = count; + + return 0; +} + +static int davinci_mcasp_set_ch_constraints(struct davinci_mcasp *mcasp) +{ + int rx_serializers = 0, tx_serializers = 0, ret, i; + + for (i = 0; i < mcasp->num_serializer; i++) + if (mcasp->serial_dir[i] == TX_MODE) + tx_serializers++; + else if (mcasp->serial_dir[i] == RX_MODE) + rx_serializers++; + + ret = davinci_mcasp_ch_constraint(mcasp, SNDRV_PCM_STREAM_PLAYBACK, + tx_serializers); + if (ret) + return ret; + + ret = davinci_mcasp_ch_constraint(mcasp, SNDRV_PCM_STREAM_CAPTURE, + rx_serializers); + + return ret; +} + + +static int davinci_mcasp_set_tdm_slot(struct snd_soc_dai *dai, + unsigned int tx_mask, + unsigned int rx_mask, + int slots, int slot_width) +{ + struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai); + + dev_dbg(mcasp->dev, + "%s() tx_mask 0x%08x rx_mask 0x%08x slots %d width %d\n", + __func__, tx_mask, rx_mask, slots, slot_width); + + if (tx_mask >= (1<<slots) || rx_mask >= (1<<slots)) { + dev_err(mcasp->dev, + "Bad tdm mask tx: 0x%08x rx: 0x%08x slots %d\n", + tx_mask, rx_mask, slots); + return -EINVAL; + } + + if (slot_width && + (slot_width < 8 || slot_width > 32 || slot_width % 4 != 0)) { + dev_err(mcasp->dev, "%s: Unsupported slot_width %d\n", + __func__, slot_width); + return -EINVAL; + } + + mcasp->tdm_slots = slots; + mcasp->tdm_mask[SNDRV_PCM_STREAM_PLAYBACK] = rx_mask; + mcasp->tdm_mask[SNDRV_PCM_STREAM_CAPTURE] = tx_mask; + mcasp->slot_width = slot_width; + + return davinci_mcasp_set_ch_constraints(mcasp); +} + static int davinci_config_channel_size(struct davinci_mcasp *mcasp, - int word_length) + int sample_width) { u32 fmt; - u32 tx_rotate = (word_length / 4) & 0x7; - u32 mask = (1ULL << word_length) - 1; + u32 tx_rotate = (sample_width / 4) & 0x7; + u32 mask = (1ULL << sample_width) - 1; + u32 slot_width = sample_width; + /* * For captured data we should not rotate, inversion and masking is * enoguh to get the data to the right position: @@ -614,28 +708,23 @@ static int davinci_config_channel_size(struct davinci_mcasp *mcasp, u32 rx_rotate = 0; /* - * if s BCLK-to-LRCLK ratio has been configured via the set_clkdiv() - * callback, take it into account here. That allows us to for example - * send 32 bits per channel to the codec, while only 16 of them carry - * audio payload. - * The clock ratio is given for a full period of data (for I2S format - * both left and right channels), so it has to be divided by number of - * tdm-slots (for I2S - divided by 2). + * Setting the tdm slot width either with set_clkdiv() or + * set_tdm_slot() allows us to for example send 32 bits per + * channel to the codec, while only 16 of them carry audio + * payload. 
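
A standalone model of the two ways the McASP slot width ends up configured above: derived from the BCLK/LRCLK ratio passed to set_clkdiv() (case 2), or given directly through set_tdm_slot() together with the slot masks. Function names here are illustrative; the checks mirror the ones in the hunk.

    #include <stdio.h>

    /* The clkdiv path: the ratio covers a whole frame, so divide by the
     * number of tdm slots to get one slot's width. */
    static int slot_width_from_clkdiv(int bclk_lrclk_ratio, int tdm_slots)
    {
            if (bclk_lrclk_ratio % tdm_slots)
                    fprintf(stderr, "BCLK/LRCLK %d is not divisible by %d tdm slots\n",
                            bclk_lrclk_ratio, tdm_slots);
            return bclk_lrclk_ratio / tdm_slots;
    }

    /* The set_tdm_slot path: masks must fit in the slot count, and McASP
     * slot sizes are 8..32 bits in steps of 4. */
    static int check_tdm_slot(unsigned int tx_mask, unsigned int rx_mask,
                              int slots, int slot_width)
    {
            if (tx_mask >= (1u << slots) || rx_mask >= (1u << slots))
                    return -1;
            if (slot_width && (slot_width < 8 || slot_width > 32 || slot_width % 4))
                    return -1;
            return 0;
    }

    int main(void)
    {
            /* I2S, 64 BCLKs per frame, 2 slots -> 32-bit slots */
            printf("slot width = %d\n", slot_width_from_clkdiv(64, 2));
            /* 4 slots of 16 bits, using slots 0 and 2 */
            printf("tdm ok = %d\n", check_tdm_slot(0x5, 0x5, 4, 16));
            /* invalid: mask references slot 4 of a 4-slot frame */
            printf("tdm ok = %d\n", check_tdm_slot(0x10, 0x1, 4, 16));
            return 0;
    }
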
*/ - if (mcasp->bclk_lrclk_ratio) { - u32 slot_length = mcasp->bclk_lrclk_ratio / mcasp->tdm_slots; - + if (mcasp->slot_width) { /* - * When we have more bclk then it is needed for the data, we - * need to use the rotation to move the received samples to have - * correct alignment. + * When we have more bclk then it is needed for the + * data, we need to use the rotation to move the + * received samples to have correct alignment. */ - rx_rotate = (slot_length - word_length) / 4; - word_length = slot_length; + slot_width = mcasp->slot_width; + rx_rotate = (slot_width - sample_width) / 4; } /* mapping of the XSSZ bit-field as described in the datasheet */ - fmt = (word_length >> 1) - 1; + fmt = (slot_width >> 1) - 1; if (mcasp->op_mode != DAVINCI_MCASP_DIT_MODE) { mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, RXSSZ(fmt), @@ -663,7 +752,7 @@ static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream, u8 rx_ser = 0; u8 slots = mcasp->tdm_slots; u8 max_active_serializers = (channels + slots - 1) / slots; - int active_serializers, numevt, n; + int active_serializers, numevt; u32 reg; /* Default configuration */ if (mcasp->version < MCASP_VERSION_3) @@ -745,9 +834,8 @@ static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream, * The number of words for numevt need to be in steps of active * serializers. */ - n = numevt % active_serializers; - if (n) - numevt += (active_serializers - n); + numevt = (numevt / active_serializers) * active_serializers; + while (period_words % numevt && numevt > 0) numevt -= active_serializers; if (numevt <= 0) @@ -777,33 +865,50 @@ static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream, /* * If more than one serializer is needed, then use them with - * their specified tdm_slots count. Otherwise, one serializer - * can cope with the transaction using as many slots as channels - * in the stream, requires channels symmetry + * all the specified tdm_slots. Otherwise, one serializer can + * cope with the transaction using just as many slots as there + * are channels in the stream. 
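
The arithmetic in davinci_config_channel_size() above is easy to check in isolation: when the slot is wider than the sample (say 16-bit audio in 32-bit slots), received data has to be rotated so the valid bits land where ALSA expects them, and the XSSZ field is programmed from the slot width rather than the sample width. A small sketch of just that calculation:

    #include <stdio.h>

    static void channel_size(int sample_width, int slot_width)
    {
            unsigned int tx_rotate = (sample_width / 4) & 0x7;
            unsigned int mask = (unsigned int)((1ULL << sample_width) - 1);
            unsigned int rx_rotate = 0;
            unsigned int fmt;

            /* rotation is expressed in 4-bit units */
            if (slot_width > sample_width)
                    rx_rotate = (slot_width - sample_width) / 4;

            /* XSSZ encoding: 0xF = 32-bit slot, 0x7 = 16-bit slot, ... */
            fmt = (slot_width >> 1) - 1;

            printf("sample %2d in slot %2d: tx_rot %u rx_rot %u mask 0x%08x xssz 0x%x\n",
                   sample_width, slot_width, tx_rotate, rx_rotate, mask, fmt);
    }

    int main(void)
    {
            channel_size(16, 16);
            channel_size(16, 32);   /* 16-bit payload carried in 32-bit slots */
            channel_size(24, 32);
            return 0;
    }
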
*/ - active_serializers = (channels + total_slots - 1) / total_slots; - if (active_serializers == 1) - active_slots = channels; - else - active_slots = total_slots; - - for (i = 0; i < active_slots; i++) - mask |= (1 << i); + if (mcasp->tdm_mask[stream]) { + active_slots = hweight32(mcasp->tdm_mask[stream]); + active_serializers = (channels + active_slots - 1) / + active_slots; + if (active_serializers == 1) { + active_slots = channels; + for (i = 0; i < total_slots; i++) { + if ((1 << i) & mcasp->tdm_mask[stream]) { + mask |= (1 << i); + if (--active_slots <= 0) + break; + } + } + } + } else { + active_serializers = (channels + total_slots - 1) / total_slots; + if (active_serializers == 1) + active_slots = channels; + else + active_slots = total_slots; + for (i = 0; i < active_slots; i++) + mask |= (1 << i); + } mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, TX_ASYNC); if (!mcasp->dat_port) busel = TXSEL; - mcasp_set_reg(mcasp, DAVINCI_MCASP_TXTDM_REG, mask); - mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, busel | TXORD); - mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, - FSXMOD(total_slots), FSXMOD(0x1FF)); - - mcasp_set_reg(mcasp, DAVINCI_MCASP_RXTDM_REG, mask); - mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD); - mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, - FSRMOD(total_slots), FSRMOD(0x1FF)); + if (stream == SNDRV_PCM_STREAM_PLAYBACK) { + mcasp_set_reg(mcasp, DAVINCI_MCASP_TXTDM_REG, mask); + mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, busel | TXORD); + mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, + FSXMOD(total_slots), FSXMOD(0x1FF)); + } else if (stream == SNDRV_PCM_STREAM_CAPTURE) { + mcasp_set_reg(mcasp, DAVINCI_MCASP_RXTDM_REG, mask); + mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD); + mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, + FSRMOD(total_slots), FSRMOD(0x1FF)); + } return 0; } @@ -923,6 +1028,9 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream, int sbits = params_width(params); int ppm, div; + if (mcasp->slot_width) + sbits = mcasp->slot_width; + div = davinci_mcasp_calc_clk_div(mcasp, rate*sbits*slots, &ppm); if (ppm) @@ -1028,6 +1136,9 @@ static int davinci_mcasp_hw_rule_rate(struct snd_pcm_hw_params *params, struct snd_interval range; int i; + if (rd->mcasp->slot_width) + sbits = rd->mcasp->slot_width; + snd_interval_any(&range); range.empty = 1; @@ -1070,10 +1181,14 @@ static int davinci_mcasp_hw_rule_format(struct snd_pcm_hw_params *params, for (i = 0; i < SNDRV_PCM_FORMAT_LAST; i++) { if (snd_mask_test(fmt, i)) { - uint bclk_freq = snd_pcm_format_width(i)*slots*rate; + uint sbits = snd_pcm_format_width(i); int ppm; - davinci_mcasp_calc_clk_div(rd->mcasp, bclk_freq, &ppm); + if (rd->mcasp->slot_width) + sbits = rd->mcasp->slot_width; + + davinci_mcasp_calc_clk_div(rd->mcasp, sbits*slots*rate, + &ppm); if (abs(ppm) < DAVINCI_MAX_RATE_ERROR_PPM) { snd_mask_set(&nfmt, i); count++; @@ -1095,6 +1210,10 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream, &mcasp->ruledata[substream->stream]; u32 max_channels = 0; int i, dir; + int tdm_slots = mcasp->tdm_slots; + + if (mcasp->tdm_mask[substream->stream]) + tdm_slots = hweight32(mcasp->tdm_mask[substream->stream]); mcasp->substreams[substream->stream] = substream; @@ -1115,7 +1234,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream, max_channels++; } ruledata->serializers = max_channels; - max_channels *= mcasp->tdm_slots; + max_channels *= tdm_slots; /* * If the already active stream has less channels than 
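
A sketch of the TX/RX TDM mask handling introduced above, restricted to the single-serializer case that the hunk spells out: if a tdm mask was configured through set_tdm_slot(), only the slots named in it are candidates (the driver counts them with hweight32()), and just the first 'channels' of those slots are activated; with no mask, the first 'channels' slots are used as before. This is an illustration, not the full multi-serializer logic.

    #include <stdio.h>

    static unsigned int pick_slots(unsigned int tdm_mask, int total_slots, int channels)
    {
            unsigned int mask = 0;
            int i, remaining = channels;

            if (tdm_mask) {
                    /* take the first 'channels' slots present in the tdm mask */
                    for (i = 0; i < total_slots; i++) {
                            if ((1u << i) & tdm_mask) {
                                    mask |= 1u << i;
                                    if (--remaining <= 0)
                                            break;
                            }
                    }
            } else {
                    /* no mask configured: use the first 'channels' slots */
                    for (i = 0; i < channels; i++)
                            mask |= 1u << i;
            }
            return mask;
    }

    int main(void)
    {
            /* 8-slot frame, hardware wired to slots 2 and 5, stereo stream */
            printf("mask = 0x%02x\n", pick_slots((1u << 2) | (1u << 5), 8, 2));
            /* no tdm mask set, 4-slot frame, stereo stream */
            printf("mask = 0x%02x\n", pick_slots(0, 4, 2));
            return 0;
    }

Note also that the TXTDM/RXTDM writes are now split per stream direction, so configuring playback no longer clobbers the capture frame-sync setup.
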
the calculated * limnit based on the seirializers * tdm_slots, we need to use that as @@ -1125,15 +1244,25 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream, */ if (mcasp->channels && mcasp->channels < max_channels) max_channels = mcasp->channels; + /* + * But we can always allow channels upto the amount of + * the available tdm_slots. + */ + if (max_channels < tdm_slots) + max_channels = tdm_slots; snd_pcm_hw_constraint_minmax(substream->runtime, SNDRV_PCM_HW_PARAM_CHANNELS, 2, max_channels); - if (mcasp->chconstr[substream->stream].count) - snd_pcm_hw_constraint_list(substream->runtime, - 0, SNDRV_PCM_HW_PARAM_CHANNELS, - &mcasp->chconstr[substream->stream]); + snd_pcm_hw_constraint_list(substream->runtime, + 0, SNDRV_PCM_HW_PARAM_CHANNELS, + &mcasp->chconstr[substream->stream]); + + if (mcasp->slot_width) + snd_pcm_hw_constraint_minmax(substream->runtime, + SNDRV_PCM_HW_PARAM_SAMPLE_BITS, + 8, mcasp->slot_width); /* * If we rely on implicit BCLK divider setting we should @@ -1185,6 +1314,7 @@ static const struct snd_soc_dai_ops davinci_mcasp_dai_ops = { .set_fmt = davinci_mcasp_set_dai_fmt, .set_clkdiv = davinci_mcasp_set_clkdiv, .set_sysclk = davinci_mcasp_set_sysclk, + .set_tdm_slot = davinci_mcasp_set_tdm_slot, }; static int davinci_mcasp_dai_probe(struct snd_soc_dai *dai) @@ -1299,6 +1429,7 @@ static struct snd_soc_dai_driver davinci_mcasp_dai[] = { .ops = &davinci_mcasp_dai_ops, .symmetric_samplebits = 1, + .symmetric_rates = 1, }, { .name = "davinci-mcasp.1", @@ -1514,59 +1645,6 @@ nodata: return pdata; } -/* All serializers must have equal number of channels */ -static int davinci_mcasp_ch_constraint(struct davinci_mcasp *mcasp, - struct snd_pcm_hw_constraint_list *cl, - int serializers) -{ - unsigned int *list; - int i, count = 0; - - if (serializers <= 1) - return 0; - - list = devm_kzalloc(mcasp->dev, sizeof(unsigned int) * - (mcasp->tdm_slots + serializers - 2), - GFP_KERNEL); - if (!list) - return -ENOMEM; - - for (i = 2; i <= mcasp->tdm_slots; i++) - list[count++] = i; - - for (i = 2; i <= serializers; i++) - list[count++] = i*mcasp->tdm_slots; - - cl->count = count; - cl->list = list; - - return 0; -} - - -static int davinci_mcasp_init_ch_constraints(struct davinci_mcasp *mcasp) -{ - int rx_serializers = 0, tx_serializers = 0, ret, i; - - for (i = 0; i < mcasp->num_serializer; i++) - if (mcasp->serial_dir[i] == TX_MODE) - tx_serializers++; - else if (mcasp->serial_dir[i] == RX_MODE) - rx_serializers++; - - ret = davinci_mcasp_ch_constraint(mcasp, &mcasp->chconstr[ - SNDRV_PCM_STREAM_PLAYBACK], - tx_serializers); - if (ret) - return ret; - - ret = davinci_mcasp_ch_constraint(mcasp, &mcasp->chconstr[ - SNDRV_PCM_STREAM_CAPTURE], - rx_serializers); - - return ret; -} - enum { PCM_EDMA, PCM_SDMA, @@ -1685,7 +1763,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev) irq = platform_get_irq_byname(pdev, "common"); if (irq >= 0) { - irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_common\n", + irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_common", dev_name(&pdev->dev)); ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, davinci_mcasp_common_irq_handler, @@ -1702,7 +1780,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev) irq = platform_get_irq_byname(pdev, "rx"); if (irq >= 0) { - irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_rx\n", + irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_rx", dev_name(&pdev->dev)); ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, davinci_mcasp_rx_irq_handler, @@ 
-1717,7 +1795,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev) irq = platform_get_irq_byname(pdev, "tx"); if (irq >= 0) { - irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_tx\n", + irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_tx", dev_name(&pdev->dev)); ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, davinci_mcasp_tx_irq_handler, @@ -1783,7 +1861,28 @@ static int davinci_mcasp_probe(struct platform_device *pdev) mcasp->fifo_base = DAVINCI_MCASP_V3_AFIFO_BASE; } - ret = davinci_mcasp_init_ch_constraints(mcasp); + /* Allocate memory for long enough list for all possible + * scenarios. Maximum number tdm slots is 32 and there cannot + * be more serializers than given in the configuration. The + * serializer directions could be taken into account, but it + * would make code much more complex and save only couple of + * bytes. + */ + mcasp->chconstr[SNDRV_PCM_STREAM_PLAYBACK].list = + devm_kzalloc(mcasp->dev, sizeof(unsigned int) * + (32 + mcasp->num_serializer - 2), + GFP_KERNEL); + + mcasp->chconstr[SNDRV_PCM_STREAM_CAPTURE].list = + devm_kzalloc(mcasp->dev, sizeof(unsigned int) * + (32 + mcasp->num_serializer - 2), + GFP_KERNEL); + + if (!mcasp->chconstr[SNDRV_PCM_STREAM_PLAYBACK].list || + !mcasp->chconstr[SNDRV_PCM_STREAM_CAPTURE].list) + return -ENOMEM; + + ret = davinci_mcasp_set_ch_constraints(mcasp); if (ret) goto err; diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c index a3e97b46b64e..ba34252b7bba 100644 --- a/sound/soc/dwc/designware_i2s.c +++ b/sound/soc/dwc/designware_i2s.c @@ -131,23 +131,32 @@ static inline void i2s_clear_irqs(struct dw_i2s_dev *dev, u32 stream) if (stream == SNDRV_PCM_STREAM_PLAYBACK) { for (i = 0; i < 4; i++) - i2s_write_reg(dev->i2s_base, TOR(i), 0); + i2s_read_reg(dev->i2s_base, TOR(i)); } else { for (i = 0; i < 4; i++) - i2s_write_reg(dev->i2s_base, ROR(i), 0); + i2s_read_reg(dev->i2s_base, ROR(i)); } } static void i2s_start(struct dw_i2s_dev *dev, struct snd_pcm_substream *substream) { - + u32 i, irq; i2s_write_reg(dev->i2s_base, IER, 1); - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + for (i = 0; i < 4; i++) { + irq = i2s_read_reg(dev->i2s_base, IMR(i)); + i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x30); + } i2s_write_reg(dev->i2s_base, ITER, 1); - else + } else { + for (i = 0; i < 4; i++) { + irq = i2s_read_reg(dev->i2s_base, IMR(i)); + i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x03); + } i2s_write_reg(dev->i2s_base, IRER, 1); + } i2s_write_reg(dev->i2s_base, CER, 1); } diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c index 5aeb6ed4827e..0901d5e20df2 100644 --- a/sound/soc/fsl/fsl-asoc-card.c +++ b/sound/soc/fsl/fsl-asoc-card.c @@ -488,7 +488,8 @@ static int fsl_asoc_card_probe(struct platform_device *pdev) priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM; } else { dev_err(&pdev->dev, "unknown Device Tree compatible\n"); - return -EINVAL; + ret = -EINVAL; + goto asrc_fail; } /* Common settings for corresponding Freescale CPU DAI driver */ @@ -592,6 +593,7 @@ static const struct of_device_id fsl_asoc_card_dt_ids[] = { { .compatible = "fsl,imx-audio-wm8960", }, {} }; +MODULE_DEVICE_TABLE(of, fsl_asoc_card_dt_ids); static struct platform_driver fsl_asoc_card_driver = { .probe = fsl_asoc_card_probe, diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c index a18fd92c4a85..9366b5a42e1d 100644 --- a/sound/soc/fsl/fsl_sai.c +++ b/sound/soc/fsl/fsl_sai.c @@ -801,6 +801,7 @@ static const struct 
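
The channel-count constraint list built by davinci_mcasp_set_ch_constraints() above is easy to reproduce on its own: valid counts are 2..slots on a single serializer plus every whole multiple of 'slots' up to the serializer count, and the probe path now allocates the list once for the worst case (32 slots plus serializers minus 2), as the allocation comment explains.

    #include <stdio.h>

    static int build_ch_list(unsigned int *list, int slots, int serializers)
    {
            int i, count = 0;

            for (i = 2; i <= slots; i++)
                    list[count++] = i;              /* partial use of one serializer */
            for (i = 2; i <= serializers; i++)
                    list[count++] = i * slots;      /* whole serializers */

            return count;
    }

    int main(void)
    {
            unsigned int list[32 + 4 - 2];          /* worst case: 32 slots, 4 serializers */
            int n = build_ch_list(list, 4, 3);      /* 4 tdm slots, 3 serializers */
            int i;

            for (i = 0; i < n; i++)
                    printf("%u ", list[i]);         /* prints: 2 3 4 8 12 */
            printf("\n");
            return 0;
    }
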
of_device_id fsl_sai_ids[] = { { .compatible = "fsl,imx6sx-sai", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, fsl_sai_ids); static struct platform_driver fsl_sai_driver = { .probe = fsl_sai_probe, diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index 8ec6fb208ea0..37c5cd4d0e59 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -249,7 +249,8 @@ MODULE_DEVICE_TABLE(of, fsl_ssi_ids); static bool fsl_ssi_is_ac97(struct fsl_ssi_private *ssi_private) { - return !!(ssi_private->dai_fmt & SND_SOC_DAIFMT_AC97); + return (ssi_private->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) == + SND_SOC_DAIFMT_AC97; } static bool fsl_ssi_is_i2s_master(struct fsl_ssi_private *ssi_private) @@ -947,7 +948,7 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev, CCSR_SSI_SCR_TCH_EN); } - if (fmt & SND_SOC_DAIFMT_AC97) + if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_AC97) fsl_ssi_setup_ac97(ssi_private); return 0; diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c index 48b2d24dd1f0..b95132e2f9dc 100644 --- a/sound/soc/fsl/imx-ssi.c +++ b/sound/soc/fsl/imx-ssi.c @@ -95,7 +95,8 @@ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: /* data on rising edge of bclk, frame low 1clk before data */ - strcr |= SSI_STCR_TFSI | SSI_STCR_TEFS | SSI_STCR_TXBIT0; + strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSI | + SSI_STCR_TEFS; scr |= SSI_SCR_NET; if (ssi->flags & IMX_SSI_USE_I2S_SLAVE) { scr &= ~SSI_I2S_MODE_MASK; @@ -104,33 +105,31 @@ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) break; case SND_SOC_DAIFMT_LEFT_J: /* data on rising edge of bclk, frame high with data */ - strcr |= SSI_STCR_TXBIT0; + strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP; break; case SND_SOC_DAIFMT_DSP_B: /* data on rising edge of bclk, frame high with data */ - strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0; + strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSL; break; case SND_SOC_DAIFMT_DSP_A: /* data on rising edge of bclk, frame high 1clk before data */ - strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0 | SSI_STCR_TEFS; + strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSL | + SSI_STCR_TEFS; break; } /* DAI clock inversion */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_IB_IF: - strcr |= SSI_STCR_TFSI; - strcr &= ~SSI_STCR_TSCKP; + strcr ^= SSI_STCR_TSCKP | SSI_STCR_TFSI; break; case SND_SOC_DAIFMT_IB_NF: - strcr &= ~(SSI_STCR_TSCKP | SSI_STCR_TFSI); + strcr ^= SSI_STCR_TSCKP; break; case SND_SOC_DAIFMT_NB_IF: - strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP; + strcr ^= SSI_STCR_TFSI; break; case SND_SOC_DAIFMT_NB_NF: - strcr &= ~SSI_STCR_TFSI; - strcr |= SSI_STCR_TSCKP; break; } diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index 3ff76d419436..54c33204541f 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c @@ -151,7 +151,9 @@ static int __asoc_simple_card_dai_init(struct snd_soc_dai *dai, } if (set->slots) { - ret = snd_soc_dai_set_tdm_slot(dai, 0, 0, + ret = snd_soc_dai_set_tdm_slot(dai, + set->tx_slot_mask, + set->rx_slot_mask, set->slots, set->slot_width); if (ret && ret != -ENOTSUPP) { @@ -243,7 +245,9 @@ asoc_simple_card_sub_parse_of(struct device_node *np, return ret; /* Parse TDM slot */ - ret = snd_soc_of_parse_tdm_slot(np, &dai->slots, &dai->slot_width); + ret = snd_soc_of_parse_tdm_slot(np, &dai->tx_slot_mask, + &dai->rx_slot_mask, + &dai->slots, &dai->slot_width); if (ret) return 
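
The fsl_ssi AC97 fix above comes down to how the DAI format is encoded: it is a small enum stored in the low nibble of the fmt word, not a bit flag, so a plain bitwise AND against the AC97 value also fires for unrelated formats that happen to share a bit. The constants below mirror the SND_SOC_DAIFMT_* values as defined in include/sound/soc-dai.h (copied here as an assumption to keep the example self-contained).

    #include <stdio.h>

    #define DAIFMT_I2S              1
    #define DAIFMT_LEFT_J           3
    #define DAIFMT_AC97             6
    #define DAIFMT_FORMAT_MASK      0x000f

    static int is_ac97_bit_test(unsigned int fmt)     /* old, broken check */
    {
            return !!(fmt & DAIFMT_AC97);
    }

    static int is_ac97_masked(unsigned int fmt)       /* fixed check */
    {
            return (fmt & DAIFMT_FORMAT_MASK) == DAIFMT_AC97;
    }

    int main(void)
    {
            /* LEFT_J (3) shares a bit with AC97 (6), so the bit test misfires */
            printf("LEFT_J: bit-test=%d masked=%d\n",
                   is_ac97_bit_test(DAIFMT_LEFT_J), is_ac97_masked(DAIFMT_LEFT_J));
            printf("AC97:   bit-test=%d masked=%d\n",
                   is_ac97_bit_test(DAIFMT_AC97), is_ac97_masked(DAIFMT_AC97));
            return 0;
    }
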
ret; diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig index 05fde5e6e257..221e3bd73adb 100644 --- a/sound/soc/intel/Kconfig +++ b/sound/soc/intel/Kconfig @@ -12,6 +12,7 @@ config SND_MFLD_MACHINE config SND_SST_MFLD_PLATFORM tristate + select SND_SOC_COMPRESS config SND_SST_IPC tristate diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c index 683e50116152..0487cfaac538 100644 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c @@ -368,23 +368,6 @@ static void sst_media_close(struct snd_pcm_substream *substream, kfree(stream); } -static inline unsigned int get_current_pipe_id(struct snd_soc_dai *dai, - struct snd_pcm_substream *substream) -{ - struct sst_data *sst = snd_soc_dai_get_drvdata(dai); - struct sst_dev_stream_map *map = sst->pdata->pdev_strm_map; - struct sst_runtime_stream *stream = - substream->runtime->private_data; - u32 str_id = stream->stream_info.str_id; - unsigned int pipe_id; - - pipe_id = map[str_id].device_id; - - dev_dbg(dai->dev, "got pipe_id = %#x for str_id = %d\n", - pipe_id, str_id); - return pipe_id; -} - static int sst_media_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { @@ -529,7 +512,7 @@ static struct snd_soc_dai_driver sst_platform_dai[] = { }, { .name = "compress-cpu-dai", - .compress_dai = 1, + .compress_new = snd_soc_new_compress, .ops = &sst_compr_dai_ops, .playback = { .stream_name = "Compress Playback", diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c index 8bafaf6ceab1..3f8a1e10bed0 100644 --- a/sound/soc/intel/boards/broadwell.c +++ b/sound/soc/intel/boards/broadwell.c @@ -266,18 +266,11 @@ static int broadwell_audio_probe(struct platform_device *pdev) { broadwell_rt286.dev = &pdev->dev; - return snd_soc_register_card(&broadwell_rt286); -} - -static int broadwell_audio_remove(struct platform_device *pdev) -{ - snd_soc_unregister_card(&broadwell_rt286); - return 0; + return devm_snd_soc_register_card(&pdev->dev, &broadwell_rt286); } static struct platform_driver broadwell_audio = { .probe = broadwell_audio_probe, - .remove = broadwell_audio_remove, .driver = { .name = "broadwell-audio", }, diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c index f6efa9d4acad..b27f25f70730 100644 --- a/sound/soc/intel/haswell/sst-haswell-ipc.c +++ b/sound/soc/intel/haswell/sst-haswell-ipc.c @@ -302,6 +302,10 @@ struct sst_hsw { struct sst_hsw_ipc_dx_reply dx; void *dx_context; dma_addr_t dx_context_paddr; + enum sst_hsw_device_id dx_dev; + enum sst_hsw_device_mclk dx_mclk; + enum sst_hsw_device_mode dx_mode; + u32 dx_clock_divider; /* boot */ wait_queue_head_t boot_wait; @@ -1400,10 +1404,10 @@ int sst_hsw_device_set_config(struct sst_hsw *hsw, trace_ipc_request("set device config", dev); - config.ssp_interface = dev; - config.clock_frequency = mclk; - config.mode = mode; - config.clock_divider = clock_divider; + hsw->dx_dev = config.ssp_interface = dev; + hsw->dx_mclk = config.clock_frequency = mclk; + hsw->dx_mode = config.mode = mode; + hsw->dx_clock_divider = config.clock_divider = clock_divider; if (mode == SST_HSW_DEVICE_TDM_CLOCK_MASTER) config.channels = 4; else @@ -1704,10 +1708,10 @@ int sst_hsw_dsp_runtime_resume(struct sst_hsw *hsw) return -EIO; } - /* Set ADSP SSP port settings */ - ret = sst_hsw_device_set_config(hsw, SST_HSW_DEVICE_SSP_0, - SST_HSW_DEVICE_MCLK_FREQ_24_MHZ, - SST_HSW_DEVICE_CLOCK_MASTER, 9); + /* Set ADSP SSP port 
settings - sadly the FW does not store SSP port + settings as part of the PM context. */ + ret = sst_hsw_device_set_config(hsw, hsw->dx_dev, hsw->dx_mclk, + hsw->dx_mode, hsw->dx_clock_divider); if (ret < 0) dev_err(dev, "error: SSP re-initialization failed\n"); diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c index 7d617bf493bc..bea26730873c 100644 --- a/sound/soc/intel/skylake/skl-pcm.c +++ b/sound/soc/intel/skylake/skl-pcm.c @@ -510,17 +510,6 @@ static struct snd_soc_dai_driver skl_platform_dai[] = { }, }, { - .name = "DMIC23 Pin", - .ops = &skl_dmic_dai_ops, - .capture = { - .stream_name = "DMIC23 Rx", - .channels_min = HDA_STEREO, - .channels_max = HDA_STEREO, - .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000, - .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, - }, -}, -{ .name = "HD-Codec Pin", .ops = &skl_link_dai_ops, .playback = { @@ -538,28 +527,6 @@ static struct snd_soc_dai_driver skl_platform_dai[] = { .formats = SNDRV_PCM_FMTBIT_S16_LE, }, }, -{ - .name = "HD-Codec-SPK Pin", - .ops = &skl_link_dai_ops, - .playback = { - .stream_name = "HD-Codec-SPK Tx", - .channels_min = HDA_STEREO, - .channels_max = HDA_STEREO, - .rates = SNDRV_PCM_RATE_48000, - .formats = SNDRV_PCM_FMTBIT_S16_LE, - }, -}, -{ - .name = "HD-Codec-AMIC Pin", - .ops = &skl_link_dai_ops, - .capture = { - .stream_name = "HD-Codec-AMIC Rx", - .channels_min = HDA_STEREO, - .channels_max = HDA_STEREO, - .rates = SNDRV_PCM_RATE_48000, - .formats = SNDRV_PCM_FMTBIT_S16_LE, - }, -}, }; static int skl_platform_open(struct snd_pcm_substream *substream) diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c index b05fb1c1a848..794a3499e567 100644 --- a/sound/soc/jz4740/jz4740-i2s.c +++ b/sound/soc/jz4740/jz4740-i2s.c @@ -485,6 +485,7 @@ static const struct of_device_id jz4740_of_matches[] = { { .compatible = "ingenic,jz4780-i2s", .data = (void *)JZ_I2S_JZ4780 }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, jz4740_of_matches); #endif static int jz4740_i2s_dev_probe(struct platform_device *pdev) diff --git a/sound/soc/kirkwood/armada-370-db.c b/sound/soc/kirkwood/armada-370-db.c index de7563bdc5c2..e0304d544f26 100644 --- a/sound/soc/kirkwood/armada-370-db.c +++ b/sound/soc/kirkwood/armada-370-db.c @@ -130,6 +130,7 @@ static const struct of_device_id a370db_dt_ids[] = { { .compatible = "marvell,a370db-audio" }, { }, }; +MODULE_DEVICE_TABLE(of, a370db_dt_ids); static struct platform_driver a370db_driver = { .driver = { diff --git a/sound/soc/mediatek/mt8173-max98090.c b/sound/soc/mediatek/mt8173-max98090.c index 684e8a78bed0..71a1a35047ba 100644 --- a/sound/soc/mediatek/mt8173-max98090.c +++ b/sound/soc/mediatek/mt8173-max98090.c @@ -179,21 +179,13 @@ static int mt8173_max98090_dev_probe(struct platform_device *pdev) } card->dev = &pdev->dev; - ret = snd_soc_register_card(card); + ret = devm_snd_soc_register_card(&pdev->dev, card); if (ret) dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n", __func__, ret); return ret; } -static int mt8173_max98090_dev_remove(struct platform_device *pdev) -{ - struct snd_soc_card *card = platform_get_drvdata(pdev); - - snd_soc_unregister_card(card); - return 0; -} - static const struct of_device_id mt8173_max98090_dt_match[] = { { .compatible = "mediatek,mt8173-max98090", }, { } @@ -209,7 +201,6 @@ static struct platform_driver mt8173_max98090_driver = { #endif }, .probe = mt8173_max98090_dev_probe, - .remove = mt8173_max98090_dev_remove, }; module_platform_driver(mt8173_max98090_driver); diff --git 
a/sound/soc/mediatek/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173-rt5650-rt5676.c index 86cf9752f18a..50ba538eccb3 100644 --- a/sound/soc/mediatek/mt8173-rt5650-rt5676.c +++ b/sound/soc/mediatek/mt8173-rt5650-rt5676.c @@ -246,21 +246,13 @@ static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev) card->dev = &pdev->dev; platform_set_drvdata(pdev, card); - ret = snd_soc_register_card(card); + ret = devm_snd_soc_register_card(&pdev->dev, card); if (ret) dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n", __func__, ret); return ret; } -static int mt8173_rt5650_rt5676_dev_remove(struct platform_device *pdev) -{ - struct snd_soc_card *card = platform_get_drvdata(pdev); - - snd_soc_unregister_card(card); - return 0; -} - static const struct of_device_id mt8173_rt5650_rt5676_dt_match[] = { { .compatible = "mediatek,mt8173-rt5650-rt5676", }, { } @@ -276,7 +268,6 @@ static struct platform_driver mt8173_rt5650_rt5676_driver = { #endif }, .probe = mt8173_rt5650_rt5676_dev_probe, - .remove = mt8173_rt5650_rt5676_dev_remove, }; module_platform_driver(mt8173_rt5650_rt5676_driver); diff --git a/sound/soc/mediatek/mtk-afe-pcm.c b/sound/soc/mediatek/mtk-afe-pcm.c index d190fe017559..f5baf3c38863 100644 --- a/sound/soc/mediatek/mtk-afe-pcm.c +++ b/sound/soc/mediatek/mtk-afe-pcm.c @@ -549,6 +549,23 @@ static int mtk_afe_dais_startup(struct snd_pcm_substream *substream, memif->substream = substream; snd_soc_set_runtime_hwparams(substream, &mtk_afe_hardware); + + /* + * Capture cannot use ping-pong buffer since hw_ptr at IRQ may be + * smaller than period_size due to AFE's internal buffer. + * This easily leads to overrun when avail_min is period_size. + * One more period can hold the possible unread buffer. + */ + if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { + ret = snd_pcm_hw_constraint_minmax(runtime, + SNDRV_PCM_HW_PARAM_PERIODS, + 3, + mtk_afe_hardware.periods_max); + if (ret < 0) { + dev_err(afe->dev, "hw_constraint_minmax failed\n"); + return ret; + } + } ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) diff --git a/sound/soc/mxs/mxs-sgtl5000.c b/sound/soc/mxs/mxs-sgtl5000.c index 6e6fce6a14ba..2b23ffbac6b1 100644 --- a/sound/soc/mxs/mxs-sgtl5000.c +++ b/sound/soc/mxs/mxs-sgtl5000.c @@ -142,7 +142,7 @@ static int mxs_sgtl5000_probe(struct platform_device *pdev) card->dev = &pdev->dev; platform_set_drvdata(pdev, card); - ret = snd_soc_register_card(card); + ret = devm_snd_soc_register_card(&pdev->dev, card); if (ret) { dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret); @@ -154,12 +154,8 @@ static int mxs_sgtl5000_probe(struct platform_device *pdev) static int mxs_sgtl5000_remove(struct platform_device *pdev) { - struct snd_soc_card *card = platform_get_drvdata(pdev); - mxs_saif_put_mclk(0); - snd_soc_unregister_card(card); - return 0; } diff --git a/sound/soc/omap/rx51.c b/sound/soc/omap/rx51.c index 3bebfb1d3a6f..99538900a253 100644 --- a/sound/soc/omap/rx51.c +++ b/sound/soc/omap/rx51.c @@ -297,7 +297,7 @@ static int rx51_aic34_init(struct snd_soc_pcm_runtime *rtd) dev_err(card->dev, "Failed to add TPA6130A2 controls\n"); return err; } - snd_soc_limit_volume(codec, "TPA6130A2 Headphone Playback Volume", 42); + snd_soc_limit_volume(card, "TPA6130A2 Headphone Playback Volume", 42); err = omap_mcbsp_st_add_controls(rtd, 2); if (err < 0) { diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig index 39cea80846c3..f2bf8661dd21 100644 --- a/sound/soc/pxa/Kconfig +++ b/sound/soc/pxa/Kconfig @@ -1,7 +1,6 @@ config 
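
To make the mtk-afe capture constraint above concrete, here is an illustrative timeline with made-up numbers (1024-frame periods, 64 frames held back in the AFE's internal buffer). Because the reported hw pointer lags the real fill level, the reader waiting for avail >= period_size is not woken at the first period interrupt; by the second interrupt roughly two periods worth of data have been captured, which already fills a two-period ring, while a third period leaves headroom — hence the minimum of three periods.

    #include <stdio.h>

    int main(void)
    {
            const int period = 1024;     /* frames per period (example value) */
            const int lag = 64;          /* frames held back in the AFE fifo (example) */
            int periods;

            for (periods = 2; periods <= 3; periods++) {
                    int ring = periods * period;
                    int reported_at_first_irq = period - lag;  /* < period: no wakeup */
                    int fill_at_second_irq = 2 * period;

                    printf("%d periods: reported %d at 1st IRQ (reader keeps sleeping), "
                           "fill %d/%d at 2nd IRQ -> %s\n",
                           periods, reported_at_first_irq,
                           fill_at_second_irq, ring,
                           fill_at_second_irq >= ring ? "overrun" : "headroom left");
            }
            return 0;
    }
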
SND_PXA2XX_SOC tristate "SoC Audio for the Intel PXA2xx chip" depends on ARCH_PXA - select SND_ARM select SND_PXA2XX_LIB help Say Y or M if you want to add support for codecs attached to @@ -25,7 +24,6 @@ config SND_PXA2XX_AC97 config SND_PXA2XX_SOC_AC97 tristate select AC97_BUS - select SND_ARM select SND_PXA2XX_LIB_AC97 select SND_SOC_AC97_BUS diff --git a/sound/soc/pxa/brownstone.c b/sound/soc/pxa/brownstone.c index 2b26318bc200..6147e86e9b0f 100644 --- a/sound/soc/pxa/brownstone.c +++ b/sound/soc/pxa/brownstone.c @@ -116,26 +116,19 @@ static int brownstone_probe(struct platform_device *pdev) int ret; brownstone.dev = &pdev->dev; - ret = snd_soc_register_card(&brownstone); + ret = devm_snd_soc_register_card(&pdev->dev, &brownstone); if (ret) dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret); return ret; } -static int brownstone_remove(struct platform_device *pdev) -{ - snd_soc_unregister_card(&brownstone); - return 0; -} - static struct platform_driver mmp_driver = { .driver = { .name = "brownstone-audio", .pm = &snd_soc_pm_ops, }, .probe = brownstone_probe, - .remove = brownstone_remove, }; module_platform_driver(mmp_driver); diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c index 3580d10c9f28..c97dc13d3608 100644 --- a/sound/soc/pxa/corgi.c +++ b/sound/soc/pxa/corgi.c @@ -295,28 +295,19 @@ static int corgi_probe(struct platform_device *pdev) card->dev = &pdev->dev; - ret = snd_soc_register_card(card); + ret = devm_snd_soc_register_card(&pdev->dev, card); if (ret) dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret); return ret; } -static int corgi_remove(struct platform_device *pdev) -{ - struct snd_soc_card *card = platform_get_drvdata(pdev); - - snd_soc_unregister_card(card); - return 0; -} - static struct platform_driver corgi_driver = { .driver = { .name = "corgi-audio", .pm = &snd_soc_pm_ops, }, .probe = corgi_probe, - .remove = corgi_remove, }; module_platform_driver(corgi_driver); diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c index d72e124a3676..1de876529aa1 100644 --- a/sound/soc/pxa/e740_wm9705.c +++ b/sound/soc/pxa/e740_wm9705.c @@ -138,7 +138,7 @@ static int e740_probe(struct platform_device *pdev) card->dev = &pdev->dev; - ret = snd_soc_register_card(card); + ret = devm_snd_soc_register_card(&pdev->dev, card); if (ret) { dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret); @@ -149,10 +149,7 @@ static int e740_probe(struct platform_device *pdev) static int e740_remove(struct platform_device *pdev) { - struct snd_soc_card *card = platform_get_drvdata(pdev); - gpio_free_array(e740_audio_gpios, ARRAY_SIZE(e740_audio_gpios)); - snd_soc_unregister_card(card); return 0; } diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c index 48f2d7c2e68c..b7eb7cd5df7d 100644 --- a/sound/soc/pxa/e750_wm9705.c +++ b/sound/soc/pxa/e750_wm9705.c @@ -120,7 +120,7 @@ static int e750_probe(struct platform_device *pdev) card->dev = &pdev->dev; - ret = snd_soc_register_card(card); + ret = devm_snd_soc_register_card(&pdev->dev, card); if (ret) { dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret); @@ -131,10 +131,7 @@ static int e750_probe(struct platform_device *pdev) static int e750_remove(struct platform_device *pdev) { - struct snd_soc_card *card = platform_get_drvdata(pdev); - gpio_free_array(e750_audio_gpios, ARRAY_SIZE(e750_audio_gpios)); - snd_soc_unregister_card(card); return 0; } diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c index 45d4bd46fff6..41bf71466a7b 100644 
--- a/sound/soc/pxa/e800_wm9712.c +++ b/sound/soc/pxa/e800_wm9712.c @@ -119,7 +119,7 @@ static int e800_probe(struct platform_device *pdev) card->dev = &pdev->dev; - ret = snd_soc_register_card(card); + ret = devm_snd_soc_register_card(&pdev->dev, card); if (ret) { dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret); @@ -130,10 +130,7 @@ static int e800_probe(struct platform_device *pdev) static int e800_remove(struct platform_device *pdev) { - struct snd_soc_card *card = platform_get_drvdata(pdev); - gpio_free_array(e800_audio_gpios, ARRAY_SIZE(e800_audio_gpios)); - snd_soc_unregister_card(card); return 0; } diff --git a/sound/soc/pxa/hx4700.c b/sound/soc/pxa/hx4700.c index 9f8be7cd567e..ecbf2873b7ff 100644 --- a/sound/soc/pxa/hx4700.c +++ b/sound/soc/pxa/hx4700.c @@ -193,7 +193,7 @@ static int hx4700_audio_probe(struct platform_device *pdev) return ret; snd_soc_card_hx4700.dev = &pdev->dev; - ret = snd_soc_register_card(&snd_soc_card_hx4700); + ret = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_hx4700); if (ret) gpio_free_array(hx4700_audio_gpios, ARRAY_SIZE(hx4700_audio_gpios)); @@ -203,8 +203,6 @@ static int hx4700_audio_probe(struct platform_device *pdev) static int hx4700_audio_remove(struct platform_device *pdev) { - snd_soc_unregister_card(&snd_soc_card_hx4700); - gpio_set_value(GPIO92_HX4700_HP_DRIVER, 0); gpio_set_value(GPIO107_HX4700_SPK_nSD, 0); diff --git a/sound/soc/pxa/imote2.c b/sound/soc/pxa/imote2.c index 29fabbfd21f1..9d0e40771ef5 100644 --- a/sound/soc/pxa/imote2.c +++ b/sound/soc/pxa/imote2.c @@ -72,28 +72,19 @@ static int imote2_probe(struct platform_device *pdev) card->dev = &pdev->dev; - ret = snd_soc_register_card(card); + ret = devm_snd_soc_register_card(&pdev->dev, card); if (ret) dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret); return ret; } -static int imote2_remove(struct platform_device *pdev) -{ - struct snd_soc_card *card = platform_get_drvdata(pdev); - - snd_soc_unregister_card(card); - return 0; -} - static struct platform_driver imote2_driver = { .driver = { .name = "imote2-audio", .pm = &snd_soc_pm_ops, }, .probe = imote2_probe, - .remove = imote2_remove, }; module_platform_driver(imote2_driver); diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c index a9615a574546..29bc60e85e92 100644 --- a/sound/soc/pxa/mioa701_wm9713.c +++ b/sound/soc/pxa/mioa701_wm9713.c @@ -181,7 +181,7 @@ static int mioa701_wm9713_probe(struct platform_device *pdev) return -ENODEV; mioa701.dev = &pdev->dev; - rc = snd_soc_register_card(&mioa701); + rc = devm_snd_soc_register_card(&pdev->dev, &mioa701); if (!rc) dev_warn(&pdev->dev, "Be warned that incorrect mixers/muxes setup will" "lead to overheating and possible destruction of your device." 
@@ -189,17 +189,8 @@ static int mioa701_wm9713_probe(struct platform_device *pdev) return rc; } -static int mioa701_wm9713_remove(struct platform_device *pdev) -{ - struct snd_soc_card *card = platform_get_drvdata(pdev); - - snd_soc_unregister_card(card); - return 0; -} - static struct platform_driver mioa701_wm9713_driver = { .probe = mioa701_wm9713_probe, - .remove = mioa701_wm9713_remove, .driver = { .name = "mioa701-wm9713", .pm = &snd_soc_pm_ops, diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c index c20bbc042425..4e74d9573f03 100644 --- a/sound/soc/pxa/palm27x.c +++ b/sound/soc/pxa/palm27x.c @@ -140,22 +140,15 @@ static int palm27x_asoc_probe(struct platform_device *pdev) palm27x_asoc.dev = &pdev->dev; - ret = snd_soc_register_card(&palm27x_asoc); + ret = devm_snd_soc_register_card(&pdev->dev, &palm27x_asoc); if (ret) dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret); return ret; } -static int palm27x_asoc_remove(struct platform_device *pdev) -{ - snd_soc_unregister_card(&palm27x_asoc); - return 0; -} - static struct platform_driver palm27x_wm9712_driver = { .probe = palm27x_asoc_probe, - .remove = palm27x_asoc_remove, .driver = { .name = "palm27x-asoc", .pm = &snd_soc_pm_ops, diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c index 80b457ac522a..84d0e2e50808 100644 --- a/sound/soc/pxa/poodle.c +++ b/sound/soc/pxa/poodle.c @@ -267,28 +267,19 @@ static int poodle_probe(struct platform_device *pdev) card->dev = &pdev->dev; - ret = snd_soc_register_card(card); + ret = devm_snd_soc_register_card(&pdev->dev, card); if (ret) dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret); return ret; } -static int poodle_remove(struct platform_device *pdev) -{ - struct snd_soc_card *card = platform_get_drvdata(pdev); - - snd_soc_unregister_card(card); - return 0; -} - static struct platform_driver poodle_driver = { .driver = { .name = "poodle-audio", .pm = &snd_soc_pm_ops, }, .probe = poodle_probe, - .remove = poodle_remove, }; module_platform_driver(poodle_driver); diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c index 3da485ec1de7..da03fad1b9cd 100644 --- a/sound/soc/pxa/pxa-ssp.c +++ b/sound/soc/pxa/pxa-ssp.c @@ -809,6 +809,7 @@ static const struct of_device_id pxa_ssp_of_ids[] = { { .compatible = "mrvl,pxa-ssp-dai" }, {} }; +MODULE_DEVICE_TABLE(of, pxa_ssp_of_ids); #endif static int asoc_ssp_probe(struct platform_device *pdev) diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c index 1f6054650991..9e4b04e0fbd1 100644 --- a/sound/soc/pxa/pxa2xx-ac97.c +++ b/sound/soc/pxa/pxa2xx-ac97.c @@ -49,7 +49,7 @@ static struct snd_ac97_bus_ops pxa2xx_ac97_ops = { .reset = pxa2xx_ac97_cold_reset, }; -static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 12; +static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 11; static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = { .addr = __PREG(PCDR), .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, @@ -57,7 +57,7 @@ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = { .filter_data = &pxa2xx_ac97_pcm_stereo_in_req, }; -static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 11; +static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 12; static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = { .addr = __PREG(PCDR), .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c index 831ee37d2e3e..29a3fdbb7b59 100644 --- a/sound/soc/pxa/pxa2xx-pcm.c +++ b/sound/soc/pxa/pxa2xx-pcm.c @@ -132,6 +132,7 @@ 
static const struct of_device_id snd_soc_pxa_audio_match[] = { { .compatible = "mrvl,pxa-pcm-audio" }, { } }; +MODULE_DEVICE_TABLE(of, snd_soc_pxa_audio_match); #endif static struct platform_driver pxa_pcm_driver = { diff --git a/sound/soc/pxa/spitz.c b/sound/soc/pxa/spitz.c index 461123ad5ff2..b00222620fd0 100644 --- a/sound/soc/pxa/spitz.c +++ b/sound/soc/pxa/spitz.c @@ -305,7 +305,7 @@ static int spitz_probe(struct platform_device *pdev) card->dev = &pdev->dev; - ret = snd_soc_register_card(card); + ret = devm_snd_soc_register_card(&pdev->dev, card); if (ret) { dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret); @@ -322,9 +322,6 @@ err1: static int spitz_remove(struct platform_device *pdev) { - struct snd_soc_card *card = platform_get_drvdata(pdev); - - snd_soc_unregister_card(card); gpio_free(spitz_mic_gpio); return 0; } diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c index f59f566551ef..49518dd642aa 100644 --- a/sound/soc/pxa/tosa.c +++ b/sound/soc/pxa/tosa.c @@ -233,7 +233,7 @@ static int tosa_probe(struct platform_device *pdev) card->dev = &pdev->dev; - ret = snd_soc_register_card(card); + ret = devm_snd_soc_register_card(&pdev->dev, card); if (ret) { dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret); @@ -244,10 +244,7 @@ static int tosa_probe(struct platform_device *pdev) static int tosa_remove(struct platform_device *pdev) { - struct snd_soc_card *card = platform_get_drvdata(pdev); - gpio_free(TOSA_GPIO_L_MUTE); - snd_soc_unregister_card(card); return 0; } diff --git a/sound/soc/pxa/ttc-dkb.c b/sound/soc/pxa/ttc-dkb.c index 1753c7d9e760..65c20f779177 100644 --- a/sound/soc/pxa/ttc-dkb.c +++ b/sound/soc/pxa/ttc-dkb.c @@ -128,7 +128,7 @@ static int ttc_dkb_probe(struct platform_device *pdev) card->dev = &pdev->dev; - ret = snd_soc_register_card(card); + ret = devm_snd_soc_register_card(&pdev->dev, card); if (ret) dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret); @@ -136,22 +136,12 @@ static int ttc_dkb_probe(struct platform_device *pdev) return ret; } -static int ttc_dkb_remove(struct platform_device *pdev) -{ - struct snd_soc_card *card = platform_get_drvdata(pdev); - - snd_soc_unregister_card(card); - - return 0; -} - static struct platform_driver ttc_dkb_driver = { .driver = { .name = "ttc-dkb-audio", .pm = &snd_soc_pm_ops, }, .probe = ttc_dkb_probe, - .remove = ttc_dkb_remove, }; module_platform_driver(ttc_dkb_driver); diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c index 97bc2023f08a..e5101e0d2d37 100644 --- a/sound/soc/qcom/lpass-cpu.c +++ b/sound/soc/qcom/lpass-cpu.c @@ -438,7 +438,8 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev) if (IS_ERR(drvdata->mi2s_bit_clk[dai_id])) { dev_err(&pdev->dev, "%s() error getting mi2s-bit-clk: %ld\n", - __func__, PTR_ERR(drvdata->mi2s_bit_clk[i])); + __func__, + PTR_ERR(drvdata->mi2s_bit_clk[dai_id])); return PTR_ERR(drvdata->mi2s_bit_clk[dai_id]); } } diff --git a/sound/soc/rockchip/Kconfig b/sound/soc/rockchip/Kconfig index 58bae8e2cf5f..570905709d3a 100644 --- a/sound/soc/rockchip/Kconfig +++ b/sound/soc/rockchip/Kconfig @@ -17,7 +17,7 @@ config SND_SOC_ROCKCHIP_I2S config SND_SOC_ROCKCHIP_MAX98090 tristate "ASoC support for Rockchip boards using a MAX98090 codec" - depends on SND_SOC_ROCKCHIP && I2C && GPIOLIB + depends on SND_SOC_ROCKCHIP && I2C && GPIOLIB && CLKDEV_LOOKUP select SND_SOC_ROCKCHIP_I2S select SND_SOC_MAX98090 select SND_SOC_TS3A227E @@ -27,7 +27,7 @@ config SND_SOC_ROCKCHIP_MAX98090 config SND_SOC_ROCKCHIP_RT5645 tristate 
"ASoC support for Rockchip boards using a RT5645/RT5650 codec" - depends on SND_SOC_ROCKCHIP && I2C && GPIOLIB + depends on SND_SOC_ROCKCHIP && I2C && GPIOLIB && CLKDEV_LOOKUP select SND_SOC_ROCKCHIP_I2S select SND_SOC_RT5645 help diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig index 07114b0b0dc1..6ca90aaf141f 100644 --- a/sound/soc/sh/Kconfig +++ b/sound/soc/sh/Kconfig @@ -37,6 +37,7 @@ config SND_SOC_SH4_SIU config SND_SOC_RCAR tristate "R-Car series SRU/SCU/SSIU/SSI support" depends on DMA_OF + depends on COMMON_CLK select SND_SIMPLE_CARD select REGMAP_MMIO help diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c index fefc881dbac2..c4ebbb7a7b6f 100644 --- a/sound/soc/sh/rcar/adg.c +++ b/sound/soc/sh/rcar/adg.c @@ -7,7 +7,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. */ -#include <linux/sh_clk.h> +#include <linux/clk-provider.h> #include "rsnd.h" #define CLKA 0 @@ -16,12 +16,26 @@ #define CLKI 3 #define CLKMAX 4 +#define CLKOUT 0 +#define CLKOUT1 1 +#define CLKOUT2 2 +#define CLKOUT3 3 +#define CLKOUTMAX 4 + +#define BRRx_MASK(x) (0x3FF & x) + +static struct rsnd_mod_ops adg_ops = { + .name = "adg", +}; + struct rsnd_adg { struct clk *clk[CLKMAX]; + struct clk *clkout[CLKOUTMAX]; + struct clk_onecell_data onecell; + struct rsnd_mod mod; - int rbga_rate_for_441khz_div_6; /* RBGA */ - int rbgb_rate_for_48khz_div_6; /* RBGB */ - u32 ckr; + int rbga_rate_for_441khz; /* RBGA */ + int rbgb_rate_for_48khz; /* RBGB */ }; #define for_each_rsnd_clk(pos, adg, i) \ @@ -29,8 +43,28 @@ struct rsnd_adg { (i < CLKMAX) && \ ((pos) = adg->clk[i]); \ i++) +#define for_each_rsnd_clkout(pos, adg, i) \ + for (i = 0; \ + (i < CLKOUTMAX) && \ + ((pos) = adg->clkout[i]); \ + i++) #define rsnd_priv_to_adg(priv) ((struct rsnd_adg *)(priv)->adg) +static u32 rsnd_adg_calculate_rbgx(unsigned long div) +{ + int i, ratio; + + if (!div) + return 0; + + for (i = 3; i >= 0; i--) { + ratio = 2 << (i * 2); + if (0 == (div % ratio)) + return (u32)((i << 8) | ((div / ratio) - 1)); + } + + return ~0; +} static u32 rsnd_adg_ssi_ws_timing_gen2(struct rsnd_dai_stream *io) { @@ -60,6 +94,9 @@ static u32 rsnd_adg_ssi_ws_timing_gen2(struct rsnd_dai_stream *io) int rsnd_adg_set_cmd_timsel_gen2(struct rsnd_mod *mod, struct rsnd_dai_stream *io) { + struct rsnd_priv *priv = rsnd_mod_to_priv(mod); + struct rsnd_adg *adg = rsnd_priv_to_adg(priv); + struct rsnd_mod *adg_mod = rsnd_mod_get(adg); int id = rsnd_mod_id(mod); int shift = (id % 2) ? 16 : 0; u32 mask, val; @@ -69,21 +106,26 @@ int rsnd_adg_set_cmd_timsel_gen2(struct rsnd_mod *mod, val = val << shift; mask = 0xffff << shift; - rsnd_mod_bset(mod, CMDOUT_TIMSEL, mask, val); + rsnd_mod_bset(adg_mod, CMDOUT_TIMSEL, mask, val); return 0; } -static int rsnd_adg_set_src_timsel_gen2(struct rsnd_mod *mod, +static int rsnd_adg_set_src_timsel_gen2(struct rsnd_mod *src_mod, struct rsnd_dai_stream *io, u32 timsel) { + struct rsnd_priv *priv = rsnd_mod_to_priv(src_mod); + struct rsnd_adg *adg = rsnd_priv_to_adg(priv); + struct rsnd_mod *adg_mod = rsnd_mod_get(adg); int is_play = rsnd_io_is_play(io); - int id = rsnd_mod_id(mod); + int id = rsnd_mod_id(src_mod); int shift = (id % 2) ? 16 : 0; u32 mask, ws; u32 in, out; + rsnd_mod_confirm_src(src_mod); + ws = rsnd_adg_ssi_ws_timing_gen2(io); in = (is_play) ? 
timsel : ws; @@ -95,37 +137,38 @@ static int rsnd_adg_set_src_timsel_gen2(struct rsnd_mod *mod, switch (id / 2) { case 0: - rsnd_mod_bset(mod, SRCIN_TIMSEL0, mask, in); - rsnd_mod_bset(mod, SRCOUT_TIMSEL0, mask, out); + rsnd_mod_bset(adg_mod, SRCIN_TIMSEL0, mask, in); + rsnd_mod_bset(adg_mod, SRCOUT_TIMSEL0, mask, out); break; case 1: - rsnd_mod_bset(mod, SRCIN_TIMSEL1, mask, in); - rsnd_mod_bset(mod, SRCOUT_TIMSEL1, mask, out); + rsnd_mod_bset(adg_mod, SRCIN_TIMSEL1, mask, in); + rsnd_mod_bset(adg_mod, SRCOUT_TIMSEL1, mask, out); break; case 2: - rsnd_mod_bset(mod, SRCIN_TIMSEL2, mask, in); - rsnd_mod_bset(mod, SRCOUT_TIMSEL2, mask, out); + rsnd_mod_bset(adg_mod, SRCIN_TIMSEL2, mask, in); + rsnd_mod_bset(adg_mod, SRCOUT_TIMSEL2, mask, out); break; case 3: - rsnd_mod_bset(mod, SRCIN_TIMSEL3, mask, in); - rsnd_mod_bset(mod, SRCOUT_TIMSEL3, mask, out); + rsnd_mod_bset(adg_mod, SRCIN_TIMSEL3, mask, in); + rsnd_mod_bset(adg_mod, SRCOUT_TIMSEL3, mask, out); break; case 4: - rsnd_mod_bset(mod, SRCIN_TIMSEL4, mask, in); - rsnd_mod_bset(mod, SRCOUT_TIMSEL4, mask, out); + rsnd_mod_bset(adg_mod, SRCIN_TIMSEL4, mask, in); + rsnd_mod_bset(adg_mod, SRCOUT_TIMSEL4, mask, out); break; } return 0; } -int rsnd_adg_set_convert_clk_gen2(struct rsnd_mod *mod, +int rsnd_adg_set_convert_clk_gen2(struct rsnd_mod *src_mod, struct rsnd_dai_stream *io, unsigned int src_rate, unsigned int dst_rate) { - struct rsnd_priv *priv = rsnd_mod_to_priv(mod); + struct rsnd_priv *priv = rsnd_mod_to_priv(src_mod); struct rsnd_adg *adg = rsnd_priv_to_adg(priv); + struct rsnd_mod *adg_mod = rsnd_mod_get(adg); struct device *dev = rsnd_priv_to_dev(priv); int idx, sel, div, step, ret; u32 val, en; @@ -134,10 +177,12 @@ int rsnd_adg_set_convert_clk_gen2(struct rsnd_mod *mod, clk_get_rate(adg->clk[CLKA]), /* 0000: CLKA */ clk_get_rate(adg->clk[CLKB]), /* 0001: CLKB */ clk_get_rate(adg->clk[CLKC]), /* 0010: CLKC */ - adg->rbga_rate_for_441khz_div_6,/* 0011: RBGA */ - adg->rbgb_rate_for_48khz_div_6, /* 0100: RBGB */ + adg->rbga_rate_for_441khz, /* 0011: RBGA */ + adg->rbgb_rate_for_48khz, /* 0100: RBGB */ }; + rsnd_mod_confirm_src(src_mod); + min = ~0; val = 0; en = 0; @@ -175,25 +220,27 @@ int rsnd_adg_set_convert_clk_gen2(struct rsnd_mod *mod, return -EIO; } - ret = rsnd_adg_set_src_timsel_gen2(mod, io, val); + ret = rsnd_adg_set_src_timsel_gen2(src_mod, io, val); if (ret < 0) { dev_err(dev, "timsel error\n"); return ret; } - rsnd_mod_bset(mod, DIV_EN, en, en); + rsnd_mod_bset(adg_mod, DIV_EN, en, en); dev_dbg(dev, "convert rate %d <-> %d\n", src_rate, dst_rate); return 0; } -int rsnd_adg_set_convert_timing_gen2(struct rsnd_mod *mod, +int rsnd_adg_set_convert_timing_gen2(struct rsnd_mod *src_mod, struct rsnd_dai_stream *io) { u32 val = rsnd_adg_ssi_ws_timing_gen2(io); - return rsnd_adg_set_src_timsel_gen2(mod, io, val); + rsnd_mod_confirm_src(src_mod); + + return rsnd_adg_set_src_timsel_gen2(src_mod, io, val); } int rsnd_adg_set_convert_clk_gen1(struct rsnd_priv *priv, @@ -202,6 +249,7 @@ int rsnd_adg_set_convert_clk_gen1(struct rsnd_priv *priv, unsigned int dst_rate) { struct rsnd_adg *adg = rsnd_priv_to_adg(priv); + struct rsnd_mod *adg_mod = rsnd_mod_get(adg); struct device *dev = rsnd_priv_to_dev(priv); int idx, sel, div, shift; u32 mask, val; @@ -211,8 +259,8 @@ int rsnd_adg_set_convert_clk_gen1(struct rsnd_priv *priv, clk_get_rate(adg->clk[CLKB]), /* 001: CLKB */ clk_get_rate(adg->clk[CLKC]), /* 010: CLKC */ 0, /* 011: MLBCLK (not used) */ - adg->rbga_rate_for_441khz_div_6,/* 100: RBGA */ - adg->rbgb_rate_for_48khz_div_6, /* 
101: RBGB */ + adg->rbga_rate_for_441khz, /* 100: RBGA */ + adg->rbgb_rate_for_48khz, /* 101: RBGB */ }; /* find div (= 1/128, 1/256, 1/512, 1/1024, 1/2048 */ @@ -238,13 +286,13 @@ find_rate: switch (id / 4) { case 0: - rsnd_mod_bset(mod, AUDIO_CLK_SEL3, mask, val); + rsnd_mod_bset(adg_mod, AUDIO_CLK_SEL3, mask, val); break; case 1: - rsnd_mod_bset(mod, AUDIO_CLK_SEL4, mask, val); + rsnd_mod_bset(adg_mod, AUDIO_CLK_SEL4, mask, val); break; case 2: - rsnd_mod_bset(mod, AUDIO_CLK_SEL5, mask, val); + rsnd_mod_bset(adg_mod, AUDIO_CLK_SEL5, mask, val); break; } @@ -257,12 +305,17 @@ find_rate: return 0; } -static void rsnd_adg_set_ssi_clk(struct rsnd_mod *mod, u32 val) +static void rsnd_adg_set_ssi_clk(struct rsnd_mod *ssi_mod, u32 val) { - int id = rsnd_mod_id(mod); + struct rsnd_priv *priv = rsnd_mod_to_priv(ssi_mod); + struct rsnd_adg *adg = rsnd_priv_to_adg(priv); + struct rsnd_mod *adg_mod = rsnd_mod_get(adg); + int id = rsnd_mod_id(ssi_mod); int shift = (id % 4) * 8; u32 mask = 0xFF << shift; + rsnd_mod_confirm_ssi(ssi_mod); + val = val << shift; /* @@ -274,13 +327,13 @@ static void rsnd_adg_set_ssi_clk(struct rsnd_mod *mod, u32 val) switch (id / 4) { case 0: - rsnd_mod_bset(mod, AUDIO_CLK_SEL0, mask, val); + rsnd_mod_bset(adg_mod, AUDIO_CLK_SEL0, mask, val); break; case 1: - rsnd_mod_bset(mod, AUDIO_CLK_SEL1, mask, val); + rsnd_mod_bset(adg_mod, AUDIO_CLK_SEL1, mask, val); break; case 2: - rsnd_mod_bset(mod, AUDIO_CLK_SEL2, mask, val); + rsnd_mod_bset(adg_mod, AUDIO_CLK_SEL2, mask, val); break; } } @@ -326,14 +379,14 @@ int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *mod, unsigned int rate) } /* - * find 1/6 clock from BRGA/BRGB + * find divided clock from BRGA/BRGB */ - if (rate == adg->rbga_rate_for_441khz_div_6) { + if (rate == adg->rbga_rate_for_441khz) { data = 0x10; goto found_clock; } - if (rate == adg->rbgb_rate_for_48khz_div_6) { + if (rate == adg->rbgb_rate_for_48khz) { data = 0x20; goto found_clock; } @@ -342,29 +395,60 @@ int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *mod, unsigned int rate) found_clock: - /* see rsnd_adg_ssi_clk_init() */ - rsnd_mod_bset(mod, SSICKR, 0x00FF0000, adg->ckr); - rsnd_mod_write(mod, BRRA, 0x00000002); /* 1/6 */ - rsnd_mod_write(mod, BRRB, 0x00000002); /* 1/6 */ - /* * This "mod" = "ssi" here. * we can get "ssi id" from mod */ rsnd_adg_set_ssi_clk(mod, data); - dev_dbg(dev, "ADG: ssi%d selects clk%d = %d", - rsnd_mod_id(mod), i, rate); + dev_dbg(dev, "ADG: %s[%d] selects 0x%x for %d\n", + rsnd_mod_name(mod), rsnd_mod_id(mod), + data, rate); return 0; } -static void rsnd_adg_ssi_clk_init(struct rsnd_priv *priv, struct rsnd_adg *adg) +static void rsnd_adg_get_clkin(struct rsnd_priv *priv, + struct rsnd_adg *adg) { + struct device *dev = rsnd_priv_to_dev(priv); struct clk *clk; - unsigned long rate; - u32 ckr; + static const char * const clk_name[] = { + [CLKA] = "clk_a", + [CLKB] = "clk_b", + [CLKC] = "clk_c", + [CLKI] = "clk_i", + }; int i; + + for (i = 0; i < CLKMAX; i++) { + clk = devm_clk_get(dev, clk_name[i]); + adg->clk[i] = IS_ERR(clk) ? 
NULL : clk; + } + + for_each_rsnd_clk(clk, adg, i) + dev_dbg(dev, "clk %d : %p : %ld\n", i, clk, clk_get_rate(clk)); +} + +static void rsnd_adg_get_clkout(struct rsnd_priv *priv, + struct rsnd_adg *adg) +{ + struct clk *clk; + struct rsnd_mod *adg_mod = rsnd_mod_get(adg); + struct device *dev = rsnd_priv_to_dev(priv); + struct device_node *np = dev->of_node; + u32 ckr, rbgx, rbga, rbgb; + u32 rate, req_rate, div; + uint32_t count = 0; + unsigned long req_48kHz_rate, req_441kHz_rate; + int i; + const char *parent_clk_name = NULL; + static const char * const clkout_name[] = { + [CLKOUT] = "audio_clkout", + [CLKOUT1] = "audio_clkout1", + [CLKOUT2] = "audio_clkout2", + [CLKOUT3] = "audio_clkout3", + }; int brg_table[] = { [CLKA] = 0x0, [CLKB] = 0x1, @@ -372,19 +456,34 @@ static void rsnd_adg_ssi_clk_init(struct rsnd_priv *priv, struct rsnd_adg *adg) [CLKI] = 0x2, }; + of_property_read_u32(np, "#clock-cells", &count); + + /* + * ADG supports BRRA/BRRB output only + * this means all clkout0/1/2/3 will be same rate + */ + of_property_read_u32(np, "clock-frequency", &req_rate); + req_48kHz_rate = 0; + req_441kHz_rate = 0; + if (0 == (req_rate % 44100)) + req_441kHz_rate = req_rate; + if (0 == (req_rate % 48000)) + req_48kHz_rate = req_rate; + /* * This driver is assuming that AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC * have 44.1kHz or 48kHz base clocks for now. * * SSI itself can divide parent clock by 1/1 - 1/16 - * So, BRGA outputs 44.1kHz base parent clock 1/32, - * and, BRGB outputs 48.0kHz base parent clock 1/32 here. * see * rsnd_adg_ssi_clk_try_start() + * rsnd_ssi_master_clk_start() */ ckr = 0; - adg->rbga_rate_for_441khz_div_6 = 0; - adg->rbgb_rate_for_48khz_div_6 = 0; + rbga = 2; /* default 1/6 */ + rbgb = 2; /* default 1/6 */ + adg->rbga_rate_for_441khz = 0; + adg->rbgb_rate_for_48khz = 0; for_each_rsnd_clk(clk, adg, i) { rate = clk_get_rate(clk); @@ -392,19 +491,86 @@ static void rsnd_adg_ssi_clk_init(struct rsnd_priv *priv, struct rsnd_adg *adg) continue; /* RBGA */ - if (!adg->rbga_rate_for_441khz_div_6 && (0 == rate % 44100)) { - adg->rbga_rate_for_441khz_div_6 = rate / 6; - ckr |= brg_table[i] << 20; + if (!adg->rbga_rate_for_441khz && (0 == rate % 44100)) { + div = 6; + if (req_441kHz_rate) + div = rate / req_441kHz_rate; + rbgx = rsnd_adg_calculate_rbgx(div); + if (BRRx_MASK(rbgx) == rbgx) { + rbga = rbgx; + adg->rbga_rate_for_441khz = rate / div; + ckr |= brg_table[i] << 20; + if (req_441kHz_rate) + parent_clk_name = __clk_get_name(clk); + } } /* RBGB */ - if (!adg->rbgb_rate_for_48khz_div_6 && (0 == rate % 48000)) { - adg->rbgb_rate_for_48khz_div_6 = rate / 6; - ckr |= brg_table[i] << 16; + if (!adg->rbgb_rate_for_48khz && (0 == rate % 48000)) { + div = 6; + if (req_48kHz_rate) + div = rate / req_48kHz_rate; + rbgx = rsnd_adg_calculate_rbgx(div); + if (BRRx_MASK(rbgx) == rbgx) { + rbgb = rbgx; + adg->rbgb_rate_for_48khz = rate / div; + ckr |= brg_table[i] << 16; + if (req_48kHz_rate) { + parent_clk_name = __clk_get_name(clk); + ckr |= 0x80000000; + } + } + } + } + + /* + * ADG supports BRRA/BRRB output only. + * this means all clkout0/1/2/3 will be * same rate + */ + + /* + * for clkout + */ + if (!count) { + clk = clk_register_fixed_rate(dev, clkout_name[CLKOUT], + parent_clk_name, + (parent_clk_name) ? 
+ 0 : CLK_IS_ROOT, req_rate); + if (!IS_ERR(clk)) { + adg->clkout[CLKOUT] = clk; + of_clk_add_provider(np, of_clk_src_simple_get, clk); + } + } + /* + * for clkout0/1/2/3 + */ + else { + for (i = 0; i < CLKOUTMAX; i++) { + clk = clk_register_fixed_rate(dev, clkout_name[i], + parent_clk_name, + (parent_clk_name) ? + 0 : CLK_IS_ROOT, + req_rate); + if (!IS_ERR(clk)) { + adg->onecell.clks = adg->clkout; + adg->onecell.clk_num = CLKOUTMAX; + + adg->clkout[i] = clk; + + of_clk_add_provider(np, of_clk_src_onecell_get, + &adg->onecell); + } } } - adg->ckr = ckr; + rsnd_mod_bset(adg_mod, SSICKR, 0x00FF0000, ckr); + rsnd_mod_write(adg_mod, BRRA, rbga); + rsnd_mod_write(adg_mod, BRRB, rbgb); + + for_each_rsnd_clkout(clk, adg, i) + dev_dbg(dev, "clkout %d : %p : %ld\n", i, clk, clk_get_rate(clk)); + dev_dbg(dev, "SSICKR = 0x%08x, BRRA/BRRB = 0x%x/0x%x\n", + ckr, rbga, rbgb); } int rsnd_adg_probe(struct platform_device *pdev, @@ -413,8 +579,6 @@ int rsnd_adg_probe(struct platform_device *pdev, { struct rsnd_adg *adg; struct device *dev = rsnd_priv_to_dev(priv); - struct clk *clk; - int i; adg = devm_kzalloc(dev, sizeof(*adg), GFP_KERNEL); if (!adg) { @@ -422,15 +586,16 @@ int rsnd_adg_probe(struct platform_device *pdev, return -ENOMEM; } - adg->clk[CLKA] = devm_clk_get(dev, "clk_a"); - adg->clk[CLKB] = devm_clk_get(dev, "clk_b"); - adg->clk[CLKC] = devm_clk_get(dev, "clk_c"); - adg->clk[CLKI] = devm_clk_get(dev, "clk_i"); - - for_each_rsnd_clk(clk, adg, i) - dev_dbg(dev, "clk %d : %p : %ld\n", i, clk, clk_get_rate(clk)); + /* + * ADG is special module. + * Use ADG mod without rsnd_mod_init() to make debug easy + * for rsnd_write/rsnd_read + */ + adg->mod.ops = &adg_ops; + adg->mod.priv = priv; - rsnd_adg_ssi_clk_init(priv, adg); + rsnd_adg_get_clkin(priv, adg); + rsnd_adg_get_clkout(priv, adg); priv->adg = adg; diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index f3feed5ce9b6..eec294da81e3 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c @@ -110,6 +110,7 @@ static const struct rsnd_of_data rsnd_of_data_gen2 = { static const struct of_device_id rsnd_of_match[] = { { .compatible = "renesas,rcar_sound-gen1", .data = &rsnd_of_data_gen1 }, { .compatible = "renesas,rcar_sound-gen2", .data = &rsnd_of_data_gen2 }, + { .compatible = "renesas,rcar_sound-gen3", .data = &rsnd_of_data_gen2 }, /* gen2 compatible */ {}, }; MODULE_DEVICE_TABLE(of, rsnd_of_match); @@ -126,6 +127,17 @@ MODULE_DEVICE_TABLE(of, rsnd_of_match); #define rsnd_info_id(priv, io, name) \ ((io)->info->name - priv->info->name##_info) +void rsnd_mod_make_sure(struct rsnd_mod *mod, enum rsnd_mod_type type) +{ + if (mod->type != type) { + struct rsnd_priv *priv = rsnd_mod_to_priv(mod); + struct device *dev = rsnd_priv_to_dev(priv); + + dev_warn(dev, "%s[%d] is not your expected module\n", + rsnd_mod_name(mod), rsnd_mod_id(mod)); + } +} + /* * rsnd_mod functions */ diff --git a/sound/soc/sh/rcar/ctu.c b/sound/soc/sh/rcar/ctu.c index 05498bba5874..a3e7c716e1f7 100644 --- a/sound/soc/sh/rcar/ctu.c +++ b/sound/soc/sh/rcar/ctu.c @@ -66,7 +66,7 @@ struct rsnd_mod *rsnd_ctu_mod_get(struct rsnd_priv *priv, int id) if (WARN_ON(id < 0 || id >= rsnd_ctu_nr(priv))) id = 0; - return &((struct rsnd_ctu *)(priv->ctu) + id)->mod; + return rsnd_mod_get((struct rsnd_ctu *)(priv->ctu) + id); } static void rsnd_of_parse_ctu(struct platform_device *pdev, @@ -150,7 +150,7 @@ int rsnd_ctu_probe(struct platform_device *pdev, ctu->info = &info->ctu_info[i]; - ret = rsnd_mod_init(priv, &ctu->mod, &rsnd_ctu_ops, + ret = 
rsnd_mod_init(priv, rsnd_mod_get(ctu), &rsnd_ctu_ops, clk, RSND_MOD_CTU, i); if (ret) return ret; @@ -166,6 +166,6 @@ void rsnd_ctu_remove(struct platform_device *pdev, int i; for_each_rsnd_ctu(ctu, priv, i) { - rsnd_mod_quit(&ctu->mod); + rsnd_mod_quit(rsnd_mod_get(ctu)); } } diff --git a/sound/soc/sh/rcar/dvc.c b/sound/soc/sh/rcar/dvc.c index 57796387d482..8d8eee6350c9 100644 --- a/sound/soc/sh/rcar/dvc.c +++ b/sound/soc/sh/rcar/dvc.c @@ -282,7 +282,7 @@ struct rsnd_mod *rsnd_dvc_mod_get(struct rsnd_priv *priv, int id) if (WARN_ON(id < 0 || id >= rsnd_dvc_nr(priv))) id = 0; - return &((struct rsnd_dvc *)(priv->dvc) + id)->mod; + return rsnd_mod_get((struct rsnd_dvc *)(priv->dvc) + id); } static void rsnd_of_parse_dvc(struct platform_device *pdev, @@ -361,7 +361,7 @@ int rsnd_dvc_probe(struct platform_device *pdev, dvc->info = &info->dvc_info[i]; - ret = rsnd_mod_init(priv, &dvc->mod, &rsnd_dvc_ops, + ret = rsnd_mod_init(priv, rsnd_mod_get(dvc), &rsnd_dvc_ops, clk, RSND_MOD_DVC, i); if (ret) return ret; @@ -377,6 +377,6 @@ void rsnd_dvc_remove(struct platform_device *pdev, int i; for_each_rsnd_dvc(dvc, priv, i) { - rsnd_mod_quit(&dvc->mod); + rsnd_mod_quit(rsnd_mod_get(dvc)); } } diff --git a/sound/soc/sh/rcar/mix.c b/sound/soc/sh/rcar/mix.c index 0d5c102db6f5..8544403ffb26 100644 --- a/sound/soc/sh/rcar/mix.c +++ b/sound/soc/sh/rcar/mix.c @@ -99,7 +99,7 @@ struct rsnd_mod *rsnd_mix_mod_get(struct rsnd_priv *priv, int id) if (WARN_ON(id < 0 || id >= rsnd_mix_nr(priv))) id = 0; - return &((struct rsnd_mix *)(priv->mix) + id)->mod; + return rsnd_mod_get((struct rsnd_mix *)(priv->mix) + id); } static void rsnd_of_parse_mix(struct platform_device *pdev, @@ -179,7 +179,7 @@ int rsnd_mix_probe(struct platform_device *pdev, mix->info = &info->mix_info[i]; - ret = rsnd_mod_init(priv, &mix->mod, &rsnd_mix_ops, + ret = rsnd_mod_init(priv, rsnd_mod_get(mix), &rsnd_mix_ops, clk, RSND_MOD_MIX, i); if (ret) return ret; @@ -195,6 +195,6 @@ void rsnd_mix_remove(struct platform_device *pdev, int i; for_each_rsnd_mix(mix, priv, i) { - rsnd_mod_quit(&mix->mod); + rsnd_mod_quit(rsnd_mod_get(mix)); } } diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h index 7a0e52b4640a..e4068d78616c 100644 --- a/sound/soc/sh/rcar/rsnd.h +++ b/sound/soc/sh/rcar/rsnd.h @@ -214,6 +214,7 @@ struct rsnd_dma { }; #define rsnd_dma_to_dmaen(dma) (&(dma)->dma.en) #define rsnd_dma_to_dmapp(dma) (&(dma)->dma.pp) +#define rsnd_dma_to_mod(_dma) container_of((_dma), struct rsnd_mod, dma) void rsnd_dma_start(struct rsnd_dai_stream *io, struct rsnd_dma *dma); void rsnd_dma_stop(struct rsnd_dai_stream *io, struct rsnd_dma *dma); @@ -225,8 +226,6 @@ int rsnd_dma_probe(struct platform_device *pdev, struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node, struct rsnd_mod *mod, char *name); -#define rsnd_dma_to_mod(_dma) container_of((_dma), struct rsnd_mod, dma) - /* * R-Car sound mod */ @@ -332,6 +331,7 @@ struct rsnd_mod { #define rsnd_mod_id(mod) ((mod) ? 
(mod)->id : -1) #define rsnd_mod_hw_start(mod) clk_enable((mod)->clk) #define rsnd_mod_hw_stop(mod) clk_disable((mod)->clk) +#define rsnd_mod_get(ip) (&(ip)->mod) int rsnd_mod_init(struct rsnd_priv *priv, struct rsnd_mod *mod, @@ -627,4 +627,15 @@ void rsnd_dvc_remove(struct platform_device *pdev, struct rsnd_priv *priv); struct rsnd_mod *rsnd_dvc_mod_get(struct rsnd_priv *priv, int id); +#ifdef DEBUG +void rsnd_mod_make_sure(struct rsnd_mod *mod, enum rsnd_mod_type type); +#define rsnd_mod_confirm_ssi(mssi) rsnd_mod_make_sure(mssi, RSND_MOD_SSI) +#define rsnd_mod_confirm_src(msrc) rsnd_mod_make_sure(msrc, RSND_MOD_SRC) +#define rsnd_mod_confirm_dvc(mdvc) rsnd_mod_make_sure(mdvc, RSND_MOD_DVC) +#else +#define rsnd_mod_confirm_ssi(mssi) +#define rsnd_mod_confirm_src(msrc) +#define rsnd_mod_confirm_dvc(mdvc) +#endif + #endif diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c index 89a18e102feb..ca7a20f03c9b 100644 --- a/sound/soc/sh/rcar/src.c +++ b/sound/soc/sh/rcar/src.c @@ -918,11 +918,10 @@ static void rsnd_src_reconvert_update(struct rsnd_dai_stream *io, rsnd_mod_write(mod, SRC_IFSVR, fsrate); } -static int rsnd_src_pcm_new(struct rsnd_mod *mod, +static int rsnd_src_pcm_new_gen2(struct rsnd_mod *mod, struct rsnd_dai_stream *io, struct snd_soc_pcm_runtime *rtd) { - struct rsnd_priv *priv = rsnd_mod_to_priv(mod); struct rsnd_dai *rdai = rsnd_io_to_rdai(io); struct rsnd_src *src = rsnd_mod_to_src(mod); int ret; @@ -932,12 +931,6 @@ static int rsnd_src_pcm_new(struct rsnd_mod *mod, */ /* - * Gen1 is not supported - */ - if (rsnd_is_gen1(priv)) - return 0; - - /* * SRC sync convert needs clock master */ if (!rsnd_rdai_is_clk_master(rdai)) @@ -975,7 +968,7 @@ static struct rsnd_mod_ops rsnd_src_gen2_ops = { .start = rsnd_src_start_gen2, .stop = rsnd_src_stop_gen2, .hw_params = rsnd_src_hw_params, - .pcm_new = rsnd_src_pcm_new, + .pcm_new = rsnd_src_pcm_new_gen2, }; struct rsnd_mod *rsnd_src_mod_get(struct rsnd_priv *priv, int id) @@ -983,7 +976,7 @@ struct rsnd_mod *rsnd_src_mod_get(struct rsnd_priv *priv, int id) if (WARN_ON(id < 0 || id >= rsnd_src_nr(priv))) id = 0; - return &((struct rsnd_src *)(priv->src) + id)->mod; + return rsnd_mod_get((struct rsnd_src *)(priv->src) + id); } static void rsnd_of_parse_src(struct platform_device *pdev, @@ -1078,7 +1071,7 @@ int rsnd_src_probe(struct platform_device *pdev, src->info = &info->src_info[i]; - ret = rsnd_mod_init(priv, &src->mod, ops, clk, RSND_MOD_SRC, i); + ret = rsnd_mod_init(priv, rsnd_mod_get(src), ops, clk, RSND_MOD_SRC, i); if (ret) return ret; } @@ -1093,6 +1086,6 @@ void rsnd_src_remove(struct platform_device *pdev, int i; for_each_rsnd_src(src, priv, i) { - rsnd_mod_quit(&src->mod); + rsnd_mod_quit(rsnd_mod_get(src)); } } diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c index d45b9a7e324e..5e05f9422073 100644 --- a/sound/soc/sh/rcar/ssi.c +++ b/sound/soc/sh/rcar/ssi.c @@ -128,10 +128,8 @@ static int rsnd_ssi_master_clk_start(struct rsnd_ssi *ssi, struct rsnd_priv *priv = rsnd_io_to_priv(io); struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); struct device *dev = rsnd_priv_to_dev(priv); - int i, j, ret; - int adg_clk_div_table[] = { - 1, 6, /* see adg.c */ - }; + struct rsnd_mod *mod = rsnd_mod_get(ssi); + int j, ret; int ssi_clk_mul_table[] = { 1, 2, 4, 8, 16, 6, 12, }; @@ -141,28 +139,25 @@ static int rsnd_ssi_master_clk_start(struct rsnd_ssi *ssi, /* * Find best clock, and try to start ADG */ - for (i = 0; i < ARRAY_SIZE(adg_clk_div_table); i++) { - for (j = 0; j < ARRAY_SIZE(ssi_clk_mul_table); 
j++) { - - /* - * this driver is assuming that - * system word is 64fs (= 2 x 32bit) - * see rsnd_ssi_init() - */ - main_rate = rate / adg_clk_div_table[i] - * 32 * 2 * ssi_clk_mul_table[j]; - - ret = rsnd_adg_ssi_clk_try_start(&ssi->mod, main_rate); - if (0 == ret) { - ssi->cr_clk = FORCE | SWL_32 | - SCKD | SWSD | CKDV(j); - - dev_dbg(dev, "%s[%d] outputs %u Hz\n", - rsnd_mod_name(&ssi->mod), - rsnd_mod_id(&ssi->mod), rate); - - return 0; - } + for (j = 0; j < ARRAY_SIZE(ssi_clk_mul_table); j++) { + + /* + * this driver is assuming that + * system word is 64fs (= 2 x 32bit) + * see rsnd_ssi_init() + */ + main_rate = rate * 32 * 2 * ssi_clk_mul_table[j]; + + ret = rsnd_adg_ssi_clk_try_start(mod, main_rate); + if (0 == ret) { + ssi->cr_clk = FORCE | SWL_32 | + SCKD | SWSD | CKDV(j); + + dev_dbg(dev, "%s[%d] outputs %u Hz\n", + rsnd_mod_name(mod), + rsnd_mod_id(mod), rate); + + return 0; } } @@ -172,8 +167,10 @@ static int rsnd_ssi_master_clk_start(struct rsnd_ssi *ssi, static void rsnd_ssi_master_clk_stop(struct rsnd_ssi *ssi) { + struct rsnd_mod *mod = rsnd_mod_get(ssi); + ssi->cr_clk = 0; - rsnd_adg_ssi_clk_stop(&ssi->mod); + rsnd_adg_ssi_clk_stop(mod); } static void rsnd_ssi_hw_start(struct rsnd_ssi *ssi, @@ -182,11 +179,12 @@ static void rsnd_ssi_hw_start(struct rsnd_ssi *ssi, struct rsnd_priv *priv = rsnd_io_to_priv(io); struct rsnd_dai *rdai = rsnd_io_to_rdai(io); struct device *dev = rsnd_priv_to_dev(priv); + struct rsnd_mod *mod = rsnd_mod_get(ssi); u32 cr_mode; u32 cr; if (0 == ssi->usrcnt) { - rsnd_mod_hw_start(&ssi->mod); + rsnd_mod_hw_start(mod); if (rsnd_rdai_is_clk_master(rdai)) { struct rsnd_ssi *ssi_parent = rsnd_ssi_parent(ssi); @@ -198,7 +196,7 @@ static void rsnd_ssi_hw_start(struct rsnd_ssi *ssi, } } - if (rsnd_ssi_is_dma_mode(&ssi->mod)) { + if (rsnd_ssi_is_dma_mode(mod)) { cr_mode = UIEN | OIEN | /* over/under run */ DMEN; /* DMA : enable DMA */ } else { @@ -210,24 +208,25 @@ static void rsnd_ssi_hw_start(struct rsnd_ssi *ssi, cr_mode | EN; - rsnd_mod_write(&ssi->mod, SSICR, cr); + rsnd_mod_write(mod, SSICR, cr); /* enable WS continue */ if (rsnd_rdai_is_clk_master(rdai)) - rsnd_mod_write(&ssi->mod, SSIWSR, CONT); + rsnd_mod_write(mod, SSIWSR, CONT); /* clear error status */ - rsnd_mod_write(&ssi->mod, SSISR, 0); + rsnd_mod_write(mod, SSISR, 0); ssi->usrcnt++; dev_dbg(dev, "%s[%d] hw started\n", - rsnd_mod_name(&ssi->mod), rsnd_mod_id(&ssi->mod)); + rsnd_mod_name(mod), rsnd_mod_id(mod)); } static void rsnd_ssi_hw_stop(struct rsnd_dai_stream *io, struct rsnd_ssi *ssi) { - struct rsnd_priv *priv = rsnd_mod_to_priv(&ssi->mod); + struct rsnd_mod *mod = rsnd_mod_get(ssi); + struct rsnd_priv *priv = rsnd_mod_to_priv(mod); struct rsnd_dai *rdai = rsnd_io_to_rdai(io); struct device *dev = rsnd_priv_to_dev(priv); u32 cr; @@ -247,15 +246,15 @@ static void rsnd_ssi_hw_stop(struct rsnd_dai_stream *io, struct rsnd_ssi *ssi) cr = ssi->cr_own | ssi->cr_clk; - rsnd_mod_write(&ssi->mod, SSICR, cr | EN); - rsnd_ssi_status_check(&ssi->mod, DIRQ); + rsnd_mod_write(mod, SSICR, cr | EN); + rsnd_ssi_status_check(mod, DIRQ); /* * disable SSI, * and, wait idle state */ - rsnd_mod_write(&ssi->mod, SSICR, cr); /* disabled all */ - rsnd_ssi_status_check(&ssi->mod, IIRQ); + rsnd_mod_write(mod, SSICR, cr); /* disabled all */ + rsnd_ssi_status_check(mod, IIRQ); if (rsnd_rdai_is_clk_master(rdai)) { struct rsnd_ssi *ssi_parent = rsnd_ssi_parent(ssi); @@ -266,13 +265,13 @@ static void rsnd_ssi_hw_stop(struct rsnd_dai_stream *io, struct rsnd_ssi *ssi) rsnd_ssi_master_clk_stop(ssi); } - 
rsnd_mod_hw_stop(&ssi->mod); + rsnd_mod_hw_stop(mod); ssi->chan = 0; } dev_dbg(dev, "%s[%d] hw stopped\n", - rsnd_mod_name(&ssi->mod), rsnd_mod_id(&ssi->mod)); + rsnd_mod_name(mod), rsnd_mod_id(mod)); } /* @@ -371,7 +370,7 @@ static int rsnd_ssi_hw_params(struct rsnd_mod *mod, /* It will be removed on rsnd_ssi_hw_stop */ ssi->chan = chan; if (ssi_parent) - return rsnd_ssi_hw_params(&ssi_parent->mod, io, + return rsnd_ssi_hw_params(rsnd_mod_get(ssi_parent), io, substream, params); return 0; @@ -379,12 +378,14 @@ static int rsnd_ssi_hw_params(struct rsnd_mod *mod, static void rsnd_ssi_record_error(struct rsnd_ssi *ssi, u32 status) { + struct rsnd_mod *mod = rsnd_mod_get(ssi); + /* under/over flow error */ if (status & (UIRQ | OIRQ)) { ssi->err++; /* clear error status */ - rsnd_mod_write(&ssi->mod, SSISR, 0); + rsnd_mod_write(mod, SSISR, 0); } } @@ -656,7 +657,7 @@ struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id) if (WARN_ON(id < 0 || id >= rsnd_ssi_nr(priv))) id = 0; - return &((struct rsnd_ssi *)(priv->ssi) + id)->mod; + return rsnd_mod_get((struct rsnd_ssi *)(priv->ssi) + id); } int rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod) @@ -668,10 +669,12 @@ int rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod) static void rsnd_ssi_parent_setup(struct rsnd_priv *priv, struct rsnd_ssi *ssi) { - if (!rsnd_ssi_is_pin_sharing(&ssi->mod)) + struct rsnd_mod *mod = rsnd_mod_get(ssi); + + if (!rsnd_ssi_is_pin_sharing(mod)) return; - switch (rsnd_mod_id(&ssi->mod)) { + switch (rsnd_mod_id(mod)) { case 1: case 2: ssi->parent = rsnd_mod_to_ssi(rsnd_ssi_mod_get(priv, 0)); @@ -794,7 +797,8 @@ int rsnd_ssi_probe(struct platform_device *pdev, else if (rsnd_ssi_pio_available(ssi)) ops = &rsnd_ssi_pio_ops; - ret = rsnd_mod_init(priv, &ssi->mod, ops, clk, RSND_MOD_SSI, i); + ret = rsnd_mod_init(priv, rsnd_mod_get(ssi), ops, clk, + RSND_MOD_SSI, i); if (ret) return ret; @@ -811,6 +815,6 @@ void rsnd_ssi_remove(struct platform_device *pdev, int i; for_each_rsnd_ssi(ssi, priv, i) { - rsnd_mod_quit(&ssi->mod); + rsnd_mod_quit(rsnd_mod_get(ssi)); } } diff --git a/sound/soc/sh/siu_dai.c b/sound/soc/sh/siu_dai.c index abb0d956231c..76b2ab8c2b4a 100644 --- a/sound/soc/sh/siu_dai.c +++ b/sound/soc/sh/siu_dai.c @@ -738,7 +738,7 @@ static int siu_probe(struct platform_device *pdev) struct siu_info *info; int ret; - info = kmalloc(sizeof(*info), GFP_KERNEL); + info = devm_kmalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; siu_i2s_data = info; @@ -746,7 +746,7 @@ static int siu_probe(struct platform_device *pdev) ret = request_firmware(&fw_entry, "siu_spb.bin", &pdev->dev); if (ret) - goto ereqfw; + return ret; /* * Loaded firmware is "const" - read only, but we have to modify it in @@ -757,89 +757,52 @@ static int siu_probe(struct platform_device *pdev) release_firmware(fw_entry); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - ret = -ENODEV; - goto egetres; - } + if (!res) + return -ENODEV; - region = request_mem_region(res->start, resource_size(res), - pdev->name); + region = devm_request_mem_region(&pdev->dev, res->start, + resource_size(res), pdev->name); if (!region) { dev_err(&pdev->dev, "SIU region already claimed\n"); - ret = -EBUSY; - goto ereqmemreg; + return -EBUSY; } - ret = -ENOMEM; - info->pram = ioremap(res->start, PRAM_SIZE); + info->pram = devm_ioremap(&pdev->dev, res->start, PRAM_SIZE); if (!info->pram) - goto emappram; - info->xram = ioremap(res->start + XRAM_OFFSET, XRAM_SIZE); + return -ENOMEM; + info->xram = devm_ioremap(&pdev->dev, res->start + 
XRAM_OFFSET, + XRAM_SIZE); if (!info->xram) - goto emapxram; - info->yram = ioremap(res->start + YRAM_OFFSET, YRAM_SIZE); + return -ENOMEM; + info->yram = devm_ioremap(&pdev->dev, res->start + YRAM_OFFSET, + YRAM_SIZE); if (!info->yram) - goto emapyram; - info->reg = ioremap(res->start + REG_OFFSET, resource_size(res) - - REG_OFFSET); + return -ENOMEM; + info->reg = devm_ioremap(&pdev->dev, res->start + REG_OFFSET, + resource_size(res) - REG_OFFSET); if (!info->reg) - goto emapreg; + return -ENOMEM; dev_set_drvdata(&pdev->dev, info); /* register using ARRAY version so we can keep dai name */ - ret = snd_soc_register_component(&pdev->dev, &siu_i2s_component, - &siu_i2s_dai, 1); + ret = devm_snd_soc_register_component(&pdev->dev, &siu_i2s_component, + &siu_i2s_dai, 1); if (ret < 0) - goto edaiinit; + return ret; - ret = snd_soc_register_platform(&pdev->dev, &siu_platform); + ret = devm_snd_soc_register_platform(&pdev->dev, &siu_platform); if (ret < 0) - goto esocregp; + return ret; pm_runtime_enable(&pdev->dev); - return ret; - -esocregp: - snd_soc_unregister_component(&pdev->dev); -edaiinit: - iounmap(info->reg); -emapreg: - iounmap(info->yram); -emapyram: - iounmap(info->xram); -emapxram: - iounmap(info->pram); -emappram: - release_mem_region(res->start, resource_size(res)); -ereqmemreg: -egetres: -ereqfw: - kfree(info); - - return ret; + return 0; } static int siu_remove(struct platform_device *pdev) { - struct siu_info *info = dev_get_drvdata(&pdev->dev); - struct resource *res; - pm_runtime_disable(&pdev->dev); - - snd_soc_unregister_platform(&pdev->dev); - snd_soc_unregister_component(&pdev->dev); - - iounmap(info->reg); - iounmap(info->yram); - iounmap(info->xram); - iounmap(info->pram); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res) - release_mem_region(res->start, resource_size(res)); - kfree(info); - return 0; } diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c index 025c38fbe3c0..12a9820feac1 100644 --- a/sound/soc/soc-compress.c +++ b/sound/soc/soc-compress.c @@ -612,8 +612,15 @@ static struct snd_compr_ops soc_compr_dyn_ops = { .get_codec_caps = soc_compr_get_codec_caps }; -/* create a new compress */ -int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num) +/** + * snd_soc_new_compress - create a new compress. + * + * @rtd: The runtime for which we will create compress + * @num: the device index number (zero based - shared with normal PCMs) + * + * Return: 0 for success, else error. 
+ */ +int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_platform *platform = rtd->platform; @@ -703,3 +710,4 @@ compr_err: kfree(compr); return ret; } +EXPORT_SYMBOL_GPL(snd_soc_new_compress); diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 6173d15236c3..24b096066a07 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -1370,9 +1370,9 @@ static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order) soc_dpcm_debugfs_add(rtd); #endif - if (cpu_dai->driver->compress_dai) { + if (cpu_dai->driver->compress_new) { /*create compress_device"*/ - ret = soc_new_compress(rtd, num); + ret = cpu_dai->driver->compress_new(rtd, num); if (ret < 0) { dev_err(card->dev, "ASoC: can't create compress %s\n", dai_link->stream_name); @@ -3291,13 +3291,38 @@ int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card, } EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_simple_widgets); +static int snd_soc_of_get_slot_mask(struct device_node *np, + const char *prop_name, + unsigned int *mask) +{ + u32 val; + const __be32 *of_slot_mask = of_get_property(np, prop_name, &val); + int i; + + if (!of_slot_mask) + return 0; + val /= sizeof(u32); + for (i = 0; i < val; i++) + if (be32_to_cpup(&of_slot_mask[i])) + *mask |= (1 << i); + + return val; +} + int snd_soc_of_parse_tdm_slot(struct device_node *np, + unsigned int *tx_mask, + unsigned int *rx_mask, unsigned int *slots, unsigned int *slot_width) { u32 val; int ret; + if (tx_mask) + snd_soc_of_get_slot_mask(np, "dai-tdm-slot-tx-mask", tx_mask); + if (rx_mask) + snd_soc_of_get_slot_mask(np, "dai-tdm-slot-rx-mask", rx_mask); + if (of_property_read_bool(np, "dai-tdm-slot-num")) { ret = of_property_read_u32(np, "dai-tdm-slot-num", &val); if (ret) diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index f4bf21a5539b..016eba10b1ec 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -509,6 +509,18 @@ static bool dapm_kcontrol_set_value(const struct snd_kcontrol *kcontrol, } /** + * snd_soc_dapm_kcontrol_widget() - Returns the widget associated to a + * kcontrol + * @kcontrol: The kcontrol + */ +struct snd_soc_dapm_widget *snd_soc_dapm_kcontrol_widget( + struct snd_kcontrol *kcontrol) +{ + return dapm_kcontrol_get_wlist(kcontrol)->widgets[0]; +} +EXPORT_SYMBOL_GPL(snd_soc_dapm_kcontrol_widget); + +/** * snd_soc_dapm_kcontrol_dapm() - Returns the dapm context associated to a * kcontrol * @kcontrol: The kcontrol @@ -779,7 +791,7 @@ static int dapm_is_shared_kcontrol(struct snd_soc_dapm_context *dapm, * Determine if a kcontrol is shared. If it is, look it up. If it isn't, * create it. 
Either way, add the widget into the control's widget list */ -static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w, +static int dapm_create_or_share_kcontrol(struct snd_soc_dapm_widget *w, int kci) { struct snd_soc_dapm_context *dapm = w->dapm; @@ -810,6 +822,7 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w, switch (w->id) { case snd_soc_dapm_switch: case snd_soc_dapm_mixer: + case snd_soc_dapm_pga: wname_in_long_name = true; kcname_in_long_name = true; break; @@ -899,7 +912,7 @@ static int dapm_new_mixer(struct snd_soc_dapm_widget *w) continue; if (!w->kcontrols[i]) { - ret = dapm_create_or_share_mixmux_kcontrol(w, i); + ret = dapm_create_or_share_kcontrol(w, i); if (ret < 0) return ret; } @@ -952,7 +965,7 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w) return -EINVAL; } - ret = dapm_create_or_share_mixmux_kcontrol(w, 0); + ret = dapm_create_or_share_kcontrol(w, 0); if (ret < 0) return ret; @@ -967,9 +980,13 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w) /* create new dapm volume control */ static int dapm_new_pga(struct snd_soc_dapm_widget *w) { - if (w->num_kcontrols) - dev_err(w->dapm->dev, - "ASoC: PGA controls not supported: '%s'\n", w->name); + int i, ret; + + for (i = 0; i < w->num_kcontrols; i++) { + ret = dapm_create_or_share_kcontrol(w, i); + if (ret < 0) + return ret; + } return 0; } @@ -3473,11 +3490,29 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w, switch (event) { case SND_SOC_DAPM_PRE_PMU: substream.stream = SNDRV_PCM_STREAM_CAPTURE; + if (source->driver->ops && source->driver->ops->startup) { + ret = source->driver->ops->startup(&substream, source); + if (ret < 0) { + dev_err(source->dev, + "ASoC: startup() failed: %d\n", ret); + goto out; + } + source->active++; + } ret = soc_dai_hw_params(&substream, params, source); if (ret < 0) goto out; substream.stream = SNDRV_PCM_STREAM_PLAYBACK; + if (sink->driver->ops && sink->driver->ops->startup) { + ret = sink->driver->ops->startup(&substream, sink); + if (ret < 0) { + dev_err(sink->dev, + "ASoC: startup() failed: %d\n", ret); + goto out; + } + sink->active++; + } ret = soc_dai_hw_params(&substream, params, sink); if (ret < 0) goto out; @@ -3497,11 +3532,23 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w, if (ret != 0 && ret != -ENOTSUPP) dev_warn(sink->dev, "ASoC: Failed to mute: %d\n", ret); ret = 0; + + source->active--; + if (source->driver->ops && source->driver->ops->shutdown) { + substream.stream = SNDRV_PCM_STREAM_CAPTURE; + source->driver->ops->shutdown(&substream, source); + } + + sink->active--; + if (sink->driver->ops && sink->driver->ops->shutdown) { + substream.stream = SNDRV_PCM_STREAM_PLAYBACK; + sink->driver->ops->shutdown(&substream, sink); + } break; default: WARN(1, "Unknown event %d\n", event); - return -EINVAL; + ret = -EINVAL; } out: diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c index 100d92b5b77e..ecd38e52285a 100644 --- a/sound/soc/soc-ops.c +++ b/sound/soc/soc-ops.c @@ -207,6 +207,34 @@ int snd_soc_info_volsw(struct snd_kcontrol *kcontrol, EXPORT_SYMBOL_GPL(snd_soc_info_volsw); /** + * snd_soc_info_volsw_sx - Mixer info callback for SX TLV controls + * @kcontrol: mixer control + * @uinfo: control element information + * + * Callback to provide information about a single mixer control, or a double + * mixer control that spans 2 registers of the SX TLV type. 
SX TLV controls + * have a range that represents both positive and negative values either side + * of zero but without a sign bit. + * + * Returns 0 for success. + */ +int snd_soc_info_volsw_sx(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_info *uinfo) +{ + struct soc_mixer_control *mc = + (struct soc_mixer_control *)kcontrol->private_value; + + snd_soc_info_volsw(kcontrol, uinfo); + /* Max represents the number of levels in an SX control not the + * maximum value, so add the minimum value back on + */ + uinfo->value.integer.max += mc->min; + + return 0; +} +EXPORT_SYMBOL_GPL(snd_soc_info_volsw_sx); + +/** * snd_soc_get_volsw - single mixer get callback * @kcontrol: mixer control * @ucontrol: control element information @@ -560,16 +588,16 @@ EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range); /** * snd_soc_limit_volume - Set new limit to an existing volume control. * - * @codec: where to look for the control + * @card: where to look for the control * @name: Name of the control * @max: new maximum limit * * Return 0 for success, else error. */ -int snd_soc_limit_volume(struct snd_soc_codec *codec, +int snd_soc_limit_volume(struct snd_soc_card *card, const char *name, int max) { - struct snd_card *card = codec->component.card->snd_card; + struct snd_card *snd_card = card->snd_card; struct snd_kcontrol *kctl; struct soc_mixer_control *mc; int found = 0; @@ -579,7 +607,7 @@ int snd_soc_limit_volume(struct snd_soc_codec *codec, if (unlikely(!name || max <= 0)) return -EINVAL; - list_for_each_entry(kctl, &card->controls, list) { + list_for_each_entry(kctl, &snd_card->controls, list) { if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name))) { found = 1; break; diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 70e4b9d8bdcd..317395824cd7 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -34,6 +34,24 @@ #define DPCM_MAX_BE_USERS 8 +/* + * snd_soc_dai_stream_valid() - check if a DAI supports the given stream + * + * Returns true if the DAI supports the indicated stream type. + */ +static bool snd_soc_dai_stream_valid(struct snd_soc_dai *dai, int stream) +{ + struct snd_soc_pcm_stream *codec_stream; + + if (stream == SNDRV_PCM_STREAM_PLAYBACK) + codec_stream = &dai->driver->playback; + else + codec_stream = &dai->driver->capture; + + /* If the codec specifies any rate at all, it supports the stream. */ + return codec_stream->rates; +} + /** * snd_soc_runtime_activate() - Increment active count for PCM runtime components * @rtd: ASoC PCM runtime that is activated @@ -371,6 +389,20 @@ static void soc_pcm_init_runtime_hw(struct snd_pcm_substream *substream) /* first calculate min/max only for CODECs in the DAI link */ for (i = 0; i < rtd->num_codecs; i++) { + + /* + * Skip CODECs which don't support the current stream type. + * Otherwise, since the rate, channel, and format values will + * zero in that case, we would have no usable settings left, + * causing the resulting setup to fail. + * At least one CODEC should match, otherwise we should have + * bailed out on a higher level, since there would be no + * CODEC to support the transfer direction in that case. 
+ */ + if (!snd_soc_dai_stream_valid(rtd->codec_dais[i], + substream->stream)) + continue; + codec_dai_drv = rtd->codec_dais[i]->driver; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) codec_stream = &codec_dai_drv->playback; @@ -827,6 +859,23 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_soc_dai *codec_dai = rtd->codec_dais[i]; struct snd_pcm_hw_params codec_params; + /* + * Skip CODECs which don't support the current stream type, + * the idea being that if a CODEC is not used for the currently + * set up transfer direction, it should not need to be + * configured, especially since the configuration used might + * not even be supported by that CODEC. There may be cases + * however where a CODEC needs to be set up although it is + * actually not being used for the transfer, e.g. if a + * capture-only CODEC is acting as an LRCLK and/or BCLK master + * for the DAI link including a playback-only CODEC. + * If this becomes necessary, we will have to augment the + * machine driver setup with information on how to act, so + * we can do the right thing here. + */ + if (!snd_soc_dai_stream_valid(codec_dai, substream->stream)) + continue; + /* copy params for each codec */ codec_params = *params; diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c index 362c69ac1d6c..53dd085d3ee2 100644 --- a/sound/soc/soc-utils.c +++ b/sound/soc/soc-utils.c @@ -101,6 +101,15 @@ static struct snd_soc_codec_driver dummy_codec; SNDRV_PCM_FMTBIT_S32_LE | \ SNDRV_PCM_FMTBIT_U32_LE | \ SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE) +/* + * The dummy CODEC is only meant to be used in situations where there is no + * actual hardware. + * + * If there is actual hardware even if it does not have a control bus + * the hardware will still have constraints like supported samplerates, etc. + * which should be modelled. And the data flow graph also should be modelled + * using DAPM. 
+ */ static struct snd_soc_dai_driver dummy_dai = { .name = "snd-soc-dummy-dai", .playback = { diff --git a/sound/soc/spear/Kconfig b/sound/soc/spear/Kconfig index 0a53053495f3..4fb91412ebec 100644 --- a/sound/soc/spear/Kconfig +++ b/sound/soc/spear/Kconfig @@ -1,6 +1,6 @@ config SND_SPEAR_SOC tristate - select SND_DMAENGINE_PCM + select SND_SOC_GENERIC_DMAENGINE_PCM config SND_SPEAR_SPDIF_OUT tristate diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c index f6eefe1b8f8f..843f037a317d 100644 --- a/sound/soc/sti/uniperif_player.c +++ b/sound/soc/sti/uniperif_player.c @@ -989,8 +989,8 @@ static int uni_player_parse_dt(struct platform_device *pdev, if (!info) return -ENOMEM; - of_property_read_u32(pnode, "version", &player->ver); - if (player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) { + if (of_property_read_u32(pnode, "version", &player->ver) || + player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) { dev_err(dev, "Unknown uniperipheral version "); return -EINVAL; } @@ -998,10 +998,16 @@ static int uni_player_parse_dt(struct platform_device *pdev, if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) info->underflow_enabled = 1; - of_property_read_u32(pnode, "uniperiph-id", &info->id); + if (of_property_read_u32(pnode, "uniperiph-id", &info->id)) { + dev_err(dev, "uniperipheral id not defined"); + return -EINVAL; + } /* Read the device mode property */ - of_property_read_string(pnode, "mode", &mode); + if (of_property_read_string(pnode, "mode", &mode)) { + dev_err(dev, "uniperipheral mode not defined"); + return -EINVAL; + } if (strcasecmp(mode, "hdmi") == 0) info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_HDMI; diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c index c502626f339b..f791239a3087 100644 --- a/sound/soc/sti/uniperif_reader.c +++ b/sound/soc/sti/uniperif_reader.c @@ -316,7 +316,11 @@ static int uni_reader_parse_dt(struct platform_device *pdev, if (!info) return -ENOMEM; - of_property_read_u32(node, "version", &reader->ver); + if (of_property_read_u32(node, "version", &reader->ver) || + reader->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) { + dev_err(&pdev->dev, "Unknown uniperipheral version "); + return -EINVAL; + } /* Save the info structure */ reader->info = info; diff --git a/sound/soc/sunxi/Kconfig b/sound/soc/sunxi/Kconfig new file mode 100644 index 000000000000..84c72ec6ad73 --- /dev/null +++ b/sound/soc/sunxi/Kconfig @@ -0,0 +1,11 @@ +menu "Allwinner SoC Audio support" + +config SND_SUN4I_CODEC + tristate "Allwinner A10 Codec Support" + select SND_SOC_GENERIC_DMAENGINE_PCM + select REGMAP_MMIO + help + Select Y or M to add support for the Codec embedded in the Allwinner + A10 and affiliated SoCs. + +endmenu diff --git a/sound/soc/sunxi/Makefile b/sound/soc/sunxi/Makefile new file mode 100644 index 000000000000..ea8a08c881d6 --- /dev/null +++ b/sound/soc/sunxi/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_SND_SUN4I_CODEC) += sun4i-codec.o + diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c new file mode 100644 index 000000000000..8d59d83b5aa4 --- /dev/null +++ b/sound/soc/sunxi/sun4i-codec.c @@ -0,0 +1,719 @@ +/* + * Copyright 2014 Emilio López <emilio@elopez.com.ar> + * Copyright 2014 Jon Smirl <jonsmirl@gmail.com> + * Copyright 2015 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * Based on the Allwinner SDK driver, released under the GPL. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/of_address.h> +#include <linux/clk.h> +#include <linux/regmap.h> + +#include <sound/core.h> +#include <sound/pcm.h> +#include <sound/pcm_params.h> +#include <sound/soc.h> +#include <sound/tlv.h> +#include <sound/initval.h> +#include <sound/dmaengine_pcm.h> + +/* Codec DAC register offsets and bit fields */ +#define SUN4I_CODEC_DAC_DPC (0x00) +#define SUN4I_CODEC_DAC_DPC_EN_DA (31) +#define SUN4I_CODEC_DAC_DPC_DVOL (12) +#define SUN4I_CODEC_DAC_FIFOC (0x04) +#define SUN4I_CODEC_DAC_FIFOC_DAC_FS (29) +#define SUN4I_CODEC_DAC_FIFOC_FIR_VERSION (28) +#define SUN4I_CODEC_DAC_FIFOC_SEND_LASAT (26) +#define SUN4I_CODEC_DAC_FIFOC_TX_FIFO_MODE (24) +#define SUN4I_CODEC_DAC_FIFOC_DRQ_CLR_CNT (21) +#define SUN4I_CODEC_DAC_FIFOC_TX_TRIG_LEVEL (8) +#define SUN4I_CODEC_DAC_FIFOC_MONO_EN (6) +#define SUN4I_CODEC_DAC_FIFOC_TX_SAMPLE_BITS (5) +#define SUN4I_CODEC_DAC_FIFOC_DAC_DRQ_EN (4) +#define SUN4I_CODEC_DAC_FIFOC_FIFO_FLUSH (0) +#define SUN4I_CODEC_DAC_FIFOS (0x08) +#define SUN4I_CODEC_DAC_TXDATA (0x0c) +#define SUN4I_CODEC_DAC_ACTL (0x10) +#define SUN4I_CODEC_DAC_ACTL_DACAENR (31) +#define SUN4I_CODEC_DAC_ACTL_DACAENL (30) +#define SUN4I_CODEC_DAC_ACTL_MIXEN (29) +#define SUN4I_CODEC_DAC_ACTL_LDACLMIXS (15) +#define SUN4I_CODEC_DAC_ACTL_RDACRMIXS (14) +#define SUN4I_CODEC_DAC_ACTL_LDACRMIXS (13) +#define SUN4I_CODEC_DAC_ACTL_DACPAS (8) +#define SUN4I_CODEC_DAC_ACTL_MIXPAS (7) +#define SUN4I_CODEC_DAC_ACTL_PA_MUTE (6) +#define SUN4I_CODEC_DAC_ACTL_PA_VOL (0) +#define SUN4I_CODEC_DAC_TUNE (0x14) +#define SUN4I_CODEC_DAC_DEBUG (0x18) + +/* Codec ADC register offsets and bit fields */ +#define SUN4I_CODEC_ADC_FIFOC (0x1c) +#define SUN4I_CODEC_ADC_FIFOC_EN_AD (28) +#define SUN4I_CODEC_ADC_FIFOC_RX_FIFO_MODE (24) +#define SUN4I_CODEC_ADC_FIFOC_RX_TRIG_LEVEL (8) +#define SUN4I_CODEC_ADC_FIFOC_MONO_EN (7) +#define SUN4I_CODEC_ADC_FIFOC_RX_SAMPLE_BITS (6) +#define SUN4I_CODEC_ADC_FIFOC_ADC_DRQ_EN (4) +#define SUN4I_CODEC_ADC_FIFOC_FIFO_FLUSH (0) +#define SUN4I_CODEC_ADC_FIFOS (0x20) +#define SUN4I_CODEC_ADC_RXDATA (0x24) +#define SUN4I_CODEC_ADC_ACTL (0x28) +#define SUN4I_CODEC_ADC_ACTL_ADC_R_EN (31) +#define SUN4I_CODEC_ADC_ACTL_ADC_L_EN (30) +#define SUN4I_CODEC_ADC_ACTL_PREG1EN (29) +#define SUN4I_CODEC_ADC_ACTL_PREG2EN (28) +#define SUN4I_CODEC_ADC_ACTL_VMICEN (27) +#define SUN4I_CODEC_ADC_ACTL_VADCG (20) +#define SUN4I_CODEC_ADC_ACTL_ADCIS (17) +#define SUN4I_CODEC_ADC_ACTL_PA_EN (4) +#define SUN4I_CODEC_ADC_ACTL_DDE (3) +#define SUN4I_CODEC_ADC_DEBUG (0x2c) + +/* Other various ADC registers */ +#define SUN4I_CODEC_DAC_TXCNT (0x30) +#define SUN4I_CODEC_ADC_RXCNT (0x34) +#define SUN4I_CODEC_AC_SYS_VERI (0x38) +#define SUN4I_CODEC_AC_MIC_PHONE_CAL (0x3c) + +struct sun4i_codec { + struct device *dev; + struct regmap *regmap; + struct clk *clk_apb; + struct clk 
*clk_module; + + struct snd_dmaengine_dai_dma_data playback_dma_data; +}; + +static void sun4i_codec_start_playback(struct sun4i_codec *scodec) +{ + /* + * FIXME: according to the BSP, we might need to drive a PA + * GPIO high here on some boards + */ + + /* Flush TX FIFO */ + regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC, + BIT(SUN4I_CODEC_DAC_FIFOC_FIFO_FLUSH), + BIT(SUN4I_CODEC_DAC_FIFOC_FIFO_FLUSH)); + + /* Enable DAC DRQ */ + regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC, + BIT(SUN4I_CODEC_DAC_FIFOC_DAC_DRQ_EN), + BIT(SUN4I_CODEC_DAC_FIFOC_DAC_DRQ_EN)); +} + +static void sun4i_codec_stop_playback(struct sun4i_codec *scodec) +{ + /* + * FIXME: according to the BSP, we might need to drive a PA + * GPIO low here on some boards + */ + + /* Disable DAC DRQ */ + regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC, + BIT(SUN4I_CODEC_DAC_FIFOC_DAC_DRQ_EN), + 0); +} + +static int sun4i_codec_trigger(struct snd_pcm_substream *substream, int cmd, + struct snd_soc_dai *dai) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct sun4i_codec *scodec = snd_soc_card_get_drvdata(rtd->card); + + if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) + return -ENOTSUPP; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + sun4i_codec_start_playback(scodec); + break; + + case SNDRV_PCM_TRIGGER_STOP: + case SNDRV_PCM_TRIGGER_SUSPEND: + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + sun4i_codec_stop_playback(scodec); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int sun4i_codec_prepare(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct sun4i_codec *scodec = snd_soc_card_get_drvdata(rtd->card); + u32 val; + + if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) + return -ENOTSUPP; + + /* Flush the TX FIFO */ + regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC, + BIT(SUN4I_CODEC_DAC_FIFOC_FIFO_FLUSH), + BIT(SUN4I_CODEC_DAC_FIFOC_FIFO_FLUSH)); + + /* Set TX FIFO Empty Trigger Level */ + regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC, + 0x3f << SUN4I_CODEC_DAC_FIFOC_TX_TRIG_LEVEL, + 0xf << SUN4I_CODEC_DAC_FIFOC_TX_TRIG_LEVEL); + + if (substream->runtime->rate > 32000) + /* Use 64 bits FIR filter */ + val = 0; + else + /* Use 32 bits FIR filter */ + val = BIT(SUN4I_CODEC_DAC_FIFOC_FIR_VERSION); + + regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC, + BIT(SUN4I_CODEC_DAC_FIFOC_FIR_VERSION), + val); + + /* Send zeros when we have an underrun */ + regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC, + BIT(SUN4I_CODEC_DAC_FIFOC_SEND_LASAT), + 0); + + return 0; +} + +static unsigned long sun4i_codec_get_mod_freq(struct snd_pcm_hw_params *params) +{ + unsigned int rate = params_rate(params); + + switch (rate) { + case 176400: + case 88200: + case 44100: + case 33075: + case 22050: + case 14700: + case 11025: + case 7350: + return 22579200; + + case 192000: + case 96000: + case 48000: + case 32000: + case 24000: + case 16000: + case 12000: + case 8000: + return 24576000; + + default: + return 0; + } +} + +static int sun4i_codec_get_hw_rate(struct snd_pcm_hw_params *params) +{ + unsigned int rate = params_rate(params); + + switch (rate) { + case 192000: + case 176400: + return 6; + + case 96000: + case 88200: + return 7; + + case 48000: + case 44100: + return 0; + + case 32000: + case 33075: + return 1; + + case 24000: + case 22050: + return 2; + + case 16000: + case 14700: + 
return 3; + + case 12000: + case 11025: + return 4; + + case 8000: + case 7350: + return 5; + + default: + return -EINVAL; + } +} + +static int sun4i_codec_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct sun4i_codec *scodec = snd_soc_card_get_drvdata(rtd->card); + unsigned long clk_freq; + int hwrate; + u32 val; + + if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) + return -ENOTSUPP; + + clk_freq = sun4i_codec_get_mod_freq(params); + if (!clk_freq) + return -EINVAL; + + if (clk_set_rate(scodec->clk_module, clk_freq)) + return -EINVAL; + + hwrate = sun4i_codec_get_hw_rate(params); + if (hwrate < 0) + return hwrate; + + /* Set DAC sample rate */ + regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC, + 7 << SUN4I_CODEC_DAC_FIFOC_DAC_FS, + hwrate << SUN4I_CODEC_DAC_FIFOC_DAC_FS); + + /* Set the number of channels we want to use */ + if (params_channels(params) == 1) + val = BIT(SUN4I_CODEC_DAC_FIFOC_MONO_EN); + else + val = 0; + + regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC, + BIT(SUN4I_CODEC_DAC_FIFOC_MONO_EN), + val); + + /* Set the number of sample bits to either 16 or 24 bits */ + if (hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS)->min == 32) { + regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC, + BIT(SUN4I_CODEC_DAC_FIFOC_TX_SAMPLE_BITS), + BIT(SUN4I_CODEC_DAC_FIFOC_TX_SAMPLE_BITS)); + + /* Set TX FIFO mode to padding the LSBs with 0 */ + regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC, + BIT(SUN4I_CODEC_DAC_FIFOC_TX_FIFO_MODE), + 0); + + scodec->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + } else { + regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC, + BIT(SUN4I_CODEC_DAC_FIFOC_TX_SAMPLE_BITS), + 0); + + /* Set TX FIFO mode to repeat the MSB */ + regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC, + BIT(SUN4I_CODEC_DAC_FIFOC_TX_FIFO_MODE), + BIT(SUN4I_CODEC_DAC_FIFOC_TX_FIFO_MODE)); + + scodec->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + } + + return 0; +} + +static int sun4i_codec_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct sun4i_codec *scodec = snd_soc_card_get_drvdata(rtd->card); + + /* + * Stop issuing DRQ when we have room for less than 16 samples + * in our TX FIFO + */ + regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC, + 3 << SUN4I_CODEC_DAC_FIFOC_DRQ_CLR_CNT, + 3 << SUN4I_CODEC_DAC_FIFOC_DRQ_CLR_CNT); + + return clk_prepare_enable(scodec->clk_module); +} + +static void sun4i_codec_shutdown(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct sun4i_codec *scodec = snd_soc_card_get_drvdata(rtd->card); + + clk_disable_unprepare(scodec->clk_module); +} + +static const struct snd_soc_dai_ops sun4i_codec_dai_ops = { + .startup = sun4i_codec_startup, + .shutdown = sun4i_codec_shutdown, + .trigger = sun4i_codec_trigger, + .hw_params = sun4i_codec_hw_params, + .prepare = sun4i_codec_prepare, +}; + +static struct snd_soc_dai_driver sun4i_codec_dai = { + .name = "Codec", + .ops = &sun4i_codec_dai_ops, + .playback = { + .stream_name = "Codec Playback", + .channels_min = 1, + .channels_max = 2, + .rate_min = 8000, + .rate_max = 192000, + .rates = SNDRV_PCM_RATE_8000_48000 | + SNDRV_PCM_RATE_96000 | + SNDRV_PCM_RATE_192000 | + SNDRV_PCM_RATE_KNOT, + .formats = 
SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S32_LE, + .sig_bits = 24, + }, +}; + +/*** Codec ***/ +static const struct snd_kcontrol_new sun4i_codec_pa_mute = + SOC_DAPM_SINGLE("Switch", SUN4I_CODEC_DAC_ACTL, + SUN4I_CODEC_DAC_ACTL_PA_MUTE, 1, 0); + +static DECLARE_TLV_DB_SCALE(sun4i_codec_pa_volume_scale, -6300, 100, 1); + +static const struct snd_kcontrol_new sun4i_codec_widgets[] = { + SOC_SINGLE_TLV("PA Volume", SUN4I_CODEC_DAC_ACTL, + SUN4I_CODEC_DAC_ACTL_PA_VOL, 0x3F, 0, + sun4i_codec_pa_volume_scale), +}; + +static const struct snd_kcontrol_new sun4i_codec_left_mixer_controls[] = { + SOC_DAPM_SINGLE("Left DAC Playback Switch", SUN4I_CODEC_DAC_ACTL, + SUN4I_CODEC_DAC_ACTL_LDACLMIXS, 1, 0), +}; + +static const struct snd_kcontrol_new sun4i_codec_right_mixer_controls[] = { + SOC_DAPM_SINGLE("Right DAC Playback Switch", SUN4I_CODEC_DAC_ACTL, + SUN4I_CODEC_DAC_ACTL_RDACRMIXS, 1, 0), + SOC_DAPM_SINGLE("Left DAC Playback Switch", SUN4I_CODEC_DAC_ACTL, + SUN4I_CODEC_DAC_ACTL_LDACRMIXS, 1, 0), +}; + +static const struct snd_kcontrol_new sun4i_codec_pa_mixer_controls[] = { + SOC_DAPM_SINGLE("DAC Playback Switch", SUN4I_CODEC_DAC_ACTL, + SUN4I_CODEC_DAC_ACTL_DACPAS, 1, 0), + SOC_DAPM_SINGLE("Mixer Playback Switch", SUN4I_CODEC_DAC_ACTL, + SUN4I_CODEC_DAC_ACTL_MIXPAS, 1, 0), +}; + +static const struct snd_soc_dapm_widget sun4i_codec_dapm_widgets[] = { + /* Digital parts of the DACs */ + SND_SOC_DAPM_SUPPLY("DAC", SUN4I_CODEC_DAC_DPC, + SUN4I_CODEC_DAC_DPC_EN_DA, 0, + NULL, 0), + + /* Analog parts of the DACs */ + SND_SOC_DAPM_DAC("Left DAC", "Codec Playback", SUN4I_CODEC_DAC_ACTL, + SUN4I_CODEC_DAC_ACTL_DACAENL, 0), + SND_SOC_DAPM_DAC("Right DAC", "Codec Playback", SUN4I_CODEC_DAC_ACTL, + SUN4I_CODEC_DAC_ACTL_DACAENR, 0), + + /* Mixers */ + SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0, + sun4i_codec_left_mixer_controls, + ARRAY_SIZE(sun4i_codec_left_mixer_controls)), + SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0, + sun4i_codec_right_mixer_controls, + ARRAY_SIZE(sun4i_codec_right_mixer_controls)), + + /* Global Mixer Enable */ + SND_SOC_DAPM_SUPPLY("Mixer Enable", SUN4I_CODEC_DAC_ACTL, + SUN4I_CODEC_DAC_ACTL_MIXEN, 0, NULL, 0), + + /* Pre-Amplifier */ + SND_SOC_DAPM_MIXER("Pre-Amplifier", SUN4I_CODEC_ADC_ACTL, + SUN4I_CODEC_ADC_ACTL_PA_EN, 0, + sun4i_codec_pa_mixer_controls, + ARRAY_SIZE(sun4i_codec_pa_mixer_controls)), + SND_SOC_DAPM_SWITCH("Pre-Amplifier Mute", SND_SOC_NOPM, 0, 0, + &sun4i_codec_pa_mute), + + SND_SOC_DAPM_OUTPUT("HP Right"), + SND_SOC_DAPM_OUTPUT("HP Left"), +}; + +static const struct snd_soc_dapm_route sun4i_codec_dapm_routes[] = { + /* Left DAC Routes */ + { "Left DAC", NULL, "DAC" }, + + /* Right DAC Routes */ + { "Right DAC", NULL, "DAC" }, + + /* Right Mixer Routes */ + { "Right Mixer", NULL, "Mixer Enable" }, + { "Right Mixer", "Left DAC Playback Switch", "Left DAC" }, + { "Right Mixer", "Right DAC Playback Switch", "Right DAC" }, + + /* Left Mixer Routes */ + { "Left Mixer", NULL, "Mixer Enable" }, + { "Left Mixer", "Left DAC Playback Switch", "Left DAC" }, + + /* Pre-Amplifier Mixer Routes */ + { "Pre-Amplifier", "Mixer Playback Switch", "Left Mixer" }, + { "Pre-Amplifier", "Mixer Playback Switch", "Right Mixer" }, + { "Pre-Amplifier", "DAC Playback Switch", "Left DAC" }, + { "Pre-Amplifier", "DAC Playback Switch", "Right DAC" }, + + /* PA -> HP path */ + { "Pre-Amplifier Mute", "Switch", "Pre-Amplifier" }, + { "HP Right", NULL, "Pre-Amplifier Mute" }, + { "HP Left", NULL, "Pre-Amplifier Mute" }, +}; + +static struct snd_soc_codec_driver 
sun4i_codec_codec = { + .controls = sun4i_codec_widgets, + .num_controls = ARRAY_SIZE(sun4i_codec_widgets), + .dapm_widgets = sun4i_codec_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(sun4i_codec_dapm_widgets), + .dapm_routes = sun4i_codec_dapm_routes, + .num_dapm_routes = ARRAY_SIZE(sun4i_codec_dapm_routes), +}; + +static const struct snd_soc_component_driver sun4i_codec_component = { + .name = "sun4i-codec", +}; + +#define SUN4I_CODEC_RATES SNDRV_PCM_RATE_8000_192000 +#define SUN4I_CODEC_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \ + SNDRV_PCM_FMTBIT_S32_LE) + +static int sun4i_codec_dai_probe(struct snd_soc_dai *dai) +{ + struct snd_soc_card *card = snd_soc_dai_get_drvdata(dai); + struct sun4i_codec *scodec = snd_soc_card_get_drvdata(card); + + snd_soc_dai_init_dma_data(dai, &scodec->playback_dma_data, + NULL); + + return 0; +} + +static struct snd_soc_dai_driver dummy_cpu_dai = { + .name = "sun4i-codec-cpu-dai", + .probe = sun4i_codec_dai_probe, + .playback = { + .stream_name = "Playback", + .channels_min = 1, + .channels_max = 2, + .rates = SUN4I_CODEC_RATES, + .formats = SUN4I_CODEC_FORMATS, + .sig_bits = 24, + }, +}; + +static const struct regmap_config sun4i_codec_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = SUN4I_CODEC_AC_MIC_PHONE_CAL, +}; + +static const struct of_device_id sun4i_codec_of_match[] = { + { .compatible = "allwinner,sun4i-a10-codec" }, + { .compatible = "allwinner,sun7i-a20-codec" }, + {} +}; +MODULE_DEVICE_TABLE(of, sun4i_codec_of_match); + +static struct snd_soc_dai_link *sun4i_codec_create_link(struct device *dev, + int *num_links) +{ + struct snd_soc_dai_link *link = devm_kzalloc(dev, sizeof(*link), + GFP_KERNEL); + if (!link) + return NULL; + + link->name = "cdc"; + link->stream_name = "CDC PCM"; + link->codec_dai_name = "Codec"; + link->cpu_dai_name = dev_name(dev); + link->codec_name = dev_name(dev); + link->platform_name = dev_name(dev); + link->dai_fmt = SND_SOC_DAIFMT_I2S; + + *num_links = 1; + + return link; +}; + +static struct snd_soc_card *sun4i_codec_create_card(struct device *dev) +{ + struct snd_soc_card *card; + int ret; + + card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); + if (!card) + return NULL; + + card->dai_link = sun4i_codec_create_link(dev, &card->num_links); + if (!card->dai_link) + return NULL; + + card->dev = dev; + card->name = "sun4i-codec"; + + ret = snd_soc_of_parse_audio_routing(card, "routing"); + if (ret) { + dev_err(dev, "Failed to create our audio routing\n"); + return NULL; + } + + return card; +}; + +static int sun4i_codec_probe(struct platform_device *pdev) +{ + struct snd_soc_card *card; + struct sun4i_codec *scodec; + struct resource *res; + void __iomem *base; + int ret; + + scodec = devm_kzalloc(&pdev->dev, sizeof(*scodec), GFP_KERNEL); + if (!scodec) + return -ENOMEM; + + scodec->dev = &pdev->dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) { + dev_err(&pdev->dev, "Failed to map the registers\n"); + return PTR_ERR(base); + } + + scodec->regmap = devm_regmap_init_mmio(&pdev->dev, base, + &sun4i_codec_regmap_config); + if (IS_ERR(scodec->regmap)) { + dev_err(&pdev->dev, "Failed to create our regmap\n"); + return PTR_ERR(scodec->regmap); + } + + /* Get the clocks from the DT */ + scodec->clk_apb = devm_clk_get(&pdev->dev, "apb"); + if (IS_ERR(scodec->clk_apb)) { + dev_err(&pdev->dev, "Failed to get the APB clock\n"); + return PTR_ERR(scodec->clk_apb); + } + + scodec->clk_module = 
devm_clk_get(&pdev->dev, "codec"); + if (IS_ERR(scodec->clk_module)) { + dev_err(&pdev->dev, "Failed to get the module clock\n"); + return PTR_ERR(scodec->clk_module); + } + + /* Enable the bus clock */ + if (clk_prepare_enable(scodec->clk_apb)) { + dev_err(&pdev->dev, "Failed to enable the APB clock\n"); + return -EINVAL; + } + + /* DMA configuration for TX FIFO */ + scodec->playback_dma_data.addr = res->start + SUN4I_CODEC_DAC_TXDATA; + scodec->playback_dma_data.maxburst = 4; + scodec->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + + ret = snd_soc_register_codec(&pdev->dev, &sun4i_codec_codec, + &sun4i_codec_dai, 1); + if (ret) { + dev_err(&pdev->dev, "Failed to register our codec\n"); + goto err_clk_disable; + } + + ret = devm_snd_soc_register_component(&pdev->dev, + &sun4i_codec_component, + &dummy_cpu_dai, 1); + if (ret) { + dev_err(&pdev->dev, "Failed to register our DAI\n"); + goto err_unregister_codec; + } + + ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0); + if (ret) { + dev_err(&pdev->dev, "Failed to register against DMAEngine\n"); + goto err_unregister_codec; + } + + card = sun4i_codec_create_card(&pdev->dev); + if (!card) { + dev_err(&pdev->dev, "Failed to create our card\n"); + goto err_unregister_codec; + } + + platform_set_drvdata(pdev, card); + snd_soc_card_set_drvdata(card, scodec); + + ret = snd_soc_register_card(card); + if (ret) { + dev_err(&pdev->dev, "Failed to register our card\n"); + goto err_unregister_codec; + } + + return 0; + +err_unregister_codec: + snd_soc_unregister_codec(&pdev->dev); +err_clk_disable: + clk_disable_unprepare(scodec->clk_apb); + return ret; +} + +static int sun4i_codec_remove(struct platform_device *pdev) +{ + struct snd_soc_card *card = platform_get_drvdata(pdev); + struct sun4i_codec *scodec = snd_soc_card_get_drvdata(card); + + snd_soc_unregister_card(card); + snd_soc_unregister_codec(&pdev->dev); + clk_disable_unprepare(scodec->clk_apb); + + return 0; +} + +static struct platform_driver sun4i_codec_driver = { + .driver = { + .name = "sun4i-codec", + .of_match_table = sun4i_codec_of_match, + }, + .probe = sun4i_codec_probe, + .remove = sun4i_codec_remove, +}; +module_platform_driver(sun4i_codec_driver); + +MODULE_DESCRIPTION("Allwinner A10 codec driver"); +MODULE_AUTHOR("Emilio López <emilio@elopez.com.ar>"); +MODULE_AUTHOR("Jon Smirl <jonsmirl@gmail.com>"); +MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>"); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c index 4e0c0e502ade..ba9fc099cf67 100644 --- a/sound/soc/ux500/mop500.c +++ b/sound/soc/ux500/mop500.c @@ -152,6 +152,7 @@ static const struct of_device_id snd_soc_mop500_match[] = { { .compatible = "stericsson,snd-soc-mop500", }, {}, }; +MODULE_DEVICE_TABLE(of, snd_soc_mop500_match); static struct platform_driver snd_soc_mop500_driver = { .driver = { diff --git a/sound/soc/ux500/ux500_msp_dai.c b/sound/soc/ux500/ux500_msp_dai.c index f5df08ded770..6ba8ae9ecc7a 100644 --- a/sound/soc/ux500/ux500_msp_dai.c +++ b/sound/soc/ux500/ux500_msp_dai.c @@ -843,6 +843,7 @@ static const struct of_device_id ux500_msp_i2s_match[] = { { .compatible = "stericsson,ux500-msp-i2s", }, {}, }; +MODULE_DEVICE_TABLE(of, ux500_msp_i2s_match); static struct platform_driver msp_i2s_driver = { .driver = { diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c index 82e350e9501c..ac75816ada7c 100644 --- a/sound/synth/emux/emux_oss.c +++ b/sound/synth/emux/emux_oss.c @@ -69,7 +69,8 @@ snd_emux_init_seq_oss(struct 
snd_emux *emu) struct snd_seq_oss_reg *arg; struct snd_seq_device *dev; - if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS, + /* using device#1 here for avoiding conflicts with OPL3 */ + if (snd_seq_device_new(emu->card, 1, SNDRV_SEQ_DEV_ID_OSS, sizeof(struct snd_seq_oss_reg), &dev) < 0) return; diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 2975632d51e2..c8fe6d177119 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -41,6 +41,7 @@ FEATURE_TESTS ?= \ libelf-getphdrnum \ libelf-mmap \ libnuma \ + numa_num_possible_cpus \ libperl \ libpython \ libpython-version \ @@ -51,7 +52,8 @@ FEATURE_TESTS ?= \ timerfd \ libdw-dwarf-unwind \ zlib \ - lzma + lzma \ + get_cpuid FEATURE_DISPLAY ?= \ dwarf \ @@ -61,13 +63,15 @@ FEATURE_DISPLAY ?= \ libbfd \ libelf \ libnuma \ + numa_num_possible_cpus \ libperl \ libpython \ libslang \ libunwind \ libdw-dwarf-unwind \ zlib \ - lzma + lzma \ + get_cpuid # Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features. # If in the future we need per-feature checks/flags for features not diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index 74ca42093d70..e43a2971bf56 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -19,6 +19,7 @@ FILES= \ test-libelf-getphdrnum.bin \ test-libelf-mmap.bin \ test-libnuma.bin \ + test-numa_num_possible_cpus.bin \ test-libperl.bin \ test-libpython.bin \ test-libpython-version.bin \ @@ -34,7 +35,8 @@ FILES= \ test-compile-x32.bin \ test-zlib.bin \ test-lzma.bin \ - test-bpf.bin + test-bpf.bin \ + test-get_cpuid.bin CC := $(CROSS_COMPILE)gcc -MD PKG_CONFIG := $(CROSS_COMPILE)pkg-config @@ -87,6 +89,9 @@ test-libelf-getphdrnum.bin: test-libnuma.bin: $(BUILD) -lnuma +test-numa_num_possible_cpus.bin: + $(BUILD) -lnuma + test-libunwind.bin: $(BUILD) -lelf @@ -162,6 +167,9 @@ test-zlib.bin: test-lzma.bin: $(BUILD) -llzma +test-get_cpuid.bin: + $(BUILD) + test-bpf.bin: $(BUILD) diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c index 84689a67814a..33cf6f20bd4e 100644 --- a/tools/build/feature/test-all.c +++ b/tools/build/feature/test-all.c @@ -77,6 +77,10 @@ # include "test-libnuma.c" #undef main +#define main main_test_numa_num_possible_cpus +# include "test-numa_num_possible_cpus.c" +#undef main + #define main main_test_timerfd # include "test-timerfd.c" #undef main @@ -117,6 +121,10 @@ # include "test-lzma.c" #undef main +#define main main_test_get_cpuid +# include "test-get_cpuid.c" +#undef main + int main(int argc, char *argv[]) { main_test_libpython(); @@ -136,6 +144,7 @@ int main(int argc, char *argv[]) main_test_libbfd(); main_test_backtrace(); main_test_libnuma(); + main_test_numa_num_possible_cpus(); main_test_timerfd(); main_test_stackprotector_all(); main_test_libdw_dwarf_unwind(); @@ -143,6 +152,7 @@ int main(int argc, char *argv[]) main_test_zlib(); main_test_pthread_attr_setaffinity_np(); main_test_lzma(); + main_test_get_cpuid(); return 0; } diff --git a/tools/build/feature/test-get_cpuid.c b/tools/build/feature/test-get_cpuid.c new file mode 100644 index 000000000000..d7a2c407130d --- /dev/null +++ b/tools/build/feature/test-get_cpuid.c @@ -0,0 +1,7 @@ +#include <cpuid.h> + +int main(void) +{ + unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0; + return __get_cpuid(0x15, &eax, &ebx, &ecx, &edx); +} diff --git a/tools/build/feature/test-numa_num_possible_cpus.c b/tools/build/feature/test-numa_num_possible_cpus.c new file mode 100644 index 000000000000..2606e94b0659 --- 
/dev/null +++ b/tools/build/feature/test-numa_num_possible_cpus.c @@ -0,0 +1,6 @@ +#include <numa.h> + +int main(void) +{ + return numa_num_possible_cpus(); +} diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index 4d885934b919..cf42b090477b 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -3795,7 +3795,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, struct format_field *field; struct printk_map *printk; long long val, fval; - unsigned long addr; + unsigned long long addr; char *str; unsigned char *hex; int print; @@ -3828,13 +3828,30 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, */ if (!(field->flags & FIELD_IS_ARRAY) && field->size == pevent->long_size) { - addr = *(unsigned long *)(data + field->offset); + + /* Handle heterogeneous recording and processing + * architectures + * + * CASE I: + * Traces recorded on 32-bit devices (32-bit + * addressing) and processed on 64-bit devices: + * In this case, only 32 bits should be read. + * + * CASE II: + * Traces recorded on 64 bit devices and processed + * on 32-bit devices: + * In this case, 64 bits must be read. + */ + addr = (pevent->long_size == 8) ? + *(unsigned long long *)(data + field->offset) : + (unsigned long long)*(unsigned int *)(data + field->offset); + /* Check if it matches a print format */ printk = find_printk(pevent, addr); if (printk) trace_seq_puts(s, printk->printk); else - trace_seq_printf(s, "%lx", addr); + trace_seq_printf(s, "%llx", addr); break; } str = malloc(len + 1); diff --git a/tools/perf/Documentation/intel-pt.txt b/tools/perf/Documentation/intel-pt.txt index 4a0501d7a3b4..c94c9de3173e 100644 --- a/tools/perf/Documentation/intel-pt.txt +++ b/tools/perf/Documentation/intel-pt.txt @@ -364,21 +364,6 @@ cyc_thresh Specifies how frequently CYC packets are produced - see cyc CYC packets are not requested by default. -no_force_psb This is a driver option and is not in the IA32_RTIT_CTL MSR. - - It stops the driver resetting the byte count to zero whenever - enabling the trace (for example on context switches) which in - turn results in no PSB being forced. However some processors - will produce a PSB anyway. - - In any case, there is still a PSB when the trace is enabled for - the first time. - - no_force_psb can be used to slightly decrease the trace size but - may make it harder for the decoder to recover from errors. - - no_force_psb is not selected by default. 
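The event-parse.c hunk above widens the printk address to unsigned long long and reads it according to the long size recorded in the trace rather than the decoder's own word size, so 32-bit traces decode correctly on 64-bit hosts and vice versa. A minimal, self-contained sketch of that size-dependent read (the buffer layout and helper name here are illustrative, not the libtraceevent API):

    #include <stdio.h>
    #include <string.h>

    /*
     * Read a "target long" out of raw event data, using the long size
     * recorded by the machine that produced the trace, not sizeof(long)
     * on the machine decoding it.
     */
    static unsigned long long read_target_long(const void *data, int offset,
                                               int recorded_long_size)
    {
        unsigned long long val = 0;

        if (recorded_long_size == 8) {
            memcpy(&val, (const char *)data + offset, 8);
        } else {
            unsigned int v32;

            memcpy(&v32, (const char *)data + offset, 4);
            val = v32;  /* zero-extend 32-bit addresses */
        }
        return val;
    }

    int main(void)
    {
        unsigned char buf[8] = { 0x78, 0x56, 0x34, 0x12, 0, 0, 0, 0 };

        /* On a little-endian host a 32-bit trace field prints as 12345678. */
        printf("%llx\n", read_target_long(buf, 0, 4));
        return 0;
    }

The matching format change ("%lx" to "%llx") keeps the print path correct once the value is always held in a 64-bit variable.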
- new snapshot option ------------------- diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index eb51325e8ad9..284a76e04628 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -768,8 +768,8 @@ static int process_exit_event(struct perf_tool *tool, if (!evsel->attr.sample_id_all) { sample->cpu = 0; sample->time = 0; - sample->tid = event->comm.tid; - sample->pid = event->comm.pid; + sample->tid = event->fork.tid; + sample->pid = event->fork.pid; } print_sample_start(sample, thread, evsel); perf_event__fprintf(event, stdout); diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile index 827557fc7511..38a08539f4bf 100644 --- a/tools/perf/config/Makefile +++ b/tools/perf/config/Makefile @@ -573,9 +573,14 @@ ifndef NO_LIBNUMA msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev); NO_LIBNUMA := 1 else - CFLAGS += -DHAVE_LIBNUMA_SUPPORT - EXTLIBS += -lnuma - $(call detected,CONFIG_NUMA) + ifeq ($(feature-numa_num_possible_cpus), 0) + msg := $(warning Old numa library found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev >= 2.0.8); + NO_LIBNUMA := 1 + else + CFLAGS += -DHAVE_LIBNUMA_SUPPORT + EXTLIBS += -lnuma + $(call detected,CONFIG_NUMA) + endif endif endif @@ -621,8 +626,13 @@ ifdef LIBBABELTRACE endif ifndef NO_AUXTRACE - $(call detected,CONFIG_AUXTRACE) - CFLAGS += -DHAVE_AUXTRACE_SUPPORT + ifeq ($(feature-get_cpuid), 0) + msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc); + NO_AUXTRACE := 1 + else + $(call detected,CONFIG_AUXTRACE) + CFLAGS += -DHAVE_AUXTRACE_SUPPORT + endif endif # Among the variables below, these: diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c index 1aa21c90731b..5b83f56a3b6f 100644 --- a/tools/perf/tests/sw-clock.c +++ b/tools/perf/tests/sw-clock.c @@ -34,6 +34,8 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id) .disabled = 1, .freq = 1, }; + struct cpu_map *cpus; + struct thread_map *threads; attr.sample_freq = 500; @@ -50,14 +52,19 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id) } perf_evlist__add(evlist, evsel); - evlist->cpus = cpu_map__dummy_new(); - evlist->threads = thread_map__new_by_tid(getpid()); - if (!evlist->cpus || !evlist->threads) { + cpus = cpu_map__dummy_new(); + threads = thread_map__new_by_tid(getpid()); + if (!cpus || !threads) { err = -ENOMEM; pr_debug("Not enough memory to create thread/cpu maps\n"); - goto out_delete_evlist; + goto out_free_maps; } + perf_evlist__set_maps(evlist, cpus, threads); + + cpus = NULL; + threads = NULL; + if (perf_evlist__open(evlist)) { const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate"; @@ -107,6 +114,9 @@ next_event: err = -1; } +out_free_maps: + cpu_map__put(cpus); + thread_map__put(threads); out_delete_evlist: perf_evlist__delete(evlist); return err; diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c index 3a8fedef83bc..add16385f13e 100644 --- a/tools/perf/tests/task-exit.c +++ b/tools/perf/tests/task-exit.c @@ -43,6 +43,8 @@ int test__task_exit(void) }; const char *argv[] = { "true", NULL }; char sbuf[STRERR_BUFSIZE]; + struct cpu_map *cpus; + struct thread_map *threads; signal(SIGCHLD, sig_handler); @@ -58,14 +60,19 @@ int test__task_exit(void) * perf_evlist__prepare_workload we'll fill in the only thread * we're monitoring, the one forked there. 
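The sw-clock test above (and the task-exit test that follows) moves to a pattern where the caller builds the cpu and thread maps, hands them to perf_evlist__set_maps(), and then clears its local pointers because the evlist now owns the single reference; on the error path the caller puts the maps itself. A hedged sketch of that ownership idiom with a generic refcounted object (the struct and function names are illustrative, not the perf API):

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for cpu_map/thread_map: a simple refcounted object. */
    struct map {
        int refcnt;
    };

    static struct map *map_new(void)
    {
        struct map *m = calloc(1, sizeof(*m));

        if (m)
            m->refcnt = 1;  /* the caller owns this initial reference */
        return m;
    }

    static void map_put(struct map *m)
    {
        if (m && --m->refcnt == 0)
            free(m);
    }

    struct evlist {
        struct map *cpus;
    };

    /* Takes ownership of the caller's reference, as the reworked
     * perf_evlist__set_maps() does. */
    static void evlist_set_maps(struct evlist *e, struct map *cpus)
    {
        if (cpus != e->cpus) {
            map_put(e->cpus);
            e->cpus = cpus;
        }
    }

    int main(void)
    {
        struct evlist e = { NULL };
        struct map *cpus = map_new();

        if (!cpus)
            return 1;

        evlist_set_maps(&e, cpus);
        cpus = NULL;        /* the evlist owns the reference now */

        map_put(e.cpus);    /* teardown */
        printf("ok\n");
        return 0;
    }

The point of clearing the local pointer is that a later error path can unconditionally put whatever it still holds without double-freeing what the evlist already owns.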
*/ - evlist->cpus = cpu_map__dummy_new(); - evlist->threads = thread_map__new_by_tid(-1); - if (!evlist->cpus || !evlist->threads) { + cpus = cpu_map__dummy_new(); + threads = thread_map__new_by_tid(-1); + if (!cpus || !threads) { err = -ENOMEM; pr_debug("Not enough memory to create thread/cpu maps\n"); - goto out_delete_evlist; + goto out_free_maps; } + perf_evlist__set_maps(evlist, cpus, threads); + + cpus = NULL; + threads = NULL; + err = perf_evlist__prepare_workload(evlist, &target, argv, false, workload_exec_failed_signal); if (err < 0) { @@ -114,6 +121,9 @@ retry: err = -1; } +out_free_maps: + cpu_map__put(cpus); + thread_map__put(threads); out_delete_evlist: perf_evlist__delete(evlist); return err; diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index cf86f2d3a5e7..c04c60d4863c 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -1968,7 +1968,8 @@ skip_annotation: &options[nr_options], dso); nr_options += add_map_opt(browser, &actions[nr_options], &options[nr_options], - browser->selection->map); + browser->selection ? + browser->selection->map : NULL); /* perf script support */ if (browser->he_selection) { @@ -1976,6 +1977,15 @@ skip_annotation: &actions[nr_options], &options[nr_options], thread, NULL); + /* + * Note that browser->selection != NULL + * when browser->he_selection is not NULL, + * so we don't need to check browser->selection + * before fetching browser->selection->sym like what + * we do before fetching browser->selection->map. + * + * See hist_browser__show_entry. + */ nr_options += add_script_opt(browser, &actions[nr_options], &options[nr_options], diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 349bc96ca1fe..e5f18a288b74 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -17,6 +17,7 @@ libperf-y += levenshtein.o libperf-y += llvm-utils.o libperf-y += parse-options.o libperf-y += parse-events.o +libperf-y += perf_regs.o libperf-y += path.o libperf-y += rbtree.o libperf-y += bitmap.o @@ -103,7 +104,6 @@ libperf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o libperf-y += scripting-engines/ -libperf-$(CONFIG_PERF_REGS) += perf_regs.o libperf-$(CONFIG_ZLIB) += zlib.o libperf-$(CONFIG_LZMA) += lzma.o diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index d51a5200c8af..c8fc8a258f42 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -124,6 +124,33 @@ void perf_evlist__delete(struct perf_evlist *evlist) free(evlist); } +static void __perf_evlist__propagate_maps(struct perf_evlist *evlist, + struct perf_evsel *evsel) +{ + /* + * We already have cpus for evsel (via PMU sysfs) so + * keep it, if there's no target cpu list defined. 
+ */ + if (!evsel->own_cpus || evlist->has_user_cpus) { + cpu_map__put(evsel->cpus); + evsel->cpus = cpu_map__get(evlist->cpus); + } else if (evsel->cpus != evsel->own_cpus) { + cpu_map__put(evsel->cpus); + evsel->cpus = cpu_map__get(evsel->own_cpus); + } + + thread_map__put(evsel->threads); + evsel->threads = thread_map__get(evlist->threads); +} + +static void perf_evlist__propagate_maps(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) + __perf_evlist__propagate_maps(evlist, evsel); +} + void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) { entry->evlist = evlist; @@ -133,18 +160,19 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) if (!evlist->nr_entries++) perf_evlist__set_id_pos(evlist); + + __perf_evlist__propagate_maps(evlist, entry); } void perf_evlist__splice_list_tail(struct perf_evlist *evlist, - struct list_head *list, - int nr_entries) + struct list_head *list) { - bool set_id_pos = !evlist->nr_entries; + struct perf_evsel *evsel, *temp; - list_splice_tail(list, &evlist->entries); - evlist->nr_entries += nr_entries; - if (set_id_pos) - perf_evlist__set_id_pos(evlist); + __evlist__for_each_safe(list, temp, evsel) { + list_del_init(&evsel->node); + perf_evlist__add(evlist, evsel); + } } void __perf_evlist__set_leader(struct list_head *list) @@ -210,7 +238,7 @@ static int perf_evlist__add_attrs(struct perf_evlist *evlist, list_add_tail(&evsel->node, &head); } - perf_evlist__splice_list_tail(evlist, &head, nr_attrs); + perf_evlist__splice_list_tail(evlist, &head); return 0; @@ -1103,71 +1131,56 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false); } -static int perf_evlist__propagate_maps(struct perf_evlist *evlist, - bool has_user_cpus) -{ - struct perf_evsel *evsel; - - evlist__for_each(evlist, evsel) { - /* - * We already have cpus for evsel (via PMU sysfs) so - * keep it, if there's no target cpu list defined. 
- */ - if (evsel->cpus && has_user_cpus) - cpu_map__put(evsel->cpus); - - if (!evsel->cpus || has_user_cpus) - evsel->cpus = cpu_map__get(evlist->cpus); - - evsel->threads = thread_map__get(evlist->threads); - - if ((evlist->cpus && !evsel->cpus) || - (evlist->threads && !evsel->threads)) - return -ENOMEM; - } - - return 0; -} - int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) { - evlist->threads = thread_map__new_str(target->pid, target->tid, - target->uid); + struct cpu_map *cpus; + struct thread_map *threads; + + threads = thread_map__new_str(target->pid, target->tid, target->uid); - if (evlist->threads == NULL) + if (!threads) return -1; if (target__uses_dummy_map(target)) - evlist->cpus = cpu_map__dummy_new(); + cpus = cpu_map__dummy_new(); else - evlist->cpus = cpu_map__new(target->cpu_list); + cpus = cpu_map__new(target->cpu_list); - if (evlist->cpus == NULL) + if (!cpus) goto out_delete_threads; - return perf_evlist__propagate_maps(evlist, !!target->cpu_list); + evlist->has_user_cpus = !!target->cpu_list; + + perf_evlist__set_maps(evlist, cpus, threads); + + return 0; out_delete_threads: - thread_map__put(evlist->threads); - evlist->threads = NULL; + thread_map__put(threads); return -1; } -int perf_evlist__set_maps(struct perf_evlist *evlist, - struct cpu_map *cpus, - struct thread_map *threads) +void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus, + struct thread_map *threads) { - if (evlist->cpus) + /* + * Allow for the possibility that one or another of the maps isn't being + * changed i.e. don't put it. Note we are assuming the maps that are + * being applied are brand new and evlist is taking ownership of the + * original reference count of 1. If that is not the case it is up to + * the caller to increase the reference count. 
+ */ + if (cpus != evlist->cpus) { cpu_map__put(evlist->cpus); + evlist->cpus = cpus; + } - evlist->cpus = cpus; - - if (evlist->threads) + if (threads != evlist->threads) { thread_map__put(evlist->threads); + evlist->threads = threads; + } - evlist->threads = threads; - - return perf_evlist__propagate_maps(evlist, false); + perf_evlist__propagate_maps(evlist); } int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel) @@ -1387,6 +1400,8 @@ void perf_evlist__close(struct perf_evlist *evlist) static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist) { + struct cpu_map *cpus; + struct thread_map *threads; int err = -ENOMEM; /* @@ -1398,20 +1413,19 @@ static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist) * error, and we may not want to do that fallback to a * default cpu identity map :-\ */ - evlist->cpus = cpu_map__new(NULL); - if (evlist->cpus == NULL) + cpus = cpu_map__new(NULL); + if (!cpus) goto out; - evlist->threads = thread_map__new_dummy(); - if (evlist->threads == NULL) - goto out_free_cpus; + threads = thread_map__new_dummy(); + if (!threads) + goto out_put; - err = 0; + perf_evlist__set_maps(evlist, cpus, threads); out: return err; -out_free_cpus: - cpu_map__put(evlist->cpus); - evlist->cpus = NULL; +out_put: + cpu_map__put(cpus); goto out; } diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index b39a6198f4ac..115d8b53c601 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -42,6 +42,7 @@ struct perf_evlist { int nr_mmaps; bool overwrite; bool enabled; + bool has_user_cpus; size_t mmap_len; int id_pos; int is_pos; @@ -155,9 +156,8 @@ int perf_evlist__enable_event_idx(struct perf_evlist *evlist, void perf_evlist__set_selected(struct perf_evlist *evlist, struct perf_evsel *evsel); -int perf_evlist__set_maps(struct perf_evlist *evlist, - struct cpu_map *cpus, - struct thread_map *threads); +void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus, + struct thread_map *threads); int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target); int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel); @@ -179,8 +179,7 @@ bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist); bool perf_evlist__valid_read_format(struct perf_evlist *evlist); void perf_evlist__splice_list_tail(struct perf_evlist *evlist, - struct list_head *list, - int nr_entries); + struct list_head *list); static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist) { diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index c53f79123b37..5410483d5219 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1033,6 +1033,7 @@ void perf_evsel__exit(struct perf_evsel *evsel) perf_evsel__free_config_terms(evsel); close_cgroup(evsel->cgrp); cpu_map__put(evsel->cpus); + cpu_map__put(evsel->own_cpus); thread_map__put(evsel->threads); zfree(&evsel->group_name); zfree(&evsel->name); diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 298e6bbca200..ef8925f7211a 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -98,6 +98,7 @@ struct perf_evsel { struct cgroup_sel *cgrp; void *handler; struct cpu_map *cpus; + struct cpu_map *own_cpus; struct thread_map *threads; unsigned int sample_size; int id_pos; diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 41814547da15..fce6634aebe2 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1438,7 
+1438,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused, if (ph->needs_swap) nr = bswap_32(nr); - ph->env.nr_cpus_online = nr; + ph->env.nr_cpus_avail = nr; ret = readn(fd, &nr, sizeof(nr)); if (ret != sizeof(nr)) @@ -1447,7 +1447,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused, if (ph->needs_swap) nr = bswap_32(nr); - ph->env.nr_cpus_avail = nr; + ph->env.nr_cpus_online = nr; return 0; } diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c index ea768625ab5b..eb0e7f8bf515 100644 --- a/tools/perf/util/intel-bts.c +++ b/tools/perf/util/intel-bts.c @@ -623,7 +623,7 @@ static int intel_bts_process_event(struct perf_session *session, if (err) return err; if (event->header.type == PERF_RECORD_EXIT) { - err = intel_bts_process_tid_exit(bts, event->comm.tid); + err = intel_bts_process_tid_exit(bts, event->fork.tid); if (err) return err; } diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index bb41c20e6005..535d86f8e4d1 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -1494,7 +1494,7 @@ static int intel_pt_process_event(struct perf_session *session, if (pt->timeless_decoding) { if (event->header.type == PERF_RECORD_EXIT) { err = intel_pt_process_timeless_queues(pt, - event->comm.tid, + event->fork.tid, sample->time); } } else if (timestamp) { diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index d826e6f515db..21ed6ee63da9 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -287,8 +287,8 @@ __add_event(struct list_head *list, int *idx, if (!evsel) return NULL; - if (cpus) - evsel->cpus = cpu_map__get(cpus); + evsel->cpus = cpu_map__get(cpus); + evsel->own_cpus = cpu_map__get(cpus); if (name) evsel->name = strdup(name); @@ -1140,10 +1140,9 @@ int parse_events(struct perf_evlist *evlist, const char *str, ret = parse_events__scanner(str, &data, PE_START_EVENTS); perf_pmu__parse_cleanup(); if (!ret) { - int entries = data.idx - evlist->nr_entries; struct perf_evsel *last; - perf_evlist__splice_list_tail(evlist, &data.list, entries); + perf_evlist__splice_list_tail(evlist, &data.list); evlist->nr_groups += data.nr_groups; last = perf_evlist__last(evlist); last->cmdline_group_boundary = true; diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index 591905a02b92..9cd70819c795 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -255,7 +255,7 @@ PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF sep_dc list_add_tail(&term->list, head); ALLOC_LIST(list); - ABORT_ON(parse_events_add_pmu(list, &data->idx, "cpu", head)); + ABORT_ON(parse_events_add_pmu(data, list, "cpu", head)); parse_events__free_terms(head); $$ = list; } diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c index 885e8ac83997..6b8eb13e14e4 100644 --- a/tools/perf/util/perf_regs.c +++ b/tools/perf/util/perf_regs.c @@ -6,6 +6,7 @@ const struct sample_reg __weak sample_reg_masks[] = { SMPL_REG_END }; +#ifdef HAVE_PERF_REGS_SUPPORT int perf_reg_value(u64 *valp, struct regs_dump *regs, int id) { int i, idx = 0; @@ -29,3 +30,4 @@ out: *valp = regs->cache_regs[id]; return 0; } +#endif diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h index 2984dcc54d67..679d6e493962 100644 --- a/tools/perf/util/perf_regs.h +++ b/tools/perf/util/perf_regs.h @@ -2,6 +2,7 @@ #define __PERF_REGS_H #include <linux/types.h> +#include <linux/compiler.h> struct regs_dump; diff --git 
a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index eb5f18b75402..c6f9af78f6f5 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -270,12 +270,13 @@ static int kernel_get_module_dso(const char *module, struct dso **pdso) int ret = 0; if (module) { - list_for_each_entry(dso, &host_machine->dsos.head, node) { - if (!dso->kernel) - continue; - if (strncmp(dso->short_name + 1, module, - dso->short_name_len - 2) == 0) - goto found; + char module_name[128]; + + snprintf(module_name, sizeof(module_name), "[%s]", module); + map = map_groups__find_by_name(&host_machine->kmaps, MAP__FUNCTION, module_name); + if (map) { + dso = map->dso; + goto found; } pr_debug("Failed to find module %s.\n", module); return -ENOENT; diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 8a4537ee9bc3..fc3f7c922f99 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1580,7 +1580,10 @@ static int __perf_session__process_events(struct perf_session *session, file_offset = page_offset; head = data_offset - page_offset; - if (data_size && (data_offset + data_size < file_size)) + if (data_size == 0) + goto out; + + if (data_offset + data_size < file_size) file_size = data_offset + data_size; ui_progress__init(&prog, file_size, "Processing events..."); diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 415c359de465..2d065d065b67 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -196,7 +196,8 @@ static void zero_per_pkg(struct perf_evsel *counter) memset(counter->per_pkg_mask, 0, MAX_NR_CPUS); } -static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip) +static int check_per_pkg(struct perf_evsel *counter, + struct perf_counts_values *vals, int cpu, bool *skip) { unsigned long *mask = counter->per_pkg_mask; struct cpu_map *cpus = perf_evsel__cpus(counter); @@ -218,6 +219,17 @@ static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip) counter->per_pkg_mask = mask; } + /* + * we do not consider an event that has not run as a good + * instance to mark a package as used (skip=1). Otherwise + * we may run into a situation where the first CPU in a package + * is not running anything, yet the second is, and this function + * would mark the package as used after the first CPU and would + * not read the values from the second CPU. 
+ */ + if (!(vals->run && vals->ena)) + return 0; + s = cpu_map__get_socket(cpus, cpu); if (s < 0) return -1; @@ -235,7 +247,7 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel static struct perf_counts_values zero; bool skip = false; - if (check_per_pkg(evsel, cpu, &skip)) { + if (check_per_pkg(evsel, count, cpu, &skip)) { pr_err("failed to read per-pkg counter\n"); return -1; } diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 53bb5f59ec58..475d88d0a1c9 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -38,7 +38,7 @@ static inline char *bfd_demangle(void __maybe_unused *v, #endif #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT -int elf_getphdrnum(Elf *elf, size_t *dst) +static int elf_getphdrnum(Elf *elf, size_t *dst) { GElf_Ehdr gehdr; GElf_Ehdr *ehdr; @@ -1271,8 +1271,6 @@ out_close: static int kcore__init(struct kcore *kcore, char *filename, int elfclass, bool temp) { - GElf_Ehdr *ehdr; - kcore->elfclass = elfclass; if (temp) @@ -1289,9 +1287,7 @@ static int kcore__init(struct kcore *kcore, char *filename, int elfclass, if (!gelf_newehdr(kcore->elf, elfclass)) goto out_end; - ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr); - if (!ehdr) - goto out_end; + memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr)); return 0; @@ -1348,23 +1344,18 @@ static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count) static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset, u64 addr, u64 len) { - GElf_Phdr gphdr; - GElf_Phdr *phdr; - - phdr = gelf_getphdr(kcore->elf, idx, &gphdr); - if (!phdr) - return -1; - - phdr->p_type = PT_LOAD; - phdr->p_flags = PF_R | PF_W | PF_X; - phdr->p_offset = offset; - phdr->p_vaddr = addr; - phdr->p_paddr = 0; - phdr->p_filesz = len; - phdr->p_memsz = len; - phdr->p_align = page_size; - - if (!gelf_update_phdr(kcore->elf, idx, phdr)) + GElf_Phdr phdr = { + .p_type = PT_LOAD, + .p_flags = PF_R | PF_W | PF_X, + .p_offset = offset, + .p_vaddr = addr, + .p_paddr = 0, + .p_filesz = len, + .p_memsz = len, + .p_align = page_size, + }; + + if (!gelf_update_phdr(kcore->elf, idx, &phdr)) return -1; return 0; diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index 7acafb3c5592..c2cd9bf2348b 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c @@ -709,7 +709,7 @@ bool find_process(const char *name) dir = opendir(procfs__mountpoint()); if (!dir) - return -1; + return false; /* Walk through the directory. 
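The check_per_pkg() change above only lets a counter instance mark its package as "seen" when it actually ran (vals->run && vals->ena); otherwise an idle first CPU in a package would swallow the reading from a busy second CPU. A small sketch of the same guard with a toy package bitmap (the field and function names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct counts {
        unsigned long long val, ena, run;
    };

    /*
     * Mark the package as used only if this instance of the event actually
     * ran; an instance that never ran must not cause later CPUs in the
     * same package to be skipped.
     */
    static bool should_skip_pkg(unsigned long *pkg_mask, int pkg,
                                const struct counts *vals)
    {
        if (!(vals->run && vals->ena))
            return false;               /* did not run: neither mark nor skip */

        if (*pkg_mask & (1UL << pkg))
            return true;                /* package already counted: skip */

        *pkg_mask |= 1UL << pkg;        /* first live reading for this package */
        return false;
    }

    int main(void)
    {
        unsigned long mask = 0;
        struct counts idle = { 0, 0, 0 }, busy = { 42, 1, 1 };

        printf("%d %d %d\n",
               should_skip_pkg(&mask, 0, &idle),   /* 0: idle CPU, not marked   */
               should_skip_pkg(&mask, 0, &busy),   /* 0: first live reading     */
               should_skip_pkg(&mask, 0, &busy));  /* 1: package already counted */
        return 0;
    }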
*/ while (ret && (d = readdir(dir)) != NULL) { diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 9655cb49c7cb..bde0ef1a63df 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -71,8 +71,11 @@ unsigned int extra_msr_offset32; unsigned int extra_msr_offset64; unsigned int extra_delta_offset32; unsigned int extra_delta_offset64; +unsigned int aperf_mperf_multiplier = 1; int do_smi; double bclk; +double base_hz; +double tsc_tweak = 1.0; unsigned int show_pkg; unsigned int show_core; unsigned int show_cpu; @@ -502,7 +505,7 @@ int format_counters(struct thread_data *t, struct core_data *c, /* %Busy */ if (has_aperf) { if (!skip_c0) - outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc); + outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc/tsc_tweak); else outp += sprintf(outp, "********"); } @@ -510,7 +513,7 @@ int format_counters(struct thread_data *t, struct core_data *c, /* Bzy_MHz */ if (has_aperf) outp += sprintf(outp, "%8.0f", - 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float); + 1.0 * t->tsc * tsc_tweak / units * t->aperf / t->mperf / interval_float); /* TSC_MHz */ outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float); @@ -984,6 +987,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) return -3; if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf)) return -4; + t->aperf = t->aperf * aperf_mperf_multiplier; + t->mperf = t->mperf * aperf_mperf_multiplier; } if (do_smi) { @@ -1149,6 +1154,19 @@ int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; + +static void +calculate_tsc_tweak() +{ + unsigned long long msr; + unsigned int base_ratio; + + get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr); + base_ratio = (msr >> 8) & 0xFF; + base_hz = base_ratio * bclk * 1000000; + tsc_tweak = base_hz / tsc_hz; +} + static void dump_nhm_platform_info(void) { @@ -1926,8 +1944,6 @@ int has_config_tdp(unsigned int family, unsigned int model) switch (model) { case 0x3A: /* IVB */ - case 0x3E: /* IVB Xeon */ - case 0x3C: /* HSW */ case 0x3F: /* HSX */ case 0x45: /* HSW */ @@ -2543,6 +2559,13 @@ int is_knl(unsigned int family, unsigned int model) return 0; } +unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model) +{ + if (is_knl(family, model)) + return 1024; + return 1; +} + #define SLM_BCLK_FREQS 5 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0}; @@ -2744,6 +2767,9 @@ void process_cpuid() } } + if (has_aperf) + aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model); + do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model); do_snb_cstates = has_snb_msrs(family, model); do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2); @@ -2762,6 +2788,9 @@ void process_cpuid() if (debug) dump_cstate_pstate_config_info(); + if (has_skl_msrs(family, model)) + calculate_tsc_tweak(); + return; } @@ -3090,7 +3119,7 @@ int get_and_dump_counters(void) } void print_version() { - fprintf(stderr, "turbostat version 4.7 17-June, 2015" + fprintf(stderr, "turbostat version 4.8 26-Sep, 2015" " - Len Brown <lenb@kernel.org>\n"); } diff --git 
a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 89b05e2222c9..cfe121353eec 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -16,12 +16,12 @@ TARGETS += powerpc TARGETS += ptrace TARGETS += seccomp TARGETS += size +TARGETS += static_keys TARGETS += sysctl ifneq (1, $(quicktest)) TARGETS += timers endif TARGETS += user -TARGETS += jumplabel TARGETS += vm TARGETS += x86 TARGETS += zram diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile index 6b76bfdc847e..4e400eb83657 100644 --- a/tools/testing/selftests/exec/Makefile +++ b/tools/testing/selftests/exec/Makefile @@ -1,6 +1,6 @@ CFLAGS = -Wall BINARIES = execveat -DEPS = execveat.symlink execveat.denatured script +DEPS = execveat.symlink execveat.denatured script subdir all: $(BINARIES) $(DEPS) subdir: @@ -22,7 +22,5 @@ TEST_FILES := $(DEPS) include ../lib.mk -override EMIT_TESTS := echo "mkdir -p subdir; (./execveat && echo \"selftests: execveat [PASS]\") || echo \"selftests: execveat [FAIL]\"" - clean: rm -rf $(BINARIES) $(DEPS) subdir.moved execveat.moved xxxxx* diff --git a/tools/testing/selftests/ftrace/Makefile b/tools/testing/selftests/ftrace/Makefile index 0acbeca47225..4e6ed13e7f66 100644 --- a/tools/testing/selftests/ftrace/Makefile +++ b/tools/testing/selftests/ftrace/Makefile @@ -1,7 +1,7 @@ all: TEST_PROGS := ftracetest -TEST_DIRS := test.d/ +TEST_DIRS := test.d include ../lib.mk diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk index 97f1c6742066..50a93f5f13d6 100644 --- a/tools/testing/selftests/lib.mk +++ b/tools/testing/selftests/lib.mk @@ -12,13 +12,10 @@ run_tests: all $(RUN_TESTS) define INSTALL_RULE - @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \ - mkdir -p $(INSTALL_PATH); \ - for TEST_DIR in $(TEST_DIRS); do \ - cp -r $$TEST_DIR $(INSTALL_PATH); \ - done; \ - echo "install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)"; \ - install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES); \ + @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \ + mkdir -p ${INSTALL_PATH}; \ + echo "rsync -a $(TEST_DIRS) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/"; \ + rsync -a $(TEST_DIRS) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/; \ fi endef diff --git a/tools/testing/selftests/membarrier/Makefile b/tools/testing/selftests/membarrier/Makefile index 877a50355d7f..a1a97085847d 100644 --- a/tools/testing/selftests/membarrier/Makefile +++ b/tools/testing/selftests/membarrier/Makefile @@ -1,11 +1,10 @@ CFLAGS += -g -I../../../../usr/include/ -all: - $(CC) $(CFLAGS) membarrier_test.c -o membarrier_test - TEST_PROGS := membarrier_test +all: $(TEST_PROGS) + include ../lib.mk clean: - $(RM) membarrier_test + $(RM) $(TEST_PROGS) diff --git a/tools/testing/selftests/membarrier/membarrier_test.c b/tools/testing/selftests/membarrier/membarrier_test.c index dde312508007..535f0fef4d0b 100644 --- a/tools/testing/selftests/membarrier/membarrier_test.c +++ b/tools/testing/selftests/membarrier/membarrier_test.c @@ -1,9 +1,6 @@ #define _GNU_SOURCE -#define __EXPORTED_HEADERS__ - #include <linux/membarrier.h> -#include <asm-generic/unistd.h> -#include <sys/syscall.h> +#include <syscall.h> #include <stdio.h> #include <errno.h> #include <string.h> diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile index 0e3b41eb85cd..eebac29acbd9 100644 --- 
a/tools/testing/selftests/mqueue/Makefile +++ b/tools/testing/selftests/mqueue/Makefile @@ -1,8 +1,8 @@ -CFLAGS = -O2 +CFLAGS += -O2 +LDLIBS = -lrt -lpthread -lpopt +TEST_PROGS := mq_open_tests mq_perf_tests -all: - $(CC) $(CFLAGS) mq_open_tests.c -o mq_open_tests -lrt - $(CC) $(CFLAGS) -o mq_perf_tests mq_perf_tests.c -lrt -lpthread -lpopt +all: $(TEST_PROGS) include ../lib.mk @@ -11,8 +11,6 @@ override define RUN_TESTS @./mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]" endef -TEST_PROGS := mq_open_tests mq_perf_tests - override define EMIT_TESTS echo "./mq_open_tests /test1 || echo \"selftests: mq_open_tests [FAIL]\"" echo "./mq_perf_tests || echo \"selftests: mq_perf_tests [FAIL]\"" diff --git a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c index d1b647509596..6cae06117b55 100644 --- a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c +++ b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c @@ -25,10 +25,19 @@ #define FIXUP_SECTION ".ex_fixup" +static inline unsigned long __fls(unsigned long x); + #include "word-at-a-time.h" #include "utils.h" +static inline unsigned long __fls(unsigned long x) +{ + int lz; + + asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x)); + return sizeof(unsigned long) - 1 - lz; +} static int page_size; static char *mem_region; diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index a004b4cce99e..770f47adf295 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -1210,6 +1210,10 @@ TEST_F(TRACE_poke, getpid_runs_normally) # define ARCH_REGS struct pt_regs # define SYSCALL_NUM gpr[0] # define SYSCALL_RET gpr[3] +#elif defined(__s390__) +# define ARCH_REGS s390_regs +# define SYSCALL_NUM gprs[2] +# define SYSCALL_RET gprs[2] #else # error "Do not know how to find your architecture's registers and syscalls" #endif @@ -1243,7 +1247,8 @@ void change_syscall(struct __test_metadata *_metadata, ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov); EXPECT_EQ(0, ret); -#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || defined(__powerpc__) +#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || \ + defined(__powerpc__) || defined(__s390__) { regs.SYSCALL_NUM = syscall; } @@ -1281,17 +1286,21 @@ void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee, ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg); EXPECT_EQ(0, ret); + /* Validate and take action on expected syscalls. */ switch (msg) { case 0x1002: /* change getpid to getppid. */ + EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee)); change_syscall(_metadata, tracee, __NR_getppid); break; case 0x1003: /* skip gettid. */ + EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee)); change_syscall(_metadata, tracee, -1); break; case 0x1004: /* do nothing (allow getppid) */ + EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee)); break; default: EXPECT_EQ(0, msg) { @@ -1409,6 +1418,8 @@ TEST_F(TRACE_syscall, syscall_dropped) # define __NR_seccomp 277 # elif defined(__powerpc__) # define __NR_seccomp 358 +# elif defined(__s390__) +# define __NR_seccomp 348 # else # warning "seccomp syscall number unknown for this architecture" # define __NR_seccomp 0xffff @@ -1453,6 +1464,9 @@ TEST(seccomp_syscall) /* Reject insane operation. 
*/ ret = seccomp(-1, 0, &prog); + ASSERT_NE(ENOSYS, errno) { + TH_LOG("Kernel does not support seccomp syscall!"); + } EXPECT_EQ(EINVAL, errno) { TH_LOG("Did not reject crazy op value!"); } @@ -1501,6 +1515,9 @@ TEST(seccomp_syscall_mode_lock) } ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); + ASSERT_NE(ENOSYS, errno) { + TH_LOG("Kernel does not support seccomp syscall!"); + } EXPECT_EQ(0, ret) { TH_LOG("Could not install filter!"); } @@ -1535,6 +1552,9 @@ TEST(TSYNC_first) ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, &prog); + ASSERT_NE(ENOSYS, errno) { + TH_LOG("Kernel does not support seccomp syscall!"); + } EXPECT_EQ(0, ret) { TH_LOG("Could not install initial filter with TSYNC!"); } @@ -1694,6 +1714,9 @@ TEST_F(TSYNC, siblings_fail_prctl) /* Check prctl failure detection by requesting sib 0 diverge. */ ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); + ASSERT_NE(ENOSYS, errno) { + TH_LOG("Kernel does not support seccomp syscall!"); + } ASSERT_EQ(0, ret) { TH_LOG("setting filter failed"); } @@ -1731,6 +1754,9 @@ TEST_F(TSYNC, two_siblings_with_ancestor) } ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); + ASSERT_NE(ENOSYS, errno) { + TH_LOG("Kernel does not support seccomp syscall!"); + } ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); } @@ -1805,6 +1831,9 @@ TEST_F(TSYNC, two_siblings_with_no_filter) ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, &self->apply_prog); + ASSERT_NE(ENOSYS, errno) { + TH_LOG("Kernel does not support seccomp syscall!"); + } ASSERT_EQ(0, ret) { TH_LOG("Could install filter on all threads!"); } @@ -1833,6 +1862,9 @@ TEST_F(TSYNC, two_siblings_with_one_divergence) } ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); + ASSERT_NE(ENOSYS, errno) { + TH_LOG("Kernel does not support seccomp syscall!"); + } ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); } @@ -1890,6 +1922,9 @@ TEST_F(TSYNC, two_siblings_not_under_filter) } ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); + ASSERT_NE(ENOSYS, errno) { + TH_LOG("Kernel does not support seccomp syscall!"); + } ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); } diff --git a/tools/testing/selftests/seccomp/test_harness.h b/tools/testing/selftests/seccomp/test_harness.h index 977a6afc4489..fb2841601f2f 100644 --- a/tools/testing/selftests/seccomp/test_harness.h +++ b/tools/testing/selftests/seccomp/test_harness.h @@ -370,11 +370,8 @@ __typeof__(_expected) __exp = (_expected); \ __typeof__(_seen) __seen = (_seen); \ if (!(__exp _t __seen)) { \ - unsigned long long __exp_print = 0; \ - unsigned long long __seen_print = 0; \ - /* Avoid casting complaints the scariest way we can. 
*/ \ - memcpy(&__exp_print, &__exp, sizeof(__exp)); \ - memcpy(&__seen_print, &__seen, sizeof(__seen)); \ + unsigned long long __exp_print = (unsigned long long)__exp; \ + unsigned long long __seen_print = (unsigned long long)__seen; \ __TH_LOG("Expected %s (%llu) %s %s (%llu)", \ #_expected, __exp_print, #_t, \ #_seen, __seen_print); \ diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index d36fab7d8ebd..3c53cac15de1 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile @@ -1,6 +1,6 @@ # Makefile for vm selftests -CFLAGS = -Wall +CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS) BINARIES = compaction_test BINARIES += hugepage-mmap BINARIES += hugepage-shm @@ -12,8 +12,11 @@ BINARIES += userfaultfd all: $(BINARIES) %: %.c $(CC) $(CFLAGS) -o $@ $^ -lrt -userfaultfd: userfaultfd.c - $(CC) $(CFLAGS) -O2 -o $@ $^ -lpthread +userfaultfd: userfaultfd.c ../../../../usr/include/linux/kernel.h + $(CC) $(CFLAGS) -O2 -o $@ $< -lpthread + +../../../../usr/include/linux/kernel.h: + make -C ../../../.. headers_install TEST_PROGS := run_vmtests TEST_FILES := $(BINARIES) diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c index 2c7cca6f26a4..d77ed41b2094 100644 --- a/tools/testing/selftests/vm/userfaultfd.c +++ b/tools/testing/selftests/vm/userfaultfd.c @@ -64,17 +64,9 @@ #include <sys/syscall.h> #include <sys/ioctl.h> #include <pthread.h> -#include "../../../../include/uapi/linux/userfaultfd.h" - -#ifdef __x86_64__ -#define __NR_userfaultfd 323 -#elif defined(__i386__) -#define __NR_userfaultfd 374 -#elif defined(__powewrpc__) -#define __NR_userfaultfd 364 -#else -#error "missing __NR_userfaultfd definition" -#endif +#include <linux/userfaultfd.h> + +#ifdef __NR_userfaultfd static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size; @@ -430,7 +422,7 @@ static int userfaultfd_stress(void) struct uffdio_register uffdio_register; struct uffdio_api uffdio_api; unsigned long cpu; - int uffd_flags; + int uffd_flags, err; unsigned long userfaults[nr_cpus]; if (posix_memalign(&area, page_size, nr_pages * page_size)) { @@ -473,6 +465,14 @@ static int userfaultfd_stress(void) *area_mutex(area_src, nr) = (pthread_mutex_t) PTHREAD_MUTEX_INITIALIZER; count_verify[nr] = *area_count(area_src, nr) = 1; + /* + * In the transition between 255 to 256, powerpc will + * read out of order in my_bcmp and see both bytes as + * zero, so leave a placeholder below always non-zero + * after the count, to avoid my_bcmp to trigger false + * positives. 
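The userfaultfd selftest changes in this hunk stop hard-coding per-architecture syscall numbers: the test includes the exported <linux/userfaultfd.h>, guards the whole body with #ifdef __NR_userfaultfd, and (further down) provides a fallback main() that prints a skip message when the architecture headers lack the definition. The same guard pattern in isolation, as a minimal sketch:

    #include <stdio.h>
    #include <sys/syscall.h>    /* may or may not define __NR_userfaultfd */

    #ifdef __NR_userfaultfd

    int main(void)
    {
        /* the real test body would go here */
        printf("__NR_userfaultfd is %d on this architecture\n",
               __NR_userfaultfd);
        return 0;
    }

    #else /* __NR_userfaultfd */

    #warning "missing __NR_userfaultfd definition"

    int main(void)
    {
        printf("skip: __NR_userfaultfd is not defined\n");
        return 0;
    }

    #endif /* __NR_userfaultfd */

Skipping with a message and returning 0 keeps older toolchains from turning a missing syscall number into a build or test failure.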
+ */ + *(area_count(area_src, nr) + 1) = 1; } pipefd = malloc(sizeof(int) * nr_cpus * 2); @@ -499,6 +499,7 @@ static int userfaultfd_stress(void) pthread_attr_init(&attr); pthread_attr_setstacksize(&attr, 16*1024*1024); + err = 0; while (bounces--) { unsigned long expected_ioctls; @@ -579,20 +580,13 @@ static int userfaultfd_stress(void) /* verification */ if (bounces & BOUNCE_VERIFY) { for (nr = 0; nr < nr_pages; nr++) { - if (my_bcmp(area_dst, - area_dst + nr * page_size, - sizeof(pthread_mutex_t))) { - fprintf(stderr, - "error mutex 2 %lu\n", - nr); - bounces = 0; - } if (*area_count(area_dst, nr) != count_verify[nr]) { fprintf(stderr, "error area_count %Lu %Lu %lu\n", *area_count(area_src, nr), count_verify[nr], nr); + err = 1; bounces = 0; } } @@ -609,7 +603,7 @@ static int userfaultfd_stress(void) printf("\n"); } - return 0; + return err; } int main(int argc, char **argv) @@ -618,8 +612,8 @@ int main(int argc, char **argv) fprintf(stderr, "Usage: <MiB> <bounces>\n"), exit(1); nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); page_size = sysconf(_SC_PAGE_SIZE); - if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) > - page_size) + if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) * 2 + > page_size) fprintf(stderr, "Impossible to run this test\n"), exit(2); nr_pages_per_cpu = atol(argv[1]) * 1024*1024 / page_size / nr_cpus; @@ -637,3 +631,15 @@ int main(int argc, char **argv) nr_pages, nr_pages_per_cpu); return userfaultfd_stress(); } + +#else /* __NR_userfaultfd */ + +#warning "missing __NR_userfaultfd definition" + +int main(void) +{ + printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n"); + return 0; +} + +#endif /* __NR_userfaultfd */ diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c index 9a43a59a9bb4..421c607a8856 100644 --- a/tools/testing/selftests/x86/entry_from_vm86.c +++ b/tools/testing/selftests/x86/entry_from_vm86.c @@ -116,8 +116,9 @@ static bool do_test(struct vm86plus_struct *v86, unsigned long eip, v86->regs.eip = eip; ret = vm86(VM86_ENTER, v86); - if (ret == -1 && errno == ENOSYS) { - printf("[SKIP]\tvm86 not supported\n"); + if (ret == -1 && (errno == ENOSYS || errno == EPERM)) { + printf("[SKIP]\tvm86 %s\n", + errno == ENOSYS ? "not supported" : "not allowed"); return false; } diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh index 20de9a761269..683a292e3290 100755 --- a/tools/testing/selftests/zram/zram.sh +++ b/tools/testing/selftests/zram/zram.sh @@ -1,15 +1,7 @@ #!/bin/bash TCID="zram.sh" -check_prereqs() -{ - local msg="skip all tests:" - - if [ $UID != 0 ]; then - echo $msg must be run as root >&2 - exit 0 - fi -} +. ./zram_lib.sh run_zram () { echo "--------------------" diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh index 424e68ed1487..f6a9c73e7a44 100755 --- a/tools/testing/selftests/zram/zram_lib.sh +++ b/tools/testing/selftests/zram/zram_lib.sh @@ -23,8 +23,9 @@ trap INT check_prereqs() { local msg="skip all tests:" + local uid=$(id -u) - if [ $UID != 0 ]; then + if [ $uid -ne 0 ]; then echo $msg must be run as root >&2 exit 0 fi diff --git a/tools/virtio/Makefile b/tools/virtio/Makefile index 505ad51b3b51..39c89a5ea990 100644 --- a/tools/virtio/Makefile +++ b/tools/virtio/Makefile @@ -6,7 +6,7 @@ vringh_test: vringh_test.o vringh.o virtio_ring.o CFLAGS += -g -O2 -Werror -Wall -I. 
-I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE vpath %.c ../../drivers/virtio ../../drivers/vhost mod: - ${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test + ${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V} .PHONY: all test mod clean clean: ${RM} *.o vringh_test virtio_test vhost_test/*.o vhost_test/.*.cmd \ diff --git a/tools/virtio/asm/barrier.h b/tools/virtio/asm/barrier.h index aff61e13306c..26b7926bda88 100644 --- a/tools/virtio/asm/barrier.h +++ b/tools/virtio/asm/barrier.h @@ -3,6 +3,8 @@ #define mb() __sync_synchronize() #define smp_mb() mb() +# define dma_rmb() barrier() +# define dma_wmb() barrier() # define smp_rmb() barrier() # define smp_wmb() barrier() /* Weak barriers should be used. If not - it's a bug */ diff --git a/tools/virtio/linux/export.h b/tools/virtio/linux/export.h new file mode 100644 index 000000000000..416875e29254 --- /dev/null +++ b/tools/virtio/linux/export.h @@ -0,0 +1,3 @@ +#define EXPORT_SYMBOL_GPL(sym) extern typeof(sym) sym +#define EXPORT_SYMBOL(sym) extern typeof(sym) sym + diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h index 1e8ce6979c1e..0a3da64638ce 100644 --- a/tools/virtio/linux/kernel.h +++ b/tools/virtio/linux/kernel.h @@ -22,6 +22,7 @@ typedef unsigned long long dma_addr_t; typedef size_t __kernel_size_t; +typedef unsigned int __wsum; struct page { unsigned long long dummy; @@ -47,6 +48,13 @@ static inline void *kmalloc(size_t s, gfp_t gfp) return __kmalloc_fake; return malloc(s); } +static inline void *kzalloc(size_t s, gfp_t gfp) +{ + void *p = kmalloc(s, gfp); + + memset(p, 0, s); + return p; +} static inline void kfree(void *p) { diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 76e38d231e99..b9d3a32cbc04 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -137,6 +137,8 @@ bool kvm_timer_should_fire(struct kvm_vcpu *vcpu) void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + bool phys_active; + int ret; /* * We're about to run this vcpu again, so there is no need to @@ -151,6 +153,23 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) */ if (kvm_timer_should_fire(vcpu)) kvm_timer_inject_irq(vcpu); + + /* + * We keep track of whether the edge-triggered interrupt has been + * signalled to the vgic/guest, and if so, we mask the interrupt and + * the physical distributor to prevent the timer from raising a + * physical interrupt whenever we run a guest, preventing forward + * VCPU progress. + */ + if (kvm_vgic_get_phys_irq_active(timer->map)) + phys_active = true; + else + phys_active = false; + + ret = irq_set_irqchip_state(timer->map->irq, + IRQCHIP_STATE_ACTIVE, + phys_active); + WARN_ON(ret); } /** @@ -200,6 +219,14 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, timer->irq = irq; /* + * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8 + * and to 0 for ARMv7. We provide an implementation that always + * resets the timer to be disabled and unmasked and is compliant with + * the ARMv7 architecture. + */ + timer->cntv_ctl = 0; + + /* * Tell the VGIC that the virtual interrupt is tied to a * physical interrupt. We do that once per VCPU. 
*/ diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c index afbf925b00f4..7dd5d62f10a1 100644 --- a/virt/kvm/arm/vgic-v3.c +++ b/virt/kvm/arm/vgic-v3.c @@ -288,7 +288,7 @@ int vgic_v3_probe(struct device_node *vgic_node, vgic->vctrl_base = NULL; vgic->type = VGIC_V3; - vgic->max_gic_vcpus = KVM_MAX_VCPUS; + vgic->max_gic_vcpus = VGIC_V3_MAX_CPUS; kvm_info("%s@%llx IRQ%d\n", vgic_node->name, vcpu_res.start, vgic->maint_irq); diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 9eb489a2c94c..66c66165e712 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -531,6 +531,34 @@ bool vgic_handle_set_pending_reg(struct kvm *kvm, return false; } +/* + * If a mapped interrupt's state has been modified by the guest such that it + * is no longer active or pending, without it have gone through the sync path, + * then the map->active field must be cleared so the interrupt can be taken + * again. + */ +static void vgic_handle_clear_mapped_irq(struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + struct list_head *root; + struct irq_phys_map_entry *entry; + struct irq_phys_map *map; + + rcu_read_lock(); + + /* Check for PPIs */ + root = &vgic_cpu->irq_phys_map_list; + list_for_each_entry_rcu(entry, root, entry) { + map = &entry->map; + + if (!vgic_dist_irq_is_pending(vcpu, map->virt_irq) && + !vgic_irq_is_active(vcpu, map->virt_irq)) + map->active = false; + } + + rcu_read_unlock(); +} + bool vgic_handle_clear_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, phys_addr_t offset, int vcpu_id) @@ -561,6 +589,7 @@ bool vgic_handle_clear_pending_reg(struct kvm *kvm, vcpu_id, offset); vgic_reg_access(mmio, reg, offset, mode); + vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id)); vgic_update_state(kvm); return true; } @@ -598,6 +627,7 @@ bool vgic_handle_clear_active_reg(struct kvm *kvm, ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT); if (mmio->is_write) { + vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id)); vgic_update_state(kvm); return true; } @@ -982,6 +1012,12 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu) pend_percpu = vcpu->arch.vgic_cpu.pending_percpu; pend_shared = vcpu->arch.vgic_cpu.pending_shared; + if (!dist->enabled) { + bitmap_zero(pend_percpu, VGIC_NR_PRIVATE_IRQS); + bitmap_zero(pend_shared, nr_shared); + return 0; + } + pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id); enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id); bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS); @@ -1009,11 +1045,6 @@ void vgic_update_state(struct kvm *kvm) struct kvm_vcpu *vcpu; int c; - if (!dist->enabled) { - set_bit(0, dist->irq_pending_on_cpu); - return; - } - kvm_for_each_vcpu(c, vcpu, kvm) { if (compute_pending_for_cpu(vcpu)) set_bit(c, dist->irq_pending_on_cpu); @@ -1092,6 +1123,15 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu) struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr); + /* + * We must transfer the pending state back to the distributor before + * retiring the LR, otherwise we may loose edge-triggered interrupts. 
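vgic_retire_lr() above now pushes any still-pending state in a list register back into the distributor before clearing the LR, so an edge-triggered interrupt that was latched only in the LR is not silently dropped. A toy model of that hand-off (the bit names and the dist_pending flag are illustrative, not the vgic data structures):

    #include <stdbool.h>
    #include <stdio.h>

    #define LR_STATE_PENDING    (1u << 0)
    #define LR_STATE_ACTIVE     (1u << 1)

    struct lr {
        unsigned int state;
    };

    /*
     * Before recycling a list register, transfer a latched-but-undelivered
     * pending bit back to distributor bookkeeping; otherwise the edge is lost.
     */
    static void retire_lr(struct lr *vlr, bool *dist_pending)
    {
        if (vlr->state & LR_STATE_PENDING)
            *dist_pending = true;

        vlr->state = 0;
    }

    int main(void)
    {
        struct lr vlr = { .state = LR_STATE_PENDING };
        bool dist_pending = false;

        retire_lr(&vlr, &dist_pending);
        printf("dist_pending=%d lr.state=%u\n", dist_pending, vlr.state);
        return 0;
    }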
+ */ + if (vlr.state & LR_STATE_PENDING) { + vgic_dist_irq_set_pending(vcpu, irq); + vlr.hwirq = 0; + } + vlr.state = 0; vgic_set_lr(vcpu, lr_nr, vlr); clear_bit(lr_nr, vgic_cpu->lr_used); @@ -1132,7 +1172,8 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq, kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state); vgic_irq_clear_active(vcpu, irq); vgic_update_state(vcpu->kvm); - } else if (vgic_dist_irq_is_pending(vcpu, irq)) { + } else { + WARN_ON(!vgic_dist_irq_is_pending(vcpu, irq)); vlr.state |= LR_STATE_PENDING; kvm_debug("Set pending: 0x%x\n", vlr.state); } @@ -1144,26 +1185,11 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq, struct irq_phys_map *map; map = vgic_irq_map_search(vcpu, irq); - /* - * If we have a mapping, and the virtual interrupt is - * being injected, then we must set the state to - * active in the physical world. Otherwise the - * physical interrupt will fire and the guest will - * exit before processing the virtual interrupt. - */ if (map) { - int ret; - - BUG_ON(!map->active); vlr.hwirq = map->phys_irq; vlr.state |= LR_HW; vlr.state &= ~LR_EOI_INT; - ret = irq_set_irqchip_state(map->irq, - IRQCHIP_STATE_ACTIVE, - true); - WARN_ON(ret); - /* * Make sure we're not going to sample this * again, as a HW-backed interrupt cannot be @@ -1411,7 +1437,7 @@ static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr) return 0; map = vgic_irq_map_search(vcpu, vlr.irq); - BUG_ON(!map || !map->active); + BUG_ON(!map); ret = irq_get_irqchip_state(map->irq, IRQCHIP_STATE_ACTIVE, @@ -1419,13 +1445,8 @@ static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr) WARN_ON(ret); - if (map->active) { - ret = irq_set_irqchip_state(map->irq, - IRQCHIP_STATE_ACTIVE, - false); - WARN_ON(ret); + if (map->active) return 0; - } return 1; } @@ -1597,8 +1618,12 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid, } else { if (level_triggered) { vgic_dist_irq_clear_level(vcpu, irq_num); - if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) + if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) { vgic_dist_irq_clear_pending(vcpu, irq_num); + vgic_cpu_irq_clear(vcpu, irq_num); + if (!compute_pending_for_cpu(vcpu)) + clear_bit(cpuid, dist->irq_pending_on_cpu); + } } ret = false; diff --git a/virt/kvm/coalesced_mmio.h b/virt/kvm/coalesced_mmio.h index 5cbf190d238c..6bca74ca5331 100644 --- a/virt/kvm/coalesced_mmio.h +++ b/virt/kvm/coalesced_mmio.h @@ -24,9 +24,9 @@ struct kvm_coalesced_mmio_dev { int kvm_coalesced_mmio_init(struct kvm *kvm); void kvm_coalesced_mmio_free(struct kvm *kvm); int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm, - struct kvm_coalesced_mmio_zone *zone); + struct kvm_coalesced_mmio_zone *zone); int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, - struct kvm_coalesced_mmio_zone *zone); + struct kvm_coalesced_mmio_zone *zone); #else diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index 9ff4193dfa49..79db45336e3a 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -771,40 +771,14 @@ static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags) return KVM_MMIO_BUS; } -static int -kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) +static int kvm_assign_ioeventfd_idx(struct kvm *kvm, + enum kvm_bus bus_idx, + struct kvm_ioeventfd *args) { - enum kvm_bus bus_idx; - struct _ioeventfd *p; - struct eventfd_ctx *eventfd; - int ret; - - bus_idx = ioeventfd_bus_from_flags(args->flags); - /* must be natural-word sized, or 0 to ignore length */ - switch (args->len) { - case 0: - case 1: - 
case 2: - case 4: - case 8: - break; - default: - return -EINVAL; - } - - /* check for range overflow */ - if (args->addr + args->len < args->addr) - return -EINVAL; - /* check for extra flags that we don't understand */ - if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK) - return -EINVAL; - - /* ioeventfd with no length can't be combined with DATAMATCH */ - if (!args->len && - args->flags & (KVM_IOEVENTFD_FLAG_PIO | - KVM_IOEVENTFD_FLAG_DATAMATCH)) - return -EINVAL; + struct eventfd_ctx *eventfd; + struct _ioeventfd *p; + int ret; eventfd = eventfd_ctx_fdget(args->fd); if (IS_ERR(eventfd)) @@ -843,16 +817,6 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) if (ret < 0) goto unlock_fail; - /* When length is ignored, MMIO is also put on a separate bus, for - * faster lookups. - */ - if (!args->len && !(args->flags & KVM_IOEVENTFD_FLAG_PIO)) { - ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS, - p->addr, 0, &p->dev); - if (ret < 0) - goto register_fail; - } - kvm->buses[bus_idx]->ioeventfd_count++; list_add_tail(&p->list, &kvm->ioeventfds); @@ -860,8 +824,6 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) return 0; -register_fail: - kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); unlock_fail: mutex_unlock(&kvm->slots_lock); @@ -873,14 +835,13 @@ fail: } static int -kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) +kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx, + struct kvm_ioeventfd *args) { - enum kvm_bus bus_idx; struct _ioeventfd *p, *tmp; struct eventfd_ctx *eventfd; int ret = -ENOENT; - bus_idx = ioeventfd_bus_from_flags(args->flags); eventfd = eventfd_ctx_fdget(args->fd); if (IS_ERR(eventfd)) return PTR_ERR(eventfd); @@ -901,10 +862,6 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) continue; kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); - if (!p->length) { - kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS, - &p->dev); - } kvm->buses[bus_idx]->ioeventfd_count--; ioeventfd_release(p); ret = 0; @@ -918,6 +875,71 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) return ret; } +static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) +{ + enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags); + int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args); + + if (!args->len && bus_idx == KVM_MMIO_BUS) + kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args); + + return ret; +} + +static int +kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) +{ + enum kvm_bus bus_idx; + int ret; + + bus_idx = ioeventfd_bus_from_flags(args->flags); + /* must be natural-word sized, or 0 to ignore length */ + switch (args->len) { + case 0: + case 1: + case 2: + case 4: + case 8: + break; + default: + return -EINVAL; + } + + /* check for range overflow */ + if (args->addr + args->len < args->addr) + return -EINVAL; + + /* check for extra flags that we don't understand */ + if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK) + return -EINVAL; + + /* ioeventfd with no length can't be combined with DATAMATCH */ + if (!args->len && + args->flags & (KVM_IOEVENTFD_FLAG_PIO | + KVM_IOEVENTFD_FLAG_DATAMATCH)) + return -EINVAL; + + ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args); + if (ret) + goto fail; + + /* When length is ignored, MMIO is also put on a separate bus, for + * faster lookups. 
+ */ + if (!args->len && bus_idx == KVM_MMIO_BUS) { + ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args); + if (ret < 0) + goto fast_fail; + } + + return 0; + +fast_fail: + kvm_deassign_ioeventfd_idx(kvm, bus_idx, args); +fail: + return ret; +} + int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) { diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a25a73147f71..8db1d9361993 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -66,8 +66,8 @@ MODULE_AUTHOR("Qumranet"); MODULE_LICENSE("GPL"); -/* halt polling only reduces halt latency by 5-7 us, 500us is enough */ -static unsigned int halt_poll_ns = 500000; +/* Architectures should define their poll value according to the halt latency */ +static unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT; module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR); /* Default doubles per-vcpu halt_poll_ns. */ @@ -2004,6 +2004,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) if (vcpu->halt_poll_ns) { ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); + ++vcpu->stat.halt_attempted_poll; do { /* * This sets KVM_REQ_UNHALT if an interrupt @@ -2043,7 +2044,8 @@ out: else if (vcpu->halt_poll_ns < halt_poll_ns && block_ns < halt_poll_ns) grow_halt_poll_ns(vcpu); - } + } else + vcpu->halt_poll_ns = 0; trace_kvm_vcpu_wakeup(block_ns, waited); } @@ -3156,10 +3158,25 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus) static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, const struct kvm_io_range *r2) { - if (r1->addr < r2->addr) + gpa_t addr1 = r1->addr; + gpa_t addr2 = r2->addr; + + if (addr1 < addr2) return -1; - if (r1->addr + r1->len > r2->addr + r2->len) + + /* If r2->len == 0, match the exact address. If r2->len != 0, + * accept any overlapping write. Any order is acceptable for + * overlapping ranges, because kvm_io_bus_get_first_dev ensures + * we process all of them. + */ + if (r2->len) { + addr1 += r1->len; + addr2 += r2->len; + } + + if (addr1 > addr2) return 1; + return 0; } |
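A note on the vgic.c change above that moves the distributor-enable check into compute_pending_for_cpu(): instead of faking a pending bit on CPU 0 while the distributor is disabled, the per-CPU pending bitmaps are simply cleared. The sketch below is a minimal userspace model of that computation, not the kernel code; the struct layout, single-word "bitmaps" and names (fake_dist, compute_pending_for_cpu) are invented for illustration.

/*
 * Minimal userspace model of the per-CPU pending computation after this
 * patch; it is NOT the kernel implementation.  Bitmaps are collapsed to
 * single words and struct fake_dist is an invented stand-in.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_dist {
	bool enabled;			/* distributor enable bit (GICD_CTLR) */
	unsigned long pending_private;	/* SGIs/PPIs, one bit per IRQ */
	unsigned long enabled_private;
	unsigned long pending_shared;	/* SPIs, one bit per IRQ */
	unsigned long enabled_shared;
};

/* Returns true when the vCPU has at least one deliverable interrupt. */
static bool compute_pending_for_cpu(const struct fake_dist *dist,
				    unsigned long *pend_private,
				    unsigned long *pend_shared)
{
	if (!dist->enabled) {
		/* Distributor off: nothing is deliverable, clear both maps. */
		*pend_private = 0;
		*pend_shared = 0;
		return false;
	}

	*pend_private = dist->pending_private & dist->enabled_private;
	*pend_shared  = dist->pending_shared  & dist->enabled_shared;
	return *pend_private || *pend_shared;
}

int main(void)
{
	struct fake_dist dist = {
		.enabled = false,
		.pending_private = 1UL << 27,	/* e.g. a timer PPI */
		.enabled_private = 1UL << 27,
	};
	unsigned long priv, shared;

	printf("distributor off: pending=%d\n",
	       (int)compute_pending_for_cpu(&dist, &priv, &shared));
	dist.enabled = true;
	printf("distributor on:  pending=%d\n",
	       (int)compute_pending_for_cpu(&dist, &priv, &shared));
	return 0;
}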
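The vgic_retire_lr() hunk above adds one ordering rule: if a list register still holds a pending (possibly edge-triggered) interrupt, that pending state is pushed back to the distributor before the LR is cleared, so the edge is not lost. A minimal sketch of that ordering follows; struct lr, the LR_STATE_* flags and dist_pending[] are illustrative stand-ins, not the kernel's types.

/*
 * Sketch of the "save pending state before clearing the LR" ordering;
 * struct lr, LR_STATE_PENDING and dist_pending[] are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define LR_STATE_PENDING (1u << 0)
#define LR_STATE_ACTIVE  (1u << 1)
#define LR_HW            (1u << 2)

struct lr {
	unsigned int irq;	/* virtual interrupt number */
	unsigned int hwirq;	/* mapped physical interrupt, if any */
	unsigned int state;
};

static bool dist_pending[64];	/* distributor-side pending bits */

static void retire_lr(struct lr *vlr)
{
	/*
	 * If the edge is still latched only in the LR, push it back to the
	 * distributor first; otherwise clearing the LR would lose it.
	 */
	if (vlr->state & LR_STATE_PENDING) {
		dist_pending[vlr->irq] = true;
		vlr->hwirq = 0;	/* drop the HW mapping along with the LR */
	}

	vlr->state = 0;
}

int main(void)
{
	struct lr vlr = { .irq = 42, .state = LR_STATE_PENDING };

	retire_lr(&vlr);
	printf("irq 42 pending in distributor: %d\n", dist_pending[42]);
	return 0;
}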
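In the eventfd.c refactor above, kvm_assign_ioeventfd() keeps all argument validation and delegates the per-bus registration to kvm_assign_ioeventfd_idx(), which it can then call a second time for KVM_FAST_MMIO_BUS. The validation itself is small: the length must be 0, 1, 2, 4 or 8, and addr + len must not wrap. The standalone sketch below reproduces only those checks; struct ioeventfd_args and validate_ioeventfd() are invented names, not kernel API.

/*
 * Standalone sketch of the ioeventfd argument validation; struct
 * ioeventfd_args and validate_ioeventfd() are illustrative names only.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct ioeventfd_args {
	uint64_t addr;
	uint32_t len;
};

static int validate_ioeventfd(const struct ioeventfd_args *args)
{
	/* Must be natural-word sized, or 0 to ignore the length. */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* Unsigned wrap-around means the range overflows the address space. */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	return 0;
}

int main(void)
{
	struct ioeventfd_args ok   = { .addr = 0x1000, .len = 4 };
	struct ioeventfd_args bad  = { .addr = 0x1000, .len = 3 };
	struct ioeventfd_args wrap = { .addr = UINT64_MAX - 1, .len = 8 };

	printf("ok:   %d\n", validate_ioeventfd(&ok));	 /* 0 */
	printf("bad:  %d\n", validate_ioeventfd(&bad));	 /* -EINVAL */
	printf("wrap: %d\n", validate_ioeventfd(&wrap)); /* -EINVAL */
	return 0;
}

Keeping the checks in the wrapper is what lets the second registration on the fast MMIO bus reuse kvm_assign_ioeventfd_idx() unchanged, with the goto-based unwind removing the primary registration if the fast-bus one fails.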
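Finally, the kvm_io_bus_cmp() change in kvm_main.c gives zero-length search keys exact-address semantics, while a key with a length still compares equal to a device whose range lies inside the accessed range. The comparator below mirrors that logic outside the kernel; struct io_range and range_cmp() are illustrative names.

/*
 * Userspace sketch of the range comparison: r1 is the registered device
 * range, r2 the search key.  struct io_range and range_cmp() are
 * illustrative names, not the kernel's.
 */
#include <stdint.h>
#include <stdio.h>

struct io_range {
	uint64_t addr;
	uint32_t len;
};

static int range_cmp(const struct io_range *r1, const struct io_range *r2)
{
	uint64_t addr1 = r1->addr;
	uint64_t addr2 = r2->addr;

	if (addr1 < addr2)
		return -1;

	/*
	 * A zero-length key matches only a device registered at exactly
	 * this address.  A non-zero-length key compares equal when the
	 * device range lies within [r2->addr, r2->addr + r2->len).
	 */
	if (r2->len) {
		addr1 += r1->len;
		addr2 += r2->len;
	}

	if (addr1 > addr2)
		return 1;

	return 0;
}

int main(void)
{
	struct io_range dev       = { .addr = 0x1000, .len = 4 };	/* registered device */
	struct io_range key_exact = { .addr = 0x1000, .len = 0 };
	struct io_range key_off   = { .addr = 0x1002, .len = 0 };
	struct io_range key_write = { .addr = 0x1000, .len = 8 };

	printf("len 0, exact start: %d\n", range_cmp(&dev, &key_exact)); /* 0  */
	printf("len 0, offset +2:   %d\n", range_cmp(&dev, &key_off));   /* -1 */
	printf("len 8, covering:    %d\n", range_cmp(&dev, &key_write)); /* 0  */
	return 0;
}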