From b4d2ea2af95cb77e2f320e24da526280d4aa2f6b Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 8 May 2017 10:48:21 +0200 Subject: Revert "pinctrl: generic: Add bi-directional and output-enable" This reverts commit 8c58f1a7a4b6d1d723bf25fef9d842d5a11200d0. It turns out that applying these generic properties was premature: the properties used in the driver using this are of unclear electrical nature and the subject need to be discussed. Signed-off-by: Linus Walleij --- Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt | 2 -- 1 file changed, 2 deletions(-) (limited to 'Documentation') diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt index 71a3c134af1b..f01d154090da 100644 --- a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt +++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt @@ -247,7 +247,6 @@ bias-bus-hold - latch weakly bias-pull-up - pull up the pin bias-pull-down - pull down the pin bias-pull-pin-default - use pin-default pull state -bi-directional - pin supports simultaneous input/output operations drive-push-pull - drive actively high and low drive-open-drain - drive with open drain drive-open-source - drive with open source @@ -260,7 +259,6 @@ input-debounce - debounce mode with debound time X power-source - select between different power supplies low-power-enable - enable low power mode low-power-disable - disable low power mode -output-enable - enable output on pin regardless of output value output-low - set the pin to output mode with low level output-high - set the pin to output mode with high level slew-rate - set the slew rate -- cgit v1.2.3 From a79e7df97592b2326be81d5dae286bdb5c529a01 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 22 May 2017 16:41:24 +0200 Subject: ALSA: hda - Update the list of quirk models I've forgotten to sync the documentation with the actually available options for some time. Now all updated. 
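For context, the model names documented in models.rst are the strings accepted by the driver's "model" module option. A purely illustrative modprobe entry (the file name and the chosen model below are examples, not part of this patch) could look like:

    # /etc/modprobe.d/hda.conf (illustrative)
    options snd-hda-intel model=dell-headset-multi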
Signed-off-by: Takashi Iwai --- Documentation/sound/hd-audio/models.rst | 114 ++++++++++++++++++-------------- 1 file changed, 65 insertions(+), 49 deletions(-) (limited to 'Documentation') diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst index 5338673c88d9..773d2bfacc6c 100644 --- a/Documentation/sound/hd-audio/models.rst +++ b/Documentation/sound/hd-audio/models.rst @@ -16,6 +16,8 @@ ALC880 6-jack in back, 2-jack in front 6stack-digout 6-jack with a SPDIF out +6stack-automute + 6-jack with headphone jack detection ALC260 ====== @@ -62,6 +64,8 @@ lenovo-dock Enables docking station I/O for some Lenovos hp-gpio-led GPIO LED support on HP laptops +hp-dock-gpio-mic1-led + HP dock with mic LED support dell-headset-multi Headset jack, which can also be used as mic-in dell-headset-dock @@ -72,6 +76,12 @@ alc283-sense-combo Combo jack sensing on ALC283 tpt440-dock Pin configs for Lenovo Thinkpad Dock support +tpt440 + Lenovo Thinkpad T440s setup +tpt460 + Lenovo Thinkpad T460/560 setup +dual-codecs + Lenovo laptops with dual codecs ALC66x/67x/892 ============== @@ -97,6 +107,8 @@ inv-dmic Inverted internal mic workaround dell-headset-multi Headset jack, which can also be used as mic-in +dual-codecs + Lenovo laptops with dual codecs ALC680 ====== @@ -114,6 +126,8 @@ inv-dmic Inverted internal mic workaround no-primary-hp VAIO Z/VGC-LN51JGB workaround (for fixed speaker DAC) +dual-codecs + ALC1220 dual codecs for Gaming mobos ALC861/660 ========== @@ -206,65 +220,47 @@ auto Conexant 5045 ============= -laptop-hpsense - Laptop with HP sense (old model laptop) -laptop-micsense - Laptop with Mic sense (old model fujitsu) -laptop-hpmicsense - Laptop with HP and Mic senses -benq - Benq R55E -laptop-hp530 - HP 530 laptop -test - for testing/debugging purpose, almost all controls can be - adjusted. Appearing only when compiled with $CONFIG_SND_DEBUG=y +cap-mix-amp + Fix max input level on mixer widget +toshiba-p105 + Toshiba P105 quirk +hp-530 + HP 530 quirk Conexant 5047 ============= -laptop - Basic Laptop config -laptop-hp - Laptop config for some HP models (subdevice 30A5) -laptop-eapd - Laptop config with EAPD support -test - for testing/debugging purpose, almost all controls can be - adjusted. 
Appearing only when compiled with $CONFIG_SND_DEBUG=y +cap-mix-amp + Fix max input level on mixer widget Conexant 5051 ============= -laptop - Basic Laptop config (default) -hp - HP Spartan laptop -hp-dv6736 - HP dv6736 -hp-f700 - HP Compaq Presario F700 -ideapad - Lenovo IdeaPad laptop -toshiba - Toshiba Satellite M300 +lenovo-x200 + Lenovo X200 quirk Conexant 5066 ============= -laptop - Basic Laptop config (default) -hp-laptop - HP laptops, e g G60 -asus - Asus K52JU, Lenovo G560 -dell-laptop - Dell laptops -dell-vostro - Dell Vostro -olpc-xo-1_5 - OLPC XO 1.5 -ideapad - Lenovo IdeaPad U150 +stereo-dmic + Workaround for inverted stereo digital mic +gpio1 + Enable GPIO1 pin +headphone-mic-pin + Enable headphone mic NID 0x18 without detection +tp410 + Thinkpad T400 & co quirks thinkpad - Lenovo Thinkpad + Thinkpad mute/mic LED quirk +lemote-a1004 + Lemote A1004 quirk +lemote-a1205 + Lemote A1205 quirk +olpc-xo + OLPC XO quirk +mute-led-eapd + Mute LED control via EAPD +hp-dock + HP dock support +mute-led-gpio + Mute LED control via GPIO STAC9200 ======== @@ -444,6 +440,8 @@ dell-eq Dell desktops/laptops alienware Alienware M17x +asus-mobo + Pin configs for ASUS mobo with 5.1/SPDIF out auto BIOS setup (default) @@ -477,6 +475,8 @@ hp-envy-ts-bass Pin fixup for HP Envy TS bass speaker (NID 0x10) hp-bnb13-eq Hardware equalizer setup for HP laptops +hp-envy-ts-bass + HP Envy TS bass support auto BIOS setup (default) @@ -496,10 +496,22 @@ auto Cirrus Logic CS4206/4207 ======================== +mbp53 + MacBook Pro 5,3 mbp55 MacBook Pro 5,5 imac27 IMac 27 Inch +imac27_122 + iMac 12,2 +apple + Generic Apple quirk +mbp101 + MacBookPro 10,1 +mbp81 + MacBookPro 8,1 +mba42 + MacBookAir 4,2 auto BIOS setup (default) @@ -509,6 +521,10 @@ mba6 MacBook Air 6,1 and 6,2 gpio0 Enable GPIO 0 amp +mbp11 + MacBookPro 11,2 +macmini + MacMini 7,1 auto BIOS setup (default) -- cgit v1.2.3 From aa3d4409b664813ceb86a24bd09458cdd29cbb8a Mon Sep 17 00:00:00 2001 From: Martin Kepplinger Date: Mon, 22 May 2017 17:19:45 -0700 Subject: Input: edt-ft5x06 - increase allowed data range for threshold parameter The datasheet and application note does not mention an allowed range for the M09_REGISTER_THRESHOLD parameter. One of our customers needs to set lower values than 20 and they seem to work just fine on EDT EP0xx0M09 with T5x06 touch. So, lacking a known lower limit, we increase the range for thresholds, and set the lower limit to 0. The documentation is updated accordingly. Signed-off-by: Schoefegger Stefan Signed-off-by: Manfred Schlaegl Signed-off-by: Martin Kepplinger Acked-by: Rob Herring Signed-off-by: Dmitry Torokhov --- Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt | 2 +- Documentation/input/devices/edt-ft5x06.rst | 2 +- drivers/input/touchscreen/edt-ft5x06.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'Documentation') diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt index 6db22103e2dd..025cf8c9324a 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt +++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt @@ -36,7 +36,7 @@ Optional properties: control gpios - threshold: allows setting the "click"-threshold in the range - from 20 to 80. + from 0 to 80. - gain: allows setting the sensitivity in the range from 0 to 31. 
Note that lower values indicate higher diff --git a/Documentation/input/devices/edt-ft5x06.rst b/Documentation/input/devices/edt-ft5x06.rst index 2032f0b7a8fa..1ccc94b192b7 100644 --- a/Documentation/input/devices/edt-ft5x06.rst +++ b/Documentation/input/devices/edt-ft5x06.rst @@ -15,7 +15,7 @@ It has been tested with the following devices: The driver allows configuration of the touch screen via a set of sysfs files: /sys/class/input/eventX/device/device/threshold: - allows setting the "click"-threshold in the range from 20 to 80. + allows setting the "click"-threshold in the range from 0 to 80. /sys/class/input/eventX/device/device/gain: allows setting the sensitivity in the range from 0 to 31. Note that diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index 8cf8d8d5d4ef..f872817e81e4 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -471,7 +471,7 @@ static EDT_ATTR(gain, S_IWUSR | S_IRUGO, WORK_REGISTER_GAIN, static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET, M09_REGISTER_OFFSET, 0, 31); static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD, - M09_REGISTER_THRESHOLD, 20, 80); + M09_REGISTER_THRESHOLD, 0, 80); static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE, NO_REGISTER, 3, 14); -- cgit v1.2.3 From c477ebe21fabe0010a2ed324ce3a1762c757d867 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Sat, 6 May 2017 11:41:30 +0200 Subject: mmc: dt: pwrseq-simple: Invent power-off-delay-us During power off, after the GPIO pin has been asserted, some devices like the Wifi chip from TI, Wl18xx, needs a delay before the host continues with clock gating and turning off regulators as to follow a graceful shutdown sequence. Therefore invent an optional power-off-delay-us DT binding for mmc-pwrseq-simple, to allow us to support this constraint. Cc: devicetree@vger.kernel.org Cc: Rob Herring Cc: linux-mmc@vger.kernel.org Signed-off-by: Ulf Hansson Acked-by: Arnd Bergmann --- Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt | 2 ++ 1 file changed, 2 insertions(+) (limited to 'Documentation') diff --git a/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt index e25436861867..9029b45b8a22 100644 --- a/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt +++ b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt @@ -18,6 +18,8 @@ Optional properties: "ext_clock" (External clock provided to the card). - post-power-on-delay-ms : Delay in ms after powering the card and de-asserting the reset-gpios (if any) +- power-off-delay-us : Delay in us after asserting the reset-gpios (if any) + during power off of the card. Example: -- cgit v1.2.3 From f74ac688c981138c914f9afba50b646146e35585 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Mon, 24 Apr 2017 22:40:22 +0200 Subject: mfd: dts: hi655x: Add clock binding for the pmic The hi655x PMIC provides the regulators but also a clock. The latter is missing in the definition, so extend the documentation to include this as well. 
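The hunk below adds #clock-cells = <0> to the PMIC node so that other nodes can consume its clock output. For illustration only, a hypothetical consumer node (the node name and clock-names value are not taken from this patch) would reference the clock with an empty specifier:

    /* illustrative consumer of the hi655x clock output */
    wifi_pwrseq: wifi-pwrseq {
            compatible = "mmc-pwrseq-simple";
            clocks = <&pmic>;
            clock-names = "ext_clock";
    };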
Signed-off-by: Daniel Lezcano Acked-by: Rob Herring Acked-by: Lee Jones [Ulf: Split patch and updated changelog] Signed-off-by: Ulf Hansson Acked-by: Arnd Bergmann --- Documentation/devicetree/bindings/mfd/hisilicon,hi655x.txt | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'Documentation') diff --git a/Documentation/devicetree/bindings/mfd/hisilicon,hi655x.txt b/Documentation/devicetree/bindings/mfd/hisilicon,hi655x.txt index 05485699d70e..9630ac0e4b56 100644 --- a/Documentation/devicetree/bindings/mfd/hisilicon,hi655x.txt +++ b/Documentation/devicetree/bindings/mfd/hisilicon,hi655x.txt @@ -16,6 +16,11 @@ Required properties: - reg: Base address of PMIC on Hi6220 SoC. - interrupt-controller: Hi655x has internal IRQs (has own IRQ domain). - pmic-gpios: The GPIO used by PMIC IRQ. +- #clock-cells: From common clock binding; shall be set to 0 + +Optional properties: +- clock-output-names: From common clock binding to override the + default output clock name Example: pmic: pmic@f8000000 { @@ -24,4 +29,5 @@ Example: interrupt-controller; #interrupt-cells = <2>; pmic-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; + #clock-cells = <0>; } -- cgit v1.2.3 From 159a07604a99bd01e7db112de08d53dc4fcad109 Mon Sep 17 00:00:00 2001 From: Quentin Schulz Date: Tue, 23 May 2017 11:48:08 +0200 Subject: net: fec: add post PHY reset delay DT property Some PHY require to wait for a bit after the reset GPIO has been toggled. This adds support for the DT property `phy-reset-post-delay` which gives the delay in milliseconds to wait after reset. If the DT property is not given, no delay is observed. Post reset delay greater than 1000ms are invalid. Signed-off-by: Quentin Schulz Reviewed-by: Andrew Lunn Acked-by: Fugang Duan Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/fsl-fec.txt | 4 ++++ drivers/net/ethernet/freescale/fec_main.c | 16 +++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) (limited to 'Documentation') diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt index a1e3693cca16..6f55bdd52f8a 100644 --- a/Documentation/devicetree/bindings/net/fsl-fec.txt +++ b/Documentation/devicetree/bindings/net/fsl-fec.txt @@ -15,6 +15,10 @@ Optional properties: - phy-reset-active-high : If present then the reset sequence using the GPIO specified in the "phy-reset-gpios" property is reversed (H=reset state, L=operation state). +- phy-reset-post-delay : Post reset delay in milliseconds. If present then + a delay of phy-reset-post-delay milliseconds will be observed after the + phy-reset-gpios has been toggled. Can be omitted thus no delay is + observed. Delay is in range of 1ms to 1000ms. Other delays are invalid. - phy-supply : regulator that powers the Ethernet PHY. - phy-handle : phandle to the PHY device connected to this device. - fixed-link : Assume a fixed link. See fixed-link.txt in the same directory. 
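To illustrate how the new property sits next to the existing reset handling, a device tree fragment might look like the following sketch (the GPIO line and delay value are placeholders, not taken from this patch):

    &fec {
            phy-mode = "rgmii";
            phy-reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
            /* wait 10 ms after the reset GPIO has been toggled */
            phy-reset-post-delay = <10>;
    };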
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 56a563f90b0b..f7c8649fd28f 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3192,7 +3192,7 @@ static int fec_reset_phy(struct platform_device *pdev) { int err, phy_reset; bool active_high = false; - int msec = 1; + int msec = 1, phy_post_delay = 0; struct device_node *np = pdev->dev.of_node; if (!np) @@ -3209,6 +3209,11 @@ static int fec_reset_phy(struct platform_device *pdev) else if (!gpio_is_valid(phy_reset)) return 0; + err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay); + /* valid reset duration should be less than 1s */ + if (!err && phy_post_delay > 1000) + return -EINVAL; + active_high = of_property_read_bool(np, "phy-reset-active-high"); err = devm_gpio_request_one(&pdev->dev, phy_reset, @@ -3226,6 +3231,15 @@ static int fec_reset_phy(struct platform_device *pdev) gpio_set_value_cansleep(phy_reset, !active_high); + if (!phy_post_delay) + return 0; + + if (phy_post_delay > 20) + msleep(phy_post_delay); + else + usleep_range(phy_post_delay * 1000, + phy_post_delay * 1000 + 1000); + return 0; } #else /* CONFIG_OF */ -- cgit v1.2.3 From 6c713a3a23154f4f0c2217ea39811ebe443dafaa Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Fri, 26 May 2017 01:44:43 +0200 Subject: net: dsa: mv88e6xxx: Add eeprom-length to binding The binding documentation for the mv88e6xxx switch is missing the eeprom-length property, which has been implemented since May 2016, commit f8cd8753def0 ("dsa: mv88e6xxx: Handle eeprom-length property") Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/dsa/marvell.txt | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'Documentation') diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt index 7ef9dbb08957..1d4d0f49c9d0 100644 --- a/Documentation/devicetree/bindings/net/dsa/marvell.txt +++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt @@ -26,6 +26,10 @@ Optional properties: - interrupt-controller : Indicates the switch is itself an interrupt controller. This is used for the PHY interrupts. #interrupt-cells = <2> : Controller uses two cells, number and flag +- eeprom-length : Set to the length of an EEPROM connected to the + switch. Must be set if the switch can not detect + the presence and/or size of a connected EEPROM, + otherwise optional. - mdio : Container of PHY and devices on the switches MDIO bus. - mdio? : Container of PHYs and devices on the external MDIO -- cgit v1.2.3 From 5ee6842cd8aff517701549463927cb1e5eb50b01 Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Mon, 29 May 2017 09:22:06 +0300 Subject: Documentation: networking: add DPAA Ethernet document Signed-off-by: Madalin Bucur Signed-off-by: Camelia Groza Signed-off-by: David S. 
Miller --- Documentation/networking/dpaa.txt | 194 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 194 insertions(+) create mode 100644 Documentation/networking/dpaa.txt (limited to 'Documentation') diff --git a/Documentation/networking/dpaa.txt b/Documentation/networking/dpaa.txt new file mode 100644 index 000000000000..76e016d4d344 --- /dev/null +++ b/Documentation/networking/dpaa.txt @@ -0,0 +1,194 @@ +The QorIQ DPAA Ethernet Driver +============================== + +Authors: +Madalin Bucur +Camelia Groza + +Contents +======== + + - DPAA Ethernet Overview + - DPAA Ethernet Supported SoCs + - Configuring DPAA Ethernet in your kernel + - DPAA Ethernet Frame Processing + - DPAA Ethernet Features + - Debugging + +DPAA Ethernet Overview +====================== + +DPAA stands for Data Path Acceleration Architecture and it is a +set of networking acceleration IPs that are available on several +generations of SoCs, both on PowerPC and ARM64. + +The Freescale DPAA architecture consists of a series of hardware blocks +that support Ethernet connectivity. The Ethernet driver depends upon the +following drivers in the Linux kernel: + + - Peripheral Access Memory Unit (PAMU) (* needed only for PPC platforms) + drivers/iommu/fsl_* + - Frame Manager (FMan) + drivers/net/ethernet/freescale/fman + - Queue Manager (QMan), Buffer Manager (BMan) + drivers/soc/fsl/qbman + +A simplified view of the dpaa_eth interfaces mapped to FMan MACs: + + dpaa_eth /eth0\ ... /ethN\ + driver | | | | + ------------- ---- ----------- ---- ------------- + -Ports / Tx Rx \ ... / Tx Rx \ + FMan | | | | + -MACs | MAC0 | | MACN | + / dtsec0 \ ... / dtsecN \ (or tgec) + / \ / \(or memac) + --------- -------------- --- -------------- --------- + FMan, FMan Port, FMan SP, FMan MURAM drivers + --------------------------------------------------------- + FMan HW blocks: MURAM, MACs, Ports, SP + --------------------------------------------------------- + +The dpaa_eth relation to the QMan, BMan and FMan: + ________________________________ + dpaa_eth / eth0 \ + driver / \ + --------- -^- -^- -^- --- --------- + QMan driver / \ / \ / \ \ / | BMan | + |Rx | |Rx | |Tx | |Tx | | driver | + --------- |Dfl| |Err| |Cnf| |FQs| | | + QMan HW |FQ | |FQ | |FQs| | | | | + / \ / \ / \ \ / | | + --------- --- --- --- -v- --------- + | FMan QMI | | + | FMan HW FMan BMI | BMan HW | + ----------------------- -------- + +where the acronyms used above (and in the code) are: +DPAA = Data Path Acceleration Architecture +FMan = DPAA Frame Manager +QMan = DPAA Queue Manager +BMan = DPAA Buffers Manager +QMI = QMan interface in FMan +BMI = BMan interface in FMan +FMan SP = FMan Storage Profiles +MURAM = Multi-user RAM in FMan +FQ = QMan Frame Queue +Rx Dfl FQ = default reception FQ +Rx Err FQ = Rx error frames FQ +Tx Cnf FQ = Tx confirmation FQs +Tx FQs = transmission frame queues +dtsec = datapath three speed Ethernet controller (10/100/1000 Mbps) +tgec = ten gigabit Ethernet controller (10 Gbps) +memac = multirate Ethernet MAC (10/100/1000/10000) + +DPAA Ethernet Supported SoCs +============================ + +The DPAA drivers enable the Ethernet controllers present on the following SoCs: + +# PPC +P1023 +P2041 +P3041 +P4080 +P5020 +P5040 +T1023 +T1024 +T1040 +T1042 +T2080 +T4240 +B4860 + +# ARM +LS1043A +LS1046A + +Configuring DPAA Ethernet in your kernel +======================================== + +To enable the DPAA Ethernet driver, the following Kconfig options are required: + +# common for arch/arm64 and arch/powerpc platforms +CONFIG_FSL_DPAA=y 
+CONFIG_FSL_FMAN=y +CONFIG_FSL_DPAA_ETH=y +CONFIG_FSL_XGMAC_MDIO=y + +# for arch/powerpc only +CONFIG_FSL_PAMU=y + +# common options needed for the PHYs used on the RDBs +CONFIG_VITESSE_PHY=y +CONFIG_REALTEK_PHY=y +CONFIG_AQUANTIA_PHY=y + +DPAA Ethernet Frame Processing +============================== + +On Rx, buffers for the incoming frames are retrieved from one of the three +existing buffers pools. The driver initializes and seeds these, each with +buffers of different sizes: 1KB, 2KB and 4KB. + +On Tx, all transmitted frames are returned to the driver through Tx +confirmation frame queues. The driver is then responsible for freeing the +buffers. In order to do this properly, a backpointer is added to the buffer +before transmission that points to the skb. When the buffer returns to the +driver on a confirmation FQ, the skb can be correctly consumed. + +DPAA Ethernet Features +====================== + +Currently the DPAA Ethernet driver enables the basic features required for +a Linux Ethernet driver. The support for advanced features will be added +gradually. + +The driver has Rx and Tx checksum offloading for UDP and TCP. Currently the Rx +checksum offload feature is enabled by default and cannot be controlled through +ethtool. + +The driver has support for multiple prioritized Tx traffic classes. Priorities +range from 0 (lowest) to 3 (highest). These are mapped to HW workqueues with +strict priority levels. Each traffic class contains NR_CPU TX queues. By +default, only one traffic class is enabled and the lowest priority Tx queues +are used. Higher priority traffic classes can be enabled with the mqprio +qdisc. For example, all four traffic classes are enabled on an interface with +the following command. Furthermore, skb priority levels are mapped to traffic +classes as follows: + + * priorities 0 to 3 - traffic class 0 (low priority) + * priorities 4 to 7 - traffic class 1 (medium-low priority) + * priorities 8 to 11 - traffic class 2 (medium-high priority) + * priorities 12 to 15 - traffic class 3 (high priority) + +tc qdisc add dev root handle 1: \ + mqprio num_tc 4 map 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 hw 1 + +Debugging +========= + +The following statistics are exported for each interface through ethtool: + + - interrupt count per CPU + - Rx packets count per CPU + - Tx packets count per CPU + - Tx confirmed packets count per CPU + - Tx S/G frames count per CPU + - Tx error count per CPU + - Rx error count per CPU + - Rx error count per type + - congestion related statistics: + - congestion status + - time spent in congestion + - number of time the device entered congestion + - dropped packets count per cause + +The driver also exports the following information in sysfs: + + - the FQ IDs for each FQ type + /sys/devices/platform/dpaa-ethernet.0/net//fqids + + - the IDs of the buffer pools in use + /sys/devices/platform/dpaa-ethernet.0/net//bpids -- cgit v1.2.3 From a91afc974ee8441940241e3c39c75d7b8f38e911 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Wed, 31 May 2017 15:58:19 +0800 Subject: dt-bindings: clock: sunxi-ccu: Add pll-periph to PRCM's needed clocks The AR100 clock in the PRCM has parents, one of which is pll-periph from the main CCU. Add it to the list of required clocks for the PRCM CCU. 
Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard --- Documentation/devicetree/bindings/clock/sunxi-ccu.txt | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'Documentation') diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt index e9c5a1d9834a..f465647a4dd2 100644 --- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt +++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt @@ -22,7 +22,8 @@ Required properties : - #clock-cells : must contain 1 - #reset-cells : must contain 1 -For the PRCM CCUs on H3/A64, one more clock is needed: +For the PRCM CCUs on H3/A64, two more clocks are needed: +- "pll-periph": the SoC's peripheral PLL from the main CCU - "iosc": the SoC's internal frequency oscillator Example for generic CCU: @@ -39,8 +40,8 @@ Example for PRCM CCU: r_ccu: clock@01f01400 { compatible = "allwinner,sun50i-a64-r-ccu"; reg = <0x01f01400 0x100>; - clocks = <&osc24M>, <&osc32k>, <&iosc>; - clock-names = "hosc", "losc", "iosc"; + clocks = <&osc24M>, <&osc32k>, <&iosc>, <&ccu CLK_PLL_PERIPH0>; + clock-names = "hosc", "losc", "iosc", "pll-periph"; #clock-cells = <1>; #reset-cells = <1>; }; -- cgit v1.2.3 From a2b05b7aa60e1e9b60faf01dfb1cca35638d1ab1 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 11 May 2017 21:24:41 +1000 Subject: powerpc/64s: Add dt_cpu_ftrs boot time setup option Provide a dt_cpu_ftrs= cmdline option to disable the dt_cpu_ftrs CPU feature discovery, and fall back to the "cputable" based version. Also allow control of advertising unknown features to userspace and with this parameter, and remove the clunky CONFIG option. Signed-off-by: Nicholas Piggin [mpe: Add explicit early check of bootargs in dt_cpu_ftrs_init()] Signed-off-by: Michael Ellerman --- Documentation/admin-guide/kernel-parameters.txt | 9 ++++ arch/powerpc/Kconfig | 5 --- arch/powerpc/kernel/dt_cpu_ftrs.c | 57 +++++++++++++++++++++---- 3 files changed, 57 insertions(+), 14 deletions(-) (limited to 'Documentation') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 15f79c27748d..0f5c3b4347c6 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -866,6 +866,15 @@ dscc4.setup= [NET] + dt_cpu_ftrs= [PPC] + Format: {"off" | "known"} + Control how the dt_cpu_ftrs device-tree binding is + used for CPU feature discovery and setup (if it + exists). + off: Do not use it, fall back to legacy cpu table. + known: Do not pass through unknown features to guests + or userspace, only those that the kernel is aware of. + dump_apple_properties [X86] Dump name and content of EFI device properties on x86 Macs. Useful for driver authors to determine diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index f7c8f9972f61..4a4a05afcaf7 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -391,11 +391,6 @@ config PPC_DT_CPU_FTRS firmware provides this binding. If you're not sure say Y. 
-config PPC_CPUFEATURES_ENABLE_UNKNOWN - bool "cpufeatures pass through unknown features to guest/userspace" - depends on PPC_DT_CPU_FTRS - default y - config HIGHMEM bool "High memory support" depends on PPC32 diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index fcc7588a96d6..76eebba13a1d 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -671,12 +672,24 @@ static struct dt_cpu_feature_match __initdata {"wait-v3", feat_enable, 0}, }; -/* XXX: how to configure this? Default + boot time? */ -#ifdef CONFIG_PPC_CPUFEATURES_ENABLE_UNKNOWN -#define CPU_FEATURE_ENABLE_UNKNOWN 1 -#else -#define CPU_FEATURE_ENABLE_UNKNOWN 0 -#endif +static bool __initdata using_dt_cpu_ftrs; +static bool __initdata enable_unknown = true; + +static int __init dt_cpu_ftrs_parse(char *str) +{ + if (!str) + return 0; + + if (!strcmp(str, "off")) + using_dt_cpu_ftrs = false; + else if (!strcmp(str, "known")) + enable_unknown = false; + else + return 1; + + return 0; +} +early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse); static void __init cpufeatures_setup_start(u32 isa) { @@ -707,7 +720,7 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f) } } - if (!known && CPU_FEATURE_ENABLE_UNKNOWN) { + if (!known && enable_unknown) { if (!feat_try_enable_unknown(f)) { pr_info("not enabling: %s (unknown and unsupported by kernel)\n", f->name); @@ -756,6 +769,26 @@ static void __init cpufeatures_setup_finished(void) cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features); } +static int __init disabled_on_cmdline(void) +{ + unsigned long root, chosen; + const char *p; + + root = of_get_flat_dt_root(); + chosen = of_get_flat_dt_subnode_by_name(root, "chosen"); + if (chosen == -FDT_ERR_NOTFOUND) + return false; + + p = of_get_flat_dt_prop(chosen, "bootargs", NULL); + if (!p) + return false; + + if (strstr(p, "dt_cpu_ftrs=off")) + return true; + + return false; +} + static int __init fdt_find_cpu_features(unsigned long node, const char *uname, int depth, void *data) { @@ -766,8 +799,6 @@ static int __init fdt_find_cpu_features(unsigned long node, const char *uname, return 0; } -static bool __initdata using_dt_cpu_ftrs = false; - bool __init dt_cpu_ftrs_in_use(void) { return using_dt_cpu_ftrs; @@ -775,6 +806,8 @@ bool __init dt_cpu_ftrs_in_use(void) bool __init dt_cpu_ftrs_init(void *fdt) { + using_dt_cpu_ftrs = false; + /* Setup and verify the FDT, if it fails we just bail */ if (!early_init_dt_verify(fdt)) return false; @@ -782,6 +815,9 @@ bool __init dt_cpu_ftrs_init(void *fdt) if (!of_scan_flat_dt(fdt_find_cpu_features, NULL)) return false; + if (disabled_on_cmdline()) + return false; + cpufeatures_setup_cpu(); using_dt_cpu_ftrs = true; @@ -1027,5 +1063,8 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char void __init dt_cpu_ftrs_scan(void) { + if (!using_dt_cpu_ftrs) + return; + of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL); } -- cgit v1.2.3 From 55b644fd2431cfd28d04cc28f092d49e7bea3433 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Sat, 6 May 2017 19:37:45 +0200 Subject: usb: dwc2: add support for the DWC2 controller on Meson8 SoCs USB support in the Meson8 SoCs is provided by a DWC2 controller which works with the same settings as Meson8b and GXBB. Using the generic "snps,dwc2" binding results in an endless stream of "Overcurrent change detected" messages. 
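A sketch of an SoC node using the new compatible string follows; the register range, interrupt, clock and PHY references are placeholders only and do not come from this patch:

    usb0: usb@c90c0000 {
            compatible = "amlogic,meson8-usb", "snps,dwc2";
            reg = <0xc90c0000 0x40000>;
            interrupts = <0 30 4>;
            clocks = <&clkc CLKID_USB0_DDR_BRIDGE>;
            clock-names = "otg";
            phys = <&usb0_phy>;
            phy-names = "usb2-phy";
    };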
Acked-by: Rob Herring Signed-off-by: Martin Blumenstingl Signed-off-by: Felipe Balbi --- Documentation/devicetree/bindings/usb/dwc2.txt | 1 + drivers/usb/dwc2/params.c | 2 ++ 2 files changed, 3 insertions(+) (limited to 'Documentation') diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt index 00bea038639e..fcf199b64d3d 100644 --- a/Documentation/devicetree/bindings/usb/dwc2.txt +++ b/Documentation/devicetree/bindings/usb/dwc2.txt @@ -10,6 +10,7 @@ Required properties: - "rockchip,rk3288-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3288 Soc; - "lantiq,arx100-usb": The DWC2 USB controller instance in Lantiq ARX SoCs; - "lantiq,xrx200-usb": The DWC2 USB controller instance in Lantiq XRX SoCs; + - "amlogic,meson8-usb": The DWC2 USB controller instance in Amlogic Meson8 SoCs; - "amlogic,meson8b-usb": The DWC2 USB controller instance in Amlogic Meson8b SoCs; - "amlogic,meson-gxbb-usb": The DWC2 USB controller instance in Amlogic S905 SoCs; - "amcc,dwc-otg": The DWC2 USB controller instance in AMCC Canyonlands 460EX SoCs; diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c index 9cd8722f24f6..a3ffe97170ff 100644 --- a/drivers/usb/dwc2/params.c +++ b/drivers/usb/dwc2/params.c @@ -144,6 +144,8 @@ const struct of_device_id dwc2_of_match_table[] = { { .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params }, { .compatible = "snps,dwc2" }, { .compatible = "samsung,s3c6400-hsotg" }, + { .compatible = "amlogic,meson8-usb", + .data = dwc2_set_amlogic_params }, { .compatible = "amlogic,meson8b-usb", .data = dwc2_set_amlogic_params }, { .compatible = "amlogic,meson-gxbb-usb", -- cgit v1.2.3 From 1e0ce2a1ee0d5fb334e82e80600f1e8e77df525c Mon Sep 17 00:00:00 2001 From: Anmol Sarma Date: Sat, 3 Jun 2017 17:40:54 +0530 Subject: net: Update TCP congestion control documentation Update tcp.txt to fix mandatory congestion control ops and default CCA selection. Also, fix comment in tcp.h for undo_cwnd. Signed-off-by: Anmol Sarma Signed-off-by: David S. Miller --- Documentation/networking/tcp.txt | 31 +++++++++++++------------------ include/net/tcp.h | 2 +- 2 files changed, 14 insertions(+), 19 deletions(-) (limited to 'Documentation') diff --git a/Documentation/networking/tcp.txt b/Documentation/networking/tcp.txt index bdc4c0db51e1..9c7139d57e57 100644 --- a/Documentation/networking/tcp.txt +++ b/Documentation/networking/tcp.txt @@ -1,7 +1,7 @@ TCP protocol ============ -Last updated: 9 February 2008 +Last updated: 3 June 2017 Contents ======== @@ -29,18 +29,19 @@ As of 2.6.13, Linux supports pluggable congestion control algorithms. A congestion control mechanism can be registered through functions in tcp_cong.c. The functions used by the congestion control mechanism are registered via passing a tcp_congestion_ops struct to -tcp_register_congestion_control. As a minimum name, ssthresh, -cong_avoid must be valid. +tcp_register_congestion_control. As a minimum, the congestion control +mechanism must provide a valid name and must implement either ssthresh, +cong_avoid and undo_cwnd hooks or the "omnipotent" cong_control hook. Private data for a congestion control mechanism is stored in tp->ca_priv. tcp_ca(tp) returns a pointer to this space. This is preallocated space - it is important to check the size of your private data will fit this space, or -alternatively space could be allocated elsewhere and a pointer to it could +alternatively, space could be allocated elsewhere and a pointer to it could be stored here. 
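To make the registration path described above concrete, here is a minimal sketch of a congestion control module, assuming the hook signatures from include/net/tcp.h of this kernel and simply reusing the exported reno helpers (the module name "sketch" is made up):

    #include <linux/module.h>
    #include <net/tcp.h>

    static struct tcp_congestion_ops sketch_cc __read_mostly = {
            .name           = "sketch",
            .owner          = THIS_MODULE,
            /* minimum required hooks: ssthresh, cong_avoid and undo_cwnd */
            .ssthresh       = tcp_reno_ssthresh,
            .cong_avoid     = tcp_reno_cong_avoid,
            .undo_cwnd      = tcp_reno_undo_cwnd,
    };

    static int __init sketch_cc_register(void)
    {
            return tcp_register_congestion_control(&sketch_cc);
    }

    static void __exit sketch_cc_unregister(void)
    {
            tcp_unregister_congestion_control(&sketch_cc);
    }

    module_init(sketch_cc_register);
    module_exit(sketch_cc_unregister);
    MODULE_LICENSE("GPL");

Once loaded, such a module can be selected with sysctl net.ipv4.tcp_congestion_control, as described below.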
There are three kinds of congestion control algorithms currently: The simplest ones are derived from TCP reno (highspeed, scalable) and just -provide an alternative the congestion window calculation. More complex +provide an alternative congestion window calculation. More complex ones like BIC try to look at other events to provide better heuristics. There are also round trip time based algorithms like Vegas and Westwood+. @@ -49,21 +50,15 @@ Good TCP congestion control is a complex problem because the algorithm needs to maintain fairness and performance. Please review current research and RFC's before developing new modules. -The method that is used to determine which congestion control mechanism is -determined by the setting of the sysctl net.ipv4.tcp_congestion_control. -The default congestion control will be the last one registered (LIFO); -so if you built everything as modules, the default will be reno. If you -build with the defaults from Kconfig, then CUBIC will be builtin (not a -module) and it will end up the default. +The default congestion control mechanism is chosen based on the +DEFAULT_TCP_CONG Kconfig parameter. If you really want a particular default +value then you can set it using sysctl net.ipv4.tcp_congestion_control. The +module will be autoloaded if needed and you will get the expected protocol. If +you ask for an unknown congestion method, then the sysctl attempt will fail. -If you really want a particular default value then you will need -to set it with the sysctl. If you use a sysctl, the module will be autoloaded -if needed and you will get the expected protocol. If you ask for an -unknown congestion method, then the sysctl attempt will fail. - -If you remove a tcp congestion control module, then you will get the next +If you remove a TCP congestion control module, then you will get the next available one. Since reno cannot be built as a module, and cannot be -deleted, it will always be available. +removed, it will always be available. How the new TCP output machine [nyi] works. =========================================== diff --git a/include/net/tcp.h b/include/net/tcp.h index 38a7427ae902..be6223c586fa 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -924,7 +924,7 @@ struct tcp_congestion_ops { void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev); /* call when ack arrives (optional) */ void (*in_ack_event)(struct sock *sk, u32 flags); - /* new value of cwnd after loss (optional) */ + /* new value of cwnd after loss (required) */ u32 (*undo_cwnd)(struct sock *sk); /* hook for packet ack accounting (optional) */ void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample); -- cgit v1.2.3 From e4061d572cef1f990bb7761f45d00342daa27fbd Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jun 2017 19:01:37 +0300 Subject: net: fix up hash documentation commit 61b905da33 ("net: Rename skb->rxhash to skb->hash") didn't update the documentation, fix this up. Cc: Tom Herbert Signed-off-by: Michael S. Tsirkin Signed-off-by: David S. Miller --- Documentation/networking/scaling.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'Documentation') diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt index 59f4db2a0c85..f55639d71d35 100644 --- a/Documentation/networking/scaling.txt +++ b/Documentation/networking/scaling.txt @@ -122,7 +122,7 @@ associated flow of the packet. The hash is either provided by hardware or will be computed in the stack. 
Capable hardware can pass the hash in the receive descriptor for the packet; this would usually be the same hash used for RSS (e.g. computed Toeplitz hash). The hash is saved in -skb->rx_hash and can be used elsewhere in the stack as a hash of the +skb->hash and can be used elsewhere in the stack as a hash of the packet’s flow. Each receive hardware queue has an associated list of CPUs to which -- cgit v1.2.3 From 1be7107fbe18eed3e319a6c3e83c78254b693acb Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 19 Jun 2017 04:03:24 -0700 Subject: mm: larger stack guard gap, between vmas Stack guard page is a useful feature to reduce a risk of stack smashing into a different mapping. We have been using a single page gap which is sufficient to prevent having stack adjacent to a different mapping. But this seems to be insufficient in the light of the stack usage in userspace. E.g. glibc uses as large as 64kB alloca() in many commonly used functions. Others use constructs liks gid_t buffer[NGROUPS_MAX] which is 256kB or stack strings with MAX_ARG_STRLEN. This will become especially dangerous for suid binaries and the default no limit for the stack size limit because those applications can be tricked to consume a large portion of the stack and a single glibc call could jump over the guard page. These attacks are not theoretical, unfortunatelly. Make those attacks less probable by increasing the stack guard gap to 1MB (on systems with 4k pages; but make it depend on the page size because systems with larger base pages might cap stack allocations in the PAGE_SIZE units) which should cover larger alloca() and VLA stack allocations. It is obviously not a full fix because the problem is somehow inherent, but it should reduce attack space a lot. One could argue that the gap size should be configurable from userspace, but that can be done later when somebody finds that the new 1MB is wrong for some special case applications. For now, add a kernel command line option (stack_guard_gap) to specify the stack gap size (in page units). Implementation wise, first delete all the old code for stack guard page: because although we could get away with accounting one extra page in a stack vma, accounting a larger gap can break userspace - case in point, a program run with "ulimit -S -v 20000" failed when the 1MB gap was counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK and strict non-overcommit mode. Instead of keeping gap inside the stack vma, maintain the stack guard gap as a gap between vmas: using vm_start_gap() in place of vm_start (or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few places which need to respect the gap - mainly arch_get_unmapped_area(), and and the vma tree's subtree_gap support for that. 
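A quick worked example of the sizes involved (page sizes assumed, matching the description above):

    default: stack_guard_gap = 256 pages
        with 4 KB pages:   256 * 4 KB  = 1 MB gap
        with 64 KB pages:  256 * 64 KB = 16 MB gap

    illustrative boot line falling back to the old single-page gap:
        linux ... stack_guard_gap=1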
Original-patch-by: Oleg Nesterov Original-patch-by: Michal Hocko Signed-off-by: Hugh Dickins Acked-by: Michal Hocko Tested-by: Helge Deller # parisc Signed-off-by: Linus Torvalds --- Documentation/admin-guide/kernel-parameters.txt | 7 ++ arch/arc/mm/mmap.c | 2 +- arch/arm/mm/mmap.c | 4 +- arch/frv/mm/elf-fdpic.c | 2 +- arch/mips/mm/mmap.c | 2 +- arch/parisc/kernel/sys_parisc.c | 15 ++- arch/powerpc/mm/hugetlbpage-radix.c | 2 +- arch/powerpc/mm/mmap.c | 4 +- arch/powerpc/mm/slice.c | 2 +- arch/s390/mm/mmap.c | 4 +- arch/sh/mm/mmap.c | 4 +- arch/sparc/kernel/sys_sparc_64.c | 4 +- arch/sparc/mm/hugetlbpage.c | 2 +- arch/tile/mm/hugetlbpage.c | 2 +- arch/x86/kernel/sys_x86_64.c | 4 +- arch/x86/mm/hugetlbpage.c | 2 +- arch/xtensa/kernel/syscall.c | 2 +- fs/hugetlbfs/inode.c | 2 +- fs/proc/task_mmu.c | 4 - include/linux/mm.h | 53 ++++----- mm/gup.c | 5 - mm/memory.c | 38 ------ mm/mmap.c | 149 ++++++++++++++---------- 23 files changed, 152 insertions(+), 163 deletions(-) (limited to 'Documentation') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 0f5c3b4347c6..7737ab5d04b2 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3811,6 +3811,13 @@ expediting. Set to zero to disable automatic expediting. + stack_guard_gap= [MM] + override the default stack gap protection. The value + is in page units and it defines how many pages prior + to (for stacks growing down) resp. after (for stacks + growing up) the main stack are reserved for no other + mapping. Default value is 256 pages. + stacktrace [FTRACE] Enabled the stack tracer on boot up. diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c index 3e25e8d6486b..2e13683dfb24 100644 --- a/arch/arc/mm/mmap.c +++ b/arch/arc/mm/mmap.c @@ -65,7 +65,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 2239fde10b80..f0701d8d24df 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -90,7 +90,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -141,7 +141,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c index da82c25301e7..46aa289c5102 100644 --- a/arch/frv/mm/elf-fdpic.c +++ b/arch/frv/mm/elf-fdpic.c @@ -75,7 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi addr = PAGE_ALIGN(addr); vma = find_vma(current->mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) goto success; } diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index 64dd8bdd92c3..28adeabe851f 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -93,7 +93,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= 
vm_start_gap(vma))) return addr; } diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index e5288638a1d9..378a754ca186 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -90,7 +90,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; unsigned long task_size = TASK_SIZE; int do_color_align, last_mmap; struct vm_unmapped_area_info info; @@ -117,9 +117,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, else addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + vma = find_vma_prev(mm, addr, &prev); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) goto found_addr; } @@ -143,7 +144,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; struct mm_struct *mm = current->mm; unsigned long addr = addr0; int do_color_align, last_mmap; @@ -177,9 +178,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = COLOR_ALIGN(addr, last_mmap, pgoff); else addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + + vma = find_vma_prev(mm, addr, &prev); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) goto found_addr; } diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c index 6575b9aabef4..a12e86395025 100644 --- a/arch/powerpc/mm/hugetlbpage-radix.c +++ b/arch/powerpc/mm/hugetlbpage-radix.c @@ -68,7 +68,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (mm->task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } /* diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c index 9dbd2a733d6b..0ee6be4f1ba4 100644 --- a/arch/powerpc/mm/mmap.c +++ b/arch/powerpc/mm/mmap.c @@ -112,7 +112,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (mm->task_size - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -157,7 +157,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (mm->task_size - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 966b9fccfa66..45f6740dd407 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -99,7 +99,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, if ((mm->task_size - len) < addr) return 0; vma = find_vma(mm, addr); - return (!vma || (addr + len) <= vma->vm_start); + return (!vma || (addr + len) <= vm_start_gap(vma)); } static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 
b017daed6887..b854b1da281a 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -101,7 +101,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) goto check_asce_limit; } @@ -151,7 +151,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) goto check_asce_limit; } diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index 08e7af0be4a7..6a1a1297baae 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c @@ -64,7 +64,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -114,7 +114,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index ef4520efc813..043544d0cda3 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -120,7 +120,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi vma = find_vma(mm, addr); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -183,7 +183,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, vma = find_vma(mm, addr); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 7c29d38e6b99..88855e383b34 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -120,7 +120,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c index cb10153b5c9f..03e5cc4e76e4 100644 --- a/arch/tile/mm/hugetlbpage.c +++ b/arch/tile/mm/hugetlbpage.c @@ -233,7 +233,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } if (current->mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 207b8f2582c7..213ddf3e937d 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -144,7 +144,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (end - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ 
-187,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 302f43fd9c28..adad702b39cd 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -148,7 +148,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c index 06937928cb72..74afbf02d07e 100644 --- a/arch/xtensa/kernel/syscall.c +++ b/arch/xtensa/kernel/syscall.c @@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, /* At this point: (!vmm || addr < vmm->vm_end). */ if (TASK_SIZE - len < addr) return -ENOMEM; - if (!vmm || addr + len <= vmm->vm_start) + if (!vmm || addr + len <= vm_start_gap(vmm)) return addr; addr = vmm->vm_end; if (flags & MAP_SHARED) diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index dde861387a40..d44f5456eb9b 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -200,7 +200,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index f0c8b33d99b1..520802da059c 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -300,11 +300,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) /* We don't show the stack guard page in /proc/maps */ start = vma->vm_start; - if (stack_guard_page_start(vma, start)) - start += PAGE_SIZE; end = vma->vm_end; - if (stack_guard_page_end(vma, end)) - end -= PAGE_SIZE; seq_setwidth(m, 25 + sizeof(void *) * 6 - 1); seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ", diff --git a/include/linux/mm.h b/include/linux/mm.h index b892e95d4929..6f543a47fc92 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1393,12 +1393,6 @@ int clear_page_dirty_for_io(struct page *page); int get_cmdline(struct task_struct *task, char *buffer, int buflen); -/* Is the vma a continuation of the stack vma above it? */ -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) -{ - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); -} - static inline bool vma_is_anonymous(struct vm_area_struct *vma) { return !vma->vm_ops; @@ -1414,28 +1408,6 @@ bool vma_is_shmem(struct vm_area_struct *vma); static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } #endif -static inline int stack_guard_page_start(struct vm_area_struct *vma, - unsigned long addr) -{ - return (vma->vm_flags & VM_GROWSDOWN) && - (vma->vm_start == addr) && - !vma_growsdown(vma->vm_prev, addr); -} - -/* Is the vma a continuation of the stack vma below it? 
*/ -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) -{ - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); -} - -static inline int stack_guard_page_end(struct vm_area_struct *vma, - unsigned long addr) -{ - return (vma->vm_flags & VM_GROWSUP) && - (vma->vm_end == addr) && - !vma_growsup(vma->vm_next, addr); -} - int vma_is_stack_for_current(struct vm_area_struct *vma); extern unsigned long move_page_tables(struct vm_area_struct *vma, @@ -2222,6 +2194,7 @@ void page_cache_async_readahead(struct address_space *mapping, pgoff_t offset, unsigned long size); +extern unsigned long stack_guard_gap; /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ extern int expand_stack(struct vm_area_struct *vma, unsigned long address); @@ -2250,6 +2223,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m return vma; } +static inline unsigned long vm_start_gap(struct vm_area_struct *vma) +{ + unsigned long vm_start = vma->vm_start; + + if (vma->vm_flags & VM_GROWSDOWN) { + vm_start -= stack_guard_gap; + if (vm_start > vma->vm_start) + vm_start = 0; + } + return vm_start; +} + +static inline unsigned long vm_end_gap(struct vm_area_struct *vma) +{ + unsigned long vm_end = vma->vm_end; + + if (vma->vm_flags & VM_GROWSUP) { + vm_end += stack_guard_gap; + if (vm_end < vma->vm_end) + vm_end = -PAGE_SIZE; + } + return vm_end; +} + static inline unsigned long vma_pages(struct vm_area_struct *vma) { return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; diff --git a/mm/gup.c b/mm/gup.c index b3c7214d710d..576c4df58882 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -387,11 +387,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, /* mlock all present pages, but do not fault in new pages */ if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK) return -ENOENT; - /* For mm_populate(), just skip the stack guard page. */ - if ((*flags & FOLL_POPULATE) && - (stack_guard_page_start(vma, address) || - stack_guard_page_end(vma, address + PAGE_SIZE))) - return -ENOENT; if (*flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (*flags & FOLL_REMOTE) diff --git a/mm/memory.c b/mm/memory.c index 2e65df1831d9..bb11c474857e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2854,40 +2854,6 @@ out_release: return ret; } -/* - * This is like a special single-page "expand_{down|up}wards()", - * except we must first make sure that 'address{-|+}PAGE_SIZE' - * doesn't hit another vma. - */ -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) -{ - address &= PAGE_MASK; - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { - struct vm_area_struct *prev = vma->vm_prev; - - /* - * Is there a mapping abutting this one below? - * - * That's only ok if it's the same stack mapping - * that has gotten split.. - */ - if (prev && prev->vm_end == address) - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; - - return expand_downwards(vma, address - PAGE_SIZE); - } - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { - struct vm_area_struct *next = vma->vm_next; - - /* As VM_GROWSDOWN but s/below/above/ */ - if (next && next->vm_start == address + PAGE_SIZE) - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; - - return expand_upwards(vma, address + PAGE_SIZE); - } - return 0; -} - /* * We enter with non-exclusive mmap_sem (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. 
@@ -2904,10 +2870,6 @@ static int do_anonymous_page(struct vm_fault *vmf) if (vma->vm_flags & VM_SHARED) return VM_FAULT_SIGBUS; - /* Check if we need to add a guard page to the stack */ - if (check_stack_guard_page(vma, vmf->address) < 0) - return VM_FAULT_SIGSEGV; - /* * Use pte_alloc() instead of pte_alloc_map(). We can't run * pte_offset_map() on pmds where a huge pmd might be created diff --git a/mm/mmap.c b/mm/mmap.c index f82741e199c0..8e07976d5e47 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -183,6 +183,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) unsigned long retval; unsigned long newbrk, oldbrk; struct mm_struct *mm = current->mm; + struct vm_area_struct *next; unsigned long min_brk; bool populate; LIST_HEAD(uf); @@ -229,7 +230,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) } /* Check against existing mmap mappings. */ - if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) + next = find_vma(mm, oldbrk); + if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) goto out; /* Ok, looks good - let it rip. */ @@ -253,10 +255,22 @@ out: static long vma_compute_subtree_gap(struct vm_area_struct *vma) { - unsigned long max, subtree_gap; - max = vma->vm_start; - if (vma->vm_prev) - max -= vma->vm_prev->vm_end; + unsigned long max, prev_end, subtree_gap; + + /* + * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we + * allow two stack_guard_gaps between them here, and when choosing + * an unmapped area; whereas when expanding we only require one. + * That's a little inconsistent, but keeps the code here simpler. + */ + max = vm_start_gap(vma); + if (vma->vm_prev) { + prev_end = vm_end_gap(vma->vm_prev); + if (max > prev_end) + max -= prev_end; + else + max = 0; + } if (vma->vm_rb.rb_left) { subtree_gap = rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb)->rb_subtree_gap; @@ -352,7 +366,7 @@ static void validate_mm(struct mm_struct *mm) anon_vma_unlock_read(anon_vma); } - highest_address = vma->vm_end; + highest_address = vm_end_gap(vma); vma = vma->vm_next; i++; } @@ -541,7 +555,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, if (vma->vm_next) vma_gap_update(vma->vm_next); else - mm->highest_vm_end = vma->vm_end; + mm->highest_vm_end = vm_end_gap(vma); /* * vma->vm_prev wasn't known when we followed the rbtree to find the @@ -856,7 +870,7 @@ again: vma_gap_update(vma); if (end_changed) { if (!next) - mm->highest_vm_end = end; + mm->highest_vm_end = vm_end_gap(vma); else if (!adjust_next) vma_gap_update(next); } @@ -941,7 +955,7 @@ again: * mm->highest_vm_end doesn't need any update * in remove_next == 1 case. */ - VM_WARN_ON(mm->highest_vm_end != end); + VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); } } if (insert && file) @@ -1787,7 +1801,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) while (true) { /* Visit left subtree if it looks promising */ - gap_end = vma->vm_start; + gap_end = vm_start_gap(vma); if (gap_end >= low_limit && vma->vm_rb.rb_left) { struct vm_area_struct *left = rb_entry(vma->vm_rb.rb_left, @@ -1798,7 +1812,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) } } - gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; + gap_start = vma->vm_prev ? 
vm_end_gap(vma->vm_prev) : 0; check_current: /* Check if current node has a suitable gap */ if (gap_start > high_limit) @@ -1825,8 +1839,8 @@ check_current: vma = rb_entry(rb_parent(prev), struct vm_area_struct, vm_rb); if (prev == vma->vm_rb.rb_left) { - gap_start = vma->vm_prev->vm_end; - gap_end = vma->vm_start; + gap_start = vm_end_gap(vma->vm_prev); + gap_end = vm_start_gap(vma); goto check_current; } } @@ -1890,7 +1904,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) while (true) { /* Visit right subtree if it looks promising */ - gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; + gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; if (gap_start <= high_limit && vma->vm_rb.rb_right) { struct vm_area_struct *right = rb_entry(vma->vm_rb.rb_right, @@ -1903,7 +1917,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) check_current: /* Check if current node has a suitable gap */ - gap_end = vma->vm_start; + gap_end = vm_start_gap(vma); if (gap_end < low_limit) return -ENOMEM; if (gap_start <= high_limit && gap_end - gap_start >= length) @@ -1929,7 +1943,7 @@ check_current: struct vm_area_struct, vm_rb); if (prev == vma->vm_rb.rb_right) { gap_start = vma->vm_prev ? - vma->vm_prev->vm_end : 0; + vm_end_gap(vma->vm_prev) : 0; goto check_current; } } @@ -1967,7 +1981,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; struct vm_unmapped_area_info info; if (len > TASK_SIZE - mmap_min_addr) @@ -1978,9 +1992,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, if (addr) { addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + vma = find_vma_prev(mm, addr, &prev); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) return addr; } @@ -2003,7 +2018,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; struct mm_struct *mm = current->mm; unsigned long addr = addr0; struct vm_unmapped_area_info info; @@ -2018,9 +2033,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + vma = find_vma_prev(mm, addr, &prev); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) return addr; } @@ -2155,21 +2171,19 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr, * update accounting. This is shared with both the * grow-up and grow-down cases. 
*/ -static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) +static int acct_stack_growth(struct vm_area_struct *vma, + unsigned long size, unsigned long grow) { struct mm_struct *mm = vma->vm_mm; struct rlimit *rlim = current->signal->rlim; - unsigned long new_start, actual_size; + unsigned long new_start; /* address space limit tests */ if (!may_expand_vm(mm, vma->vm_flags, grow)) return -ENOMEM; /* Stack limit test */ - actual_size = size; - if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) - actual_size -= PAGE_SIZE; - if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur)) + if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur)) return -ENOMEM; /* mlock limit tests */ @@ -2207,17 +2221,30 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns int expand_upwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; + struct vm_area_struct *next; + unsigned long gap_addr; int error = 0; if (!(vma->vm_flags & VM_GROWSUP)) return -EFAULT; /* Guard against wrapping around to address 0. */ - if (address < PAGE_ALIGN(address+4)) - address = PAGE_ALIGN(address+4); - else + address &= PAGE_MASK; + address += PAGE_SIZE; + if (!address) return -ENOMEM; + /* Enforce stack_guard_gap */ + gap_addr = address + stack_guard_gap; + if (gap_addr < address) + return -ENOMEM; + next = vma->vm_next; + if (next && next->vm_start < gap_addr) { + if (!(next->vm_flags & VM_GROWSUP)) + return -ENOMEM; + /* Check that both stack segments have the same anon_vma? */ + } + /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; @@ -2261,7 +2288,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) if (vma->vm_next) vma_gap_update(vma->vm_next); else - mm->highest_vm_end = address; + mm->highest_vm_end = vm_end_gap(vma); spin_unlock(&mm->page_table_lock); perf_event_mmap(vma); @@ -2282,6 +2309,8 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; + struct vm_area_struct *prev; + unsigned long gap_addr; int error; address &= PAGE_MASK; @@ -2289,6 +2318,17 @@ int expand_downwards(struct vm_area_struct *vma, if (error) return error; + /* Enforce stack_guard_gap */ + gap_addr = address - stack_guard_gap; + if (gap_addr > address) + return -ENOMEM; + prev = vma->vm_prev; + if (prev && prev->vm_end > gap_addr) { + if (!(prev->vm_flags & VM_GROWSDOWN)) + return -ENOMEM; + /* Check that both stack segments have the same anon_vma? */ + } + /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; @@ -2343,28 +2383,25 @@ int expand_downwards(struct vm_area_struct *vma, return error; } -/* - * Note how expand_stack() refuses to expand the stack all the way to - * abut the next virtual mapping, *unless* that mapping itself is also - * a stack mapping. We want to leave room for a guard page, after all - * (the guard page itself is not added here, that is done by the - * actual page faulting logic) - * - * This matches the behavior of the guard page logic (see mm/memory.c: - * check_stack_guard_page()), which only allows the guard page to be - * removed under these circumstances. - */ +/* enforced gap between the expanding stack and other mappings. 
*/ +unsigned long stack_guard_gap = 256UL< Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- Documentation/DocBook/kernel-hacking.tmpl | 2 +- Documentation/filesystems/autofs4.txt | 12 ++-- block/blk-mq.c | 2 +- block/blk-wbt.c | 2 +- block/kyber-iosched.c | 8 +-- drivers/bluetooth/btmrvl_main.c | 2 +- drivers/char/ipmi/ipmi_watchdog.c | 2 +- drivers/gpu/drm/i915/i915_gem_request.h | 2 +- drivers/gpu/drm/i915/i915_sw_fence.c | 14 ++--- drivers/gpu/drm/i915/i915_sw_fence.h | 2 +- drivers/gpu/drm/radeon/radeon.h | 2 +- drivers/gpu/drm/radeon/radeon_fence.c | 2 +- drivers/gpu/vga/vgaarb.c | 2 +- drivers/infiniband/hw/i40iw/i40iw_main.c | 2 +- drivers/md/bcache/btree.h | 2 +- drivers/net/ethernet/cavium/liquidio/octeon_main.h | 4 +- drivers/net/wireless/cisco/airo.c | 2 +- .../net/wireless/intersil/hostap/hostap_ioctl.c | 2 +- drivers/net/wireless/marvell/libertas/main.c | 2 +- drivers/scsi/dpt/dpti_i2o.h | 2 +- drivers/scsi/ips.c | 12 ++-- drivers/scsi/ips.h | 4 +- .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | 6 +- .../staging/lustre/lnet/klnds/socklnd/socklnd_cb.c | 4 +- drivers/staging/lustre/lnet/libcfs/debug.c | 2 +- drivers/staging/lustre/lnet/libcfs/tracefile.c | 2 +- drivers/staging/lustre/lnet/lnet/lib-eq.c | 2 +- drivers/staging/lustre/lnet/lnet/lib-socket.c | 2 +- drivers/staging/lustre/lustre/fid/fid_request.c | 6 +- drivers/staging/lustre/lustre/include/lustre_lib.h | 4 +- drivers/staging/lustre/lustre/llite/lcommon_cl.c | 2 +- .../staging/lustre/lustre/lov/lov_cl_internal.h | 2 +- drivers/staging/lustre/lustre/lov/lov_object.c | 2 +- drivers/staging/lustre/lustre/obdclass/lu_object.c | 6 +- drivers/tty/synclink_gt.c | 2 +- drivers/vfio/virqfd.c | 2 +- drivers/vhost/vhost.c | 2 +- drivers/vhost/vhost.h | 2 +- fs/autofs4/autofs_i.h | 2 +- fs/autofs4/waitq.c | 18 +++--- fs/cachefiles/internal.h | 2 +- fs/cachefiles/namei.c | 2 +- fs/cachefiles/rdwr.c | 2 +- fs/dax.c | 4 +- fs/eventfd.c | 2 +- fs/eventpoll.c | 10 ++-- fs/fs_pin.c | 2 +- fs/nfs/nfs4proc.c | 4 +- fs/nilfs2/segment.c | 2 +- fs/orangefs/orangefs-bufmap.c | 4 +- fs/reiserfs/journal.c | 2 +- fs/select.c | 4 +- fs/signalfd.c | 2 +- fs/userfaultfd.c | 8 +-- include/linux/blk-mq.h | 2 +- include/linux/eventfd.h | 4 +- include/linux/kvm_irqfd.h | 2 +- include/linux/pagemap.h | 2 +- include/linux/poll.h | 2 +- include/linux/vfio.h | 2 +- include/linux/wait.h | 67 +++++++++++----------- include/net/af_unix.h | 2 +- include/uapi/linux/auto_fs.h | 4 +- include/uapi/linux/auto_fs4.h | 4 +- kernel/exit.c | 4 +- kernel/futex.c | 2 +- kernel/sched/completion.c | 2 +- kernel/sched/core.c | 2 +- kernel/sched/wait.c | 42 +++++++------- kernel/workqueue.c | 4 +- mm/filemap.c | 10 ++-- mm/memcontrol.c | 8 +-- mm/mempool.c | 2 +- mm/shmem.c | 2 +- net/9p/trans_fd.c | 4 +- net/bluetooth/bnep/core.c | 2 +- net/bluetooth/cmtp/core.c | 2 +- net/bluetooth/hidp/core.c | 2 +- net/core/datagram.c | 2 +- net/unix/af_unix.c | 4 +- sound/core/control.c | 2 +- sound/core/hwdep.c | 2 +- sound/core/init.c | 2 +- sound/core/oss/pcm_oss.c | 4 +- sound/core/pcm_lib.c | 2 +- sound/core/pcm_native.c | 4 +- sound/core/rawmidi.c | 8 +-- sound/core/seq/seq_fifo.c | 2 +- sound/core/seq/seq_memory.c | 2 +- sound/core/timer.c | 2 +- sound/isa/wavefront/wavefront_synth.c | 2 +- sound/pci/mixart/mixart_core.c | 4 +- sound/pci/ymfpci/ymfpci_main.c | 2 +- virt/kvm/eventfd.c | 2 +- 94 files changed, 216 insertions(+), 213 deletions(-) (limited to 'Documentation') diff --git 
a/Documentation/DocBook/kernel-hacking.tmpl b/Documentation/DocBook/kernel-hacking.tmpl index da5c087462b1..c3c705591532 100644 --- a/Documentation/DocBook/kernel-hacking.tmpl +++ b/Documentation/DocBook/kernel-hacking.tmpl @@ -819,7 +819,7 @@ printk(KERN_INFO "my ip: %pI4\n", &ipaddress); certain condition is true. They must be used carefully to ensure there is no race condition. You declare a wait_queue_head_t, and then processes which want to - wait for that condition declare a wait_queue_t + wait for that condition declare a wait_queue_entry_t referring to themselves, and place that in the queue. diff --git a/Documentation/filesystems/autofs4.txt b/Documentation/filesystems/autofs4.txt index f10dd590f69f..8444dc3d57e8 100644 --- a/Documentation/filesystems/autofs4.txt +++ b/Documentation/filesystems/autofs4.txt @@ -316,7 +316,7 @@ For version 5, the format of the message is: struct autofs_v5_packet { int proto_version; /* Protocol version */ int type; /* Type of packet */ - autofs_wqt_t wait_queue_token; + autofs_wqt_t wait_queue_entry_token; __u32 dev; __u64 ino; __u32 uid; @@ -341,12 +341,12 @@ The pipe will be set to "packet mode" (equivalent to passing `O_DIRECT`) to _pipe2(2)_ so that a read from the pipe will return at most one packet, and any unread portion of a packet will be discarded. -The `wait_queue_token` is a unique number which can identify a +The `wait_queue_entry_token` is a unique number which can identify a particular request to be acknowledged. When a message is sent over the pipe the affected dentry is marked as either "active" or "expiring" and other accesses to it block until the message is acknowledged using one of the ioctls below and the relevant -`wait_queue_token`. +`wait_queue_entry_token`. Communicating with autofs: root directory ioctls ------------------------------------------------ @@ -358,7 +358,7 @@ capability, or must be the automount daemon. The available ioctl commands are: - **AUTOFS_IOC_READY**: a notification has been handled. The argument - to the ioctl command is the "wait_queue_token" number + to the ioctl command is the "wait_queue_entry_token" number corresponding to the notification being acknowledged. - **AUTOFS_IOC_FAIL**: similar to above, but indicates failure with the error code `ENOENT`. @@ -382,14 +382,14 @@ The available ioctl commands are: struct autofs_packet_expire_multi { int proto_version; /* Protocol version */ int type; /* Type of packet */ - autofs_wqt_t wait_queue_token; + autofs_wqt_t wait_queue_entry_token; int len; char name[NAME_MAX+1]; }; is required. This is filled in with the name of something that can be unmounted or removed. If nothing can be expired, - `errno` is set to `EAGAIN`. Even though a `wait_queue_token` + `errno` is set to `EAGAIN`. Even though a `wait_queue_entry_token` is present in the structure, no "wait queue" is established and no acknowledgment is needed. 
- **AUTOFS_IOC_EXPIRE_MULTI**: This is similar to diff --git a/block/blk-mq.c b/block/blk-mq.c index bb66c96850b1..a083f95e04b1 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -926,7 +926,7 @@ static bool reorder_tags_to_front(struct list_head *list) return first != NULL; } -static int blk_mq_dispatch_wake(wait_queue_t *wait, unsigned mode, int flags, +static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int flags, void *key) { struct blk_mq_hw_ctx *hctx; diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 17676f4d7fd1..5f3a37c2784c 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -503,7 +503,7 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw) } static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw, - wait_queue_t *wait, unsigned long rw) + wait_queue_entry_t *wait, unsigned long rw) { /* * inc it here even if disabled, since we'll dec it at completion. diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index b9faabc75fdb..b95d6bd714c0 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c @@ -99,7 +99,7 @@ struct kyber_hctx_data { struct list_head rqs[KYBER_NUM_DOMAINS]; unsigned int cur_domain; unsigned int batching; - wait_queue_t domain_wait[KYBER_NUM_DOMAINS]; + wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS]; atomic_t wait_index[KYBER_NUM_DOMAINS]; }; @@ -507,7 +507,7 @@ static void kyber_flush_busy_ctxs(struct kyber_hctx_data *khd, } } -static int kyber_domain_wake(wait_queue_t *wait, unsigned mode, int flags, +static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags, void *key) { struct blk_mq_hw_ctx *hctx = READ_ONCE(wait->private); @@ -523,7 +523,7 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd, { unsigned int sched_domain = khd->cur_domain; struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain]; - wait_queue_t *wait = &khd->domain_wait[sched_domain]; + wait_queue_entry_t *wait = &khd->domain_wait[sched_domain]; struct sbq_wait_state *ws; int nr; @@ -734,7 +734,7 @@ static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \ { \ struct blk_mq_hw_ctx *hctx = data; \ struct kyber_hctx_data *khd = hctx->sched_data; \ - wait_queue_t *wait = &khd->domain_wait[domain]; \ + wait_queue_entry_t *wait = &khd->domain_wait[domain]; \ \ seq_printf(m, "%d\n", !list_empty_careful(&wait->task_list)); \ return 0; \ diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c index c38cb5b91291..fe850f0567cb 100644 --- a/drivers/bluetooth/btmrvl_main.c +++ b/drivers/bluetooth/btmrvl_main.c @@ -602,7 +602,7 @@ static int btmrvl_service_main_thread(void *data) struct btmrvl_thread *thread = data; struct btmrvl_private *priv = thread->priv; struct btmrvl_adapter *adapter = priv->adapter; - wait_queue_t wait; + wait_queue_entry_t wait; struct sk_buff *skb; ulong flags; diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index d165af8abe36..a5c6cfe71a8e 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -821,7 +821,7 @@ static ssize_t ipmi_read(struct file *file, loff_t *ppos) { int rv = 0; - wait_queue_t wait; + wait_queue_entry_t wait; if (count <= 0) return 0; diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h index 129c58bb4805..a4a920c4c454 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.h +++ b/drivers/gpu/drm/i915/i915_gem_request.h @@ -123,7 +123,7 @@ struct drm_i915_gem_request { * It is used by the 
driver to then queue the request for execution. */ struct i915_sw_fence submit; - wait_queue_t submitq; + wait_queue_entry_t submitq; wait_queue_head_t execute; /* A list of everyone we wait upon, and everyone who waits upon us. diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index a277f8eb7beb..8669bfa33064 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -152,7 +152,7 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence, struct list_head *continuation) { wait_queue_head_t *x = &fence->wait; - wait_queue_t *pos, *next; + wait_queue_entry_t *pos, *next; unsigned long flags; debug_fence_deactivate(fence); @@ -254,7 +254,7 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence) __i915_sw_fence_commit(fence); } -static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key) +static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key) { list_del(&wq->task_list); __i915_sw_fence_complete(wq->private, key); @@ -267,7 +267,7 @@ static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void * static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence, const struct i915_sw_fence * const signaler) { - wait_queue_t *wq; + wait_queue_entry_t *wq; if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags)) return false; @@ -288,7 +288,7 @@ static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence, static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence) { - wait_queue_t *wq; + wait_queue_entry_t *wq; if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags)) return; @@ -320,7 +320,7 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence, static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, struct i915_sw_fence *signaler, - wait_queue_t *wq, gfp_t gfp) + wait_queue_entry_t *wq, gfp_t gfp) { unsigned long flags; int pending; @@ -359,7 +359,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, spin_lock_irqsave(&signaler->wait.lock, flags); if (likely(!i915_sw_fence_done(signaler))) { - __add_wait_queue_tail(&signaler->wait, wq); + __add_wait_queue_entry_tail(&signaler->wait, wq); pending = 1; } else { i915_sw_fence_wake(wq, 0, 0, NULL); @@ -372,7 +372,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, struct i915_sw_fence *signaler, - wait_queue_t *wq) + wait_queue_entry_t *wq) { return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0); } diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index d31cefbbcc04..fd3c3bf6c8b7 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h @@ -66,7 +66,7 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence); int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, struct i915_sw_fence *after, - wait_queue_t *wq); + wait_queue_entry_t *wq); int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence, struct i915_sw_fence *after, gfp_t gfp); diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index c1c8e2208a21..e562a78510ff 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -375,7 +375,7 @@ struct radeon_fence { unsigned ring; bool is_vm_update; - wait_queue_t fence_wake; + wait_queue_entry_t fence_wake; }; int 
radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring); diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index ef09f0a63754..e86f2bd38410 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -158,7 +158,7 @@ int radeon_fence_emit(struct radeon_device *rdev, * for the fence locking itself, so unlocked variants are used for * fence_signal, and remove_wait_queue. */ -static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key) +static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode, int flags, void *key) { struct radeon_fence *fence; u64 seq; diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 92f1452dad57..76875f6299b8 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -417,7 +417,7 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { struct vga_device *vgadev, *conflict; unsigned long flags; - wait_queue_t wait; + wait_queue_entry_t wait; int rc = 0; vga_check_first_use(); diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index a3f18a22f5ed..e0f47cc2effc 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ -1939,7 +1939,7 @@ static int i40iw_virtchnl_receive(struct i40e_info *ldev, bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev) { struct i40iw_device *iwdev; - wait_queue_t wait; + wait_queue_entry_t wait; iwdev = dev->back_dev; diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index 9b80417cd547..73da1f5626cb 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -207,7 +207,7 @@ void bkey_put(struct cache_set *c, struct bkey *k); struct btree_op { /* for waiting on btree reserve in btree_split() */ - wait_queue_t wait; + wait_queue_entry_t wait; /* Btree level at which we start taking write locks */ short lock; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h index bed9ef17bc26..7ccffbb0019e 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h @@ -144,7 +144,7 @@ static inline int sleep_cond(wait_queue_head_t *wait_queue, int *condition) { int errno = 0; - wait_queue_t we; + wait_queue_entry_t we; init_waitqueue_entry(&we, current); add_wait_queue(wait_queue, &we); @@ -171,7 +171,7 @@ sleep_timeout_cond(wait_queue_head_t *wait_queue, int *condition, int timeout) { - wait_queue_t we; + wait_queue_entry_t we; init_waitqueue_entry(&we, current); add_wait_queue(wait_queue, &we); diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 1b7e125a28e2..6a13303af2b7 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -3066,7 +3066,7 @@ static int airo_thread(void *data) { if (ai->jobs) { locked = down_interruptible(&ai->sem); } else { - wait_queue_t wait; + wait_queue_entry_t wait; init_waitqueue_entry(&wait, current); add_wait_queue(&ai->thr_wait, &wait); diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c index b2c6b065b542..ff153ce29539 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c @@ -2544,7 +2544,7 @@ static int prism2_ioctl_priv_prism2_param(struct net_device *dev, ret = -EINVAL; } if 
(local->iw_mode == IW_MODE_MASTER) { - wait_queue_t __wait; + wait_queue_entry_t __wait; init_waitqueue_entry(&__wait, current); add_wait_queue(&local->hostscan_wq, &__wait); set_current_state(TASK_INTERRUPTIBLE); diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c index e3500203715c..dde065d0d5c1 100644 --- a/drivers/net/wireless/marvell/libertas/main.c +++ b/drivers/net/wireless/marvell/libertas/main.c @@ -453,7 +453,7 @@ static int lbs_thread(void *data) { struct net_device *dev = data; struct lbs_private *priv = dev->ml_priv; - wait_queue_t wait; + wait_queue_entry_t wait; lbs_deb_enter(LBS_DEB_THREAD); diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h index bd9e31e16249..16fc380b5512 100644 --- a/drivers/scsi/dpt/dpti_i2o.h +++ b/drivers/scsi/dpt/dpti_i2o.h @@ -48,7 +48,7 @@ #include typedef wait_queue_head_t adpt_wait_queue_head_t; #define ADPT_DECLARE_WAIT_QUEUE_HEAD(wait) DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait) -typedef wait_queue_t adpt_wait_queue_t; +typedef wait_queue_entry_t adpt_wait_queue_entry_t; /* * message structures diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index 3419e1bcdff6..67621308eb9c 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c @@ -301,13 +301,13 @@ static uint32_t ips_statupd_copperhead_memio(ips_ha_t *); static uint32_t ips_statupd_morpheus(ips_ha_t *); static ips_scb_t *ips_getscb(ips_ha_t *); static void ips_putq_scb_head(ips_scb_queue_t *, ips_scb_t *); -static void ips_putq_wait_tail(ips_wait_queue_t *, struct scsi_cmnd *); +static void ips_putq_wait_tail(ips_wait_queue_entry_t *, struct scsi_cmnd *); static void ips_putq_copp_tail(ips_copp_queue_t *, ips_copp_wait_item_t *); static ips_scb_t *ips_removeq_scb_head(ips_scb_queue_t *); static ips_scb_t *ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *); -static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *); -static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_t *, +static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *); +static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *, struct scsi_cmnd *); static ips_copp_wait_item_t *ips_removeq_copp(ips_copp_queue_t *, ips_copp_wait_item_t *); @@ -2871,7 +2871,7 @@ ips_removeq_scb(ips_scb_queue_t * queue, ips_scb_t * item) /* ASSUMED to be called from within the HA lock */ /* */ /****************************************************************************/ -static void ips_putq_wait_tail(ips_wait_queue_t *queue, struct scsi_cmnd *item) +static void ips_putq_wait_tail(ips_wait_queue_entry_t *queue, struct scsi_cmnd *item) { METHOD_TRACE("ips_putq_wait_tail", 1); @@ -2902,7 +2902,7 @@ static void ips_putq_wait_tail(ips_wait_queue_t *queue, struct scsi_cmnd *item) /* ASSUMED to be called from within the HA lock */ /* */ /****************************************************************************/ -static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *queue) +static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *queue) { struct scsi_cmnd *item; @@ -2936,7 +2936,7 @@ static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *queue) /* ASSUMED to be called from within the HA lock */ /* */ /****************************************************************************/ -static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_t *queue, +static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *queue, struct scsi_cmnd *item) { struct scsi_cmnd *p; diff --git a/drivers/scsi/ips.h 
b/drivers/scsi/ips.h index b782bb60baf0..366be3b2f9b4 100644 --- a/drivers/scsi/ips.h +++ b/drivers/scsi/ips.h @@ -989,7 +989,7 @@ typedef struct ips_wait_queue { struct scsi_cmnd *head; struct scsi_cmnd *tail; int count; -} ips_wait_queue_t; +} ips_wait_queue_entry_t; typedef struct ips_copp_wait_item { struct scsi_cmnd *scsi_cmd; @@ -1035,7 +1035,7 @@ typedef struct ips_ha { ips_stat_t sp; /* Status packer pointer */ struct ips_scb *scbs; /* Array of all CCBS */ struct ips_scb *scb_freelist; /* SCB free list */ - ips_wait_queue_t scb_waitlist; /* Pending SCB list */ + ips_wait_queue_entry_t scb_waitlist; /* Pending SCB list */ ips_copp_queue_t copp_waitlist; /* Pending PT list */ ips_scb_queue_t scb_activelist; /* Active SCB list */ IPS_IO_CMD *dummy; /* dummy command */ diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c index 0db662d6abdd..85b242ec5f9b 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -3267,7 +3267,7 @@ int kiblnd_connd(void *arg) { spinlock_t *lock = &kiblnd_data.kib_connd_lock; - wait_queue_t wait; + wait_queue_entry_t wait; unsigned long flags; struct kib_conn *conn; int timeout; @@ -3521,7 +3521,7 @@ kiblnd_scheduler(void *arg) long id = (long)arg; struct kib_sched_info *sched; struct kib_conn *conn; - wait_queue_t wait; + wait_queue_entry_t wait; unsigned long flags; struct ib_wc wc; int did_something; @@ -3656,7 +3656,7 @@ kiblnd_failover_thread(void *arg) { rwlock_t *glock = &kiblnd_data.kib_global_lock; struct kib_dev *dev; - wait_queue_t wait; + wait_queue_entry_t wait; unsigned long flags; int rc; diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c index 3ed3b08c122c..6b38d5a8fe92 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c @@ -2166,7 +2166,7 @@ ksocknal_connd(void *arg) { spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock; struct ksock_connreq *cr; - wait_queue_t wait; + wait_queue_entry_t wait; int nloops = 0; int cons_retry = 0; @@ -2554,7 +2554,7 @@ ksocknal_check_peer_timeouts(int idx) int ksocknal_reaper(void *arg) { - wait_queue_t wait; + wait_queue_entry_t wait; struct ksock_conn *conn; struct ksock_sched *sched; struct list_head enomem_conns; diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c index c56e9922cd5b..49deb448b044 100644 --- a/drivers/staging/lustre/lnet/libcfs/debug.c +++ b/drivers/staging/lustre/lnet/libcfs/debug.c @@ -361,7 +361,7 @@ static int libcfs_debug_dumplog_thread(void *arg) void libcfs_debug_dumplog(void) { - wait_queue_t wait; + wait_queue_entry_t wait; struct task_struct *dumper; /* we're being careful to ensure that the kernel thread is diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c index 9599b7441feb..27082d2f7938 100644 --- a/drivers/staging/lustre/lnet/libcfs/tracefile.c +++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c @@ -990,7 +990,7 @@ static int tracefiled(void *arg) complete(&tctl->tctl_start); while (1) { - wait_queue_t __wait; + wait_queue_entry_t __wait; pc.pc_want_daemon_pages = 0; collect_pages(&pc); diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c index ce4b83584e17..9ebba4ef5f90 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-eq.c 
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c @@ -312,7 +312,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock) { int tms = *timeout_ms; int wait; - wait_queue_t wl; + wait_queue_entry_t wl; unsigned long now; if (!tms) diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c index 9fca8d225ee0..f075706bba6d 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-socket.c +++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c @@ -516,7 +516,7 @@ lnet_sock_listen(struct socket **sockp, __u32 local_ip, int local_port, int lnet_sock_accept(struct socket **newsockp, struct socket *sock) { - wait_queue_t wait; + wait_queue_entry_t wait; struct socket *newsock; int rc; diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c index 999f250ceed0..bf31bc200d27 100644 --- a/drivers/staging/lustre/lustre/fid/fid_request.c +++ b/drivers/staging/lustre/lustre/fid/fid_request.c @@ -192,7 +192,7 @@ static int seq_client_alloc_seq(const struct lu_env *env, } static int seq_fid_alloc_prep(struct lu_client_seq *seq, - wait_queue_t *link) + wait_queue_entry_t *link) { if (seq->lcs_update) { add_wait_queue(&seq->lcs_waitq, link); @@ -223,7 +223,7 @@ static void seq_fid_alloc_fini(struct lu_client_seq *seq) int seq_client_alloc_fid(const struct lu_env *env, struct lu_client_seq *seq, struct lu_fid *fid) { - wait_queue_t link; + wait_queue_entry_t link; int rc; LASSERT(seq); @@ -290,7 +290,7 @@ EXPORT_SYMBOL(seq_client_alloc_fid); */ void seq_client_flush(struct lu_client_seq *seq) { - wait_queue_t link; + wait_queue_entry_t link; LASSERT(seq); init_waitqueue_entry(&link, current); diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h index b04d613846ee..f24970da8323 100644 --- a/drivers/staging/lustre/lustre/include/lustre_lib.h +++ b/drivers/staging/lustre/lustre/include/lustre_lib.h @@ -201,7 +201,7 @@ struct l_wait_info { sigmask(SIGALRM)) /** - * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively + * wait_queue_entry_t of Linux (version < 2.6.34) is a FIFO list for exclusively * waiting threads, which is not always desirable because all threads will * be waken up again and again, even user only needs a few of them to be * active most time. 
This is not good for performance because cache can @@ -228,7 +228,7 @@ struct l_wait_info { */ #define __l_wait_event(wq, condition, info, ret, l_add_wait) \ do { \ - wait_queue_t __wait; \ + wait_queue_entry_t __wait; \ long __timeout = info->lwi_timeout; \ sigset_t __blocked; \ int __allow_intr = info->lwi_allow_intr; \ diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c index 8af611033e12..96515b839436 100644 --- a/drivers/staging/lustre/lustre/llite/lcommon_cl.c +++ b/drivers/staging/lustre/lustre/llite/lcommon_cl.c @@ -207,7 +207,7 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md) static void cl_object_put_last(struct lu_env *env, struct cl_object *obj) { struct lu_object_header *header = obj->co_lu.lo_header; - wait_queue_t waiter; + wait_queue_entry_t waiter; if (unlikely(atomic_read(&header->loh_ref) != 1)) { struct lu_site *site = obj->co_lu.lo_dev->ld_site; diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h index 391c632365ae..e889d3a7de9c 100644 --- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h +++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h @@ -370,7 +370,7 @@ struct lov_thread_info { struct ost_lvb lti_lvb; struct cl_2queue lti_cl2q; struct cl_page_list lti_plist; - wait_queue_t lti_waiter; + wait_queue_entry_t lti_waiter; struct cl_attr lti_attr; }; diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c index ab3ecfeeadc8..eddabbe31e5c 100644 --- a/drivers/staging/lustre/lustre/lov/lov_object.c +++ b/drivers/staging/lustre/lustre/lov/lov_object.c @@ -371,7 +371,7 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov, struct lov_layout_raid0 *r0; struct lu_site *site; struct lu_site_bkt_data *bkt; - wait_queue_t *waiter; + wait_queue_entry_t *waiter; r0 = &lov->u.raid0; LASSERT(r0->lo_sub[idx] == los); diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c index abcf951208d2..76ae600ae2c8 100644 --- a/drivers/staging/lustre/lustre/obdclass/lu_object.c +++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c @@ -556,7 +556,7 @@ EXPORT_SYMBOL(lu_object_print); static struct lu_object *htable_lookup(struct lu_site *s, struct cfs_hash_bd *bd, const struct lu_fid *f, - wait_queue_t *waiter, + wait_queue_entry_t *waiter, __u64 *version) { struct lu_site_bkt_data *bkt; @@ -670,7 +670,7 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env, struct lu_device *dev, const struct lu_fid *f, const struct lu_object_conf *conf, - wait_queue_t *waiter) + wait_queue_entry_t *waiter) { struct lu_object *o; struct lu_object *shadow; @@ -750,7 +750,7 @@ struct lu_object *lu_object_find_at(const struct lu_env *env, { struct lu_site_bkt_data *bkt; struct lu_object *obj; - wait_queue_t wait; + wait_queue_entry_t wait; while (1) { obj = lu_object_find_try(env, dev, f, conf, &wait); diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c index 31885f20fc15..cc047de72e2a 100644 --- a/drivers/tty/synclink_gt.c +++ b/drivers/tty/synclink_gt.c @@ -184,7 +184,7 @@ static void hdlcdev_exit(struct slgt_info *info); struct cond_wait { struct cond_wait *next; wait_queue_head_t q; - wait_queue_t wait; + wait_queue_entry_t wait; unsigned int data; }; static void init_cond_wait(struct cond_wait *w, unsigned int data); diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c 
index 27c89cd5d70b..4797217e5e72 100644 --- a/drivers/vfio/virqfd.c +++ b/drivers/vfio/virqfd.c @@ -43,7 +43,7 @@ static void virqfd_deactivate(struct virqfd *virqfd) queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown); } -static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key) +static int virqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) { struct virqfd *virqfd = container_of(wait, struct virqfd, wait); unsigned long flags = (unsigned long)key; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 042030e5a035..e4613a3c362d 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -165,7 +165,7 @@ static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh, add_wait_queue(wqh, &poll->wait); } -static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync, +static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) { struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait); diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index f55671d53f28..f72095868b93 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -31,7 +31,7 @@ struct vhost_work { struct vhost_poll { poll_table table; wait_queue_head_t *wqh; - wait_queue_t wait; + wait_queue_entry_t wait; struct vhost_work work; unsigned long mask; struct vhost_dev *dev; diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index beef981aa54f..974f5346458a 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -83,7 +83,7 @@ struct autofs_info { struct autofs_wait_queue { wait_queue_head_t queue; struct autofs_wait_queue *next; - autofs_wqt_t wait_queue_token; + autofs_wqt_t wait_queue_entry_token; /* We use the following to see what we are waiting for */ struct qstr name; u32 dev; diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 24a58bf9ca72..7071895b0678 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c @@ -104,7 +104,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi, size_t pktsz; pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n", - (unsigned long) wq->wait_queue_token, + (unsigned long) wq->wait_queue_entry_token, wq->name.len, wq->name.name, type); memset(&pkt, 0, sizeof(pkt)); /* For security reasons */ @@ -120,7 +120,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi, pktsz = sizeof(*mp); - mp->wait_queue_token = wq->wait_queue_token; + mp->wait_queue_entry_token = wq->wait_queue_entry_token; mp->len = wq->name.len; memcpy(mp->name, wq->name.name, wq->name.len); mp->name[wq->name.len] = '\0'; @@ -133,7 +133,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi, pktsz = sizeof(*ep); - ep->wait_queue_token = wq->wait_queue_token; + ep->wait_queue_entry_token = wq->wait_queue_entry_token; ep->len = wq->name.len; memcpy(ep->name, wq->name.name, wq->name.len); ep->name[wq->name.len] = '\0'; @@ -153,7 +153,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi, pktsz = sizeof(*packet); - packet->wait_queue_token = wq->wait_queue_token; + packet->wait_queue_entry_token = wq->wait_queue_entry_token; packet->len = wq->name.len; memcpy(packet->name, wq->name.name, wq->name.len); packet->name[wq->name.len] = '\0'; @@ -428,7 +428,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, return -ENOMEM; } - wq->wait_queue_token = autofs4_next_wait_queue; + wq->wait_queue_entry_token = autofs4_next_wait_queue; if (++autofs4_next_wait_queue == 0) autofs4_next_wait_queue = 1; wq->next = sbi->queues; @@ -461,7 +461,7 
@@ int autofs4_wait(struct autofs_sb_info *sbi, } pr_debug("new wait id = 0x%08lx, name = %.*s, nfy=%d\n", - (unsigned long) wq->wait_queue_token, wq->name.len, + (unsigned long) wq->wait_queue_entry_token, wq->name.len, wq->name.name, notify); /* @@ -471,7 +471,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, } else { wq->wait_ctr++; pr_debug("existing wait id = 0x%08lx, name = %.*s, nfy=%d\n", - (unsigned long) wq->wait_queue_token, wq->name.len, + (unsigned long) wq->wait_queue_entry_token, wq->name.len, wq->name.name, notify); mutex_unlock(&sbi->wq_mutex); kfree(qstr.name); @@ -550,13 +550,13 @@ int autofs4_wait(struct autofs_sb_info *sbi, } -int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_token, int status) +int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_entry_token, int status) { struct autofs_wait_queue *wq, **wql; mutex_lock(&sbi->wq_mutex); for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) { - if (wq->wait_queue_token == wait_queue_token) + if (wq->wait_queue_entry_token == wait_queue_entry_token) break; } diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index 9bf90bcc56ac..54a4fcd679ed 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -97,7 +97,7 @@ struct cachefiles_cache { * backing file read tracking */ struct cachefiles_one_read { - wait_queue_t monitor; /* link into monitored waitqueue */ + wait_queue_entry_t monitor; /* link into monitored waitqueue */ struct page *back_page; /* backing file page we're waiting for */ struct page *netfs_page; /* netfs page we're going to fill */ struct fscache_retrieval *op; /* retrieval op covering this */ diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index 41df8a27d7eb..3978b324cbca 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c @@ -204,7 +204,7 @@ wait_for_old_object: wait_queue_head_t *wq; signed long timeout = 60 * HZ; - wait_queue_t wait; + wait_queue_entry_t wait; bool requeue; /* if the object we're waiting for is queued for processing, diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index afbdc418966d..8be33b33b981 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c @@ -21,7 +21,7 @@ * - we use this to detect read completion of backing pages * - the caller holds the waitqueue lock */ -static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode, +static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode, int sync, void *_key) { struct cachefiles_one_read *monitor = diff --git a/fs/dax.c b/fs/dax.c index 2a6889b3585f..323ea481d4a8 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -84,7 +84,7 @@ struct exceptional_entry_key { }; struct wait_exceptional_entry_queue { - wait_queue_t wait; + wait_queue_entry_t wait; struct exceptional_entry_key key; }; @@ -108,7 +108,7 @@ static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping, return wait_table + hash; } -static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode, +static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode, int sync, void *keyp) { struct exceptional_entry_key *key = keyp; diff --git a/fs/eventfd.c b/fs/eventfd.c index 68b9fffcb2c8..9736df2ce89d 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -191,7 +191,7 @@ static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) * This is used to atomically remove a wait queue entry from the eventfd wait * queue head, and read/reset the counter value. 
*/ -int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait, +int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, __u64 *cnt) { unsigned long flags; diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 5420767c9b68..5ac1cba5ef72 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -244,7 +244,7 @@ struct eppoll_entry { * Wait queue item that will be linked to the target file wait * queue head. */ - wait_queue_t wait; + wait_queue_entry_t wait; /* The wait queue head that linked the "wait" wait queue item */ wait_queue_head_t *whead; @@ -347,13 +347,13 @@ static inline int ep_is_linked(struct list_head *p) return !list_empty(p); } -static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p) +static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_entry_t *p) { return container_of(p, struct eppoll_entry, wait); } /* Get the "struct epitem" from a wait queue pointer */ -static inline struct epitem *ep_item_from_wait(wait_queue_t *p) +static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p) { return container_of(p, struct eppoll_entry, wait)->base; } @@ -1078,7 +1078,7 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd) * mechanism. It is called by the stored file descriptors when they * have events to report. */ -static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key) +static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) { int pwake = 0; unsigned long flags; @@ -1699,7 +1699,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, int res = 0, eavail, timed_out = 0; unsigned long flags; u64 slack = 0; - wait_queue_t wait; + wait_queue_entry_t wait; ktime_t expires, *to = NULL; if (timeout > 0) { diff --git a/fs/fs_pin.c b/fs/fs_pin.c index 611b5408f6ec..7b447a245760 100644 --- a/fs/fs_pin.c +++ b/fs/fs_pin.c @@ -34,7 +34,7 @@ void pin_insert(struct fs_pin *pin, struct vfsmount *m) void pin_kill(struct fs_pin *p) { - wait_queue_t wait; + wait_queue_entry_t wait; if (!p) { rcu_read_unlock(); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index c08c46a3b8cd..be5a8f84e5bb 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -6372,7 +6372,7 @@ struct nfs4_lock_waiter { }; static int -nfs4_wake_lock_waiter(wait_queue_t *wait, unsigned int mode, int flags, void *key) +nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key) { int ret; struct cb_notify_lock_args *cbnl = key; @@ -6415,7 +6415,7 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) .inode = state->inode, .owner = &owner, .notified = false }; - wait_queue_t wait; + wait_queue_entry_t wait; /* Don't bother with waitqueue if we don't expect a callback */ if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags)) diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index febed1217b3f..775304e7f96f 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -2161,7 +2161,7 @@ void nilfs_flush_segment(struct super_block *sb, ino_t ino) } struct nilfs_segctor_wait_request { - wait_queue_t wq; + wait_queue_entry_t wq; __u32 seq; int err; atomic_t done; diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c index 83b506020718..9e37b7028ea4 100644 --- a/fs/orangefs/orangefs-bufmap.c +++ b/fs/orangefs/orangefs-bufmap.c @@ -47,7 +47,7 @@ static void run_down(struct slot_map *m) if (m->c != -1) { for (;;) { if (likely(list_empty(&wait.task_list))) - 
__add_wait_queue_tail(&m->q, &wait); + __add_wait_queue_entry_tail(&m->q, &wait); set_current_state(TASK_UNINTERRUPTIBLE); if (m->c == -1) @@ -85,7 +85,7 @@ static int wait_for_free(struct slot_map *m) do { long n = left, t; if (likely(list_empty(&wait.task_list))) - __add_wait_queue_tail_exclusive(&m->q, &wait); + __add_wait_queue_entry_tail_exclusive(&m->q, &wait); set_current_state(TASK_INTERRUPTIBLE); if (m->c > 0) diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 39bb1e838d8d..a11d773e5ff3 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -2956,7 +2956,7 @@ void reiserfs_wait_on_write_block(struct super_block *s) static void queue_log_writer(struct super_block *s) { - wait_queue_t wait; + wait_queue_entry_t wait; struct reiserfs_journal *journal = SB_JOURNAL(s); set_bit(J_WRITERS_QUEUED, &journal->j_state); diff --git a/fs/select.c b/fs/select.c index d6c652a31e99..5b524a977d91 100644 --- a/fs/select.c +++ b/fs/select.c @@ -180,7 +180,7 @@ static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p) return table->entry++; } -static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) +static int __pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) { struct poll_wqueues *pwq = wait->private; DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task); @@ -206,7 +206,7 @@ static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) return default_wake_function(&dummy_wait, mode, sync, key); } -static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) +static int pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) { struct poll_table_entry *entry; diff --git a/fs/signalfd.c b/fs/signalfd.c index 7e3d71109f51..593b022ac11b 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -43,7 +43,7 @@ void signalfd_cleanup(struct sighand_struct *sighand) if (likely(!waitqueue_active(wqh))) return; - /* wait_queue_t->func(POLLFREE) should do remove_wait_queue() */ + /* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */ wake_up_poll(wqh, POLLHUP | POLLFREE); } diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 1d622f276e3a..bda64fcd8a0c 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -81,7 +81,7 @@ struct userfaultfd_unmap_ctx { struct userfaultfd_wait_queue { struct uffd_msg msg; - wait_queue_t wq; + wait_queue_entry_t wq; struct userfaultfd_ctx *ctx; bool waken; }; @@ -91,7 +91,7 @@ struct userfaultfd_wake_range { unsigned long len; }; -static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode, +static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode, int wake_flags, void *key) { struct userfaultfd_wake_range *range = key; @@ -860,7 +860,7 @@ wakeup: static inline struct userfaultfd_wait_queue *find_userfault_in( wait_queue_head_t *wqh) { - wait_queue_t *wq; + wait_queue_entry_t *wq; struct userfaultfd_wait_queue *uwq; VM_BUG_ON(!spin_is_locked(&wqh->lock)); @@ -1747,7 +1747,7 @@ static long userfaultfd_ioctl(struct file *file, unsigned cmd, static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f) { struct userfaultfd_ctx *ctx = f->private_data; - wait_queue_t *wq; + wait_queue_entry_t *wq; struct userfaultfd_wait_queue *uwq; unsigned long pending = 0, total = 0; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index fcd641032f8d..95ba83806c5d 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -33,7 +33,7 @@ struct blk_mq_hw_ctx { struct blk_mq_ctx **ctxs; unsigned int nr_ctx; - 
wait_queue_t dispatch_wait; + wait_queue_entry_t dispatch_wait; atomic_t wait_index; struct blk_mq_tags *tags; diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index ff0b981f078e..9e4befd95bc7 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -37,7 +37,7 @@ struct eventfd_ctx *eventfd_ctx_fdget(int fd); struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n); ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt); -int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait, +int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, __u64 *cnt); #else /* CONFIG_EVENTFD */ @@ -73,7 +73,7 @@ static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, } static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, - wait_queue_t *wait, __u64 *cnt) + wait_queue_entry_t *wait, __u64 *cnt) { return -ENOSYS; } diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h index 0c1de05098c8..76c2fbc59f35 100644 --- a/include/linux/kvm_irqfd.h +++ b/include/linux/kvm_irqfd.h @@ -46,7 +46,7 @@ struct kvm_kernel_irqfd_resampler { struct kvm_kernel_irqfd { /* Used for MSI fast-path */ struct kvm *kvm; - wait_queue_t wait; + wait_queue_entry_t wait; /* Update side is protected by irqfds.lock */ struct kvm_kernel_irq_routing_entry irq_entry; seqcount_t irq_entry_sc; diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 316a19f6b635..e7bbd9d4dc6c 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -524,7 +524,7 @@ void page_endio(struct page *page, bool is_write, int err); /* * Add an arbitrary waiter to a page's wait queue */ -extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter); +extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter); /* * Fault everything in given userspace address range in. 
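
(Illustrative aside, not part of the patch.) Most hunks in this series touch callback-style wait queue entries: eventfd, kvm irqfd, epoll and the cachefiles monitor all embed an entry whose ->func is invoked from wake_up() rather than waking a sleeping task. Below is a minimal sketch of that pattern with the renamed type; the demo_* names are invented for illustration, while init_waitqueue_func_entry() and add_wait_queue() are the helpers visible in the include/linux/wait.h hunks further down.

#include <linux/kernel.h>
#include <linux/wait.h>

struct demo_waiter {
        wait_queue_entry_t wait;        /* was: wait_queue_t wait; */
        int fired;
};

/* Called from wake_up() on whichever head this entry is queued on. */
static int demo_wake_fn(wait_queue_entry_t *wait, unsigned mode,
                        int sync, void *key)
{
        struct demo_waiter *w = container_of(wait, struct demo_waiter, wait);

        w->fired = 1;
        return 0;       /* non-exclusive: let other waiters be scanned too */
}

static void demo_arm(wait_queue_head_t *head, struct demo_waiter *w)
{
        w->fired = 0;
        init_waitqueue_func_entry(&w->wait, demo_wake_fn);
        add_wait_queue(head, &w->wait);
}

The rename leaves such code otherwise untouched, which is why the remaining hunks are almost entirely mechanical type substitutions.
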
diff --git a/include/linux/poll.h b/include/linux/poll.h index 75ffc5729e4c..2889f09a1c60 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -75,7 +75,7 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) struct poll_table_entry { struct file *filp; unsigned long key; - wait_queue_t wait; + wait_queue_entry_t wait; wait_queue_head_t *wait_address; }; diff --git a/include/linux/vfio.h b/include/linux/vfio.h index edf9b2cad277..f57076b958b7 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -183,7 +183,7 @@ struct virqfd { void (*thread)(void *, void *); void *data; struct work_struct inject; - wait_queue_t wait; + wait_queue_entry_t wait; poll_table pt; struct work_struct shutdown; struct virqfd **pvirqfd; diff --git a/include/linux/wait.h b/include/linux/wait.h index db076ca7f11d..5889f0c86ff7 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -10,15 +10,18 @@ #include #include -typedef struct __wait_queue wait_queue_t; -typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); -int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key); +typedef struct wait_queue_entry wait_queue_entry_t; +typedef int (*wait_queue_func_t)(wait_queue_entry_t *wait, unsigned mode, int flags, void *key); +int default_wake_function(wait_queue_entry_t *wait, unsigned mode, int flags, void *key); -/* __wait_queue::flags */ +/* wait_queue_entry::flags */ #define WQ_FLAG_EXCLUSIVE 0x01 #define WQ_FLAG_WOKEN 0x02 -struct __wait_queue { +/* + * A single wait-queue entry structure: + */ +struct wait_queue_entry { unsigned int flags; void *private; wait_queue_func_t func; @@ -34,7 +37,7 @@ struct wait_bit_key { struct wait_bit_queue { struct wait_bit_key key; - wait_queue_t wait; + wait_queue_entry_t wait; }; struct __wait_queue_head { @@ -55,7 +58,7 @@ struct task_struct; .task_list = { NULL, NULL } } #define DECLARE_WAITQUEUE(name, tsk) \ - wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk) + wait_queue_entry_t name = __WAITQUEUE_INITIALIZER(name, tsk) #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ @@ -88,7 +91,7 @@ extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name) #endif -static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p) +static inline void init_waitqueue_entry(wait_queue_entry_t *q, struct task_struct *p) { q->flags = 0; q->private = p; @@ -96,7 +99,7 @@ static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p) } static inline void -init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func) +init_waitqueue_func_entry(wait_queue_entry_t *q, wait_queue_func_t func) { q->flags = 0; q->private = NULL; @@ -159,11 +162,11 @@ static inline bool wq_has_sleeper(wait_queue_head_t *wq) return waitqueue_active(wq); } -extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); -extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait); -extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); +extern void add_wait_queue(wait_queue_head_t *q, wait_queue_entry_t *wait); +extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait); +extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_entry_t *wait); -static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new) +static inline void 
__add_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *new) { list_add(&new->task_list, &head->task_list); } @@ -172,27 +175,27 @@ static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new) * Used for wake-one threads: */ static inline void -__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait) +__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait) { wait->flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue(q, wait); } -static inline void __add_wait_queue_tail(wait_queue_head_t *head, - wait_queue_t *new) +static inline void __add_wait_queue_entry_tail(wait_queue_head_t *head, + wait_queue_entry_t *new) { list_add_tail(&new->task_list, &head->task_list); } static inline void -__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait) +__add_wait_queue_entry_tail_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait) { wait->flags |= WQ_FLAG_EXCLUSIVE; - __add_wait_queue_tail(q, wait); + __add_wait_queue_entry_tail(q, wait); } static inline void -__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old) +__remove_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *old) { list_del(&old->task_list); } @@ -249,7 +252,7 @@ wait_queue_head_t *bit_waitqueue(void *, int); (!__builtin_constant_p(state) || \ state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \ -extern void init_wait_entry(wait_queue_t *__wait, int flags); +extern void init_wait_entry(wait_queue_entry_t *__wait, int flags); /* * The below macro ___wait_event() has an explicit shadow of the __ret @@ -266,7 +269,7 @@ extern void init_wait_entry(wait_queue_t *__wait, int flags); #define ___wait_event(wq, condition, state, exclusive, ret, cmd) \ ({ \ __label__ __out; \ - wait_queue_t __wait; \ + wait_queue_entry_t __wait; \ long __ret = ret; /* explicit shadow */ \ \ init_wait_entry(&__wait, exclusive ? 
WQ_FLAG_EXCLUSIVE : 0); \ @@ -620,8 +623,8 @@ do { \ __ret; \ }) -extern int do_wait_intr(wait_queue_head_t *, wait_queue_t *); -extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *); +extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *); +extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *); #define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \ ({ \ @@ -967,17 +970,17 @@ do { \ /* * Waitqueues which are removed from the waitqueue_head at wakeup time */ -void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state); -void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state); -long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state); -void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); -long wait_woken(wait_queue_t *wait, unsigned mode, long timeout); -int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); -int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); -int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); +void prepare_to_wait(wait_queue_head_t *q, wait_queue_entry_t *wait, int state); +void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait, int state); +long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_entry_t *wait, int state); +void finish_wait(wait_queue_head_t *q, wait_queue_entry_t *wait); +long wait_woken(wait_queue_entry_t *wait, unsigned mode, long timeout); +int woken_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key); +int autoremove_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key); +int wake_bit_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key); #define DEFINE_WAIT_FUNC(name, function) \ - wait_queue_t name = { \ + wait_queue_entry_t name = { \ .private = current, \ .func = function, \ .task_list = LIST_HEAD_INIT((name).task_list), \ diff --git a/include/net/af_unix.h b/include/net/af_unix.h index fd60eccb59a6..75e612a45824 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -62,7 +62,7 @@ struct unix_sock { #define UNIX_GC_CANDIDATE 0 #define UNIX_GC_MAYBE_CYCLE 1 struct socket_wq peer_wq; - wait_queue_t peer_wake; + wait_queue_entry_t peer_wake; }; static inline struct unix_sock *unix_sk(const struct sock *sk) diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h index aa63451ef20a..1953f8d6063b 100644 --- a/include/uapi/linux/auto_fs.h +++ b/include/uapi/linux/auto_fs.h @@ -26,7 +26,7 @@ #define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION /* - * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed + * The wait_queue_entry_token (autofs_wqt_t) is part of a structure which is passed * back to the kernel via ioctl from userspace. 
On architectures where 32- and * 64-bit userspace binaries can be executed it's important that the size of * autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we @@ -49,7 +49,7 @@ struct autofs_packet_hdr { struct autofs_packet_missing { struct autofs_packet_hdr hdr; - autofs_wqt_t wait_queue_token; + autofs_wqt_t wait_queue_entry_token; int len; char name[NAME_MAX+1]; }; diff --git a/include/uapi/linux/auto_fs4.h b/include/uapi/linux/auto_fs4.h index 7c6da423d54e..65b72d0222e7 100644 --- a/include/uapi/linux/auto_fs4.h +++ b/include/uapi/linux/auto_fs4.h @@ -108,7 +108,7 @@ enum autofs_notify { /* v4 multi expire (via pipe) */ struct autofs_packet_expire_multi { struct autofs_packet_hdr hdr; - autofs_wqt_t wait_queue_token; + autofs_wqt_t wait_queue_entry_token; int len; char name[NAME_MAX+1]; }; @@ -123,7 +123,7 @@ union autofs_packet_union { /* autofs v5 common packet struct */ struct autofs_v5_packet { struct autofs_packet_hdr hdr; - autofs_wqt_t wait_queue_token; + autofs_wqt_t wait_queue_entry_token; __u32 dev; __u64 ino; __u32 uid; diff --git a/kernel/exit.c b/kernel/exit.c index 516acdb0e0ec..7d694437ab44 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1004,7 +1004,7 @@ struct wait_opts { int __user *wo_stat; struct rusage __user *wo_rusage; - wait_queue_t child_wait; + wait_queue_entry_t child_wait; int notask_error; }; @@ -1541,7 +1541,7 @@ static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk) return 0; } -static int child_wait_callback(wait_queue_t *wait, unsigned mode, +static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) { struct wait_opts *wo = container_of(wait, struct wait_opts, diff --git a/kernel/futex.c b/kernel/futex.c index 357348a6cf6b..d6cf71d08f21 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -225,7 +225,7 @@ struct futex_pi_state { * @requeue_pi_key: the requeue_pi target futex key * @bitset: bitset for the optional bitmasked wakeup * - * We use this hashed waitqueue, instead of a normal wait_queue_t, so + * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so * we can wake only the relevant ones (hashed queues may be shared). * * A futex_q has a woken state, just like tasks have TASK_RUNNING. 
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c index 53f9558fa925..13fc5ae9bf2f 100644 --- a/kernel/sched/completion.c +++ b/kernel/sched/completion.c @@ -66,7 +66,7 @@ do_wait_for_common(struct completion *x, if (!x->done) { DECLARE_WAITQUEUE(wait, current); - __add_wait_queue_tail_exclusive(&x->wait, &wait); + __add_wait_queue_entry_tail_exclusive(&x->wait, &wait); do { if (signal_pending_state(state, current)) { timeout = -ERESTARTSYS; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 326d4f88e2b1..5b36644536ab 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3687,7 +3687,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void) exception_exit(prev_state); } -int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, +int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, void *key) { return try_to_wake_up(curr->private, mode, wake_flags); diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index b8c84c6dee64..301ea02dede0 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -21,7 +21,7 @@ void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_c EXPORT_SYMBOL(__init_waitqueue_head); -void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait) +void add_wait_queue(wait_queue_head_t *q, wait_queue_entry_t *wait) { unsigned long flags; @@ -32,18 +32,18 @@ void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait) } EXPORT_SYMBOL(add_wait_queue); -void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait) +void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait) { unsigned long flags; wait->flags |= WQ_FLAG_EXCLUSIVE; spin_lock_irqsave(&q->lock, flags); - __add_wait_queue_tail(q, wait); + __add_wait_queue_entry_tail(q, wait); spin_unlock_irqrestore(&q->lock, flags); } EXPORT_SYMBOL(add_wait_queue_exclusive); -void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait) +void remove_wait_queue(wait_queue_head_t *q, wait_queue_entry_t *wait) { unsigned long flags; @@ -66,7 +66,7 @@ EXPORT_SYMBOL(remove_wait_queue); static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int wake_flags, void *key) { - wait_queue_t *curr, *next; + wait_queue_entry_t *curr, *next; list_for_each_entry_safe(curr, next, &q->task_list, task_list) { unsigned flags = curr->flags; @@ -170,7 +170,7 @@ EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ * loads to move into the critical region). 
*/ void -prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state) +prepare_to_wait(wait_queue_head_t *q, wait_queue_entry_t *wait, int state) { unsigned long flags; @@ -184,20 +184,20 @@ prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state) EXPORT_SYMBOL(prepare_to_wait); void -prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state) +prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait, int state) { unsigned long flags; wait->flags |= WQ_FLAG_EXCLUSIVE; spin_lock_irqsave(&q->lock, flags); if (list_empty(&wait->task_list)) - __add_wait_queue_tail(q, wait); + __add_wait_queue_entry_tail(q, wait); set_current_state(state); spin_unlock_irqrestore(&q->lock, flags); } EXPORT_SYMBOL(prepare_to_wait_exclusive); -void init_wait_entry(wait_queue_t *wait, int flags) +void init_wait_entry(wait_queue_entry_t *wait, int flags) { wait->flags = flags; wait->private = current; @@ -206,7 +206,7 @@ void init_wait_entry(wait_queue_t *wait, int flags) } EXPORT_SYMBOL(init_wait_entry); -long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state) +long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_entry_t *wait, int state) { unsigned long flags; long ret = 0; @@ -230,7 +230,7 @@ long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state) } else { if (list_empty(&wait->task_list)) { if (wait->flags & WQ_FLAG_EXCLUSIVE) - __add_wait_queue_tail(q, wait); + __add_wait_queue_entry_tail(q, wait); else __add_wait_queue(q, wait); } @@ -249,10 +249,10 @@ EXPORT_SYMBOL(prepare_to_wait_event); * condition in the caller before they add the wait * entry to the wake queue. */ -int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait) +int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait) { if (likely(list_empty(&wait->task_list))) - __add_wait_queue_tail(wq, wait); + __add_wait_queue_entry_tail(wq, wait); set_current_state(TASK_INTERRUPTIBLE); if (signal_pending(current)) @@ -265,10 +265,10 @@ int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait) } EXPORT_SYMBOL(do_wait_intr); -int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_t *wait) +int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait) { if (likely(list_empty(&wait->task_list))) - __add_wait_queue_tail(wq, wait); + __add_wait_queue_entry_tail(wq, wait); set_current_state(TASK_INTERRUPTIBLE); if (signal_pending(current)) @@ -290,7 +290,7 @@ EXPORT_SYMBOL(do_wait_intr_irq); * the wait descriptor from the given waitqueue if still * queued. 
*/ -void finish_wait(wait_queue_head_t *q, wait_queue_t *wait) +void finish_wait(wait_queue_head_t *q, wait_queue_entry_t *wait) { unsigned long flags; @@ -316,7 +316,7 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait) } EXPORT_SYMBOL(finish_wait); -int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) +int autoremove_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) { int ret = default_wake_function(wait, mode, sync, key); @@ -351,7 +351,7 @@ static inline bool is_kthread_should_stop(void) * remove_wait_queue(&wq, &wait); * */ -long wait_woken(wait_queue_t *wait, unsigned mode, long timeout) +long wait_woken(wait_queue_entry_t *wait, unsigned mode, long timeout) { set_current_state(mode); /* A */ /* @@ -375,7 +375,7 @@ long wait_woken(wait_queue_t *wait, unsigned mode, long timeout) } EXPORT_SYMBOL(wait_woken); -int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) +int woken_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) { /* * Although this function is called under waitqueue lock, LOCK @@ -391,7 +391,7 @@ int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) } EXPORT_SYMBOL(woken_wake_function); -int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg) +int wake_bit_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg) { struct wait_bit_key *key = arg; struct wait_bit_queue *wait_bit @@ -534,7 +534,7 @@ static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p) return bit_waitqueue(p, 0); } -static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync, +static int wake_atomic_t_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg) { struct wait_bit_key *key = arg; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index c74bf39ef764..a86688fabc55 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2864,11 +2864,11 @@ bool flush_work(struct work_struct *work) EXPORT_SYMBOL_GPL(flush_work); struct cwt_wait { - wait_queue_t wait; + wait_queue_entry_t wait; struct work_struct *work; }; -static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key) +static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) { struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); diff --git a/mm/filemap.c b/mm/filemap.c index 6f1be573a5e6..80c19ee81e95 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -768,10 +768,10 @@ struct wait_page_key { struct wait_page_queue { struct page *page; int bit_nr; - wait_queue_t wait; + wait_queue_entry_t wait; }; -static int wake_page_function(wait_queue_t *wait, unsigned mode, int sync, void *arg) +static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg) { struct wait_page_key *key = arg; struct wait_page_queue *wait_page @@ -834,7 +834,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q, struct page *page, int bit_nr, int state, bool lock) { struct wait_page_queue wait_page; - wait_queue_t *wait = &wait_page.wait; + wait_queue_entry_t *wait = &wait_page.wait; int ret = 0; init_wait(wait); @@ -847,7 +847,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q, if (likely(list_empty(&wait->task_list))) { if (lock) - __add_wait_queue_tail_exclusive(q, wait); + __add_wait_queue_entry_tail_exclusive(q, wait); else __add_wait_queue(q, wait); SetPageWaiters(page); @@ -907,7 +907,7 @@ int wait_on_page_bit_killable(struct page *page, 
int bit_nr) * * Add an arbitrary @waiter to the wait queue for the nominated @page. */ -void add_page_wait_queue(struct page *page, wait_queue_t *waiter) +void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter) { wait_queue_head_t *q = page_waitqueue(page); unsigned long flags; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 94172089f52f..9a90b096dc6b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -170,7 +170,7 @@ struct mem_cgroup_event { */ poll_table pt; wait_queue_head_t *wqh; - wait_queue_t wait; + wait_queue_entry_t wait; struct work_struct remove; }; @@ -1479,10 +1479,10 @@ static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); struct oom_wait_info { struct mem_cgroup *memcg; - wait_queue_t wait; + wait_queue_entry_t wait; }; -static int memcg_oom_wake_function(wait_queue_t *wait, +static int memcg_oom_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg) { struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; @@ -3725,7 +3725,7 @@ static void memcg_event_remove(struct work_struct *work) * * Called with wqh->lock held and interrupts disabled. */ -static int memcg_event_wake(wait_queue_t *wait, unsigned mode, +static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) { struct mem_cgroup_event *event = diff --git a/mm/mempool.c b/mm/mempool.c index 47a659dedd44..1c0294858527 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -312,7 +312,7 @@ void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) { void *element; unsigned long flags; - wait_queue_t wait; + wait_queue_entry_t wait; gfp_t gfp_temp; VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO); diff --git a/mm/shmem.c b/mm/shmem.c index e67d6ba4e98e..a6c7dece4660 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1902,7 +1902,7 @@ unlock: * entry unconditionally - even if something else had already woken the * target. 
*/ -static int synchronous_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) +static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) { int ret = default_wake_function(wait, mode, sync, key); list_del_init(&wait->task_list); diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 7bc2208b6cc4..dca3cdd1a014 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -95,7 +95,7 @@ enum { struct p9_poll_wait { struct p9_conn *conn; - wait_queue_t wait; + wait_queue_entry_t wait; wait_queue_head_t *wait_addr; }; @@ -522,7 +522,7 @@ error: clear_bit(Wworksched, &m->wsched); } -static int p9_pollwake(wait_queue_t *wait, unsigned int mode, int sync, void *key) +static int p9_pollwake(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key) { struct p9_poll_wait *pwait = container_of(wait, struct p9_poll_wait, wait); diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index fbf251fef70f..5c4808b3da2d 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c @@ -484,7 +484,7 @@ static int bnep_session(void *arg) struct net_device *dev = s->dev; struct sock *sk = s->sock->sk; struct sk_buff *skb; - wait_queue_t wait; + wait_queue_entry_t wait; BT_DBG(""); diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index 9e59b6654126..14f7c8135c31 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c @@ -280,7 +280,7 @@ static int cmtp_session(void *arg) struct cmtp_session *session = arg; struct sock *sk = session->sock->sk; struct sk_buff *skb; - wait_queue_t wait; + wait_queue_entry_t wait; BT_DBG("session %p", session); diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 0bec4588c3c8..fc31161e98f2 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -1244,7 +1244,7 @@ static void hidp_session_run(struct hidp_session *session) static int hidp_session_thread(void *arg) { struct hidp_session *session = arg; - wait_queue_t ctrl_wait, intr_wait; + wait_queue_entry_t ctrl_wait, intr_wait; BT_DBG("session %p", session); diff --git a/net/core/datagram.c b/net/core/datagram.c index db1866f2ffcf..34678828e2bb 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -68,7 +68,7 @@ static inline int connection_based(struct sock *sk) return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM; } -static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int sync, +static int receiver_wake_function(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key) { unsigned long bits = (unsigned long)key; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 1a0c961f4ffe..c77ced0109b7 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -343,7 +343,7 @@ found: * are still connected to it and there's no way to inform "a polling * implementation" that it should let go of a certain wait queue * - * In order to propagate a wake up, a wait_queue_t of the client + * In order to propagate a wake up, a wait_queue_entry_t of the client * socket is enqueued on the peer_wait queue of the server socket * whose wake function does a wake_up on the ordinary client socket * wait queue. This connection is established whenever a write (or @@ -352,7 +352,7 @@ found: * was relayed. 
*/ -static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags, +static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags, void *key) { struct unix_sock *u; diff --git a/sound/core/control.c b/sound/core/control.c index c109b82eef4b..6362da17ac3f 100644 --- a/sound/core/control.c +++ b/sound/core/control.c @@ -1577,7 +1577,7 @@ static ssize_t snd_ctl_read(struct file *file, char __user *buffer, struct snd_ctl_event ev; struct snd_kctl_event *kev; while (list_empty(&ctl->events)) { - wait_queue_t wait; + wait_queue_entry_t wait; if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { err = -EAGAIN; goto __end_lock; diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c index 9602a7e38d8a..a73baa1242be 100644 --- a/sound/core/hwdep.c +++ b/sound/core/hwdep.c @@ -85,7 +85,7 @@ static int snd_hwdep_open(struct inode *inode, struct file * file) int major = imajor(inode); struct snd_hwdep *hw; int err; - wait_queue_t wait; + wait_queue_entry_t wait; if (major == snd_major) { hw = snd_lookup_minor_data(iminor(inode), diff --git a/sound/core/init.c b/sound/core/init.c index 6bda8436d765..d61d2b3cd521 100644 --- a/sound/core/init.c +++ b/sound/core/init.c @@ -989,7 +989,7 @@ EXPORT_SYMBOL(snd_card_file_remove); */ int snd_power_wait(struct snd_card *card, unsigned int power_state) { - wait_queue_t wait; + wait_queue_entry_t wait; int result = 0; /* fastpath */ diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index 36baf962f9b0..cd8b7bef8d06 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -1554,7 +1554,7 @@ static int snd_pcm_oss_sync1(struct snd_pcm_substream *substream, size_t size) ssize_t result = 0; snd_pcm_state_t state; long res; - wait_queue_t wait; + wait_queue_entry_t wait; runtime = substream->runtime; init_waitqueue_entry(&wait, current); @@ -2387,7 +2387,7 @@ static int snd_pcm_oss_open(struct inode *inode, struct file *file) struct snd_pcm_oss_file *pcm_oss_file; struct snd_pcm_oss_setup setup[2]; int nonblock; - wait_queue_t wait; + wait_queue_entry_t wait; err = nonseekable_open(inode, file); if (err < 0) diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 5088d4b8db22..dd5254077ef7 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -1904,7 +1904,7 @@ static int wait_for_avail(struct snd_pcm_substream *substream, { struct snd_pcm_runtime *runtime = substream->runtime; int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; - wait_queue_t wait; + wait_queue_entry_t wait; int err = 0; snd_pcm_uframes_t avail = 0; long wait_time, tout; diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 13dec5ec93f2..faa2e2be6f2e 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -1652,7 +1652,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream, struct snd_card *card; struct snd_pcm_runtime *runtime; struct snd_pcm_substream *s; - wait_queue_t wait; + wait_queue_entry_t wait; int result = 0; int nonblock = 0; @@ -2353,7 +2353,7 @@ static int snd_pcm_capture_open(struct inode *inode, struct file *file) static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream) { int err; - wait_queue_t wait; + wait_queue_entry_t wait; if (pcm == NULL) { err = -ENODEV; diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index ab890336175f..32588ad05653 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c @@ -368,7 +368,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file) int err; struct snd_rawmidi 
*rmidi; struct snd_rawmidi_file *rawmidi_file = NULL; - wait_queue_t wait; + wait_queue_entry_t wait; if ((file->f_flags & O_APPEND) && !(file->f_flags & O_NONBLOCK)) return -EINVAL; /* invalid combination */ @@ -1002,7 +1002,7 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun while (count > 0) { spin_lock_irq(&runtime->lock); while (!snd_rawmidi_ready(substream)) { - wait_queue_t wait; + wait_queue_entry_t wait; if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { spin_unlock_irq(&runtime->lock); return result > 0 ? result : -EAGAIN; @@ -1306,7 +1306,7 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf, while (count > 0) { spin_lock_irq(&runtime->lock); while (!snd_rawmidi_ready_append(substream, count)) { - wait_queue_t wait; + wait_queue_entry_t wait; if (file->f_flags & O_NONBLOCK) { spin_unlock_irq(&runtime->lock); return result > 0 ? result : -EAGAIN; @@ -1338,7 +1338,7 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf, if (file->f_flags & O_DSYNC) { spin_lock_irq(&runtime->lock); while (runtime->avail != runtime->buffer_size) { - wait_queue_t wait; + wait_queue_entry_t wait; unsigned int last_avail = runtime->avail; init_waitqueue_entry(&wait, current); add_wait_queue(&runtime->sleep, &wait); diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c index 01c4cfe30c9f..a8c2822e0198 100644 --- a/sound/core/seq/seq_fifo.c +++ b/sound/core/seq/seq_fifo.c @@ -179,7 +179,7 @@ int snd_seq_fifo_cell_out(struct snd_seq_fifo *f, { struct snd_seq_event_cell *cell; unsigned long flags; - wait_queue_t wait; + wait_queue_entry_t wait; if (snd_BUG_ON(!f)) return -EINVAL; diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c index d4c61ec9be13..d6e9aacdc36b 100644 --- a/sound/core/seq/seq_memory.c +++ b/sound/core/seq/seq_memory.c @@ -227,7 +227,7 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool, struct snd_seq_event_cell *cell; unsigned long flags; int err = -EAGAIN; - wait_queue_t wait; + wait_queue_entry_t wait; if (pool == NULL) return -EINVAL; diff --git a/sound/core/timer.c b/sound/core/timer.c index cd67d1c12cf1..884c3066b028 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -1964,7 +1964,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, spin_lock_irq(&tu->qlock); while ((long)count - result >= unit) { while (!tu->qused) { - wait_queue_t wait; + wait_queue_entry_t wait; if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { err = -EAGAIN; diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c index 4dae9ff9ef5a..0b1e4b34b299 100644 --- a/sound/isa/wavefront/wavefront_synth.c +++ b/sound/isa/wavefront/wavefront_synth.c @@ -1782,7 +1782,7 @@ wavefront_should_cause_interrupt (snd_wavefront_t *dev, int val, int port, unsigned long timeout) { - wait_queue_t wait; + wait_queue_entry_t wait; init_waitqueue_entry(&wait, current); spin_lock_irq(&dev->irq_lock); diff --git a/sound/pci/mixart/mixart_core.c b/sound/pci/mixart/mixart_core.c index dccf3db48fe0..8bf2ce32d4a8 100644 --- a/sound/pci/mixart/mixart_core.c +++ b/sound/pci/mixart/mixart_core.c @@ -239,7 +239,7 @@ int snd_mixart_send_msg(struct mixart_mgr *mgr, struct mixart_msg *request, int struct mixart_msg resp; u32 msg_frame = 0; /* set to 0, so it's no notification to wait for, but the answer */ int err; - wait_queue_t wait; + wait_queue_entry_t wait; long timeout; init_waitqueue_entry(&wait, current); @@ -284,7 +284,7 @@ int 
snd_mixart_send_msg_wait_notif(struct mixart_mgr *mgr, struct mixart_msg *request, u32 notif_event) { int err; - wait_queue_t wait; + wait_queue_entry_t wait; long timeout; if (snd_BUG_ON(!notif_event)) diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c index fe4ba463b57c..1114166c685c 100644 --- a/sound/pci/ymfpci/ymfpci_main.c +++ b/sound/pci/ymfpci/ymfpci_main.c @@ -781,7 +781,7 @@ static snd_pcm_uframes_t snd_ymfpci_capture_pointer(struct snd_pcm_substream *su static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip) { - wait_queue_t wait; + wait_queue_entry_t wait; int loops = 4; while (loops-- > 0) { diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index a8d540398bbd..9120edf3c94b 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -184,7 +184,7 @@ int __attribute__((weak)) kvm_arch_set_irq_inatomic( * Called with wqh->lock held and interrupts disabled */ static int -irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key) +irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) { struct kvm_kernel_irqfd *irqfd = container_of(wait, struct kvm_kernel_irqfd, wait); -- cgit v1.2.3
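For readers tracking the rename, a minimal out-of-tree-style sketch of what waiter code looks like after this commit; it is not part of the patch, and the names my_wq, my_cond, my_waiter, my_wake_fn, my_sleep_until_cond and my_register_waiter are hypothetical.

/* Sketch only: uses the renamed wait_queue_entry_t API from this commit. */
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static bool my_cond;	/* assume updated under appropriate locking elsewhere */

/* Open-coded sleeper: DECLARE_WAITQUEUE() now yields a wait_queue_entry_t. */
static void my_sleep_until_cond(void)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&my_wq, &wait);		/* prototype now takes wait_queue_entry_t */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (my_cond || signal_pending(current))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&my_wq, &wait);
}

/* Custom wake callback: wait_queue_func_t now also takes wait_queue_entry_t. */
struct my_waiter {
	wait_queue_entry_t	wait;	/* was wait_queue_t before this commit */
	bool			fired;
};

static int my_wake_fn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct my_waiter *w = container_of(wait, struct my_waiter, wait);

	w->fired = true;
	return default_wake_function(wait, mode, sync, key);
}

static void my_register_waiter(struct my_waiter *w)
{
	w->fired = false;
	init_waitqueue_func_entry(&w->wait, my_wake_fn);
	w->wait.private = current;	/* task that default_wake_function() will wake */
	add_wait_queue(&my_wq, &w->wait);
}

Callers of wait_event()/wake_up() are unaffected by the rename; only code that names the entry type directly, defines a wait_queue_func_t callback, or uses the __add_wait_queue_tail*() helpers needs the s/wait_queue_t/wait_queue_entry_t/ and s/__add_wait_queue_tail/__add_wait_queue_entry_tail/ treatment shown in the hunks above.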