| author | Ingo Molnar <mingo@kernel.org> | 2019-07-08 19:04:41 +0300 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2019-07-08 19:04:41 +0300 |
| commit | 552a031ba12a4236be107a5b082a399237758a5d (patch) | |
| tree | a2bcdeb651b360013dbb654bbcd61dbaf51e0331 | |
| parent | f584dd32edc5d4400d7ceb92111a89f0c1f6651f (diff) | |
| parent | 0ecfebd2b52404ae0c54a878c872bb93363ada36 (diff) | |
| download | linux-552a031ba12a4236be107a5b082a399237758a5d.tar.xz | |
Merge tag 'v5.2' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
234 files changed, 1832 insertions, 829 deletions
diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml index 27f02ec4bb45..f97a4ecd7b91 100644 --- a/Documentation/devicetree/bindings/riscv/cpus.yaml +++ b/Documentation/devicetree/bindings/riscv/cpus.yaml @@ -152,17 +152,19 @@ examples: - | // Example 2: Spike ISA Simulator with 1 Hart cpus { - cpu@0 { - device_type = "cpu"; - reg = <0>; - compatible = "riscv"; - riscv,isa = "rv64imafdc"; - mmu-type = "riscv,sv48"; - interrupt-controller { - #interrupt-cells = <1>; - interrupt-controller; - compatible = "riscv,cpu-intc"; - }; - }; + #address-cells = <1>; + #size-cells = <0>; + cpu@0 { + device_type = "cpu"; + reg = <0>; + compatible = "riscv"; + riscv,isa = "rv64imafdc"; + mmu-type = "riscv,sv48"; + interrupt-controller { + #interrupt-cells = <1>; + interrupt-controller; + compatible = "riscv,cpu-intc"; + }; + }; }; ... diff --git a/MAINTAINERS b/MAINTAINERS index 4be48eb5473d..c80bd74681cc 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3122,6 +3122,7 @@ F: arch/arm/mach-bcm/ BROADCOM BCM2835 ARM ARCHITECTURE M: Eric Anholt <eric@anholt.net> M: Stefan Wahren <wahrenst@gmx.net> +L: bcm-kernel-feedback-list@broadcom.com L: linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) T: git git://github.com/anholt/linux @@ -3151,6 +3152,7 @@ F: arch/arm/boot/dts/bcm953012* BROADCOM BCM53573 ARM ARCHITECTURE M: RafaÅ‚ MiÅ‚ecki <rafal@milecki.pl> +L: bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org S: Maintained F: arch/arm/boot/dts/bcm53573* @@ -3940,6 +3942,14 @@ M: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com> S: Maintained F: .clang-format +CLANG/LLVM BUILD SUPPORT +L: clang-built-linux@googlegroups.com +W: https://clangbuiltlinux.github.io/ +B: https://github.com/ClangBuiltLinux/linux/issues +C: irc://chat.freenode.net/clangbuiltlinux +S: Supported +K: \b(?i:clang|llvm)\b + CLEANCACHE API M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> L: linux-kernel@vger.kernel.org @@ -7800,7 +7810,7 @@ INGENIC JZ4780 NAND DRIVER M: Harvey Hunt <harveyhuntnexus@gmail.com> L: linux-mtd@lists.infradead.org S: Maintained -F: drivers/mtd/nand/raw/jz4780_* +F: drivers/mtd/nand/raw/ingenic/ INOTIFY M: Jan Kara <jack@suse.cz> @@ -15493,6 +15503,7 @@ F: drivers/dma/tegra* TEGRA I2C DRIVER M: Laxman Dewangan <ldewangan@nvidia.com> +R: Dmitry Osipenko <digetx@gmail.com> S: Supported F: drivers/i2c/busses/i2c-tegra.c @@ -2,8 +2,8 @@ VERSION = 5 PATCHLEVEL = 2 SUBLEVEL = 0 -EXTRAVERSION = -rc6 -NAME = Golden Lions +EXTRAVERSION = +NAME = Bobtail Squid # *DOCUMENTATION* # To see a list of typical targets execute "make help" diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 480af1af9e63..03a0b19c92cd 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -5,6 +5,10 @@ KBUILD_DEFCONFIG := nsim_hs_defconfig +ifeq ($(CROSS_COMPILE),) +CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux-) +endif + cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__ cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7 cflags-$(CONFIG_ISA_ARCV2) += -mcpu=hs38 diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c index 6a91a742ab3d..7dd2dd335cf6 100644 --- a/arch/arc/plat-hsdk/platform.c +++ b/arch/arc/plat-hsdk/platform.c @@ -32,8 +32,6 @@ static void __init hsdk_init_per_cpu(unsigned int cpu) #define ARC_PERIPHERAL_BASE 0xf0000000 #define CREG_BASE (ARC_PERIPHERAL_BASE + 0x1000) -#define CREG_PAE (CREG_BASE + 0x180) 
-#define CREG_PAE_UPDATE (CREG_BASE + 0x194) #define SDIO_BASE (ARC_PERIPHERAL_BASE + 0xA000) #define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108) @@ -99,20 +97,167 @@ static void __init hsdk_enable_gpio_intc_wire(void) iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN); } -static void __init hsdk_init_early(void) +enum hsdk_axi_masters { + M_HS_CORE = 0, + M_HS_RTT, + M_AXI_TUN, + M_HDMI_VIDEO, + M_HDMI_AUDIO, + M_USB_HOST, + M_ETHERNET, + M_SDIO, + M_GPU, + M_DMAC_0, + M_DMAC_1, + M_DVFS +}; + +#define UPDATE_VAL 1 + +/* + * This is modified configuration of AXI bridge. Default settings + * are specified in "Table 111 CREG Address Decoder register reset values". + * + * AXI_M_m_SLV{0|1} - Slave Select register for master 'm'. + * Possible slaves are: + * - 0 => no slave selected + * - 1 => DDR controller port #1 + * - 2 => SRAM controller + * - 3 => AXI tunnel + * - 4 => EBI controller + * - 5 => ROM controller + * - 6 => AXI2APB bridge + * - 7 => DDR controller port #2 + * - 8 => DDR controller port #3 + * - 9 => HS38x4 IOC + * - 10 => HS38x4 DMI + * AXI_M_m_OFFSET{0|1} - Addr Offset register for master 'm' + * + * Please read ARC HS Development IC Specification, section 17.2 for more + * information about apertures configuration. + * + * m master AXI_M_m_SLV0 AXI_M_m_SLV1 AXI_M_m_OFFSET0 AXI_M_m_OFFSET1 + * 0 HS (CBU) 0x11111111 0x63111111 0xFEDCBA98 0x0E543210 + * 1 HS (RTT) 0x77777777 0x77777777 0xFEDCBA98 0x76543210 + * 2 AXI Tunnel 0x88888888 0x88888888 0xFEDCBA98 0x76543210 + * 3 HDMI-VIDEO 0x77777777 0x77777777 0xFEDCBA98 0x76543210 + * 4 HDMI-ADUIO 0x77777777 0x77777777 0xFEDCBA98 0x76543210 + * 5 USB-HOST 0x77777777 0x77999999 0xFEDCBA98 0x76DCBA98 + * 6 ETHERNET 0x77777777 0x77999999 0xFEDCBA98 0x76DCBA98 + * 7 SDIO 0x77777777 0x77999999 0xFEDCBA98 0x76DCBA98 + * 8 GPU 0x77777777 0x77777777 0xFEDCBA98 0x76543210 + * 9 DMAC (port #1) 0x77777777 0x77777777 0xFEDCBA98 0x76543210 + * 10 DMAC (port #2) 0x77777777 0x77777777 0xFEDCBA98 0x76543210 + * 11 DVFS 0x00000000 0x60000000 0x00000000 0x00000000 + */ + +#define CREG_AXI_M_SLV0(m) ((void __iomem *)(CREG_BASE + 0x20 * (m))) +#define CREG_AXI_M_SLV1(m) ((void __iomem *)(CREG_BASE + 0x20 * (m) + 0x04)) +#define CREG_AXI_M_OFT0(m) ((void __iomem *)(CREG_BASE + 0x20 * (m) + 0x08)) +#define CREG_AXI_M_OFT1(m) ((void __iomem *)(CREG_BASE + 0x20 * (m) + 0x0C)) +#define CREG_AXI_M_UPDT(m) ((void __iomem *)(CREG_BASE + 0x20 * (m) + 0x14)) + +#define CREG_AXI_M_HS_CORE_BOOT ((void __iomem *)(CREG_BASE + 0x010)) + +#define CREG_PAE ((void __iomem *)(CREG_BASE + 0x180)) +#define CREG_PAE_UPDT ((void __iomem *)(CREG_BASE + 0x194)) + +static void __init hsdk_init_memory_bridge(void) { + u32 reg; + + /* + * M_HS_CORE has one unique register - BOOT. + * We need to clean boot mirror (BOOT[1:0]) bits in them to avoid first + * aperture to be masked by 'boot mirror'. 
+ */ + reg = readl(CREG_AXI_M_HS_CORE_BOOT) & (~0x3); + writel(reg, CREG_AXI_M_HS_CORE_BOOT); + writel(0x11111111, CREG_AXI_M_SLV0(M_HS_CORE)); + writel(0x63111111, CREG_AXI_M_SLV1(M_HS_CORE)); + writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_HS_CORE)); + writel(0x0E543210, CREG_AXI_M_OFT1(M_HS_CORE)); + writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_HS_CORE)); + + writel(0x77777777, CREG_AXI_M_SLV0(M_HS_RTT)); + writel(0x77777777, CREG_AXI_M_SLV1(M_HS_RTT)); + writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_HS_RTT)); + writel(0x76543210, CREG_AXI_M_OFT1(M_HS_RTT)); + writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_HS_RTT)); + + writel(0x88888888, CREG_AXI_M_SLV0(M_AXI_TUN)); + writel(0x88888888, CREG_AXI_M_SLV1(M_AXI_TUN)); + writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_AXI_TUN)); + writel(0x76543210, CREG_AXI_M_OFT1(M_AXI_TUN)); + writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_AXI_TUN)); + + writel(0x77777777, CREG_AXI_M_SLV0(M_HDMI_VIDEO)); + writel(0x77777777, CREG_AXI_M_SLV1(M_HDMI_VIDEO)); + writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_HDMI_VIDEO)); + writel(0x76543210, CREG_AXI_M_OFT1(M_HDMI_VIDEO)); + writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_HDMI_VIDEO)); + + writel(0x77777777, CREG_AXI_M_SLV0(M_HDMI_AUDIO)); + writel(0x77777777, CREG_AXI_M_SLV1(M_HDMI_AUDIO)); + writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_HDMI_AUDIO)); + writel(0x76543210, CREG_AXI_M_OFT1(M_HDMI_AUDIO)); + writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_HDMI_AUDIO)); + + writel(0x77777777, CREG_AXI_M_SLV0(M_USB_HOST)); + writel(0x77999999, CREG_AXI_M_SLV1(M_USB_HOST)); + writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_USB_HOST)); + writel(0x76DCBA98, CREG_AXI_M_OFT1(M_USB_HOST)); + writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_USB_HOST)); + + writel(0x77777777, CREG_AXI_M_SLV0(M_ETHERNET)); + writel(0x77999999, CREG_AXI_M_SLV1(M_ETHERNET)); + writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_ETHERNET)); + writel(0x76DCBA98, CREG_AXI_M_OFT1(M_ETHERNET)); + writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_ETHERNET)); + + writel(0x77777777, CREG_AXI_M_SLV0(M_SDIO)); + writel(0x77999999, CREG_AXI_M_SLV1(M_SDIO)); + writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_SDIO)); + writel(0x76DCBA98, CREG_AXI_M_OFT1(M_SDIO)); + writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_SDIO)); + + writel(0x77777777, CREG_AXI_M_SLV0(M_GPU)); + writel(0x77777777, CREG_AXI_M_SLV1(M_GPU)); + writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_GPU)); + writel(0x76543210, CREG_AXI_M_OFT1(M_GPU)); + writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_GPU)); + + writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0)); + writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_0)); + writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0)); + writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_0)); + writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0)); + + writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1)); + writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_1)); + writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1)); + writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_1)); + writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1)); + + writel(0x00000000, CREG_AXI_M_SLV0(M_DVFS)); + writel(0x60000000, CREG_AXI_M_SLV1(M_DVFS)); + writel(0x00000000, CREG_AXI_M_OFT0(M_DVFS)); + writel(0x00000000, CREG_AXI_M_OFT1(M_DVFS)); + writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DVFS)); + /* * PAE remapping for DMA clients does not work due to an RTL bug, so * CREG_PAE register must be programmed to all zeroes, otherwise it * will cause problems with DMA to/from peripherals even if PAE40 is * not used. 
*/ + writel(0x00000000, CREG_PAE); + writel(UPDATE_VAL, CREG_PAE_UPDT); +} - /* Default is 1, which means "PAE offset = 4GByte" */ - writel_relaxed(0, (void __iomem *) CREG_PAE); - - /* Really apply settings made above */ - writel(1, (void __iomem *) CREG_PAE_UPDATE); +static void __init hsdk_init_early(void) +{ + hsdk_init_memory_bridge(); /* * Switch SDIO external ciu clock divider from default div-by-8 to diff --git a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi index 59753470cd34..267d0c178e55 100644 --- a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi +++ b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi @@ -336,3 +336,11 @@ status = "disabled"; }; +&uart0 { + compatible = "marvell,armada-38x-uart"; +}; + +&uart1 { + compatible = "marvell,armada-38x-uart"; +}; + diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts index cfbfbc91a1e1..3613f05f8a80 100644 --- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts +++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts @@ -20,7 +20,7 @@ }; chosen { - bootargs = "console=ttyS0,19200n8 root=/dev/sda1 rw rootwait"; + bootargs = "console=ttyS0,19200n8 root=/dev/sda1 rw rootwait consoleblank=300"; stdout-path = "uart0:19200n8"; }; diff --git a/arch/arm/boot/dts/gemini-dlink-dns-313.dts b/arch/arm/boot/dts/gemini-dlink-dns-313.dts index b12504e10f0b..360642a02a48 100644 --- a/arch/arm/boot/dts/gemini-dlink-dns-313.dts +++ b/arch/arm/boot/dts/gemini-dlink-dns-313.dts @@ -11,7 +11,7 @@ / { model = "D-Link DNS-313 1-Bay Network Storage Enclosure"; - compatible = "dlink,dir-313", "cortina,gemini"; + compatible = "dlink,dns-313", "cortina,gemini"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi index bbf010c73336..a7f6d1d58e20 100644 --- a/arch/arm/boot/dts/imx6ul.dtsi +++ b/arch/arm/boot/dts/imx6ul.dtsi @@ -358,7 +358,7 @@ pwm1: pwm@2080000 { compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm"; reg = <0x02080000 0x4000>; - interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clks IMX6UL_CLK_PWM1>, <&clks IMX6UL_CLK_PWM1>; clock-names = "ipg", "per"; @@ -369,7 +369,7 @@ pwm2: pwm@2084000 { compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm"; reg = <0x02084000 0x4000>; - interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clks IMX6UL_CLK_PWM2>, <&clks IMX6UL_CLK_PWM2>; clock-names = "ipg", "per"; @@ -380,7 +380,7 @@ pwm3: pwm@2088000 { compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm"; reg = <0x02088000 0x4000>; - interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clks IMX6UL_CLK_PWM3>, <&clks IMX6UL_CLK_PWM3>; clock-names = "ipg", "per"; @@ -391,7 +391,7 @@ pwm4: pwm@208c000 { compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm"; reg = <0x0208c000 0x4000>; - interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clks IMX6UL_CLK_PWM4>, <&clks IMX6UL_CLK_PWM4>; clock-names = "ipg", "per"; diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi index 7ef442462ea4..40c11b6b217a 100644 --- a/arch/arm/boot/dts/meson8.dtsi +++ b/arch/arm/boot/dts/meson8.dtsi @@ -248,8 +248,8 @@ <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 171 
IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>, @@ -264,7 +264,6 @@ clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>; clock-names = "bus", "core"; operating-points-v2 = <&gpu_opp_table>; - switch-delay = <0xffff>; }; }; }; /* end of / */ diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi index 800cd65fc50a..ec67f49116d9 100644 --- a/arch/arm/boot/dts/meson8b.dtsi +++ b/arch/arm/boot/dts/meson8b.dtsi @@ -163,23 +163,23 @@ opp-255000000 { opp-hz = /bits/ 64 <255000000>; - opp-microvolt = <1150000>; + opp-microvolt = <1100000>; }; opp-364300000 { opp-hz = /bits/ 64 <364300000>; - opp-microvolt = <1150000>; + opp-microvolt = <1100000>; }; opp-425000000 { opp-hz = /bits/ 64 <425000000>; - opp-microvolt = <1150000>; + opp-microvolt = <1100000>; }; opp-510000000 { opp-hz = /bits/ 64 <510000000>; - opp-microvolt = <1150000>; + opp-microvolt = <1100000>; }; opp-637500000 { opp-hz = /bits/ 64 <637500000>; - opp-microvolt = <1150000>; + opp-microvolt = <1100000>; turbo-mode; }; }; @@ -229,7 +229,6 @@ clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>; clock-names = "bus", "core"; operating-points-v2 = <&gpu_opp_table>; - switch-delay = <0xffff>; }; }; }; /* end of / */ diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c index 51a892702e27..a273ab25c668 100644 --- a/arch/arm/mach-davinci/board-da830-evm.c +++ b/arch/arm/mach-davinci/board-da830-evm.c @@ -61,6 +61,9 @@ static struct regulator_consumer_supply da830_evm_usb_supplies[] = { static struct regulator_init_data da830_evm_usb_vbus_data = { .consumer_supplies = da830_evm_usb_supplies, .num_consumer_supplies = ARRAY_SIZE(da830_evm_usb_supplies), + .constraints = { + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, }; static struct fixed_voltage_config da830_evm_usb_vbus = { @@ -88,7 +91,7 @@ static struct gpiod_lookup_table da830_evm_usb_oc_gpio_lookup = { static struct gpiod_lookup_table da830_evm_usb_vbus_gpio_lookup = { .dev_id = "reg-fixed-voltage.0", .table = { - GPIO_LOOKUP("davinci_gpio", ON_BD_USB_DRV, "vbus", 0), + GPIO_LOOKUP("davinci_gpio", ON_BD_USB_DRV, NULL, 0), { } }, }; diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c index db177a6a7e48..5390a8630cf0 100644 --- a/arch/arm/mach-davinci/board-omapl138-hawk.c +++ b/arch/arm/mach-davinci/board-omapl138-hawk.c @@ -306,6 +306,9 @@ static struct regulator_consumer_supply hawk_usb_supplies[] = { static struct regulator_init_data hawk_usb_vbus_data = { .consumer_supplies = hawk_usb_supplies, .num_consumer_supplies = ARRAY_SIZE(hawk_usb_supplies), + .constraints = { + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, }; static struct fixed_voltage_config hawk_usb_vbus = { diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c index fd4a3bf27993..1b442b128569 100644 --- a/arch/arm/mach-omap2/prm3xxx.c +++ b/arch/arm/mach-omap2/prm3xxx.c @@ -430,7 +430,7 @@ static void omap3_prm_reconfigure_io_chain(void) * registers, and omap3xxx_prm_reconfigure_io_chain() must be called. * No return value. 
*/ -static void __init omap3xxx_prm_enable_io_wakeup(void) +static void omap3xxx_prm_enable_io_wakeup(void) { if (prm_features & PRM_HAS_IO_WAKEUP) omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi index b04581249f0b..bf7f845447ed 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi @@ -28,7 +28,7 @@ enable-method = "psci"; clocks = <&clockgen 1 0>; next-level-cache = <&l2>; - cpu-idle-states = <&CPU_PH20>; + cpu-idle-states = <&CPU_PW20>; }; cpu1: cpu@1 { @@ -38,7 +38,7 @@ enable-method = "psci"; clocks = <&clockgen 1 0>; next-level-cache = <&l2>; - cpu-idle-states = <&CPU_PH20>; + cpu-idle-states = <&CPU_PW20>; }; l2: l2-cache { @@ -53,13 +53,13 @@ */ entry-method = "arm,psci"; - CPU_PH20: cpu-ph20 { - compatible = "arm,idle-state"; - idle-state-name = "PH20"; - arm,psci-suspend-param = <0x00010000>; - entry-latency-us = <1000>; - exit-latency-us = <1000>; - min-residency-us = <3000>; + CPU_PW20: cpu-pw20 { + compatible = "arm,idle-state"; + idle-state-name = "PW20"; + arm,psci-suspend-param = <0x0>; + entry-latency-us = <2000>; + exit-latency-us = <2000>; + min-residency-us = <6000>; }; }; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 4d583514258c..6bca5b082ea4 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -613,6 +613,7 @@ CONFIG_RTC_DRV_TEGRA=y CONFIG_RTC_DRV_IMX_SC=m CONFIG_RTC_DRV_XGENE=y CONFIG_DMADEVICES=y +CONFIG_FSL_EDMA=y CONFIG_DMA_BCM2835=m CONFIG_K3_DMA=y CONFIG_MV_XOR=y diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h index 04ca08086d35..2b85c0d6fa3d 100644 --- a/arch/arm64/kernel/image.h +++ b/arch/arm64/kernel/image.h @@ -67,7 +67,11 @@ #ifdef CONFIG_EFI -__efistub_stext_offset = stext - _text; +/* + * Use ABSOLUTE() to avoid ld.lld treating this as a relative symbol: + * https://github.com/ClangBuiltLinux/linux/issues/561 + */ +__efistub_stext_offset = ABSOLUTE(stext - _text); /* * The EFI stub has its own symbol namespace prefixed by __efistub_, to diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index e23a68a5808f..71530e080ecc 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -21,6 +21,7 @@ void *module_alloc(unsigned long size) { + u64 module_alloc_end = module_alloc_base + MODULES_VSIZE; gfp_t gfp_mask = GFP_KERNEL; void *p; @@ -28,9 +29,12 @@ void *module_alloc(unsigned long size) if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) gfp_mask |= __GFP_NOWARN; + if (IS_ENABLED(CONFIG_KASAN)) + /* don't exceed the static module region - see below */ + module_alloc_end = MODULES_END; + p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base, - module_alloc_base + MODULES_VSIZE, - gfp_mask, PAGE_KERNEL_EXEC, 0, + module_alloc_end, gfp_mask, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, __builtin_return_address(0)); if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index c2afa7982047..dfd626447482 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -208,7 +208,7 @@ out: #define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64) #define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64) -#define vq_present(vqs, vq) ((vqs)[vq_word(vq)] & vq_mask(vq)) +#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq))) static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { diff --git 
a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c index 04a43cfd4e09..d47a3381aad8 100644 --- a/arch/csky/kernel/signal.c +++ b/arch/csky/kernel/signal.c @@ -39,6 +39,11 @@ static int save_fpu_state(struct sigcontext __user *sc) #endif struct rt_sigframe { + /* + * pad[3] is compatible with the same struct defined in + * gcc/libgcc/config/csky/linux-unwind.h + */ + int pad[3]; struct siginfo info; struct ucontext uc; }; diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 8f4486c4415b..eceff9b75b22 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -17,6 +17,7 @@ archscripts: scripts_basic $(Q)$(MAKE) $(build)=arch/mips/boot/tools relocs KBUILD_DEFCONFIG := 32r2el_defconfig +KBUILD_DTBS := dtbs # # Select the object file format to substitute into the linker script. @@ -384,7 +385,7 @@ quiet_cmd_64 = OBJCOPY $@ vmlinux.64: vmlinux $(call cmd,64) -all: $(all-y) +all: $(all-y) $(KBUILD_DTBS) # boot $(boot-y): $(vmlinux-32) FORCE diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index 3c453a1f1ff1..172801ed35b8 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -78,6 +78,8 @@ OBJCOPYFLAGS_piggy.o := --add-section=.image=$(obj)/vmlinux.bin.z \ $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE $(call if_changed,objcopy) +HOSTCFLAGS_calc_vmlinuz_load_addr.o += $(LINUXINCLUDE) + # Calculate the load address of the compressed kernel image hostprogs-y := calc_vmlinuz_load_addr diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c index 240f1d12df75..080b926d2623 100644 --- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c +++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c @@ -9,7 +9,7 @@ #include <stdint.h> #include <stdio.h> #include <stdlib.h> -#include "../../../../include/linux/sizes.h" +#include <linux/sizes.h> int main(int argc, char *argv[]) { diff --git a/arch/mips/include/asm/mach-ath79/ar933x_uart.h b/arch/mips/include/asm/mach-ath79/ar933x_uart.h index b8f8af7dc47c..cacf3545e018 100644 --- a/arch/mips/include/asm/mach-ath79/ar933x_uart.h +++ b/arch/mips/include/asm/mach-ath79/ar933x_uart.h @@ -24,8 +24,8 @@ #define AR933X_UART_CS_PARITY_S 0 #define AR933X_UART_CS_PARITY_M 0x3 #define AR933X_UART_CS_PARITY_NONE 0 -#define AR933X_UART_CS_PARITY_ODD 1 -#define AR933X_UART_CS_PARITY_EVEN 2 +#define AR933X_UART_CS_PARITY_ODD 2 +#define AR933X_UART_CS_PARITY_EVEN 3 #define AR933X_UART_CS_IF_MODE_S 2 #define AR933X_UART_CS_IF_MODE_M 0x3 #define AR933X_UART_CS_IF_MODE_NONE 0 diff --git a/arch/mips/include/asm/mips-gic.h b/arch/mips/include/asm/mips-gic.h index 75a1cdee1331..084cac1c5ea2 100644 --- a/arch/mips/include/asm/mips-gic.h +++ b/arch/mips/include/asm/mips-gic.h @@ -311,6 +311,36 @@ static inline bool mips_gic_present(void) } /** + * mips_gic_vx_map_reg() - Return GIC_Vx_<intr>_MAP register offset + * @intr: A GIC local interrupt + * + * Determine the index of the GIC_VL_<intr>_MAP or GIC_VO_<intr>_MAP register + * within the block of GIC map registers. This is almost the same as the order + * of interrupts in the pending & mask registers, as used by enum + * mips_gic_local_interrupt, but moves the FDC interrupt & thus offsets the + * interrupts after it... + * + * Return: The map register index corresponding to @intr. + * + * The return value is suitable for use with the (read|write)_gic_v[lo]_map + * accessor functions. 
+ */ +static inline unsigned int +mips_gic_vx_map_reg(enum mips_gic_local_interrupt intr) +{ + /* WD, Compare & Timer are 1:1 */ + if (intr <= GIC_LOCAL_INT_TIMER) + return intr; + + /* FDC moves to after Timer... */ + if (intr == GIC_LOCAL_INT_FDC) + return GIC_LOCAL_INT_TIMER + 1; + + /* As a result everything else is offset by 1 */ + return intr + 1; +} + +/** * gic_get_c0_compare_int() - Return cp0 count/compare interrupt virq * * Determine the virq number to use for the coprocessor 0 count/compare diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index 50ee7213b432..d79f2b432318 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -203,7 +203,7 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) bool __virt_addr_valid(const volatile void *kaddr) { - unsigned long vaddr = (unsigned long)vaddr; + unsigned long vaddr = (unsigned long)kaddr; if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE)) return false; diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 65b6e85447b1..144ceb0fba88 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -391,6 +391,7 @@ static struct work_registers build_get_work_registers(u32 **p) static void build_restore_work_registers(u32 **p) { if (scratch_reg >= 0) { + uasm_i_ehb(p); UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); return; } @@ -668,10 +669,12 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r, uasm_i_mtc0(p, 0, C0_PAGEMASK); uasm_il_b(p, r, lid); } - if (scratch_reg >= 0) + if (scratch_reg >= 0) { + uasm_i_ehb(p); UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); - else + } else { UASM_i_LW(p, 1, scratchpad_offset(0), 0); + } } else { /* Reset default page size */ if (PM_DEFAULT_MASK >> 16) { @@ -938,10 +941,12 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, uasm_i_jr(p, ptr); if (mode == refill_scratch) { - if (scratch_reg >= 0) + if (scratch_reg >= 0) { + uasm_i_ehb(p); UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); - else + } else { UASM_i_LW(p, 1, scratchpad_offset(0), 0); + } } else { uasm_i_nop(p); } @@ -1258,6 +1263,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */ if (c0_scratch_reg >= 0) { + uasm_i_ehb(p); UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg); build_tlb_write_entry(p, l, r, tlb_random); uasm_l_leave(l, *p); @@ -1603,15 +1609,17 @@ static void build_setup_pgd(void) uasm_i_dinsm(&p, a0, 0, 29, 64 - 29); uasm_l_tlbl_goaround1(&l, p); UASM_i_SLL(&p, a0, a0, 11); - uasm_i_jr(&p, 31); UASM_i_MTC0(&p, a0, C0_CONTEXT); + uasm_i_jr(&p, 31); + uasm_i_ehb(&p); } else { /* PGD in c0_KScratch */ - uasm_i_jr(&p, 31); if (cpu_has_ldpte) UASM_i_MTC0(&p, a0, C0_PWBASE); else UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg); + uasm_i_jr(&p, 31); + uasm_i_ehb(&p); } #else #ifdef CONFIG_SMP @@ -1625,13 +1633,16 @@ static void build_setup_pgd(void) UASM_i_LA_mostly(&p, a2, pgdc); UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2); #endif /* SMP */ - uasm_i_jr(&p, 31); /* if pgd_reg is allocated, save PGD also to scratch register */ - if (pgd_reg != -1) + if (pgd_reg != -1) { UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg); - else + uasm_i_jr(&p, 31); + uasm_i_ehb(&p); + } else { + uasm_i_jr(&p, 31); uasm_i_nop(&p); + } #endif if (p >= (u32 *)tlbmiss_handler_setup_pgd_end) panic("tlbmiss_handler_setup_pgd space exceeded"); diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index f241ded9239b..1f0f29a289d3 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c @@ -786,6 
+786,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, /* 32-bit PC relative address */ *loc = val - dot - 8 + addend; break; + case R_PARISC_PCREL64: + /* 64-bit PC relative address */ + *loc64 = val - dot - 8 + addend; + break; case R_PARISC_DIR64: /* 64-bit effective address */ *loc64 = val + addend; diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 6b86055e5251..73ba246ca11d 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -315,7 +315,7 @@ TRAMP_REAL_BEGIN(machine_check_common_early) mfspr r11,SPRN_DSISR /* Save DSISR */ std r11,_DSISR(r1) std r9,_CCR(r1) /* Save CR in stackframe */ - kuap_save_amr_and_lock r9, r10, cr1 + /* We don't touch AMR here, we never go to virtual mode */ /* Save r9 through r13 from EXMC save area to stack frame. */ EXCEPTION_PROLOG_COMMON_2(PACA_EXMC) mfmsr r11 /* get MSR value */ diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c index bb70391401f7..794404d50a85 100644 --- a/arch/powerpc/mm/book3s64/mmu_context.c +++ b/arch/powerpc/mm/book3s64/mmu_context.c @@ -50,20 +50,52 @@ EXPORT_SYMBOL_GPL(hash__alloc_context_id); void slb_setup_new_exec(void); +static int realloc_context_ids(mm_context_t *ctx) +{ + int i, id; + + /* + * id 0 (aka. ctx->id) is special, we always allocate a new one, even if + * there wasn't one allocated previously (which happens in the exec + * case where ctx is newly allocated). + * + * We have to be a bit careful here. We must keep the existing ids in + * the array, so that we can test if they're non-zero to decide if we + * need to allocate a new one. However in case of error we must free the + * ids we've allocated but *not* any of the existing ones (or risk a + * UAF). That's why we decrement i at the start of the error handling + * loop, to skip the id that we just tested but couldn't reallocate. 
+ */ + for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) { + if (i == 0 || ctx->extended_id[i]) { + id = hash__alloc_context_id(); + if (id < 0) + goto error; + + ctx->extended_id[i] = id; + } + } + + /* The caller expects us to return id */ + return ctx->id; + +error: + for (i--; i >= 0; i--) { + if (ctx->extended_id[i]) + ida_free(&mmu_context_ida, ctx->extended_id[i]); + } + + return id; +} + static int hash__init_new_context(struct mm_struct *mm) { int index; - index = hash__alloc_context_id(); - if (index < 0) - return index; - mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context), GFP_KERNEL); - if (!mm->context.hash_context) { - ida_free(&mmu_context_ida, index); + if (!mm->context.hash_context) return -ENOMEM; - } /* * The old code would re-promote on fork, we don't do that when using @@ -91,13 +123,20 @@ static int hash__init_new_context(struct mm_struct *mm) mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table), GFP_KERNEL); if (!mm->context.hash_context->spt) { - ida_free(&mmu_context_ida, index); kfree(mm->context.hash_context); return -ENOMEM; } } #endif + } + index = realloc_context_ids(&mm->context); + if (index < 0) { +#ifdef CONFIG_PPC_SUBPAGE_PROT + kfree(mm->context.hash_context->spt); +#endif + kfree(mm->context.hash_context); + return index; } pkey_mm_init(mm); diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi index 3c06ee4b2b29..40983491b95f 100644 --- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi +++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi @@ -163,6 +163,7 @@ interrupt-parent = <&plic0>; interrupts = <4>; clocks = <&prci PRCI_CLK_TLCLK>; + status = "disabled"; }; uart1: serial@10011000 { compatible = "sifive,fu540-c000-uart", "sifive,uart0"; @@ -170,6 +171,7 @@ interrupt-parent = <&plic0>; interrupts = <5>; clocks = <&prci PRCI_CLK_TLCLK>; + status = "disabled"; }; i2c0: i2c@10030000 { compatible = "sifive,fu540-c000-i2c", "sifive,i2c0"; @@ -181,6 +183,7 @@ reg-io-width = <1>; #address-cells = <1>; #size-cells = <0>; + status = "disabled"; }; qspi0: spi@10040000 { compatible = "sifive,fu540-c000-spi", "sifive,spi0"; @@ -191,6 +194,7 @@ clocks = <&prci PRCI_CLK_TLCLK>; #address-cells = <1>; #size-cells = <0>; + status = "disabled"; }; qspi1: spi@10041000 { compatible = "sifive,fu540-c000-spi", "sifive,spi0"; @@ -201,6 +205,7 @@ clocks = <&prci PRCI_CLK_TLCLK>; #address-cells = <1>; #size-cells = <0>; + status = "disabled"; }; qspi2: spi@10050000 { compatible = "sifive,fu540-c000-spi", "sifive,spi0"; @@ -210,6 +215,7 @@ clocks = <&prci PRCI_CLK_TLCLK>; #address-cells = <1>; #size-cells = <0>; + status = "disabled"; }; }; }; diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts index 4da88707e28f..0b55c53c08c7 100644 --- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts +++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts @@ -42,7 +42,20 @@ }; }; +&uart0 { + status = "okay"; +}; + +&uart1 { + status = "okay"; +}; + +&i2c0 { + status = "okay"; +}; + &qspi0 { + status = "okay"; flash@0 { compatible = "issi,is25wp256", "jedec,spi-nor"; reg = <0>; diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index 4f02967e55de..04944fb4fa7a 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -69,6 +69,7 @@ CONFIG_VIRTIO_MMIO=y CONFIG_CLK_SIFIVE=y CONFIG_CLK_SIFIVE_FU540_PRCI=y CONFIG_SIFIVE_PLIC=y +CONFIG_SPI_SIFIVE=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y 
CONFIG_AUTOFS4_FS=y @@ -84,4 +85,8 @@ CONFIG_ROOT_NFS=y CONFIG_CRYPTO_USER_API_HASH=y CONFIG_CRYPTO_DEV_VIRTIO=y CONFIG_PRINTK_TIME=y +CONFIG_SPI=y +CONFIG_MMC_SPI=y +CONFIG_MMC=y +CONFIG_DEVTMPFS_MOUNT=y # CONFIG_RCU_TRACE is not set diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 3e2708c626a8..f960c3f4ce47 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -272,9 +272,6 @@ vmalloc_fault: * entries, but in RISC-V, SFENCE.VMA specifies an * ordering constraint, not a cache flush; it is * necessary even after writing invalid entries. - * Relying on flush_tlb_fix_spurious_fault would - * suffice, but the extra traps reduce - * performance. So, eagerly SFENCE.VMA. */ local_flush_tlb_page(addr); diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index f0e4804515d8..ffc015bd257e 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -561,14 +561,14 @@ int x86_pmu_hw_config(struct perf_event *event) } /* sample_regs_user never support XMM registers */ - if (unlikely(event->attr.sample_regs_user & PEBS_XMM_REGS)) + if (unlikely(event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK)) return -EINVAL; /* * Besides the general purpose registers, XMM registers may * be collected in PEBS on some platforms, e.g. Icelake */ - if (unlikely(event->attr.sample_regs_intr & PEBS_XMM_REGS)) { - if (x86_pmu.pebs_no_xmm_regs) + if (unlikely(event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK)) { + if (!(event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS)) return -EINVAL; if (!event->attr.precise_ip) @@ -2328,13 +2328,13 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re return; } - if (perf_hw_regs(regs)) { - if (perf_callchain_store(entry, regs->ip)) - return; + if (perf_callchain_store(entry, regs->ip)) + return; + + if (perf_hw_regs(regs)) unwind_start(&state, current, regs, NULL); - } else { + else unwind_start(&state, current, NULL, (void *)regs->sp); - } for (; !unwind_done(&state); unwind_next_frame(&state)) { addr = unwind_get_return_address(&state); diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 7acc526b4ad2..505c73dc6a73 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -987,7 +987,7 @@ static u64 pebs_update_adaptive_cfg(struct perf_event *event) pebs_data_cfg |= PEBS_DATACFG_GP; if ((sample_type & PERF_SAMPLE_REGS_INTR) && - (attr->sample_regs_intr & PEBS_XMM_REGS)) + (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK)) pebs_data_cfg |= PEBS_DATACFG_XMMS; if (sample_type & PERF_SAMPLE_BRANCH_STACK) { @@ -1964,10 +1964,9 @@ void __init intel_ds_init(void) x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS); x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS); x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE; - if (x86_pmu.version <= 4) { + if (x86_pmu.version <= 4) x86_pmu.pebs_no_isolation = 1; - x86_pmu.pebs_no_xmm_regs = 1; - } + if (x86_pmu.pebs) { char pebs_type = x86_pmu.intel_cap.pebs_trap ? 
'+' : '-'; char *pebs_qual = ""; @@ -2020,9 +2019,9 @@ void __init intel_ds_init(void) PERF_SAMPLE_TIME; x86_pmu.flags |= PMU_FL_PEBS_ALL; pebs_qual = "-baseline"; + x86_get_pmu()->capabilities |= PERF_PMU_CAP_EXTENDED_REGS; } else { /* Only basic record supported */ - x86_pmu.pebs_no_xmm_regs = 1; x86_pmu.large_pebs_flags &= ~(PERF_SAMPLE_ADDR | PERF_SAMPLE_TIME | diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 9bcec3f99e4a..8751008fc170 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -121,24 +121,6 @@ struct amd_nb { (1ULL << PERF_REG_X86_R14) | \ (1ULL << PERF_REG_X86_R15)) -#define PEBS_XMM_REGS \ - ((1ULL << PERF_REG_X86_XMM0) | \ - (1ULL << PERF_REG_X86_XMM1) | \ - (1ULL << PERF_REG_X86_XMM2) | \ - (1ULL << PERF_REG_X86_XMM3) | \ - (1ULL << PERF_REG_X86_XMM4) | \ - (1ULL << PERF_REG_X86_XMM5) | \ - (1ULL << PERF_REG_X86_XMM6) | \ - (1ULL << PERF_REG_X86_XMM7) | \ - (1ULL << PERF_REG_X86_XMM8) | \ - (1ULL << PERF_REG_X86_XMM9) | \ - (1ULL << PERF_REG_X86_XMM10) | \ - (1ULL << PERF_REG_X86_XMM11) | \ - (1ULL << PERF_REG_X86_XMM12) | \ - (1ULL << PERF_REG_X86_XMM13) | \ - (1ULL << PERF_REG_X86_XMM14) | \ - (1ULL << PERF_REG_X86_XMM15)) - /* * Per register state. */ @@ -665,8 +647,7 @@ struct x86_pmu { pebs_broken :1, pebs_prec_dist :1, pebs_no_tlb :1, - pebs_no_isolation :1, - pebs_no_xmm_regs :1; + pebs_no_isolation :1; int pebs_record_size; int pebs_buffer_size; int max_pebs_events; diff --git a/arch/x86/include/uapi/asm/perf_regs.h b/arch/x86/include/uapi/asm/perf_regs.h index ac67bbea10ca..7c9d2bb3833b 100644 --- a/arch/x86/include/uapi/asm/perf_regs.h +++ b/arch/x86/include/uapi/asm/perf_regs.h @@ -52,4 +52,7 @@ enum perf_event_x86_regs { /* These include both GPRs and XMMX registers */ PERF_REG_X86_XMM_MAX = PERF_REG_X86_XMM15 + 2, }; + +#define PERF_REG_EXTENDED_MASK (~((1ULL << PERF_REG_X86_XMM0) - 1)) + #endif /* _ASM_X86_PERF_REGS_H */ diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 177aa8ef2afa..85be316665b4 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1464,7 +1464,8 @@ static void apic_pending_intr_clear(void) if (queued) { if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) { ntsc = rdtsc(); - max_loops = (cpu_khz << 10) - (ntsc - tsc); + max_loops = (long long)cpu_khz << 10; + max_loops -= ntsc - tsc; } else { max_loops--; } diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 03b4cc0ec3a7..66ca906aa790 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -836,6 +836,16 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void) } /* + * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper + * bit in the mask to allow guests to use the mitigation even in the + * case where the host does not enable it. + */ + if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || + static_cpu_has(X86_FEATURE_AMD_SSBD)) { + x86_spec_ctrl_mask |= SPEC_CTRL_SSBD; + } + + /* * We have three CPU feature flags that are in play here: * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. 
* - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass @@ -852,7 +862,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void) x86_amd_ssb_disable(); } else { x86_spec_ctrl_base |= SPEC_CTRL_SSBD; - x86_spec_ctrl_mask |= SPEC_CTRL_SSBD; wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); } } diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index a813987b5552..cb0fdcaf1415 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -789,13 +789,16 @@ static struct syscore_ops mc_syscore_ops = { .resume = mc_bp_resume, }; -static int mc_cpu_online(unsigned int cpu) +static int mc_cpu_starting(unsigned int cpu) { - struct device *dev; - - dev = get_cpu_device(cpu); microcode_update_cpu(cpu); pr_debug("CPU%d added\n", cpu); + return 0; +} + +static int mc_cpu_online(unsigned int cpu) +{ + struct device *dev = get_cpu_device(cpu); if (sysfs_create_group(&dev->kobj, &mc_attr_group)) pr_err("Failed to create group for CPU%d\n", cpu); @@ -872,7 +875,9 @@ int __init microcode_init(void) goto out_ucode_group; register_syscore_ops(&mc_syscore_ops); - cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:online", + cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting", + mc_cpu_starting, NULL); + cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", mc_cpu_online, mc_cpu_down_prep); pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION); diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 2131b8bbaad7..2f4824793798 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -796,8 +796,12 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of, struct seq_file *seq, void *v) { struct rdt_resource *r = of->kn->parent->priv; - u32 sw_shareable = 0, hw_shareable = 0; - u32 exclusive = 0, pseudo_locked = 0; + /* + * Use unsigned long even though only 32 bits are used to ensure + * test_bit() is used safely. + */ + unsigned long sw_shareable = 0, hw_shareable = 0; + unsigned long exclusive = 0, pseudo_locked = 0; struct rdt_domain *dom; int i, hwb, swb, excl, psl; enum rdtgrp_mode mode; @@ -842,10 +846,10 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of, } for (i = r->cache.cbm_len - 1; i >= 0; i--) { pseudo_locked = dom->plr ? dom->plr->cbm : 0; - hwb = test_bit(i, (unsigned long *)&hw_shareable); - swb = test_bit(i, (unsigned long *)&sw_shareable); - excl = test_bit(i, (unsigned long *)&exclusive); - psl = test_bit(i, (unsigned long *)&pseudo_locked); + hwb = test_bit(i, &hw_shareable); + swb = test_bit(i, &sw_shareable); + excl = test_bit(i, &exclusive); + psl = test_bit(i, &pseudo_locked); if (hwb && swb) seq_putc(seq, 'X'); else if (hwb && !swb) @@ -2486,26 +2490,19 @@ out_destroy: */ static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r) { - /* - * Convert the u32 _val to an unsigned long required by all the bit - * operations within this function. No more than 32 bits of this - * converted value can be accessed because all bit operations are - * additionally provided with cbm_len that is initialized during - * hardware enumeration using five bits from the EAX register and - * thus never can exceed 32 bits. 
- */ - unsigned long *val = (unsigned long *)_val; + unsigned long val = *_val; unsigned int cbm_len = r->cache.cbm_len; unsigned long first_bit, zero_bit; - if (*val == 0) + if (val == 0) return; - first_bit = find_first_bit(val, cbm_len); - zero_bit = find_next_zero_bit(val, cbm_len, first_bit); + first_bit = find_first_bit(&val, cbm_len); + zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); /* Clear any remaining bits to ensure contiguous region */ - bitmap_clear(val, zero_bit, cbm_len - zero_bit); + bitmap_clear(&val, zero_bit, cbm_len - zero_bit); + *_val = (u32)val; } /* diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 0927bb158ffc..76228525acd0 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -22,6 +22,7 @@ #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> +#include <linux/memory.h> #include <trace/syscall.h> @@ -34,16 +35,25 @@ #ifdef CONFIG_DYNAMIC_FTRACE int ftrace_arch_code_modify_prepare(void) + __acquires(&text_mutex) { + /* + * Need to grab text_mutex to prevent a race from module loading + * and live kernel patching from changing the text permissions while + * ftrace has it set to "read/write". + */ + mutex_lock(&text_mutex); set_kernel_text_rw(); set_all_modules_text_rw(); return 0; } int ftrace_arch_code_modify_post_process(void) + __releases(&text_mutex) { set_all_modules_text_ro(); set_kernel_text_ro(); + mutex_unlock(&text_mutex); return 0; } diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 16b1cbd3a61e..29ffa495bd1c 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -184,24 +184,25 @@ unsigned long __head __startup_64(unsigned long physaddr, pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask(); if (la57) { - p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr); + p4d = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], + physaddr); i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD; pgd[i + 0] = (pgdval_t)p4d + pgtable_flags; pgd[i + 1] = (pgdval_t)p4d + pgtable_flags; - i = (physaddr >> P4D_SHIFT) % PTRS_PER_P4D; - p4d[i + 0] = (pgdval_t)pud + pgtable_flags; - p4d[i + 1] = (pgdval_t)pud + pgtable_flags; + i = physaddr >> P4D_SHIFT; + p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags; + p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags; } else { i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD; pgd[i + 0] = (pgdval_t)pud + pgtable_flags; pgd[i + 1] = (pgdval_t)pud + pgtable_flags; } - i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD; - pud[i + 0] = (pudval_t)pmd + pgtable_flags; - pud[i + 1] = (pudval_t)pmd + pgtable_flags; + i = physaddr >> PUD_SHIFT; + pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags; + pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags; pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL; /* Filter out unsupported __PAGE_KERNEL_* bits: */ @@ -211,8 +212,9 @@ unsigned long __head __startup_64(unsigned long physaddr, pmd_entry += physaddr; for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) { - int idx = i + (physaddr >> PMD_SHIFT) % PTRS_PER_PMD; - pmd[idx] = pmd_entry + i * PMD_SIZE; + int idx = i + (physaddr >> PMD_SHIFT); + + pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE; } /* diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c index 07c30ee17425..bb7e1132290b 100644 --- a/arch/x86/kernel/perf_regs.c +++ b/arch/x86/kernel/perf_regs.c @@ -74,6 +74,9 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) return regs_get_register(regs, pt_regs_offset[idx]); } 
+#define PERF_REG_X86_RESERVED (((1ULL << PERF_REG_X86_XMM0) - 1) & \ + ~((1ULL << PERF_REG_X86_MAX) - 1)) + #ifdef CONFIG_X86_32 #define REG_NOSUPPORT ((1ULL << PERF_REG_X86_R8) | \ (1ULL << PERF_REG_X86_R9) | \ @@ -86,7 +89,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) int perf_reg_validate(u64 mask) { - if (!mask || (mask & REG_NOSUPPORT)) + if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED))) return -EINVAL; return 0; @@ -112,7 +115,7 @@ void perf_get_regs_user(struct perf_regs *regs_user, int perf_reg_validate(u64 mask) { - if (!mask || (mask & REG_NOSUPPORT)) + if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED))) return -EINVAL; return 0; diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 33b66b5c5aec..72b997eaa1fc 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -82,9 +82,9 @@ static struct orc_entry *orc_find(unsigned long ip); * But they are copies of the ftrace entries that are static and * defined in ftrace_*.S, which do have orc entries. * - * If the undwinder comes across a ftrace trampoline, then find the + * If the unwinder comes across a ftrace trampoline, then find the * ftrace function that was used to create it, and use that ftrace - * function's orc entrie, as the placement of the return code in + * function's orc entry, as the placement of the return code in * the stack will be identical. */ static struct orc_entry *orc_ftrace_find(unsigned long ip) @@ -128,6 +128,16 @@ static struct orc_entry null_orc_entry = { .type = ORC_TYPE_CALL }; +/* Fake frame pointer entry -- used as a fallback for generated code */ +static struct orc_entry orc_fp_entry = { + .type = ORC_TYPE_CALL, + .sp_reg = ORC_REG_BP, + .sp_offset = 16, + .bp_reg = ORC_REG_PREV_SP, + .bp_offset = -16, + .end = 0, +}; + static struct orc_entry *orc_find(unsigned long ip) { static struct orc_entry *orc; @@ -392,8 +402,16 @@ bool unwind_next_frame(struct unwind_state *state) * calls and calls to noreturn functions. */ orc = orc_find(state->signal ? state->ip : state->ip - 1); - if (!orc) - goto err; + if (!orc) { + /* + * As a fallback, try to assume this code uses a frame pointer. + * This is useful for generated code, like BPF, which ORC + * doesn't know about. This is just a guess, so the rest of + * the unwind is no longer considered reliable. 
+ */ + orc = &orc_fp_entry; + state->error = true; + } /* End-of-stack check for kernel threads: */ if (orc->sp_reg == ORC_REG_UNDEFINED) { diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index a21c440ff356..4dabc318adb8 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2339,7 +2339,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu) struct kvm_lapic *apic = vcpu->arch.apic; u32 ppr; - if (!apic_enabled(apic)) + if (!kvm_apic_hw_enabled(apic)) return -1; __apic_update_ppr(apic, &ppr); diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 5f9c1a200201..46af3a5e9209 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -5240,9 +5240,6 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, vmx = to_vmx(vcpu); vmcs12 = get_vmcs12(vcpu); - if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled) - kvm_state.flags |= KVM_STATE_NESTED_EVMCS; - if (nested_vmx_allowed(vcpu) && (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr; @@ -5251,6 +5248,9 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, if (vmx_has_valid_vmcs12(vcpu)) { kvm_state.size += sizeof(user_vmx_nested_state->vmcs12); + if (vmx->nested.hv_evmcs) + kvm_state.flags |= KVM_STATE_NESTED_EVMCS; + if (is_guest_mode(vcpu) && nested_cpu_has_shadow_vmcs(vmcs12) && vmcs12->vmcs_link_pointer != -1ull) @@ -5350,6 +5350,15 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) return -EINVAL; + /* + * KVM_STATE_NESTED_EVMCS used to signal that KVM should + * enable eVMCS capability on vCPU. However, since then + * code was changed such that flag signals vmcs12 should + * be copied into eVMCS in guest memory. + * + * To preserve backwards compatability, allow user + * to set this flag even when there is no VMXON region. + */ if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS) return -EINVAL; } else { @@ -5358,7 +5367,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa)) return -EINVAL; - } + } if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) @@ -5373,20 +5382,21 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags * must be zero. */ - if (is_smm(vcpu) ? kvm_state->flags : kvm_state->hdr.vmx.smm.flags) + if (is_smm(vcpu) ? 
+ (kvm_state->flags & + (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING)) + : kvm_state->hdr.vmx.smm.flags) return -EINVAL; if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) return -EINVAL; - vmx_leave_nested(vcpu); - if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) { - if (!nested_vmx_allowed(vcpu)) + if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) && + (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled)) return -EINVAL; - nested_enable_evmcs(vcpu, NULL); - } + vmx_leave_nested(vcpu); if (kvm_state->hdr.vmx.vmxon_pa == -1ull) return 0; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9857992d4e58..fafd81d2c9ea 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1554,7 +1554,7 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) vcpu->arch.tsc_always_catchup = 1; return 0; } else { - WARN(1, "user requested TSC rate below hardware speed\n"); + pr_warn_ratelimited("user requested TSC rate below hardware speed\n"); return -1; } } @@ -1564,8 +1564,8 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) user_tsc_khz, tsc_khz); if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) { - WARN_ONCE(1, "Invalid TSC scaling ratio - virtual-tsc-khz=%u\n", - user_tsc_khz); + pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n", + user_tsc_khz); return -1; } diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 693aaf28d5fe..0f01c7b1d217 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -671,23 +671,25 @@ static unsigned long __meminit phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end, unsigned long page_size_mask, bool init) { - unsigned long paddr_next, paddr_last = paddr_end; - unsigned long vaddr = (unsigned long)__va(paddr); - int i = p4d_index(vaddr); + unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last; + + paddr_last = paddr_end; + vaddr = (unsigned long)__va(paddr); + vaddr_end = (unsigned long)__va(paddr_end); if (!pgtable_l5_enabled()) return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, page_size_mask, init); - for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) { - p4d_t *p4d; + for (; vaddr < vaddr_end; vaddr = vaddr_next) { + p4d_t *p4d = p4d_page + p4d_index(vaddr); pud_t *pud; - vaddr = (unsigned long)__va(paddr); - p4d = p4d_page + p4d_index(vaddr); - paddr_next = (paddr & P4D_MASK) + P4D_SIZE; + vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE; + paddr = __pa(vaddr); if (paddr >= paddr_end) { + paddr_next = __pa(vaddr_next); if (!after_bootmem && !e820__mapped_any(paddr & P4D_MASK, paddr_next, E820_TYPE_RAM) && @@ -699,13 +701,13 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end, if (!p4d_none(*p4d)) { pud = pud_offset(p4d, 0); - paddr_last = phys_pud_init(pud, paddr, paddr_end, - page_size_mask, init); + paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end), + page_size_mask, init); continue; } pud = alloc_low_page(); - paddr_last = phys_pud_init(pud, paddr, paddr_end, + paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end), page_size_mask, init); spin_lock(&init_mm.page_table_lock); diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 632b83885867..3b9fd679cea9 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -728,7 +728,7 @@ void efi_recover_from_page_fault(unsigned long phys_addr) * Address range 0x0000 - 0x0fff is always 
mapped in the efi_pgd, so * page faulting on these addresses isn't expected. */ - if (phys_addr >= 0x0000 && phys_addr <= 0x0fff) + if (phys_addr <= 0x0fff) return; /* diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index f8d430f88d25..f9269ae6da9c 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -240,7 +240,7 @@ static struct kmem_cache *bfq_pool; * containing only random (seeky) I/O are prevented from being tagged * as soft real-time. */ -#define BFQQ_TOTALLY_SEEKY(bfqq) (bfqq->seek_history & -1) +#define BFQQ_TOTALLY_SEEKY(bfqq) (bfqq->seek_history == -1) /* Min number of samples required to perform peak-rate update */ #define BFQ_RATE_MIN_SAMPLES 32 diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 2489ddbb21db..3afe327f816f 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -934,6 +934,13 @@ void blk_mq_debugfs_register_sched(struct request_queue *q) { struct elevator_type *e = q->elevator->type; + /* + * If the parent directory has not been created yet, return, we will be + * called again later on and the directory/files will be created then. + */ + if (!q->debugfs_dir) + return; + if (!e->queue_debugfs_attrs) return; diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 1ce1bf6d3bab..5f76c6e222c6 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -388,6 +388,7 @@ static void cryptd_skcipher_free(struct skcipher_instance *inst) struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst); crypto_drop_skcipher(&ctx->spawn); + kfree(inst); } static int cryptd_create_skcipher(struct crypto_template *tmpl, diff --git a/crypto/crypto_user_base.c b/crypto/crypto_user_base.c index d5d5d155340b..c65e39005ce2 100644 --- a/crypto/crypto_user_base.c +++ b/crypto/crypto_user_base.c @@ -44,6 +44,9 @@ struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact) list_for_each_entry(q, &crypto_alg_list, cra_list) { int match = 0; + if (crypto_is_larval(q)) + continue; + if ((q->cra_flags ^ p->cru_type) & p->cru_mask) continue; diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c index 40c8a552a478..4074886b7bc8 100644 --- a/drivers/auxdisplay/cfag12864bfb.c +++ b/drivers/auxdisplay/cfag12864bfb.c @@ -52,8 +52,9 @@ static const struct fb_var_screeninfo cfag12864bfb_var = { static int cfag12864bfb_mmap(struct fb_info *info, struct vm_area_struct *vma) { - return vm_insert_page(vma, vma->vm_start, - virt_to_page(cfag12864b_buffer)); + struct page *pages = virt_to_page(cfag12864b_buffer); + + return vm_map_pages_zero(vma, &pages, 1); } static struct fb_ops cfag12864bfb_ops = { diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c index 21393ec3b9a4..9c0bb771751d 100644 --- a/drivers/auxdisplay/ht16k33.c +++ b/drivers/auxdisplay/ht16k33.c @@ -223,9 +223,9 @@ static const struct backlight_ops ht16k33_bl_ops = { static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct ht16k33_priv *priv = info->par; + struct page *pages = virt_to_page(priv->fbdev.buffer); - return vm_insert_page(vma, vma->vm_start, - virt_to_page(priv->fbdev.buffer)); + return vm_map_pages_zero(vma, &pages, 1); } static struct fb_ops ht16k33_fb_ops = { diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index aa51756fd4d6..87b410d6e51d 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -368,7 +368,7 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index) const char *dev_id = dev ? 
dev_name(dev) : NULL; struct device_node *np = core->of_node; - if (np && index >= 0) + if (np && (name || index >= 0)) hw = of_clk_get_hw(np, index, name); /* diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c index 739f64fdf1e3..206fafd299ea 100644 --- a/drivers/clk/meson/g12a.c +++ b/drivers/clk/meson/g12a.c @@ -2734,8 +2734,8 @@ static struct clk_hw_onecell_data g12a_hw_onecell_data = { [CLKID_MALI_1_DIV] = &g12a_mali_1_div.hw, [CLKID_MALI_1] = &g12a_mali_1.hw, [CLKID_MALI] = &g12a_mali.hw, - [CLKID_MPLL_5OM_DIV] = &g12a_mpll_50m_div.hw, - [CLKID_MPLL_5OM] = &g12a_mpll_50m.hw, + [CLKID_MPLL_50M_DIV] = &g12a_mpll_50m_div.hw, + [CLKID_MPLL_50M] = &g12a_mpll_50m.hw, [CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw, [CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw, [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw, diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h index 39c41af70804..bcc05cd9882f 100644 --- a/drivers/clk/meson/g12a.h +++ b/drivers/clk/meson/g12a.h @@ -166,7 +166,7 @@ #define CLKID_HDMI_DIV 167 #define CLKID_MALI_0_DIV 170 #define CLKID_MALI_1_DIV 173 -#define CLKID_MPLL_5OM_DIV 176 +#define CLKID_MPLL_50M_DIV 176 #define CLKID_SYS_PLL_DIV16_EN 178 #define CLKID_SYS_PLL_DIV16 179 #define CLKID_CPU_CLK_DYN0_SEL 180 diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 37cf0f01bb5d..62cd3a7f1f65 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -1761,7 +1761,7 @@ static struct clk_regmap meson8m2_gp_pll = { }, }; -static const char * const mmeson8b_vpu_0_1_parent_names[] = { +static const char * const meson8b_vpu_0_1_parent_names[] = { "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7" }; @@ -1778,8 +1778,8 @@ static struct clk_regmap meson8b_vpu_0_sel = { .hw.init = &(struct clk_init_data){ .name = "vpu_0_sel", .ops = &clk_regmap_mux_ops, - .parent_names = mmeson8b_vpu_0_1_parent_names, - .num_parents = ARRAY_SIZE(mmeson8b_vpu_0_1_parent_names), + .parent_names = meson8b_vpu_0_1_parent_names, + .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_names), .flags = CLK_SET_RATE_PARENT, }, }; @@ -1837,8 +1837,8 @@ static struct clk_regmap meson8b_vpu_1_sel = { .hw.init = &(struct clk_init_data){ .name = "vpu_1_sel", .ops = &clk_regmap_mux_ops, - .parent_names = mmeson8b_vpu_0_1_parent_names, - .num_parents = ARRAY_SIZE(mmeson8b_vpu_0_1_parent_names), + .parent_names = meson8b_vpu_0_1_parent_names, + .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_names), .flags = CLK_SET_RATE_PARENT, }, }; diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c index 8281dfbf38c2..5bed36e12951 100644 --- a/drivers/clk/socfpga/clk-s10.c +++ b/drivers/clk/socfpga/clk-s10.c @@ -103,9 +103,9 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = { { STRATIX10_NOC_CLK, "noc_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0, 0, 0x3C, 1}, { STRATIX10_EMAC_A_FREE_CLK, "emaca_free_clk", NULL, emaca_free_mux, ARRAY_SIZE(emaca_free_mux), - 0, 0, 4, 0xB0, 0}, + 0, 0, 2, 0xB0, 0}, { STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux), - 0, 0, 4, 0xB0, 1}, + 0, 0, 2, 0xB0, 1}, { STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux, ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2}, { STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux, diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index e1ba62d2b1a0..ac1d27a8c650 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ 
b/drivers/clk/tegra/clk-tegra210.c @@ -3366,6 +3366,8 @@ static struct tegra_clk_init_table init_table[] __initdata = { { TEGRA210_CLK_I2S3_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 }, { TEGRA210_CLK_I2S4_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 }, { TEGRA210_CLK_VIMCLK_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 }, + { TEGRA210_CLK_HDA, TEGRA210_CLK_PLL_P, 51000000, 0 }, + { TEGRA210_CLK_HDA2CODEC_2X, TEGRA210_CLK_PLL_P, 48000000, 0 }, /* This MUST be the last entry. */ { TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 }, }; diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c index 8e834317c97d..975995eea15c 100644 --- a/drivers/clk/ti/clkctrl.c +++ b/drivers/clk/ti/clkctrl.c @@ -229,6 +229,7 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec, { struct omap_clkctrl_provider *provider = data; struct omap_clkctrl_clk *entry; + bool found = false; if (clkspec->args_count != 2) return ERR_PTR(-EINVAL); @@ -238,11 +239,13 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec, list_for_each_entry(entry, &provider->clocks, node) { if (entry->reg_offset == clkspec->args[0] && - entry->bit_offset == clkspec->args[1]) + entry->bit_offset == clkspec->args[1]) { + found = true; break; + } } - if (!entry) + if (!found) return ERR_PTR(-EINVAL); return entry->clk; diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index 263bee76ef0d..6b8c4c458e8a 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -718,12 +718,13 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data) { struct jz4780_dma_dev *jzdma = data; unsigned int nb_channels = jzdma->soc_data->nb_channels; - uint32_t pending, dmac; + unsigned long pending; + uint32_t dmac; int i; pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP); - for_each_set_bit(i, (unsigned long *)&pending, nb_channels) { + for_each_set_bit(i, &pending, nb_channels) { if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i])) pending &= ~BIT(i); } diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 99d9f431ae2c..4ec84a633bd3 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -703,7 +703,7 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, spin_lock_irqsave(&sdma->channel_0_lock, flags); bd0->mode.command = C0_SETPM; - bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; + bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD; bd0->mode.count = size / 2; bd0->buffer_addr = buf_phys; bd0->ext_buffer_addr = address; @@ -1025,7 +1025,7 @@ static int sdma_load_context(struct sdma_channel *sdmac) context->gReg[7] = sdmac->watermark_level; bd0->mode.command = C0_SETDM; - bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; + bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD; bd0->mode.count = sizeof(*context) / 4; bd0->buffer_addr = sdma->context_phys; bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel; @@ -2096,27 +2096,6 @@ static int sdma_probe(struct platform_device *pdev) if (pdata && pdata->script_addrs) sdma_add_scripts(sdma, pdata->script_addrs); - if (pdata) { - ret = sdma_get_firmware(sdma, pdata->fw_name); - if (ret) - dev_warn(&pdev->dev, "failed to get firmware from platform data\n"); - } else { - /* - * Because that device tree does not encode ROM script address, - * the RAM script in firmware is mandatory for device tree - * probe, otherwise it fails. 
- */ - ret = of_property_read_string(np, "fsl,sdma-ram-script-name", - &fw_name); - if (ret) - dev_warn(&pdev->dev, "failed to get firmware name\n"); - else { - ret = sdma_get_firmware(sdma, fw_name); - if (ret) - dev_warn(&pdev->dev, "failed to get firmware from device tree\n"); - } - } - sdma->dma_device.dev = &pdev->dev; sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources; @@ -2161,6 +2140,33 @@ static int sdma_probe(struct platform_device *pdev) of_node_put(spba_bus); } + /* + * Kick off firmware loading as the very last step: + * attempt to load firmware only if we're not on the error path, because + * the firmware callback requires a fully functional and allocated sdma + * instance. + */ + if (pdata) { + ret = sdma_get_firmware(sdma, pdata->fw_name); + if (ret) + dev_warn(&pdev->dev, "failed to get firmware from platform data\n"); + } else { + /* + * Because that device tree does not encode ROM script address, + * the RAM script in firmware is mandatory for device tree + * probe, otherwise it fails. + */ + ret = of_property_read_string(np, "fsl,sdma-ram-script-name", + &fw_name); + if (ret) { + dev_warn(&pdev->dev, "failed to get firmware name\n"); + } else { + ret = sdma_get_firmware(sdma, fw_name); + if (ret) + dev_warn(&pdev->dev, "failed to get firmware from device tree\n"); + } + } + return 0; err_register: diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 4b43844f6af5..8e90a405939d 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -799,6 +799,9 @@ static u32 process_channel_irqs(struct bam_device *bdev) /* Number of bytes available to read */ avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1); + if (offset < bchan->head) + avail--; + list_for_each_entry_safe(async_desc, tmp, &bchan->desc_list, desc_node) { /* Not enough data to read */ diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c index a2384184a7de..b07c17643210 100644 --- a/drivers/firmware/efi/efi-bgrt.c +++ b/drivers/firmware/efi/efi-bgrt.c @@ -47,11 +47,6 @@ void __init efi_bgrt_init(struct acpi_table_header *table) bgrt->version); goto out; } - if (bgrt->status & 0xfe) { - pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n", - bgrt->status); - goto out; - } if (bgrt->image_type != 0) { pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n", bgrt->image_type); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 16b2137d117c..4b7cf7bc0ded 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -1009,14 +1009,16 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) /* first try to find a slot in an existing linked list entry */ for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) { - rsv = __va(prsv); + rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB); index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size); if (index < rsv->size) { rsv->entry[index].base = addr; rsv->entry[index].size = size; + memunmap(rsv); return 0; } + memunmap(rsv); } /* no slot found - allocate a new linked list entry */ @@ -1024,7 +1026,13 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) if (!rsv) return -ENOMEM; - rsv->size = EFI_MEMRESERVE_COUNT(PAGE_SIZE); + /* + * The memremap() call above assumes that a linux_efi_memreserve entry + * never crosses a page boundary, so let's ensure that this remains true + * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by + * using SZ_4K explicitly in the size calculation 
below. + */ + rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K); atomic_set(&rsv->count, 1); rsv->entry[0].base = addr; rsv->entry[0].size = size; diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c index 61e099826cbb..35dccc88ac0a 100644 --- a/drivers/firmware/efi/efibc.c +++ b/drivers/firmware/efi/efibc.c @@ -43,11 +43,13 @@ static int efibc_set_variable(const char *name, const char *value) efibc_str_to_str16(value, (efi_char16_t *)entry->var.Data); memcpy(&entry->var.VendorGuid, &guid, sizeof(guid)); - ret = efivar_entry_set(entry, - EFI_VARIABLE_NON_VOLATILE - | EFI_VARIABLE_BOOTSERVICE_ACCESS - | EFI_VARIABLE_RUNTIME_ACCESS, - size, entry->var.Data, NULL); + ret = efivar_entry_set_safe(entry->var.VariableName, + entry->var.VendorGuid, + EFI_VARIABLE_NON_VOLATILE + | EFI_VARIABLE_BOOTSERVICE_ACCESS + | EFI_VARIABLE_RUNTIME_ACCESS, + false, size, entry->var.Data); + if (ret) pr_err("failed to set %s EFI variable: 0x%x\n", name, ret); diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index aec7bd86ae7e..9c9b965d7d6d 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -118,8 +118,15 @@ static void of_gpio_flags_quirks(struct device_node *np, * Legacy handling of SPI active high chip select. If we have a * property named "cs-gpios" we need to inspect the child node * to determine if the flags should have inverted semantics. + * + * This does not apply to an SPI device named "spi-gpio", because + * these have traditionally obtained their own GPIOs by parsing + * the device tree directly and did not respect any "spi-cs-high" + * property on the SPI bus children. */ - if (IS_ENABLED(CONFIG_SPI_MASTER) && !strcmp(propname, "cs-gpios") && + if (IS_ENABLED(CONFIG_SPI_MASTER) && + !strcmp(propname, "cs-gpios") && + !of_device_is_compatible(np, "spi-gpio") && of_property_read_bool(np, "cs-gpios")) { struct device_node *child; u32 cs; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index b610e3b30d95..2f18c64d531f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1959,25 +1959,6 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev) mutex_unlock(&adev->srbm_mutex); gfx_v9_0_init_compute_vmid(adev); - - mutex_lock(&adev->grbm_idx_mutex); - /* - * making sure that the following register writes will be broadcasted - * to all the shaders - */ - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - - WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE, - (adev->gfx.config.sc_prim_fifo_size_frontend << - PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) | - (adev->gfx.config.sc_prim_fifo_size_backend << - PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) | - (adev->gfx.config.sc_hiz_tile_fifo_size << - PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) | - (adev->gfx.config.sc_earlyz_tile_fifo_size << - PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)); - mutex_unlock(&adev->grbm_idx_mutex); - } static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index f1d326caf69e..a7e8340baf90 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -326,7 +326,7 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr) if (ret) return ret; - ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL); + ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL); return ret; } diff --git 
a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index ae64ff7153d6..1cd5a8b5cdc1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c @@ -916,8 +916,10 @@ static int init_thermal_controller( PHM_PlatformCaps_ThermalController ); - if (0 == powerplay_table->usFanTableOffset) + if (0 == powerplay_table->usFanTableOffset) { + hwmgr->thermal_controller.use_hw_fan_control = 1; return 0; + } fan_table = (const PPTable_Generic_SubTable_Header *) (((unsigned long)powerplay_table) + diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index c92999aac07c..eccb26fddbd0 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -694,6 +694,7 @@ struct pp_thermal_controller_info { uint8_t ucType; uint8_t ucI2cLine; uint8_t ucI2cAddress; + uint8_t use_hw_fan_control; struct pp_fan_info fanInfo; struct pp_advance_fan_control_parameters advanceFanControlParameters; }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 2d4cfe14f72e..29e641c6a5db 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -2092,6 +2092,10 @@ static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) return 0; } + /* use hardware fan control */ + if (hwmgr->thermal_controller.use_hw_fan_control) + return 0; + tmp64 = hwmgr->thermal_controller.advanceFanControlParameters. usPWMMin * duty100; do_div(tmp64, 10000); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 72d01e873160..5418a1a87b2c 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -760,7 +760,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) if (IS_ERR(gpu->cmdbuf_suballoc)) { dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n"); ret = PTR_ERR(gpu->cmdbuf_suballoc); - goto fail; + goto destroy_iommu; } /* Create buffer: */ @@ -768,7 +768,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) PAGE_SIZE); if (ret) { dev_err(gpu->dev, "could not create command buffer\n"); - goto destroy_iommu; + goto destroy_suballoc; } if (gpu->mmu->version == ETNAVIV_IOMMU_V1 && @@ -800,6 +800,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) free_buffer: etnaviv_cmdbuf_free(&gpu->buffer); gpu->buffer.suballoc = NULL; +destroy_suballoc: + etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc); + gpu->cmdbuf_suballoc = NULL; destroy_iommu: etnaviv_iommu_destroy(gpu->mmu); gpu->mmu = NULL; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 029fd8ec1857..f0d45ccc1aac 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1888,12 +1888,12 @@ static int ring_request_alloc(struct i915_request *request) */ request->reserved_space += LEGACY_REQUEST_SIZE; - ret = switch_context(request); + /* Unconditionally invalidate GPU caches and TLBs. */ + ret = request->engine->emit_flush(request, EMIT_INVALIDATE); if (ret) return ret; - /* Unconditionally invalidate GPU caches and TLBs. 
*/ - ret = request->engine->emit_flush(request, EMIT_INVALIDATE); + ret = switch_context(request); if (ret) return ret; diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 9cc1d678674f..c436a28d50e4 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -91,14 +91,14 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc, ipu_dc_disable(ipu); ipu_prg_disable(ipu); + drm_crtc_vblank_off(crtc); + spin_lock_irq(&crtc->dev->event_lock); - if (crtc->state->event) { + if (crtc->state->event && !crtc->state->active) { drm_crtc_send_vblank_event(crtc, crtc->state->event); crtc->state->event = NULL; } spin_unlock_irq(&crtc->dev->event_lock); - - drm_crtc_vblank_off(crtc); } static void imx_drm_crtc_reset(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index d11e2281dde6..7e43b25785f7 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -63,7 +63,7 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data, return 0; err_free: - drm_gem_object_put_unlocked(&shmem->base); + drm_gem_handle_delete(file, args->handle); return ret; } diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index e62fe24b1a2e..5bb0f0a084e9 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -619,11 +619,11 @@ static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev, output = vgdev->outputs + scanout; new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp); + drm_connector_update_edid_property(&output->conn, new_edid); spin_lock(&vgdev->display_info_lock); old_edid = output->edid; output->edid = new_edid; - drm_connector_update_edid_property(&output->conn, output->edid); spin_unlock(&vgdev->display_info_lock); kfree(old_edid); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index eac0c54c5970..b032d3899fa3 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -80,6 +80,7 @@ #define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220 #define HID_DEVICE_ID_ALPS_U1 0x1215 #define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C +#define HID_DEVICE_ID_ALPS_1222 0x1222 #define USB_VENDOR_ID_AMI 0x046b @@ -269,6 +270,7 @@ #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618 #define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE 0x1053 +#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE2 0x0939 #define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123 #define USB_DEVICE_ID_ASUS_AK1D 0x1125 #define USB_DEVICE_ID_CHICONY_TOSHIBA_WT10A 0x1408 @@ -569,6 +571,7 @@ #define USB_VENDOR_ID_HUION 0x256c #define USB_DEVICE_ID_HUION_TABLET 0x006e +#define USB_DEVICE_ID_HUION_HS64 0x006d #define USB_VENDOR_ID_IBM 0x04b3 #define USB_DEVICE_ID_IBM_SCROLLPOINT_III 0x3100 diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index e564bff86515..bfcf2ee58d14 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -30,6 +30,7 @@ #define REPORT_ID_HIDPP_SHORT 0x10 #define REPORT_ID_HIDPP_LONG 0x11 +#define REPORT_ID_HIDPP_VERY_LONG 0x12 #define HIDPP_REPORT_SHORT_LENGTH 7 #define HIDPP_REPORT_LONG_LENGTH 20 @@ -1242,7 +1243,8 @@ static int logi_dj_ll_raw_request(struct hid_device *hid, int ret; if ((buf[0] == REPORT_ID_HIDPP_SHORT) || - (buf[0] == REPORT_ID_HIDPP_LONG)) { + (buf[0] == REPORT_ID_HIDPP_LONG) || + (buf[0] == REPORT_ID_HIDPP_VERY_LONG)) { if (count < 
2) return -EINVAL; diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 5df5dd56ecc8..b603c14d043b 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1776,6 +1776,10 @@ static const struct hid_device_id mt_devices[] = { HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) }, + { .driver_data = MT_CLS_WIN_8_DUAL, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_ALPS_JP, + HID_DEVICE_ID_ALPS_1222) }, /* Lenovo X1 TAB Gen 2 */ { .driver_data = MT_CLS_WIN_8_DUAL, diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index e5ca6fe2ca57..671a285724f9 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -42,6 +42,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE2), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD), HID_QUIRK_BADPAD }, { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK), HID_QUIRK_NOGET }, diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c index 8fe02d81265d..914fb527ae7a 100644 --- a/drivers/hid/hid-uclogic-core.c +++ b/drivers/hid/hid-uclogic-core.c @@ -369,6 +369,8 @@ static const struct hid_device_id uclogic_devices[] = { USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) }, { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) }, + { HID_USB_DEVICE(USB_VENDOR_ID_HUION, + USB_DEVICE_ID_HUION_HS64) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c index 0187c9f8fc22..273d784fff66 100644 --- a/drivers/hid/hid-uclogic-params.c +++ b/drivers/hid/hid-uclogic-params.c @@ -977,6 +977,8 @@ int uclogic_params_init(struct uclogic_params *params, /* FALL THROUGH */ case VID_PID(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET): + case VID_PID(USB_VENDOR_ID_HUION, + USB_DEVICE_ID_HUION_HS64): case VID_PID(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET): case VID_PID(USB_VENDOR_ID_UCLOGIC, diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c index 22ba21457035..aa2dbed30fc3 100644 --- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c +++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c @@ -816,9 +816,9 @@ static int load_fw_from_host(struct ishtp_cl_data *client_data) goto end_err_fw_release; release_firmware(fw); - kfree(filename); dev_info(cl_data_to_dev(client_data), "ISH firmware %s loaded\n", filename); + kfree(filename); return 0; end_err_fw_release: diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c index c0487b34d2cf..6ba944b40fdb 100644 --- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c +++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c @@ -891,7 +891,7 @@ static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device) */ static int hid_ishtp_cl_suspend(struct device *device) { - struct ishtp_cl_device *cl_device = 
dev_get_drvdata(device); + struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device); struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device); struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl); @@ -912,7 +912,7 @@ static int hid_ishtp_cl_suspend(struct device *device) */ static int hid_ishtp_cl_resume(struct device *device) { - struct ishtp_cl_device *cl_device = dev_get_drvdata(device); + struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device); struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device); struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl); diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c index 794e700d65f7..c47c3328a0f4 100644 --- a/drivers/hid/intel-ish-hid/ishtp/bus.c +++ b/drivers/hid/intel-ish-hid/ishtp/bus.c @@ -471,7 +471,6 @@ static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev, } ishtp_device_ready = true; - dev_set_drvdata(&device->dev, device); return device; } @@ -640,6 +639,20 @@ void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device) EXPORT_SYMBOL(ishtp_get_drvdata); /** + * ishtp_dev_to_cl_device() - get ishtp_cl_device instance from device instance + * @device: device instance + * + * Get ish_cl_device instance which embeds device instance in it. + * + * Return: pointer to ishtp_cl_device instance + */ +struct ishtp_cl_device *ishtp_dev_to_cl_device(struct device *device) +{ + return to_ishtp_cl_device(device); +} +EXPORT_SYMBOL(ishtp_dev_to_cl_device); + +/** * ishtp_bus_new_client() - Create a new client * @dev: ISHTP device instance * diff --git a/drivers/irqchip/irq-csky-mpintc.c b/drivers/irqchip/irq-csky-mpintc.c index c67c961ab6cc..a4c1aacba1ff 100644 --- a/drivers/irqchip/irq-csky-mpintc.c +++ b/drivers/irqchip/irq-csky-mpintc.c @@ -89,8 +89,19 @@ static int csky_irq_set_affinity(struct irq_data *d, if (cpu >= nr_cpu_ids) return -EINVAL; - /* Enable interrupt destination */ - cpu |= BIT(31); + /* + * The csky,mpintc could support auto irq deliver, but it only + * could deliver external irq to one cpu or all cpus. So it + * doesn't support deliver external irq to a group of cpus + * with cpu_mask. + * SO we only use auto deliver mode when affinity mask_val is + * equal to cpu_present_mask. + * + */ + if (cpumask_equal(mask_val, cpu_present_mask)) + cpu = 0; + else + cpu |= BIT(31); writel_relaxed(cpu, INTCG_base + INTCG_CIDSTR + offset); diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index d29b44b677e4..35500801dc2b 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -733,32 +733,43 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) } static int its_wait_for_range_completion(struct its_node *its, - struct its_cmd_block *from, + u64 prev_idx, struct its_cmd_block *to) { - u64 rd_idx, from_idx, to_idx; + u64 rd_idx, to_idx, linear_idx; u32 count = 1000000; /* 1s! */ - from_idx = its_cmd_ptr_to_offset(its, from); + /* Linearize to_idx if the command set has wrapped around */ to_idx = its_cmd_ptr_to_offset(its, to); + if (to_idx < prev_idx) + to_idx += ITS_CMD_QUEUE_SZ; + + linear_idx = prev_idx; while (1) { + s64 delta; + rd_idx = readl_relaxed(its->base + GITS_CREADR); - /* Direct case */ - if (from_idx < to_idx && rd_idx >= to_idx) - break; + /* + * Compute the read pointer progress, taking the + * potential wrap-around into account. 
+ */ + delta = rd_idx - prev_idx; + if (rd_idx < prev_idx) + delta += ITS_CMD_QUEUE_SZ; - /* Wrapped case */ - if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx) + linear_idx += delta; + if (linear_idx >= to_idx) break; count--; if (!count) { - pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n", - from_idx, to_idx, rd_idx); + pr_err_ratelimited("ITS queue timeout (%llu %llu)\n", + to_idx, linear_idx); return -1; } + prev_idx = rd_idx; cpu_relax(); udelay(1); } @@ -775,6 +786,7 @@ void name(struct its_node *its, \ struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \ synctype *sync_obj; \ unsigned long flags; \ + u64 rd_idx; \ \ raw_spin_lock_irqsave(&its->lock, flags); \ \ @@ -796,10 +808,11 @@ void name(struct its_node *its, \ } \ \ post: \ + rd_idx = readl_relaxed(its->base + GITS_CREADR); \ next_cmd = its_post_commands(its); \ raw_spin_unlock_irqrestore(&its->lock, flags); \ \ - if (its_wait_for_range_completion(its, cmd, next_cmd)) \ + if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \ pr_err_ratelimited("ITS cmd %ps failed\n", builder); \ } diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index d32268cc1174..f3985469c221 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -388,7 +388,7 @@ static void gic_all_vpes_irq_cpu_online(struct irq_data *d) intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); cd = irq_data_get_irq_chip_data(d); - write_gic_vl_map(intr, cd->map); + write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map); if (cd->mask) write_gic_vl_smask(BIT(intr)); } @@ -517,7 +517,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, spin_lock_irqsave(&gic_lock, flags); for_each_online_cpu(cpu) { write_gic_vl_other(mips_cm_vp_id(cpu)); - write_gic_vo_map(intr, map); + write_gic_vo_map(mips_gic_vx_map_reg(intr), map); } spin_unlock_irqrestore(&gic_lock, flags); diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c index 011b60a49e3f..ef4d625d2d80 100644 --- a/drivers/irqchip/irq-ti-sci-inta.c +++ b/drivers/irqchip/irq-ti-sci-inta.c @@ -159,9 +159,9 @@ static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_dom parent_fwspec.param[1] = vint_desc->vint_id; parent_virq = irq_create_fwspec_mapping(&parent_fwspec); - if (parent_virq <= 0) { + if (parent_virq == 0) { kfree(vint_desc); - return ERR_PTR(parent_virq); + return ERR_PTR(-EINVAL); } vint_desc->parent_virq = parent_virq; diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c index 352e803f566e..728733a514c7 100644 --- a/drivers/md/dm-init.c +++ b/drivers/md/dm-init.c @@ -140,8 +140,8 @@ static char __init *dm_parse_table_entry(struct dm_device *dev, char *str) return ERR_PTR(-EINVAL); } /* target_args */ - dev->target_args_array[n] = kstrndup(field[3], GFP_KERNEL, - DM_MAX_STR_SIZE); + dev->target_args_array[n] = kstrndup(field[3], DM_MAX_STR_SIZE, + GFP_KERNEL); if (!dev->target_args_array[n]) return ERR_PTR(-ENOMEM); @@ -272,10 +272,10 @@ static int __init dm_init_init(void) return 0; if (strlen(create) >= DM_MAX_STR_SIZE) { - DMERR("Argument is too big. Limit is %d\n", DM_MAX_STR_SIZE); + DMERR("Argument is too big. 
Limit is %d", DM_MAX_STR_SIZE); return -EINVAL; } - str = kstrndup(create, GFP_KERNEL, DM_MAX_STR_SIZE); + str = kstrndup(create, DM_MAX_STR_SIZE, GFP_KERNEL); if (!str) return -ENOMEM; @@ -283,7 +283,7 @@ static int __init dm_init_init(void) if (r) goto out; - DMINFO("waiting for all devices to be available before creating mapped devices\n"); + DMINFO("waiting for all devices to be available before creating mapped devices"); wait_for_device_probe(); list_for_each_entry(dev, &devices, list) { diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index 9ea2b0291f20..e549392e0ea5 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -60,6 +60,7 @@ #define WRITE_LOG_VERSION 1ULL #define WRITE_LOG_MAGIC 0x6a736677736872ULL +#define WRITE_LOG_SUPER_SECTOR 0 /* * The disk format for this is braindead simple. @@ -115,6 +116,7 @@ struct log_writes_c { struct list_head logging_blocks; wait_queue_head_t wait; struct task_struct *log_kthread; + struct completion super_done; }; struct pending_block { @@ -180,6 +182,14 @@ static void log_end_io(struct bio *bio) bio_put(bio); } +static void log_end_super(struct bio *bio) +{ + struct log_writes_c *lc = bio->bi_private; + + complete(&lc->super_done); + log_end_io(bio); +} + /* * Meant to be called if there is an error, it will free all the pages * associated with the block. @@ -215,7 +225,8 @@ static int write_metadata(struct log_writes_c *lc, void *entry, bio->bi_iter.bi_size = 0; bio->bi_iter.bi_sector = sector; bio_set_dev(bio, lc->logdev->bdev); - bio->bi_end_io = log_end_io; + bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ? + log_end_super : log_end_io; bio->bi_private = lc; bio_set_op_attrs(bio, REQ_OP_WRITE, 0); @@ -418,11 +429,18 @@ static int log_super(struct log_writes_c *lc) super.nr_entries = cpu_to_le64(lc->logged_entries); super.sectorsize = cpu_to_le32(lc->sectorsize); - if (write_metadata(lc, &super, sizeof(super), NULL, 0, 0)) { + if (write_metadata(lc, &super, sizeof(super), NULL, 0, + WRITE_LOG_SUPER_SECTOR)) { DMERR("Couldn't write super"); return -1; } + /* + * Super sector should be writen in-order, otherwise the + * nr_entries could be rewritten incorrectly by an old bio. 
+ */ + wait_for_completion_io(&lc->super_done); + return 0; } @@ -531,6 +549,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv) INIT_LIST_HEAD(&lc->unflushed_blocks); INIT_LIST_HEAD(&lc->logging_blocks); init_waitqueue_head(&lc->wait); + init_completion(&lc->super_done); atomic_set(&lc->io_blocks, 0); atomic_set(&lc->pending_blocks, 0); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 350cf0451456..ec8b27e20de3 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -561,7 +561,7 @@ static char **realloc_argv(unsigned *size, char **old_argv) gfp = GFP_NOIO; } argv = kmalloc_array(new_size, sizeof(*argv), gfp); - if (argv) { + if (argv && old_argv) { memcpy(argv, old_argv, *size * sizeof(*argv)); *size = new_size; } diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 720d06531aa3..ea24ff0612e3 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -235,8 +235,8 @@ static int verity_handle_err(struct dm_verity *v, enum verity_block_type type, BUG(); } - DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str, - block); + DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name, + type_str, block); if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS) DMERR("%s: reached maximum errors", v->data_dev->name); diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c index fe8efba2d45f..857991cb3cbb 100644 --- a/drivers/mfd/stmfx.c +++ b/drivers/mfd/stmfx.c @@ -204,12 +204,11 @@ static struct irq_chip stmfx_irq_chip = { static irqreturn_t stmfx_irq_handler(int irq, void *data) { struct stmfx *stmfx = data; - unsigned long n, pending; - u32 ack; - int ret; + unsigned long bits; + u32 pending, ack; + int n, ret; - ret = regmap_read(stmfx->map, STMFX_REG_IRQ_PENDING, - (u32 *)&pending); + ret = regmap_read(stmfx->map, STMFX_REG_IRQ_PENDING, &pending); if (ret) return IRQ_NONE; @@ -224,7 +223,8 @@ static irqreturn_t stmfx_irq_handler(int irq, void *data) return IRQ_NONE; } - for_each_set_bit(n, &pending, STMFX_REG_IRQ_SRC_MAX) + bits = pending; + for_each_set_bit(n, &bits, STMFX_REG_IRQ_SRC_MAX) handle_nested_irq(irq_find_mapping(stmfx->irq_domain, n)); return IRQ_HANDLED; diff --git a/drivers/mtd/nand/raw/ingenic/Kconfig b/drivers/mtd/nand/raw/ingenic/Kconfig index 19a96ce515c1..66b7cffdb0c2 100644 --- a/drivers/mtd/nand/raw/ingenic/Kconfig +++ b/drivers/mtd/nand/raw/ingenic/Kconfig @@ -16,7 +16,7 @@ config MTD_NAND_JZ4780 if MTD_NAND_JZ4780 config MTD_NAND_INGENIC_ECC - tristate + bool config MTD_NAND_JZ4740_ECC tristate "Hardware BCH support for JZ4740 SoC" diff --git a/drivers/mtd/nand/raw/ingenic/Makefile b/drivers/mtd/nand/raw/ingenic/Makefile index 1ac4f455baea..b63d36889263 100644 --- a/drivers/mtd/nand/raw/ingenic/Makefile +++ b/drivers/mtd/nand/raw/ingenic/Makefile @@ -2,7 +2,9 @@ obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o obj-$(CONFIG_MTD_NAND_JZ4780) += ingenic_nand.o -obj-$(CONFIG_MTD_NAND_INGENIC_ECC) += ingenic_ecc.o +ingenic_nand-y += ingenic_nand_drv.o +ingenic_nand-$(CONFIG_MTD_NAND_INGENIC_ECC) += ingenic_ecc.o + obj-$(CONFIG_MTD_NAND_JZ4740_ECC) += jz4740_ecc.o obj-$(CONFIG_MTD_NAND_JZ4725B_BCH) += jz4725b_bch.o obj-$(CONFIG_MTD_NAND_JZ4780_BCH) += jz4780_bch.o diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c index d3e085c5685a..c954189606f6 100644 --- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c +++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c @@ -30,7 +30,6 @@ int ingenic_ecc_calculate(struct 
ingenic_ecc *ecc, { return ecc->ops->calculate(ecc, params, buf, ecc_code); } -EXPORT_SYMBOL(ingenic_ecc_calculate); /** * ingenic_ecc_correct() - detect and correct bit errors @@ -51,7 +50,6 @@ int ingenic_ecc_correct(struct ingenic_ecc *ecc, { return ecc->ops->correct(ecc, params, buf, ecc_code); } -EXPORT_SYMBOL(ingenic_ecc_correct); /** * ingenic_ecc_get() - get the ECC controller device @@ -111,7 +109,6 @@ struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *of_node) } return ecc; } -EXPORT_SYMBOL(of_ingenic_ecc_get); /** * ingenic_ecc_release() - release the ECC controller device @@ -122,7 +119,6 @@ void ingenic_ecc_release(struct ingenic_ecc *ecc) clk_disable_unprepare(ecc->clk); put_device(ecc->dev); } -EXPORT_SYMBOL(ingenic_ecc_release); int ingenic_ecc_probe(struct platform_device *pdev) { @@ -159,8 +155,3 @@ int ingenic_ecc_probe(struct platform_device *pdev) return 0; } EXPORT_SYMBOL(ingenic_ecc_probe); - -MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>"); -MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>"); -MODULE_DESCRIPTION("Ingenic ECC common driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c index d7b7c0f13909..d7b7c0f13909 100644 --- a/drivers/mtd/nand/raw/ingenic/ingenic_nand.c +++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index b5b68aa16eb3..6eb131292eb2 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -4662,7 +4662,6 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) memorg = nanddev_get_memorg(&chip->base); memorg->planes_per_lun = 1; memorg->luns_per_target = 1; - memorg->ntargets = 1; /* * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) @@ -5027,6 +5026,8 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips, if (ret) return ret; + memorg->ntargets = maxchips; + /* Read the flash type */ ret = nand_detect(chip, table); if (ret) { diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index b021a5720b42..89773293c64d 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -51,6 +51,7 @@ #define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4)) #define NFC_REG_SPARE_AREA 0x00A0 #define NFC_REG_PAT_ID 0x00A4 +#define NFC_REG_MDMA_CNT 0x00C4 #define NFC_RAM0_BASE 0x0400 #define NFC_RAM1_BASE 0x0800 @@ -69,6 +70,7 @@ #define NFC_PAGE_SHIFT(x) (((x) < 10 ? 0 : (x) - 10) << 8) #define NFC_SAM BIT(12) #define NFC_RAM_METHOD BIT(14) +#define NFC_DMA_TYPE_NORMAL BIT(15) #define NFC_DEBUG_CTL BIT(31) /* define bit use in NFC_ST */ @@ -205,14 +207,13 @@ static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand) * NAND Controller capabilities structure: stores NAND controller capabilities * for distinction between compatible strings. * - * @sram_through_ahb: On A23, we choose to access the internal RAM through AHB - * instead of MBUS (less configuration). A10, A10s, A13 and - * A20 use the MBUS but no extra configuration is needed. + * @extra_mbus_conf: Contrary to A10, A10s and A13, accessing internal RAM + * through MBUS on A23/A33 needs extra configuration. 
* @reg_io_data: I/O data register * @dma_maxburst: DMA maxburst */ struct sunxi_nfc_caps { - bool sram_through_ahb; + bool extra_mbus_conf; unsigned int reg_io_data; unsigned int dma_maxburst; }; @@ -368,28 +369,12 @@ static int sunxi_nfc_dma_op_prepare(struct sunxi_nfc *nfc, const void *buf, goto err_unmap_buf; } - /* - * On A23, we suppose the "internal RAM" (p.12 of the NFC user manual) - * refers to the NAND controller's internal SRAM. This memory is mapped - * and so is accessible from the AHB. It seems that it can also be - * accessed by the MBUS. MBUS accesses are mandatory when using the - * internal DMA instead of the external DMA engine. - * - * During DMA I/O operation, either we access this memory from the AHB - * by clearing the NFC_RAM_METHOD bit, or we set the bit and use the - * MBUS. In this case, we should also configure the MBUS DMA length - * NFC_REG_MDMA_CNT(0xC4) to be chunksize * nchunks. NAND I/O over MBUS - * are also limited to 32kiB pages. - */ - if (nfc->caps->sram_through_ahb) - writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD, - nfc->regs + NFC_REG_CTL); - else - writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD, - nfc->regs + NFC_REG_CTL); - + writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD, + nfc->regs + NFC_REG_CTL); writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM); writel(chunksize, nfc->regs + NFC_REG_CNT); + if (nfc->caps->extra_mbus_conf) + writel(chunksize * nchunks, nfc->regs + NFC_REG_MDMA_CNT); dmat = dmaengine_submit(dmad); @@ -2151,6 +2136,11 @@ static int sunxi_nfc_probe(struct platform_device *pdev) dmac_cfg.src_maxburst = nfc->caps->dma_maxburst; dmac_cfg.dst_maxburst = nfc->caps->dma_maxburst; dmaengine_slave_config(nfc->dmac, &dmac_cfg); + + if (nfc->caps->extra_mbus_conf) + writel(readl(nfc->regs + NFC_REG_CTL) | + NFC_DMA_TYPE_NORMAL, nfc->regs + NFC_REG_CTL); + } else { dev_warn(dev, "failed to request rxtx DMA channel\n"); } @@ -2200,7 +2190,7 @@ static const struct sunxi_nfc_caps sunxi_nfc_a10_caps = { }; static const struct sunxi_nfc_caps sunxi_nfc_a23_caps = { - .sram_through_ahb = true, + .extra_mbus_conf = true, .reg_io_data = NFC_REG_A23_IO_DATA, .dma_maxburst = 8, }; diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c index e5586390026a..e6c646007cda 100644 --- a/drivers/mtd/nand/spi/gigadevice.c +++ b/drivers/mtd/nand/spi/gigadevice.c @@ -180,7 +180,7 @@ static const struct spinand_info gigadevice_spinand_table[] = { SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout, gd5fxgq4xa_ecc_get_status)), SPINAND_INFO("GD5F4GQ4xA", 0xF4, - NAND_MEMORG(1, 2048, 64, 64, 4096, 40, 1, 1, 1), + NAND_MEMORG(1, 2048, 64, 64, 4096, 80, 1, 1, 1), NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c index 6502727049a8..21def3f8fb36 100644 --- a/drivers/mtd/nand/spi/macronix.c +++ b/drivers/mtd/nand/spi/macronix.c @@ -100,7 +100,7 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand, static const struct spinand_info macronix_spinand_table[] = { SPINAND_INFO("MX35LF1GE4AB", 0x12, - NAND_MEMORG(1, 2048, 64, 64, 1024, 40, 1, 1, 1), + NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), NAND_ECCREQ(4, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, @@ -109,7 +109,7 @@ static const struct spinand_info macronix_spinand_table[] = { SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, mx35lf1ge4ab_ecc_get_status)), SPINAND_INFO("MX35LF2GE4AB", 0x22, - NAND_MEMORG(1, 2048, 64, 
64, 2048, 20, 2, 1, 1), + NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1), NAND_ECCREQ(4, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 73172d7f512b..0c2ec1c21434 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -1636,6 +1636,95 @@ static int sr2_bit7_quad_enable(struct spi_nor *nor) return 0; } +/** + * spi_nor_clear_sr_bp() - clear the Status Register Block Protection bits. + * @nor: pointer to a 'struct spi_nor' + * + * Read-modify-write function that clears the Block Protection bits from the + * Status Register without affecting other bits. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_clear_sr_bp(struct spi_nor *nor) +{ + int ret; + u8 mask = SR_BP2 | SR_BP1 | SR_BP0; + + ret = read_sr(nor); + if (ret < 0) { + dev_err(nor->dev, "error while reading status register\n"); + return ret; + } + + write_enable(nor); + + ret = write_sr(nor, ret & ~mask); + if (ret) { + dev_err(nor->dev, "write to status register failed\n"); + return ret; + } + + ret = spi_nor_wait_till_ready(nor); + if (ret) + dev_err(nor->dev, "timeout while writing status register\n"); + return ret; +} + +/** + * spi_nor_spansion_clear_sr_bp() - clear the Status Register Block Protection + * bits on spansion flashes. + * @nor: pointer to a 'struct spi_nor' + * + * Read-modify-write function that clears the Block Protection bits from the + * Status Register without affecting other bits. The function is tightly + * coupled with the spansion_quad_enable() function. Both assume that the Write + * Register with 16 bits, together with the Read Configuration Register (35h) + * instructions are supported. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_spansion_clear_sr_bp(struct spi_nor *nor) +{ + int ret; + u8 mask = SR_BP2 | SR_BP1 | SR_BP0; + u8 sr_cr[2] = {0}; + + /* Check current Quad Enable bit value. */ + ret = read_cr(nor); + if (ret < 0) { + dev_err(nor->dev, + "error while reading configuration register\n"); + return ret; + } + + /* + * When the configuration register Quad Enable bit is one, only the + * Write Status (01h) command with two data bytes may be used. + */ + if (ret & CR_QUAD_EN_SPAN) { + sr_cr[1] = ret; + + ret = read_sr(nor); + if (ret < 0) { + dev_err(nor->dev, + "error while reading status register\n"); + return ret; + } + sr_cr[0] = ret & ~mask; + + ret = write_sr_cr(nor, sr_cr); + if (ret) + dev_err(nor->dev, "16-bit write register failed\n"); + return ret; + } + + /* + * If the Quad Enable bit is zero, use the Write Status (01h) command + * with one data byte. + */ + return spi_nor_clear_sr_bp(nor); +} + /* Used when the "_ext_id" is two bytes at most */ #define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \ .id = { \ @@ -3660,6 +3749,8 @@ static int spi_nor_init_params(struct spi_nor *nor, default: /* Kept only for backward compatibility purpose. 
*/ params->quad_enable = spansion_quad_enable; + if (nor->clear_sr_bp) + nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp; break; } @@ -3912,17 +4003,13 @@ static int spi_nor_init(struct spi_nor *nor) { int err; - /* - * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up - * with the software protection bits set - */ - if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL || - JEDEC_MFR(nor->info) == SNOR_MFR_INTEL || - JEDEC_MFR(nor->info) == SNOR_MFR_SST || - nor->info->flags & SPI_NOR_HAS_LOCK) { - write_enable(nor); - write_sr(nor, 0); - spi_nor_wait_till_ready(nor); + if (nor->clear_sr_bp) { + err = nor->clear_sr_bp(nor); + if (err) { + dev_err(nor->dev, + "fail to clear block protection bits\n"); + return err; + } } if (nor->quad_enable) { @@ -4047,6 +4134,16 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, if (info->flags & SPI_S3AN) nor->flags |= SNOR_F_READY_XSR_RDY; + /* + * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up + * with the software protection bits set. + */ + if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL || + JEDEC_MFR(nor->info) == SNOR_MFR_INTEL || + JEDEC_MFR(nor->info) == SNOR_MFR_SST || + nor->info->flags & SPI_NOR_HAS_LOCK) + nor->clear_sr_bp = spi_nor_clear_sr_bp; + /* Parse the Serial Flash Discoverable Parameters table. */ ret = spi_nor_init_params(nor, ¶ms); if (ret) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 407f4095a37a..799fc38c5c34 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4320,12 +4320,12 @@ void bond_setup(struct net_device *bond_dev) bond_dev->features |= NETIF_F_NETNS_LOCAL; bond_dev->hw_features = BOND_VLAN_FEATURES | - NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4; bond_dev->features |= bond_dev->hw_features; + bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; } /* Destroy a bonding device. 
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index f46086fa9064..db91b213eae1 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -436,9 +436,9 @@ int ksz_switch_register(struct ksz_device *dev, return PTR_ERR(dev->reset_gpio); if (dev->reset_gpio) { - gpiod_set_value(dev->reset_gpio, 1); + gpiod_set_value_cansleep(dev->reset_gpio, 1); mdelay(10); - gpiod_set_value(dev->reset_gpio, 0); + gpiod_set_value_cansleep(dev->reset_gpio, 0); } mutex_init(&dev->dev_mutex); @@ -487,7 +487,7 @@ void ksz_switch_remove(struct ksz_device *dev) dsa_unregister_switch(dev->ds); if (dev->reset_gpio) - gpiod_set_value(dev->reset_gpio, 1); + gpiod_set_value_cansleep(dev->reset_gpio, 1); } EXPORT_SYMBOL(ksz_switch_remove); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c index 18bc035da850..1fff462a4175 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c @@ -843,9 +843,14 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic) return err; if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { - if (hweight < AQ_VLAN_MAX_FILTERS) - err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, true); + if (hweight < AQ_VLAN_MAX_FILTERS && hweight > 0) { + err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, + !(aq_nic->packet_filter & IFF_PROMISC)); + aq_nic->aq_nic_cfg.is_vlan_force_promisc = false; + } else { /* otherwise left in promiscue mode */ + aq_nic->aq_nic_cfg.is_vlan_force_promisc = true; + } } return err; @@ -866,6 +871,7 @@ int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic) if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl)) return -EOPNOTSUPP; + aq_nic->aq_nic_cfg.is_vlan_force_promisc = true; err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false); if (err) return err; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 0da5e161ec5d..41172fbebddd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -126,6 +126,7 @@ void aq_nic_cfg_start(struct aq_nic_s *self) cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk; cfg->features = cfg->aq_hw_caps->hw_features; + cfg->is_vlan_force_promisc = true; } static int aq_nic_update_link_status(struct aq_nic_s *self) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index eb2e3c7c36f9..0f22f5d5691b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -35,6 +35,7 @@ struct aq_nic_cfg_s { u32 flow_control; u32 link_speed_msk; u32 wol; + bool is_vlan_force_promisc; u16 is_mc_list_enabled; u16 mc_list_count; bool is_autoneg; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 1c7593d54035..13ac2661a473 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -778,8 +778,15 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self, unsigned int packet_filter) { unsigned int i = 0U; + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; + + hw_atl_rpfl2promiscuous_mode_en_set(self, + IS_FILTER_ENABLED(IFF_PROMISC)); + + hw_atl_rpf_vlan_prom_mode_en_set(self, + IS_FILTER_ENABLED(IFF_PROMISC) || + cfg->is_vlan_force_promisc); - 
hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC)); hw_atl_rpfl2multicast_flr_en_set(self, IS_FILTER_ENABLED(IFF_ALLMULTI), 0); @@ -788,13 +795,13 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self, hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST)); - self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST); + cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST); for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i) hw_atl_rpfl2_uc_flr_en_set(self, - (self->aq_nic_cfg->is_mc_list_enabled && - (i <= self->aq_nic_cfg->mc_list_count)) ? - 1U : 0U, i); + (cfg->is_mc_list_enabled && + (i <= cfg->mc_list_count)) ? + 1U : 0U, i); return aq_hw_err_from_flags(self); } @@ -1086,7 +1093,7 @@ static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self, static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable) { /* set promisc in case of disabing the vland filter */ - hw_atl_rpf_vlan_prom_mode_en_set(self, !!!enable); + hw_atl_rpf_vlan_prom_mode_en_set(self, !enable); return aq_hw_err_from_flags(self); } diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 2375a13bb446..262a28ff81fc 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -4180,7 +4180,7 @@ static int macb_probe(struct platform_device *pdev) if (PTR_ERR(mac) == -EPROBE_DEFER) { err = -EPROBE_DEFER; goto err_out_free_netdev; - } else if (!IS_ERR(mac)) { + } else if (!IS_ERR_OR_NULL(mac)) { ether_addr_copy(bp->dev->dev_addr, mac); } else { macb_get_hwaddr(bp); diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 8a6785173228..492f8769ac12 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -891,7 +891,7 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) { struct be_adapter *adapter = netdev_priv(netdev); - int status; + int status, cnt; u8 link_status = 0; if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) { @@ -902,6 +902,9 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test, memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); + /* check link status before offline tests */ + link_status = netif_carrier_ok(netdev); + if (test->flags & ETH_TEST_FL_OFFLINE) { if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0) test->flags |= ETH_TEST_FL_FAILED; @@ -922,13 +925,26 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test, test->flags |= ETH_TEST_FL_FAILED; } - status = be_cmd_link_status_query(adapter, NULL, &link_status, 0); - if (status) { - test->flags |= ETH_TEST_FL_FAILED; - data[4] = -1; - } else if (!link_status) { + /* link status was down prior to test */ + if (!link_status) { test->flags |= ETH_TEST_FL_FAILED; data[4] = 1; + return; + } + + for (cnt = 10; cnt; cnt--) { + status = be_cmd_link_status_query(adapter, NULL, &link_status, + 0); + if (status) { + test->flags |= ETH_TEST_FL_FAILED; + data[4] = -1; + break; + } + + if (link_status) + break; + + msleep_interruptible(500); } } diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 67f9bb6e941b..9b036c857b1d 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -1057,7 +1057,7 @@ sis900_open(struct net_device *net_dev) sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); /* 
Enable all known interrupts by setting the interrupt mask. */ - sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); + sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC); sw32(cr, RxENA | sr32(cr)); sw32(ier, IE); @@ -1578,7 +1578,7 @@ static void sis900_tx_timeout(struct net_device *net_dev) sw32(txdp, sis_priv->tx_ring_dma); /* Enable all known interrupts by setting the interrupt mask. */ - sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); + sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC); } /** @@ -1618,7 +1618,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev) spin_unlock_irqrestore(&sis_priv->lock, flags); return NETDEV_TX_OK; } - sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len); + sis_priv->tx_ring[entry].cmdsts = (OWN | INTR | skb->len); sw32(cr, TxENA | sr32(cr)); sis_priv->cur_tx ++; @@ -1674,7 +1674,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance) do { status = sr32(isr); - if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0) + if ((status & (HIBERR|TxURN|TxERR|TxIDLE|TxDESC|RxORN|RxERR|RxOK)) == 0) /* nothing intresting happened */ break; handled = 1; @@ -1684,7 +1684,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance) /* Rx interrupt */ sis900_rx(net_dev); - if (status & (TxURN | TxERR | TxIDLE)) + if (status & (TxURN | TxERR | TxIDLE | TxDESC)) /* Tx interrupt */ sis900_finish_xmit(net_dev); @@ -1896,8 +1896,8 @@ static void sis900_finish_xmit (struct net_device *net_dev) if (tx_status & OWN) { /* The packet is not transmitted yet (owned by hardware) ! - * Note: the interrupt is generated only when Tx Machine - * is idle, so this is an almost impossible case */ + * Note: this is an almost impossible condition + * in case of TxDESC ('descriptor interrupt') */ break; } @@ -2473,7 +2473,7 @@ static int sis900_resume(struct pci_dev *pci_dev) sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); /* Enable all known interrupts by setting the interrupt mask. 
*/ - sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); + sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC); sw32(cr, RxENA | sr32(cr)); sw32(ier, IE); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 2dcdf761d525..020159622559 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -112,7 +112,7 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, * programmed with (2^32 – <new_sec_value>) */ if (gmac4) - sec = (100000000ULL - sec); + sec = -sec; value = readl(ioaddr + PTP_TCR); if (value & PTP_TCR_TSCTRLSSR) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 06dd51f47cfd..06358fe5b245 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2947,12 +2947,15 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) /* Manage tx mitigation */ tx_q->tx_count_frames += nfrags + 1; - if (priv->tx_coal_frames <= tx_q->tx_count_frames) { + if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) && + !(priv->synopsys_id >= DWMAC_CORE_4_00 && + (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en)) { + stmmac_tx_timer_arm(priv, queue); + } else { + tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; - tx_q->tx_count_frames = 0; - } else { - stmmac_tx_timer_arm(priv, queue); } skb_tx_timestamp(skb); @@ -3166,12 +3169,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) * element in case of no SG. */ tx_q->tx_count_frames += nfrags + 1; - if (priv->tx_coal_frames <= tx_q->tx_count_frames) { + if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) && + !(priv->synopsys_id >= DWMAC_CORE_4_00 && + (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en)) { + stmmac_tx_timer_arm(priv, queue); + } else { + tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; - tx_q->tx_count_frames = 0; - } else { - stmmac_tx_timer_arm(priv, queue); } skb_tx_timestamp(skb); diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c index ff61dd8748de..66c8e65f6872 100644 --- a/drivers/net/ppp/ppp_mppe.c +++ b/drivers/net/ppp/ppp_mppe.c @@ -63,6 +63,7 @@ MODULE_AUTHOR("Frank Cusack <fcusack@fcusack.com>"); MODULE_DESCRIPTION("Point-to-Point Protocol Microsoft Point-to-Point Encryption support"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE)); +MODULE_SOFTDEP("pre: arc4"); MODULE_VERSION("1.0.2"); static unsigned int diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index b48006e7fa2f..36916bf51ee6 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2128,12 +2128,12 @@ static void team_setup(struct net_device *dev) dev->features |= NETIF_F_NETNS_LOCAL; dev->hw_features = TEAM_VLAN_FEATURES | - NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4; dev->features |= dev->hw_features; + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; } static int team_newlink(struct net *src_net, struct net_device *dev, diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index d080f8048e52..8b4ad10cf940 100644 --- a/drivers/net/usb/qmi_wwan.c +++ 
b/drivers/net/usb/qmi_wwan.c @@ -1482,7 +1482,7 @@ static int qmi_wwan_probe(struct usb_interface *intf, * different. Ignore the current interface if the number of endpoints * equals the number for the diag interface (two). */ - info = (void *)&id->driver_info; + info = (void *)id->driver_info; if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) { if (desc->bNumEndpoints == 2) diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 11b9525dff27..311b0cc6eb98 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -350,8 +350,8 @@ static int vrf_finish_output6(struct net *net, struct sock *sk, { struct dst_entry *dst = skb_dst(skb); struct net_device *dev = dst->dev; + const struct in6_addr *nexthop; struct neighbour *neigh; - struct in6_addr *nexthop; int ret; nf_reset(skb); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 98af9ecd4a90..ca3793002e2f 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -859,7 +859,7 @@ static int pci_pm_suspend_noirq(struct device *dev) pci_dev->bus->self->skip_bus_pm = true; } - if (pci_dev->skip_bus_pm && !pm_suspend_via_firmware()) { + if (pci_dev->skip_bus_pm && pm_suspend_no_platform()) { dev_dbg(dev, "PCI PM: Skipped\n"); goto Fixup; } @@ -914,10 +914,10 @@ static int pci_pm_resume_noirq(struct device *dev) /* * In the suspend-to-idle case, devices left in D0 during suspend will * stay in D0, so it is not necessary to restore or update their - * configuration here and attempting to put them into D0 again may - * confuse some firmware, so avoid doing that. + * configuration here and attempting to put them into D0 again is + * pointless, so avoid doing that. */ - if (!pci_dev->skip_bus_pm || pm_suspend_via_firmware()) + if (!(pci_dev->skip_bus_pm && pm_suspend_no_platform())) pci_pm_default_resume_early(pci_dev); pci_fixup_device(pci_fixup_resume_early, pci_dev); diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c index f464f8cd274b..7e526bcf5e0b 100644 --- a/drivers/pinctrl/mediatek/mtk-eint.c +++ b/drivers/pinctrl/mediatek/mtk-eint.c @@ -113,6 +113,8 @@ static void mtk_eint_mask(struct irq_data *d) void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->mask_set); + eint->cur_mask[d->hwirq >> 5] &= ~mask; + writel(mask, reg); } @@ -123,6 +125,8 @@ static void mtk_eint_unmask(struct irq_data *d) void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->mask_clr); + eint->cur_mask[d->hwirq >> 5] |= mask; + writel(mask, reg); if (eint->dual_edge[d->hwirq]) @@ -217,19 +221,6 @@ static void mtk_eint_chip_write_mask(const struct mtk_eint *eint, } } -static void mtk_eint_chip_read_mask(const struct mtk_eint *eint, - void __iomem *base, u32 *buf) -{ - int port; - void __iomem *reg; - - for (port = 0; port < eint->hw->ports; port++) { - reg = base + eint->regs->mask + (port << 2); - buf[port] = ~readl_relaxed(reg); - /* Mask is 0 when irq is enabled, and 1 when disabled. 
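
The stmmac_hwtstamp.c hunk a little further up replaces the bogus "100000000ULL - sec" with a plain unsigned negation: the comment says the GMAC4 update register wants 2^32 - <new_sec_value>, and negating a 32-bit unsigned quantity yields exactly that. A minimal userspace sketch of the arithmetic only (not driver code):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
    uint32_t sec = 7;                       /* seconds to subtract during adjustment */

    uint32_t old_val = (uint32_t)(100000000ULL - sec);  /* what used to be programmed */
    uint32_t new_val = -sec;                /* unsigned wrap-around: 2^32 - sec */

    printf("old value : %" PRIu32 "\n", old_val);          /* 99999993   */
    printf("-sec      : %" PRIu32 "\n", new_val);          /* 4294967289 */
    printf("2^32 - sec: %llu\n", (1ULL << 32) - sec);      /* 4294967289 */
    return 0;
}

The old constant was 10^8, so the register was being programmed with the wrong complement whenever seconds had to be subtracted.
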
*/ - } -} - static int mtk_eint_irq_request_resources(struct irq_data *d) { struct mtk_eint *eint = irq_data_get_irq_chip_data(d); @@ -318,7 +309,7 @@ static void mtk_eint_irq_handler(struct irq_desc *desc) struct irq_chip *chip = irq_desc_get_chip(desc); struct mtk_eint *eint = irq_desc_get_handler_data(desc); unsigned int status, eint_num; - int offset, index, virq; + int offset, mask_offset, index, virq; void __iomem *reg = mtk_eint_get_offset(eint, 0, eint->regs->stat); int dual_edge, start_level, curr_level; @@ -328,10 +319,24 @@ static void mtk_eint_irq_handler(struct irq_desc *desc) status = readl(reg); while (status) { offset = __ffs(status); + mask_offset = eint_num >> 5; index = eint_num + offset; virq = irq_find_mapping(eint->domain, index); status &= ~BIT(offset); + /* + * If we get an interrupt on pin that was only required + * for wake (but no real interrupt requested), mask the + * interrupt (as would mtk_eint_resume do anyway later + * in the resume sequence). + */ + if (eint->wake_mask[mask_offset] & BIT(offset) && + !(eint->cur_mask[mask_offset] & BIT(offset))) { + writel_relaxed(BIT(offset), reg - + eint->regs->stat + + eint->regs->mask_set); + } + dual_edge = eint->dual_edge[index]; if (dual_edge) { /* @@ -370,7 +375,6 @@ static void mtk_eint_irq_handler(struct irq_desc *desc) int mtk_eint_do_suspend(struct mtk_eint *eint) { - mtk_eint_chip_read_mask(eint, eint->base, eint->cur_mask); mtk_eint_chip_write_mask(eint, eint->base, eint->wake_mask); return 0; diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c index 568ca96cdb6d..3a235487e38d 100644 --- a/drivers/pinctrl/pinctrl-mcp23s08.c +++ b/drivers/pinctrl/pinctrl-mcp23s08.c @@ -771,6 +771,10 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, if (ret < 0) goto fail; + ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp); + if (ret < 0) + goto fail; + mcp->irq_controller = device_property_read_bool(dev, "interrupt-controller"); if (mcp->irq && mcp->irq_controller) { @@ -812,10 +816,6 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, goto fail; } - ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp); - if (ret < 0) - goto fail; - if (one_regmap_config) { mcp->pinctrl_desc.name = devm_kasprintf(dev, GFP_KERNEL, "mcp23xxx-pinctrl.%d", raw_chip_address); diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c index 3b4ca52d2456..fb76fb2e9ea5 100644 --- a/drivers/pinctrl/pinctrl-ocelot.c +++ b/drivers/pinctrl/pinctrl-ocelot.c @@ -396,7 +396,7 @@ static int ocelot_pin_function_idx(struct ocelot_pinctrl *info, return -1; } -#define REG(r, info, p) ((r) * (info)->stride + (4 * ((p) / 32))) +#define REG_ALT(msb, info, p) (OCELOT_GPIO_ALT0 * (info)->stride + 4 * ((msb) + ((info)->stride * ((p) / 32)))) static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int selector, unsigned int group) @@ -412,19 +412,21 @@ static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev, /* * f is encoded on two bits. - * bit 0 of f goes in BIT(pin) of ALT0, bit 1 of f goes in BIT(pin) of - * ALT1 + * bit 0 of f goes in BIT(pin) of ALT[0], bit 1 of f goes in BIT(pin) of + * ALT[1] * This is racy because both registers can't be updated at the same time * but it doesn't matter much for now. 
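
The mtk-eint hunks above stop reading the hardware mask back at suspend time and instead keep a software shadow, cur_mask[], updated in the mask/unmask callbacks and consulted by the IRQ handler for wake-only sources. A small standalone sketch of that bookkeeping, with the actual register writes reduced to comments; the >>5 / &31 split mirrors one 32-bit mask word per bank of 32 sources:

#include <stdio.h>
#include <stdint.h>

#define NR_EINT 96
#define BIT(x)  (1u << (x))

/* 1 = interrupt currently enabled; one bit per source, 32 per word */
static uint32_t cur_mask[NR_EINT / 32];

static void eint_unmask(unsigned int hwirq)
{
    cur_mask[hwirq >> 5] |= BIT(hwirq & 31);
    /* the real driver also writes BIT(hwirq & 31) to the mask-clear register */
}

static void eint_mask(unsigned int hwirq)
{
    cur_mask[hwirq >> 5] &= ~BIT(hwirq & 31);
    /* the real driver also writes BIT(hwirq & 31) to the mask-set register */
}

static int eint_is_enabled(unsigned int hwirq)
{
    return !!(cur_mask[hwirq >> 5] & BIT(hwirq & 31));
}

int main(void)
{
    eint_unmask(5);
    eint_unmask(40);
    eint_mask(5);

    printf("eint 5 : %d\n", eint_is_enabled(5));   /* 0 - masked again  */
    printf("eint 40: %d\n", eint_is_enabled(40));  /* 1 - still enabled */
    return 0;
}

With the shadow always up to date, suspend can simply program wake_mask and the removed mtk_eint_chip_read_mask() round trip is no longer needed.
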
*/ - regmap_update_bits(info->map, REG(OCELOT_GPIO_ALT0, info, pin->pin), + regmap_update_bits(info->map, REG_ALT(0, info, pin->pin), BIT(p), f << p); - regmap_update_bits(info->map, REG(OCELOT_GPIO_ALT1, info, pin->pin), + regmap_update_bits(info->map, REG_ALT(1, info, pin->pin), BIT(p), f << (p - 1)); return 0; } +#define REG(r, info, p) ((r) * (info)->stride + (4 * ((p) / 32))) + static int ocelot_gpio_set_direction(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned int pin, bool input) @@ -432,7 +434,7 @@ static int ocelot_gpio_set_direction(struct pinctrl_dev *pctldev, struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); unsigned int p = pin % 32; - regmap_update_bits(info->map, REG(OCELOT_GPIO_OE, info, p), BIT(p), + regmap_update_bits(info->map, REG(OCELOT_GPIO_OE, info, pin), BIT(p), input ? 0 : BIT(p)); return 0; @@ -445,9 +447,9 @@ static int ocelot_gpio_request_enable(struct pinctrl_dev *pctldev, struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); unsigned int p = offset % 32; - regmap_update_bits(info->map, REG(OCELOT_GPIO_ALT0, info, offset), + regmap_update_bits(info->map, REG_ALT(0, info, offset), BIT(p), 0); - regmap_update_bits(info->map, REG(OCELOT_GPIO_ALT1, info, offset), + regmap_update_bits(info->map, REG_ALT(1, info, offset), BIT(p), 0); return 0; diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index ecee4b3ff073..377b07b2feeb 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -763,6 +763,7 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd struct pvscsi_adapter *adapter = shost_priv(host); struct pvscsi_ctx *ctx; unsigned long flags; + unsigned char op; spin_lock_irqsave(&adapter->hw_lock, flags); @@ -775,13 +776,14 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd } cmd->scsi_done = done; + op = cmd->cmnd[0]; dev_dbg(&cmd->device->sdev_gendev, - "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]); + "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, op); spin_unlock_irqrestore(&adapter->hw_lock, flags); - pvscsi_kick_io(adapter, cmd->cmnd[0]); + pvscsi_kick_io(adapter, op); return 0; } diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 524ecdc2a9bb..2ec355003524 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -22,7 +22,7 @@ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/ obj-$(CONFIG_SOC_SAMSUNG) += samsung/ obj-y += sunxi/ obj-$(CONFIG_ARCH_TEGRA) += tegra/ -obj-$(CONFIG_SOC_TI) += ti/ +obj-y += ti/ obj-$(CONFIG_ARCH_U8500) += ux500/ obj-$(CONFIG_PLAT_VERSATILE) += versatile/ obj-y += xilinx/ diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig index ea0859f7b185..d7d50d48d05d 100644 --- a/drivers/soc/ti/Kconfig +++ b/drivers/soc/ti/Kconfig @@ -75,10 +75,10 @@ config TI_SCI_PM_DOMAINS called ti_sci_pm_domains. Note this is needed early in boot before rootfs may be available. +endif # SOC_TI + config TI_SCI_INTA_MSI_DOMAIN bool select GENERIC_MSI_IRQ_DOMAIN help Driver to enable Interrupt Aggregator specific MSI Domain. 
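
In the ocelot_pinmux_set_mux() hunks above, a pin's 2-bit function value is spread across two one-bit-per-pin registers: bit 0 of f lands in the pin's bit of ALT[0], bit 1 in the same bit of ALT[1], and the new REG_ALT() macro picks the correct word for pins beyond 31. A userspace sketch of just the bit splitting for a single 32-pin bank, with the register I/O left out:

#include <stdio.h>
#include <stdint.h>

#define BIT(x) (1u << (x))

static uint32_t alt0, alt1;                /* one 32-pin bank of each register */

static void set_pin_function(unsigned int pin, unsigned int f)
{
    unsigned int p = pin % 32;

    /* bit 0 of f goes to ALT0, bit 1 of f goes to ALT1, same bit position */
    alt0 = (alt0 & ~BIT(p)) | (((f >> 0) & 1u) << p);
    alt1 = (alt1 & ~BIT(p)) | (((f >> 1) & 1u) << p);
}

static unsigned int get_pin_function(unsigned int pin)
{
    unsigned int p = pin % 32;

    return ((alt0 >> p) & 1u) | (((alt1 >> p) & 1u) << 1);
}

int main(void)
{
    set_pin_function(3, 2);                /* ALT1 bit set, ALT0 bit clear */
    set_pin_function(7, 3);                /* both bits set                */

    printf("pin 3 -> %u\n", get_pin_function(3));   /* 2 */
    printf("pin 7 -> %u\n", get_pin_function(7));   /* 3 */
    return 0;
}
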
- -endif # SOC_TI diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index b6e4862cc242..51ddca2033e0 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -81,6 +81,12 @@ out: return CHAP_DIGEST_UNKNOWN; } +static void chap_close(struct iscsi_conn *conn) +{ + kfree(conn->auth_protocol); + conn->auth_protocol = NULL; +} + static struct iscsi_chap *chap_server_open( struct iscsi_conn *conn, struct iscsi_node_auth *auth, @@ -118,7 +124,7 @@ static struct iscsi_chap *chap_server_open( case CHAP_DIGEST_UNKNOWN: default: pr_err("Unsupported CHAP_A value\n"); - kfree(conn->auth_protocol); + chap_close(conn); return NULL; } @@ -133,19 +139,13 @@ static struct iscsi_chap *chap_server_open( * Generate Challenge. */ if (chap_gen_challenge(conn, 1, aic_str, aic_len) < 0) { - kfree(conn->auth_protocol); + chap_close(conn); return NULL; } return chap; } -static void chap_close(struct iscsi_conn *conn) -{ - kfree(conn->auth_protocol); - conn->auth_protocol = NULL; -} - static int chap_server_compute_md5( struct iscsi_conn *conn, struct iscsi_node_auth *auth, diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index f4a075303e9a..6949ea8bc387 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -502,7 +502,7 @@ iblock_execute_write_same(struct se_cmd *cmd) /* Always in 512 byte units for Linux/Block */ block_lba += sg->length >> SECTOR_SHIFT; - sectors -= 1; + sectors -= sg->length >> SECTOR_SHIFT; } iblock_submit_bios(&list); diff --git a/fs/Kconfig b/fs/Kconfig index f1046cf6ad85..bfb1c6095c7a 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -11,7 +11,6 @@ config DCACHE_WORD_ACCESS config VALIDATE_FS_PARSER bool "Validate filesystem parameter description" - default y help Enable this to perform validation of the parameter description for a filesystem when it is registered. diff --git a/fs/afs/callback.c b/fs/afs/callback.c index d441bef72163..915010464572 100644 --- a/fs/afs/callback.c +++ b/fs/afs/callback.c @@ -275,9 +275,9 @@ static void afs_break_one_callback(struct afs_server *server, struct afs_super_info *as = AFS_FS_S(cbi->sb); struct afs_volume *volume = as->volume; - write_lock(&volume->cb_break_lock); + write_lock(&volume->cb_v_break_lock); volume->cb_v_break++; - write_unlock(&volume->cb_break_lock); + write_unlock(&volume->cb_v_break_lock); } else { data.volume = NULL; data.fid = *fid; diff --git a/fs/afs/inode.c b/fs/afs/inode.c index b42d9d09669c..18a50d4febcf 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -56,6 +56,16 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren } /* + * Set the file size and block count. Estimate the number of 512 bytes blocks + * used, rounded up to nearest 1K for consistency with other AFS clients. + */ +static void afs_set_i_size(struct afs_vnode *vnode, u64 size) +{ + i_size_write(&vnode->vfs_inode, size); + vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1; +} + +/* * Initialise an inode from the vnode status. */ static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key, @@ -124,12 +134,7 @@ static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key, return afs_protocol_error(NULL, -EBADMSG, afs_eproto_file_type); } - /* - * Estimate 512 bytes blocks used, rounded up to nearest 1K - * for consistency with other AFS clients. 
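
The new afs_set_i_size() helper above centralises the block accounting that the removed comment described: i_blocks is reported in 512-byte units, but rounded up to whole 1K chunks for consistency with other AFS clients. A quick userspace check of the formula:

#include <stdio.h>
#include <stdint.h>

static uint64_t afs_blocks(uint64_t size)
{
    /* round size up to 1K, then express it in 512-byte blocks */
    return ((size + 1023) >> 10) << 1;
}

int main(void)
{
    uint64_t sizes[] = { 0, 1, 1023, 1024, 1025, 4096 };

    for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
        printf("size %5llu -> %llu blocks of 512 bytes\n",
               (unsigned long long)sizes[i],
               (unsigned long long)afs_blocks(sizes[i]));
    /* 0 -> 0, 1 -> 2, 1023 -> 2, 1024 -> 2, 1025 -> 4, 4096 -> 8 */
    return 0;
}
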
- */ - inode->i_blocks = ((i_size_read(inode) + 1023) >> 10) << 1; - i_size_write(&vnode->vfs_inode, status->size); + afs_set_i_size(vnode, status->size); vnode->invalid_before = status->data_version; inode_set_iversion_raw(&vnode->vfs_inode, status->data_version); @@ -207,11 +212,13 @@ static void afs_apply_status(struct afs_fs_cursor *fc, if (expected_version && *expected_version != status->data_version) { - kdebug("vnode modified %llx on {%llx:%llu} [exp %llx] %s", - (unsigned long long) status->data_version, - vnode->fid.vid, vnode->fid.vnode, - (unsigned long long) *expected_version, - fc->type ? fc->type->name : "???"); + if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) + pr_warn("kAFS: vnode modified {%llx:%llu} %llx->%llx %s\n", + vnode->fid.vid, vnode->fid.vnode, + (unsigned long long)*expected_version, + (unsigned long long)status->data_version, + fc->type ? fc->type->name : "???"); + vnode->invalid_before = status->data_version; if (vnode->status.type == AFS_FTYPE_DIR) { if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags)) @@ -230,7 +237,7 @@ static void afs_apply_status(struct afs_fs_cursor *fc, if (data_changed) { inode_set_iversion_raw(&vnode->vfs_inode, status->data_version); - i_size_write(&vnode->vfs_inode, status->size); + afs_set_i_size(vnode, status->size); } } diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 8a67bf741880..7ee63526c6a2 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -109,10 +109,8 @@ struct afs_call { struct rxrpc_call *rxcall; /* RxRPC call handle */ struct key *key; /* security for this call */ struct afs_net *net; /* The network namespace */ - union { - struct afs_server *server; - struct afs_vlserver *vlserver; - }; + struct afs_server *server; /* The fileserver record if fs op (pins ref) */ + struct afs_vlserver *vlserver; /* The vlserver record if vl op */ struct afs_cb_interest *cbi; /* Callback interest for server used */ struct afs_vnode *lvnode; /* vnode being locked */ void *request; /* request data (first part) */ @@ -616,7 +614,7 @@ struct afs_volume { unsigned int servers_seq; /* Incremented each time ->servers changes */ unsigned cb_v_break; /* Break-everything counter. */ - rwlock_t cb_break_lock; + rwlock_t cb_v_break_lock; afs_voltype_t type; /* type of volume */ short error; diff --git a/fs/afs/volume.c b/fs/afs/volume.c index 08fdb3951c49..1a414300b654 100644 --- a/fs/afs/volume.c +++ b/fs/afs/volume.c @@ -43,6 +43,7 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params, atomic_set(&volume->usage, 1); INIT_LIST_HEAD(&volume->proc_link); rwlock_init(&volume->servers_lock); + rwlock_init(&volume->cb_v_break_lock); memcpy(volume->name, vldb->name, vldb->name_len + 1); slist = afs_alloc_server_list(params->cell, params->key, vldb, type_mask); @@ -2095,6 +2095,7 @@ SYSCALL_DEFINE6(io_pgetevents, struct __aio_sigset ksig = { NULL, }; sigset_t ksigmask, sigsaved; struct timespec64 ts; + bool interrupted; int ret; if (timeout && unlikely(get_timespec64(&ts, timeout))) @@ -2108,8 +2109,10 @@ SYSCALL_DEFINE6(io_pgetevents, return ret; ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? 
&ts : NULL); - restore_user_sigmask(ksig.sigmask, &sigsaved); - if (signal_pending(current) && !ret) + + interrupted = signal_pending(current); + restore_user_sigmask(ksig.sigmask, &sigsaved, interrupted); + if (interrupted && !ret) ret = -ERESTARTNOHAND; return ret; @@ -2128,6 +2131,7 @@ SYSCALL_DEFINE6(io_pgetevents_time32, struct __aio_sigset ksig = { NULL, }; sigset_t ksigmask, sigsaved; struct timespec64 ts; + bool interrupted; int ret; if (timeout && unlikely(get_old_timespec32(&ts, timeout))) @@ -2142,8 +2146,10 @@ SYSCALL_DEFINE6(io_pgetevents_time32, return ret; ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL); - restore_user_sigmask(ksig.sigmask, &sigsaved); - if (signal_pending(current) && !ret) + + interrupted = signal_pending(current); + restore_user_sigmask(ksig.sigmask, &sigsaved, interrupted); + if (interrupted && !ret) ret = -ERESTARTNOHAND; return ret; @@ -2193,6 +2199,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents, struct __compat_aio_sigset ksig = { NULL, }; sigset_t ksigmask, sigsaved; struct timespec64 t; + bool interrupted; int ret; if (timeout && get_old_timespec32(&t, timeout)) @@ -2206,8 +2213,10 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents, return ret; ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL); - restore_user_sigmask(ksig.sigmask, &sigsaved); - if (signal_pending(current) && !ret) + + interrupted = signal_pending(current); + restore_user_sigmask(ksig.sigmask, &sigsaved, interrupted); + if (interrupted && !ret) ret = -ERESTARTNOHAND; return ret; @@ -2226,6 +2235,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64, struct __compat_aio_sigset ksig = { NULL, }; sigset_t ksigmask, sigsaved; struct timespec64 t; + bool interrupted; int ret; if (timeout && get_timespec64(&t, timeout)) @@ -2239,8 +2249,10 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64, return ret; ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL); - restore_user_sigmask(ksig.sigmask, &sigsaved); - if (signal_pending(current) && !ret) + + interrupted = signal_pending(current); + restore_user_sigmask(ksig.sigmask, &sigsaved, interrupted); + if (interrupted && !ret) ret = -ERESTARTNOHAND; return ret; diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 82a48e830018..e4b59e76afb0 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -856,9 +856,14 @@ err: static int load_flat_shared_library(int id, struct lib_info *libs) { + /* + * This is a fake bprm struct; only the members "buf", "file" and + * "filename" are actually used. + */ struct linux_binprm bprm; int res; char buf[16]; + loff_t pos = 0; memset(&bprm, 0, sizeof(bprm)); @@ -872,25 +877,11 @@ static int load_flat_shared_library(int id, struct lib_info *libs) if (IS_ERR(bprm.file)) return res; - bprm.cred = prepare_exec_creds(); - res = -ENOMEM; - if (!bprm.cred) - goto out; - - /* We don't really care about recalculating credentials at this point - * as we're past the point of no return and are dealing with shared - * libraries. 
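
The io_pgetevents() hunks above record whether the wait was interrupted before the temporary signal mask is undone, and pass that flag to restore_user_sigmask() rather than letting it re-check pending signals afterwards. A loose userspace analogy of why the ordering matters, built on sigprocmask()/sigpending(); this illustrates the race, it is not the kernel logic itself:

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t handler_ran;

static void handler(int sig)
{
    (void)sig;
    handler_ran = 1;
}

int main(void)
{
    struct sigaction sa = { 0 };
    sigset_t blocked, saved, pending;
    int interrupted;

    sa.sa_handler = handler;
    sigemptyset(&sa.sa_mask);
    sigaction(SIGUSR1, &sa, NULL);

    /* enter the "syscall" with a caller-supplied temporary mask */
    sigemptyset(&blocked);
    sigaddset(&blocked, SIGUSR1);
    sigprocmask(SIG_SETMASK, &blocked, &saved);

    raise(SIGUSR1);                    /* a signal arrives while we "wait" */

    /* record the interruption *before* dropping the temporary mask ...   */
    sigpending(&pending);
    interrupted = sigismember(&pending, SIGUSR1);

    /* ... because restoring first delivers SIGUSR1 right away, and a
     * pending-signal check made after this point would see nothing.      */
    sigprocmask(SIG_SETMASK, &saved, NULL);

    printf("interrupted=%d handler_ran=%d\n", interrupted, handler_ran);
    return 0;
}
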
- */ - bprm.called_set_creds = 1; + res = kernel_read(bprm.file, bprm.buf, BINPRM_BUF_SIZE, &pos); - res = prepare_binprm(&bprm); - - if (!res) + if (res >= 0) res = load_flat_file(&bprm, libs, id, NULL); - abort_creds(bprm.cred); - -out: allow_write_access(bprm.file); fput(bprm.file); diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 6af2d0d4a87a..c8a9b89b922d 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2121,9 +2121,10 @@ retry: if (inode && ceph_snap(inode) == CEPH_SNAPDIR) { dout("build_path path+%d: %p SNAPDIR\n", pos, temp); - } else if (stop_on_nosnap && inode && + } else if (stop_on_nosnap && inode && dentry != temp && ceph_snap(inode) == CEPH_NOSNAP) { spin_unlock(&temp->d_lock); + pos++; /* get rid of any prepended '/' */ break; } else { pos -= temp->d_name.len; diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 3fdc6a41b304..9fd56b0acd7e 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -2372,6 +2372,41 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses, kfree(dfs_rsp); return rc; } + +static int +parse_reparse_symlink(struct reparse_symlink_data_buffer *symlink_buf, + u32 plen, char **target_path, + struct cifs_sb_info *cifs_sb) +{ + unsigned int sub_len; + unsigned int sub_offset; + + /* We only handle Symbolic Link : MS-FSCC 2.1.2.4 */ + if (le32_to_cpu(symlink_buf->ReparseTag) != IO_REPARSE_TAG_SYMLINK) { + cifs_dbg(VFS, "srv returned invalid symlink buffer\n"); + return -EIO; + } + + sub_offset = le16_to_cpu(symlink_buf->SubstituteNameOffset); + sub_len = le16_to_cpu(symlink_buf->SubstituteNameLength); + if (sub_offset + 20 > plen || + sub_offset + sub_len + 20 > plen) { + cifs_dbg(VFS, "srv returned malformed symlink buffer\n"); + return -EIO; + } + + *target_path = cifs_strndup_from_utf16( + symlink_buf->PathBuffer + sub_offset, + sub_len, true, cifs_sb->local_nls); + if (!(*target_path)) + return -ENOMEM; + + convert_delimiter(*target_path, '/'); + cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path); + + return 0; +} + #define SMB2_SYMLINK_STRUCT_SIZE \ (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp)) @@ -2401,11 +2436,13 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, struct kvec close_iov[1]; struct smb2_create_rsp *create_rsp; struct smb2_ioctl_rsp *ioctl_rsp; - char *ioctl_buf; + struct reparse_data_buffer *reparse_buf; u32 plen; cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path); + *target_path = NULL; + if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; @@ -2483,17 +2520,36 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, if ((rc == 0) && (is_reparse_point)) { /* See MS-FSCC 2.3.23 */ - ioctl_buf = (char *)ioctl_rsp + le32_to_cpu(ioctl_rsp->OutputOffset); + reparse_buf = (struct reparse_data_buffer *) + ((char *)ioctl_rsp + + le32_to_cpu(ioctl_rsp->OutputOffset)); plen = le32_to_cpu(ioctl_rsp->OutputCount); if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) > rsp_iov[1].iov_len) { - cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", plen); + cifs_dbg(VFS, "srv returned invalid ioctl len: %d\n", + plen); + rc = -EIO; + goto querty_exit; + } + + if (plen < 8) { + cifs_dbg(VFS, "reparse buffer is too small. 
Must be " + "at least 8 bytes but was %d\n", plen); + rc = -EIO; + goto querty_exit; + } + + if (plen < le16_to_cpu(reparse_buf->ReparseDataLength) + 8) { + cifs_dbg(VFS, "srv returned invalid reparse buf " + "length: %d\n", plen); rc = -EIO; goto querty_exit; } - /* Do stuff with ioctl_buf/plen */ + rc = parse_reparse_symlink( + (struct reparse_symlink_data_buffer *)reparse_buf, + plen, target_path, cifs_sb); goto querty_exit; } diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index c7d5813bebd8..858353d20c39 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -914,7 +914,19 @@ struct reparse_mount_point_data_buffer { __u8 PathBuffer[0]; /* Variable Length */ } __packed; -/* See MS-FSCC 2.1.2.4 and cifspdu.h for struct reparse_symlink_data */ +#define SYMLINK_FLAG_RELATIVE 0x00000001 + +struct reparse_symlink_data_buffer { + __le32 ReparseTag; + __le16 ReparseDataLength; + __u16 Reserved; + __le16 SubstituteNameOffset; + __le16 SubstituteNameLength; + __le16 PrintNameOffset; + __le16 PrintNameLength; + __le32 Flags; + __u8 PathBuffer[0]; /* Variable Length */ +} __packed; /* See MS-FSCC 2.1.2.6 and cifspdu.h for struct reparse_posix_data */ @@ -720,12 +720,11 @@ static void *dax_insert_entry(struct xa_state *xas, xas_reset(xas); xas_lock_irq(xas); - if (dax_entry_size(entry) != dax_entry_size(new_entry)) { + if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { + void *old; + dax_disassociate_entry(entry, mapping, false); dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address); - } - - if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { /* * Only swap our new entry into the page cache if the current * entry is a zero page or an empty entry. If a normal PTE or @@ -734,7 +733,7 @@ static void *dax_insert_entry(struct xa_state *xas, * existing entry is a PMD, we will just leave the PMD in the * tree and dirty it if necessary. 
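
The parse_reparse_symlink() helper added above trusts nothing from the wire: the substitute-name offset and length have to fit inside the reported payload before anything is copied out of PathBuffer, and the fixed header in front of PathBuffer is 20 bytes. A simplified, host-endian userspace version of the same bounds check, written with subtractions so it cannot overflow (the kernel code uses the equivalent additive comparison on small u16/u32 values):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define SYMLINK_HDR_SIZE 20        /* fixed fields before PathBuffer[] */

static int validate_symlink_payload(size_t plen,
                                    uint16_t sub_offset, uint16_t sub_len)
{
    if (plen < SYMLINK_HDR_SIZE)
        return -1;                                  /* header truncated  */
    if (sub_offset > plen - SYMLINK_HDR_SIZE)
        return -1;                                  /* offset past end   */
    if (sub_len > plen - SYMLINK_HDR_SIZE - sub_offset)
        return -1;                                  /* length past end   */
    return 0;                                       /* safe to copy path */
}

int main(void)
{
    printf("%d\n", validate_symlink_payload(40, 0, 20));  /*  0: fits      */
    printf("%d\n", validate_symlink_payload(40, 16, 8));  /* -1: overruns  */
    printf("%d\n", validate_symlink_payload(10, 0, 0));   /* -1: too short */
    return 0;
}
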
*/ - void *old = dax_lock_entry(xas, new_entry); + old = dax_lock_entry(xas, new_entry); WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) | DAX_LOCKED)); entry = new_entry; diff --git a/fs/eventpoll.c b/fs/eventpoll.c index c6f513100cc9..4c74c768ae43 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -2325,7 +2325,7 @@ SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events, error = do_epoll_wait(epfd, events, maxevents, timeout); - restore_user_sigmask(sigmask, &sigsaved); + restore_user_sigmask(sigmask, &sigsaved, error == -EINTR); return error; } @@ -2350,7 +2350,7 @@ COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd, err = do_epoll_wait(epfd, events, maxevents, timeout); - restore_user_sigmask(sigmask, &sigsaved); + restore_user_sigmask(sigmask, &sigsaved, err == -EINTR); return err; } diff --git a/fs/inode.c b/fs/inode.c index df6542ec3b88..2bf21e2c90fc 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -362,7 +362,7 @@ EXPORT_SYMBOL(inc_nlink); static void __address_space_init_once(struct address_space *mapping) { - xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ); + xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT); init_rwsem(&mapping->i_mmap_rwsem); INIT_LIST_HEAD(&mapping->private_list); spin_lock_init(&mapping->private_lock); diff --git a/fs/io_uring.c b/fs/io_uring.c index 86a2bd721900..4ef62a45045d 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -579,6 +579,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx, state->cur_req++; } + req->file = NULL; req->ctx = ctx; req->flags = 0; /* one is dropped after submission, the other at completion */ @@ -1801,10 +1802,8 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s, req->sequence = ctx->cached_sq_head - 1; } - if (!io_op_needs_file(s->sqe)) { - req->file = NULL; + if (!io_op_needs_file(s->sqe)) return 0; - } if (flags & IOSQE_FIXED_FILE) { if (unlikely(!ctx->user_files || @@ -2201,11 +2200,12 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, } ret = wait_event_interruptible(ctx->wait, io_cqring_events(ring) >= min_events); - if (ret == -ERESTARTSYS) - ret = -EINTR; if (sig) - restore_user_sigmask(sig, &sigsaved); + restore_user_sigmask(sig, &sigsaved, ret == -ERESTARTSYS); + + if (ret == -ERESTARTSYS) + ret = -EINTR; return READ_ONCE(ring->r.head) == READ_ONCE(ring->r.tail) ? ret : 0; } diff --git a/fs/namespace.c b/fs/namespace.c index 7660c2749c96..6fbc9126367a 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -2596,11 +2596,12 @@ static int do_move_mount(struct path *old_path, struct path *new_path) if (!check_mnt(p)) goto out; - /* The thing moved should be either ours or completely unattached. */ - if (attached && !check_mnt(old)) + /* The thing moved must be mounted... */ + if (!is_mounted(&old->mnt)) goto out; - if (!attached && !(ns && is_anon_ns(ns))) + /* ... and either ours or the root of anon namespace */ + if (!(attached ? 
check_mnt(old) : is_anon_ns(ns))) goto out; if (old->mnt.mnt_flags & MNT_LOCKED) diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c index a809989807d6..19f856f45689 100644 --- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c @@ -18,7 +18,7 @@ #define NFSDBG_FACILITY NFSDBG_PNFS_LD -static unsigned int dataserver_timeo = NFS_DEF_TCP_RETRANS; +static unsigned int dataserver_timeo = NFS_DEF_TCP_TIMEO; static unsigned int dataserver_retrans; static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 618e66078ee5..1a0cdeb3b875 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1563,7 +1563,7 @@ static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca) * Never use more than a third of the remaining memory, * unless it's the only way to give this client a slot: */ - avail = clamp_t(int, avail, slotsize, total_avail/3); + avail = clamp_t(unsigned long, avail, slotsize, total_avail/3); num = min_t(int, num, avail / slotsize); nfsd_drc_mem_used += num * slotsize; spin_unlock(&nfsd_drc_lock); diff --git a/fs/proc/array.c b/fs/proc/array.c index 2edbb657f859..55180501b915 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -462,7 +462,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, * a program is not able to use ptrace(2) in that case. It is * safe because the task has stopped executing permanently. */ - if (permitted && (task->flags & PF_DUMPCORE)) { + if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE))) { if (try_get_task_stack(task)) { eip = KSTK_EIP(task); esp = KSTK_ESP(task); diff --git a/fs/proc/base.c b/fs/proc/base.c index 9c8ca6cd3ce4..255f6754c70d 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -3077,8 +3077,7 @@ static const struct file_operations proc_tgid_base_operations = { struct pid *tgid_pidfd_to_pid(const struct file *file) { - if (!d_is_dir(file->f_path.dentry) || - (file->f_op != &proc_tgid_base_operations)) + if (file->f_op != &proc_tgid_base_operations) return ERR_PTR(-EBADF); return proc_pid(file_inode(file)); diff --git a/fs/select.c b/fs/select.c index 6cbc9ff56ba0..a4d8f6e8b63c 100644 --- a/fs/select.c +++ b/fs/select.c @@ -758,10 +758,9 @@ static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp, return ret; ret = core_sys_select(n, inp, outp, exp, to); + restore_user_sigmask(sigmask, &sigsaved, ret == -ERESTARTNOHAND); ret = poll_select_copy_remaining(&end_time, tsp, type, ret); - restore_user_sigmask(sigmask, &sigsaved); - return ret; } @@ -1106,8 +1105,7 @@ SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds, ret = do_sys_poll(ufds, nfds, to); - restore_user_sigmask(sigmask, &sigsaved); - + restore_user_sigmask(sigmask, &sigsaved, ret == -EINTR); /* We can restart this syscall, usually */ if (ret == -EINTR) ret = -ERESTARTNOHAND; @@ -1142,8 +1140,7 @@ SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds, unsigned int, nfds, ret = do_sys_poll(ufds, nfds, to); - restore_user_sigmask(sigmask, &sigsaved); - + restore_user_sigmask(sigmask, &sigsaved, ret == -EINTR); /* We can restart this syscall, usually */ if (ret == -EINTR) ret = -ERESTARTNOHAND; @@ -1350,10 +1347,9 @@ static long do_compat_pselect(int n, compat_ulong_t __user *inp, return ret; ret = compat_core_sys_select(n, inp, outp, exp, to); + restore_user_sigmask(sigmask, &sigsaved, ret == -ERESTARTNOHAND); ret = poll_select_copy_remaining(&end_time, tsp, type, 
ret); - restore_user_sigmask(sigmask, &sigsaved); - return ret; } @@ -1425,8 +1421,7 @@ COMPAT_SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds, ret = do_sys_poll(ufds, nfds, to); - restore_user_sigmask(sigmask, &sigsaved); - + restore_user_sigmask(sigmask, &sigsaved, ret == -EINTR); /* We can restart this syscall, usually */ if (ret == -EINTR) ret = -ERESTARTNOHAND; @@ -1461,8 +1456,7 @@ COMPAT_SYSCALL_DEFINE5(ppoll_time64, struct pollfd __user *, ufds, ret = do_sys_poll(ufds, nfds, to); - restore_user_sigmask(sigmask, &sigsaved); - + restore_user_sigmask(sigmask, &sigsaved, ret == -EINTR); /* We can restart this syscall, usually */ if (ret == -EINTR) ret = -ERESTARTNOHAND; diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index ae0b8b5f69e6..ccbdbd62f0d8 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -40,6 +40,16 @@ enum userfaultfd_state { /* * Start with fault_pending_wqh and fault_wqh so they're more likely * to be in the same cacheline. + * + * Locking order: + * fd_wqh.lock + * fault_pending_wqh.lock + * fault_wqh.lock + * event_wqh.lock + * + * To avoid deadlocks, IRQs must be disabled when taking any of the above locks, + * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's + * also taken in IRQ context. */ struct userfaultfd_ctx { /* waitqueue head for the pending (i.e. not read) userfaults */ @@ -458,7 +468,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) blocking_state = return_to_userland ? TASK_INTERRUPTIBLE : TASK_KILLABLE; - spin_lock(&ctx->fault_pending_wqh.lock); + spin_lock_irq(&ctx->fault_pending_wqh.lock); /* * After the __add_wait_queue the uwq is visible to userland * through poll/read(). @@ -470,7 +480,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) * __add_wait_queue. */ set_current_state(blocking_state); - spin_unlock(&ctx->fault_pending_wqh.lock); + spin_unlock_irq(&ctx->fault_pending_wqh.lock); if (!is_vm_hugetlb_page(vmf->vma)) must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags, @@ -552,13 +562,13 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) * kernel stack can be released after the list_del_init. */ if (!list_empty_careful(&uwq.wq.entry)) { - spin_lock(&ctx->fault_pending_wqh.lock); + spin_lock_irq(&ctx->fault_pending_wqh.lock); /* * No need of list_del_init(), the uwq on the stack * will be freed shortly anyway. */ list_del(&uwq.wq.entry); - spin_unlock(&ctx->fault_pending_wqh.lock); + spin_unlock_irq(&ctx->fault_pending_wqh.lock); } /* @@ -583,7 +593,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, init_waitqueue_entry(&ewq->wq, current); release_new_ctx = NULL; - spin_lock(&ctx->event_wqh.lock); + spin_lock_irq(&ctx->event_wqh.lock); /* * After the __add_wait_queue the uwq is visible to userland * through poll/read(). @@ -613,15 +623,15 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, break; } - spin_unlock(&ctx->event_wqh.lock); + spin_unlock_irq(&ctx->event_wqh.lock); wake_up_poll(&ctx->fd_wqh, EPOLLIN); schedule(); - spin_lock(&ctx->event_wqh.lock); + spin_lock_irq(&ctx->event_wqh.lock); } __set_current_state(TASK_RUNNING); - spin_unlock(&ctx->event_wqh.lock); + spin_unlock_irq(&ctx->event_wqh.lock); if (release_new_ctx) { struct vm_area_struct *vma; @@ -918,10 +928,10 @@ wakeup: * the last page faults that may have been already waiting on * the fault_*wqh. 
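
Looking back at the nfsd4_get_drc_mem() hunk above: clamp_t() does its comparisons in the type it is given, so clamping a large unsigned long against total_avail/3 in int misbehaves once either value exceeds INT_MAX (on the usual two's-complement ABIs the out-of-range conversion wraps negative). A userspace sketch with simplified helpers (no type checking, arguments evaluated more than once), assuming an LP64 machine where unsigned long is 64 bits wide:

#include <stdio.h>

/* simplified versions of the kernel helpers, same clamping order */
#define min_t(type, a, b)          ((type)(a) < (type)(b) ? (type)(a) : (type)(b))
#define max_t(type, a, b)          ((type)(a) > (type)(b) ? (type)(a) : (type)(b))
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)

int main(void)
{
    unsigned long slotsize    = 2048;
    unsigned long total_avail = 8UL << 30;      /* 8 GiB to hand out       */
    unsigned long avail       = 3000000000UL;   /* request from one client */

    int bad            = clamp_t(int, avail, slotsize, total_avail / 3);
    unsigned long good = clamp_t(unsigned long, avail, slotsize,
                                 total_avail / 3);

    printf("clamp in int          : %d\n", bad);   /* large negative value */
    printf("clamp in unsigned long: %lu\n", good); /* 2863311530           */
    return 0;
}
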
*/ - spin_lock(&ctx->fault_pending_wqh.lock); + spin_lock_irq(&ctx->fault_pending_wqh.lock); __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range); __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range); - spin_unlock(&ctx->fault_pending_wqh.lock); + spin_unlock_irq(&ctx->fault_pending_wqh.lock); /* Flush pending events that may still wait on event_wqh */ wake_up_all(&ctx->event_wqh); @@ -1134,7 +1144,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, if (!ret && msg->event == UFFD_EVENT_FORK) { ret = resolve_userfault_fork(ctx, fork_nctx, msg); - spin_lock(&ctx->event_wqh.lock); + spin_lock_irq(&ctx->event_wqh.lock); if (!list_empty(&fork_event)) { /* * The fork thread didn't abort, so we can @@ -1180,7 +1190,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, if (ret) userfaultfd_ctx_put(fork_nctx); } - spin_unlock(&ctx->event_wqh.lock); + spin_unlock_irq(&ctx->event_wqh.lock); } return ret; @@ -1219,14 +1229,14 @@ static ssize_t userfaultfd_read(struct file *file, char __user *buf, static void __wake_userfault(struct userfaultfd_ctx *ctx, struct userfaultfd_wake_range *range) { - spin_lock(&ctx->fault_pending_wqh.lock); + spin_lock_irq(&ctx->fault_pending_wqh.lock); /* wake all in the range and autoremove */ if (waitqueue_active(&ctx->fault_pending_wqh)) __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, range); if (waitqueue_active(&ctx->fault_wqh)) __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range); - spin_unlock(&ctx->fault_pending_wqh.lock); + spin_unlock_irq(&ctx->fault_pending_wqh.lock); } static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx, @@ -1881,7 +1891,7 @@ static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f) wait_queue_entry_t *wq; unsigned long pending = 0, total = 0; - spin_lock(&ctx->fault_pending_wqh.lock); + spin_lock_irq(&ctx->fault_pending_wqh.lock); list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) { pending++; total++; @@ -1889,7 +1899,7 @@ static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f) list_for_each_entry(wq, &ctx->fault_wqh.head, entry) { total++; } - spin_unlock(&ctx->fault_pending_wqh.lock); + spin_unlock_irq(&ctx->fault_pending_wqh.lock); /* * If more protocols will be added, there will be all shown diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h index 82c9e0c020b2..e10470ed7c4f 100644 --- a/include/dt-bindings/clock/g12a-clkc.h +++ b/include/dt-bindings/clock/g12a-clkc.h @@ -130,7 +130,7 @@ #define CLKID_MALI_1_SEL 172 #define CLKID_MALI_1 174 #define CLKID_MALI 175 -#define CLKID_MPLL_5OM 177 +#define CLKID_MPLL_50M 177 #define CLKID_CPU_CLK 187 #define CLKID_PCIE_PLL 201 #define CLKID_VDEC_1 204 diff --git a/include/dt-bindings/clock/sifive-fu540-prci.h b/include/dt-bindings/clock/sifive-fu540-prci.h index 6a0b70a37d78..3b21d0522c91 100644 --- a/include/dt-bindings/clock/sifive-fu540-prci.h +++ b/include/dt-bindings/clock/sifive-fu540-prci.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ /* * Copyright (C) 2018-2019 SiFive, Inc. 
* Wesley Terpstra diff --git a/include/linux/device.h b/include/linux/device.h index 848fc71c6ba6..4a295e324ac5 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -704,7 +704,8 @@ extern unsigned long devm_get_free_pages(struct device *dev, gfp_t gfp_mask, unsigned int order); extern void devm_free_pages(struct device *dev, unsigned long addr); -void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res); +void __iomem *devm_ioremap_resource(struct device *dev, + const struct resource *res); void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index, diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h index 16255c2ca2f4..0d6b4bc191c5 100644 --- a/include/linux/intel-ish-client-if.h +++ b/include/linux/intel-ish-client-if.h @@ -103,6 +103,7 @@ void ishtp_put_device(struct ishtp_cl_device *cl_dev); void ishtp_get_device(struct ishtp_cl_device *cl_dev); void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data); void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device); +struct ishtp_cl_device *ishtp_dev_to_cl_device(struct device *dev); int ishtp_register_event_cb(struct ishtp_cl_device *device, void (*read_cb)(struct ishtp_cl_device *)); struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev, diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 74b1ee9027f5..0c9bc231107f 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -93,7 +93,8 @@ #define DIV_ROUND_DOWN_ULL(ll, d) \ ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) -#define DIV_ROUND_UP_ULL(ll, d) DIV_ROUND_DOWN_ULL((ll) + (d) - 1, (d)) +#define DIV_ROUND_UP_ULL(ll, d) \ + DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d)) #if BITS_PER_LONG == 32 # define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d) diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index b3d360b0ee3d..9f57cdfcc93d 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -373,6 +373,8 @@ struct flash_info; * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is * @quad_enable: [FLASH-SPECIFIC] enables SPI NOR quad mode + * @clear_sr_bp: [FLASH-SPECIFIC] clears the Block Protection Bits from + * the SPI NOR Status Register. 
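
The include/linux/kernel.h hunk above widens the operand of DIV_ROUND_UP_ULL() before the rounding addition; previously "(ll) + (d) - 1" was evaluated in the caller's type, so a 32-bit quantity near UINT_MAX wrapped before the 64-bit division ever ran. A userspace sketch of both expansions, assuming the usual 32-bit int and with do_div() replaced by ordinary 64-bit division:

#include <stdio.h>
#include <stdint.h>

/* old expansion: the +d-1 happens in the caller's 32-bit arithmetic */
#define DIV_ROUND_UP_ULL_OLD(ll, d)  ((unsigned long long)((ll) + (d) - 1) / (d))
/* fixed expansion: widen to unsigned long long first */
#define DIV_ROUND_UP_ULL_NEW(ll, d)  (((unsigned long long)(ll) + (d) - 1) / (d))

int main(void)
{
    uint32_t bytes = 0xFFFFFFF0u;    /* 32-bit quantity close to UINT32_MAX */
    unsigned int chunk = 32;

    printf("old macro: %llu\n", DIV_ROUND_UP_ULL_OLD(bytes, chunk)); /* 0         */
    printf("new macro: %llu\n", DIV_ROUND_UP_ULL_NEW(bytes, chunk)); /* 134217728 */
    return 0;
}
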
* completely locked * @priv: the private data */ @@ -410,6 +412,7 @@ struct spi_nor { int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); int (*quad_enable)(struct spi_nor *nor); + int (*clear_sr_bp)(struct spi_nor *nor); void *priv; }; diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 9ec3544baee2..fe0b29bf2df7 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -333,19 +333,6 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping, mapping_gfp_mask(mapping)); } -static inline struct page *find_subpage(struct page *page, pgoff_t offset) -{ - unsigned long mask; - - if (PageHuge(page)) - return page; - - VM_BUG_ON_PAGE(PageTail(page), page); - - mask = (1UL << compound_order(page)) - 1; - return page + (offset & mask); -} - struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset); unsigned find_get_entries(struct address_space *mapping, pgoff_t start, diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 2ddae518dce6..16e38c286d46 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -241,6 +241,7 @@ struct perf_event; #define PERF_PMU_CAP_NO_INTERRUPT 0x01 #define PERF_PMU_CAP_NO_NMI 0x02 #define PERF_PMU_CAP_AUX_NO_SG 0x04 +#define PERF_PMU_CAP_EXTENDED_REGS 0x08 #define PERF_PMU_CAP_EXCLUSIVE 0x10 #define PERF_PMU_CAP_ITRACE 0x20 #define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40 diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h index 476747456bca..2d12e97d5e7b 100644 --- a/include/linux/perf_regs.h +++ b/include/linux/perf_regs.h @@ -11,6 +11,11 @@ struct perf_regs { #ifdef CONFIG_HAVE_PERF_REGS #include <asm/perf_regs.h> + +#ifndef PERF_REG_EXTENDED_MASK +#define PERF_REG_EXTENDED_MASK 0 +#endif + u64 perf_reg_value(struct pt_regs *regs, int idx); int perf_reg_validate(u64 mask); u64 perf_reg_abi(struct task_struct *task); @@ -18,6 +23,9 @@ void perf_get_regs_user(struct perf_regs *regs_user, struct pt_regs *regs, struct pt_regs *regs_user_copy); #else + +#define PERF_REG_EXTENDED_MASK 0 + static inline u64 perf_reg_value(struct pt_regs *regs, int idx) { return 0; diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h index 7bb77850c65a..3c202a11a79e 100644 --- a/include/linux/pfn_t.h +++ b/include/linux/pfn_t.h @@ -68,7 +68,7 @@ static inline phys_addr_t pfn_t_to_phys(pfn_t pfn) static inline void *pfn_t_to_virt(pfn_t pfn) { - if (pfn_t_has_page(pfn)) + if (pfn_t_has_page(pfn) && !is_device_private_page(pfn_t_to_page(pfn))) return __va(pfn_t_to_phys(pfn)); return NULL; } diff --git a/include/linux/signal.h b/include/linux/signal.h index 9702016734b1..78c2bb376954 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -276,7 +276,7 @@ extern int sigprocmask(int, sigset_t *, sigset_t *); extern int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set, sigset_t *oldset, size_t sigsetsize); extern void restore_user_sigmask(const void __user *usigmask, - sigset_t *sigsaved); + sigset_t *sigsaved, bool interrupted); extern void set_current_blocked(sigset_t *); extern void __set_current_blocked(const sigset_t *); extern int show_unhandled_signals; diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 8594001e8be8..f0d262ad7b78 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -209,8 +209,9 @@ extern int 
suspend_valid_only_mem(suspend_state_t state); extern unsigned int pm_suspend_global_flags; -#define PM_SUSPEND_FLAG_FW_SUSPEND (1 << 0) -#define PM_SUSPEND_FLAG_FW_RESUME (1 << 1) +#define PM_SUSPEND_FLAG_FW_SUSPEND BIT(0) +#define PM_SUSPEND_FLAG_FW_RESUME BIT(1) +#define PM_SUSPEND_FLAG_NO_PLATFORM BIT(2) static inline void pm_suspend_clear_flags(void) { @@ -227,6 +228,11 @@ static inline void pm_set_resume_via_firmware(void) pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_RESUME; } +static inline void pm_set_suspend_no_platform(void) +{ + pm_suspend_global_flags |= PM_SUSPEND_FLAG_NO_PLATFORM; +} + /** * pm_suspend_via_firmware - Check if platform firmware will suspend the system. * @@ -268,6 +274,22 @@ static inline bool pm_resume_via_firmware(void) return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_RESUME); } +/** + * pm_suspend_no_platform - Check if platform may change device power states. + * + * To be called during system-wide power management transitions to sleep states + * or during the subsequent system-wide transitions back to the working state. + * + * Return 'true' if the power states of devices remain under full control of the + * kernel throughout the system-wide suspend and resume cycle in progress (that + * is, if a device is put into a certain power state during suspend, it can be + * expected to remain in that state during resume). + */ +static inline bool pm_suspend_no_platform(void) +{ + return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_NO_PLATFORM); +} + /* Suspend-to-idle state machnine. */ enum s2idle_states { S2IDLE_STATE_NONE, /* Not suspended/suspending. */ diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 0e01e6129145..5921599b6dc4 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -265,6 +265,7 @@ enum xa_lock_type { #define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U) #define XA_FLAGS_ZERO_BUSY ((__force gfp_t)8U) #define XA_FLAGS_ALLOC_WRAPPED ((__force gfp_t)16U) +#define XA_FLAGS_ACCOUNT ((__force gfp_t)32U) #define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \ (__force unsigned)(mark))) diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 4790beaa86e0..ee7405e759ba 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -262,8 +262,8 @@ static inline bool ip6_sk_ignore_df(const struct sock *sk) inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT; } -static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt, - struct in6_addr *daddr) +static inline const struct in6_addr *rt6_nexthop(const struct rt6_info *rt, + const struct in6_addr *daddr) { if (rt->rt6i_flags & RTF_GATEWAY) return &rt->rt6i_gateway; diff --git a/include/net/route.h b/include/net/route.h index 065b47754f05..55ff71ffb796 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -221,6 +221,7 @@ void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt); struct rtable *rt_dst_alloc(struct net_device *dev, unsigned int flags, u16 type, bool nopolicy, bool noxfrm, bool will_cache); +struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt); struct in_ifaddr; void fib_add_ifaddr(struct in_ifaddr *); diff --git a/include/net/tls.h b/include/net/tls.h index 4a55ce6a303f..53d96bca220d 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -373,21 +373,6 @@ static inline bool tls_is_partially_sent_record(struct tls_context *ctx) return !!ctx->partially_sent_record; } -static inline int tls_complete_pending_work(struct sock *sk, - struct tls_context *ctx, - int flags, long *timeo) -{ 
- int rc = 0; - - if (unlikely(sk->sk_write_pending)) - rc = wait_on_pending_writer(sk, timeo); - - if (!rc && tls_is_partially_sent_record(ctx)) - rc = tls_push_partial_record(sk, ctx, flags); - - return rc; -} - static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx) { return tls_ctx->pending_open_record_frags; diff --git a/init/initramfs.c b/init/initramfs.c index 178130fd61c2..c47dad0884f7 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -617,7 +617,7 @@ static inline void clean_rootfs(void) #endif /* CONFIG_BLK_DEV_RAM */ #ifdef CONFIG_BLK_DEV_RAM -static void populate_initrd_image(char *err) +static void __init populate_initrd_image(char *err) { ssize_t written; int fd; @@ -637,7 +637,7 @@ static void populate_initrd_image(char *err) ksys_close(fd); } #else -static void populate_initrd_image(char *err) +static void __init populate_initrd_image(char *err) { printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err); } diff --git a/kernel/cpu.c b/kernel/cpu.c index 077fde6fb953..ef1c565edc5d 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1964,6 +1964,9 @@ static ssize_t write_cpuhp_fail(struct device *dev, if (ret) return ret; + if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE) + return -EINVAL; + /* * Cannot fail STARTING/DYING callbacks. */ @@ -2339,6 +2342,9 @@ static int __init mitigations_parse_cmdline(char *arg) cpu_mitigations = CPU_MITIGATIONS_AUTO; else if (!strcmp(arg, "auto,nosmt")) cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT; + else + pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n", + arg); return 0; } diff --git a/kernel/events/core.c b/kernel/events/core.c index 23efe6792abc..ab52cf510378 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -5007,6 +5007,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg) if (perf_event_check_period(event, value)) return -EINVAL; + if (!event->attr.freq && (value & (1ULL << 63))) + return -EINVAL; + event_function_call(event, __perf_event_period, &value); return 0; @@ -5925,7 +5928,7 @@ static void perf_sample_regs_user(struct perf_regs *regs_user, if (user_mode(regs)) { regs_user->abi = perf_reg_abi(current); regs_user->regs = regs; - } else if (current->mm) { + } else if (!(current->flags & PF_KTHREAD)) { perf_get_regs_user(regs_user, regs, regs_user_copy); } else { regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; @@ -10041,6 +10044,12 @@ void perf_pmu_unregister(struct pmu *pmu) } EXPORT_SYMBOL_GPL(perf_pmu_unregister); +static inline bool has_extended_regs(struct perf_event *event) +{ + return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) || + (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK); +} + static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) { struct perf_event_context *ctx = NULL; @@ -10072,12 +10081,16 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) perf_event_ctx_unlock(event->group_leader, ctx); if (!ret) { + if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) && + has_extended_regs(event)) + ret = -EOPNOTSUPP; + if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE && - event_has_any_exclude_flag(event)) { - if (event->destroy) - event->destroy(event); + event_has_any_exclude_flag(event)) ret = -EINVAL; - } + + if (ret && event->destroy) + event->destroy(event); } if (ret) diff --git a/kernel/fork.c b/kernel/fork.c index 75675b9bf6df..fe83343da24b 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -248,7 +248,11 @@ static unsigned long *alloc_thread_stack_node(struct task_struct 
*tsk, int node) struct page *page = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER); - return page ? page_address(page) : NULL; + if (likely(page)) { + tsk->stack = page_address(page); + return tsk->stack; + } + return NULL; #endif } @@ -1712,31 +1716,6 @@ const struct file_operations pidfd_fops = { #endif }; -/** - * pidfd_create() - Create a new pid file descriptor. - * - * @pid: struct pid that the pidfd will reference - * - * This creates a new pid file descriptor with the O_CLOEXEC flag set. - * - * Note, that this function can only be called after the fd table has - * been unshared to avoid leaking the pidfd to the new process. - * - * Return: On success, a cloexec pidfd is returned. - * On error, a negative errno number will be returned. - */ -static int pidfd_create(struct pid *pid) -{ - int fd; - - fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid), - O_RDWR | O_CLOEXEC); - if (fd < 0) - put_pid(pid); - - return fd; -} - static void __delayed_free_task(struct rcu_head *rhp) { struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); @@ -1774,6 +1753,7 @@ static __latent_entropy struct task_struct *copy_process( int pidfd = -1, retval; struct task_struct *p; struct multiprocess_signals delayed; + struct file *pidfile = NULL; /* * Don't allow sharing the root directory with processes in a different @@ -1822,8 +1802,6 @@ static __latent_entropy struct task_struct *copy_process( } if (clone_flags & CLONE_PIDFD) { - int reserved; - /* * - CLONE_PARENT_SETTID is useless for pidfds and also * parent_tidptr is used to return pidfds. @@ -1834,16 +1812,6 @@ static __latent_entropy struct task_struct *copy_process( if (clone_flags & (CLONE_DETACHED | CLONE_PARENT_SETTID | CLONE_THREAD)) return ERR_PTR(-EINVAL); - - /* - * Verify that parent_tidptr is sane so we can potentially - * reuse it later. - */ - if (get_user(reserved, parent_tidptr)) - return ERR_PTR(-EFAULT); - - if (reserved != 0) - return ERR_PTR(-EINVAL); } /* @@ -2058,11 +2026,21 @@ static __latent_entropy struct task_struct *copy_process( * if the fd table isn't shared). 
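
The copy_process() rework that continues just below reserves the descriptor with get_unused_fd_flags(), builds the pidfd file separately, and only makes it visible with fd_install() once no further failure is possible; on error it unwinds with fput()/put_unused_fd() instead of closing a live fd. A generic userspace sketch of that reserve/build/publish shape, using made-up helpers (none of these names are kernel APIs):

#include <stdio.h>
#include <stdlib.h>

#define NR_IDS 16

struct object { int payload; };

static int reserved[NR_IDS];             /* id handed out, not yet visible */
static struct object *table[NR_IDS];     /* published id -> object         */

static int reserve_id(void)              /* cf. get_unused_fd_flags()      */
{
    for (int i = 0; i < NR_IDS; i++) {
        if (!reserved[i]) {
            reserved[i] = 1;
            return i;
        }
    }
    return -1;
}

static void unreserve_id(int id)         /* cf. put_unused_fd()            */
{
    reserved[id] = 0;
}

static void publish_id(int id, struct object *obj)   /* cf. fd_install()   */
{
    table[id] = obj;
}

static int create_published(int payload)
{
    struct object *obj;
    int id = reserve_id();

    if (id < 0)
        return -1;

    obj = malloc(sizeof(*obj));          /* cf. anon_inode_getfile()       */
    if (!obj)
        goto err_unreserve;
    obj->payload = payload;

    if (payload < 0)                     /* stand-in for any later failure */
        goto err_free;

    publish_id(id, obj);                 /* past the last point of failure */
    return id;

err_free:
    free(obj);
err_unreserve:
    unreserve_id(id);
    return -1;
}

int main(void)
{
    int id = create_published(42);

    printf("good: id=%d payload=%d\n", id, id >= 0 ? table[id]->payload : 0);
    printf("bad : id=%d\n", create_published(-1));   /* -1, nothing leaked */

    if (id >= 0) {
        free(table[id]);
        unreserve_id(id);
    }
    return 0;
}
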
*/ if (clone_flags & CLONE_PIDFD) { - retval = pidfd_create(pid); + retval = get_unused_fd_flags(O_RDWR | O_CLOEXEC); if (retval < 0) goto bad_fork_free_pid; pidfd = retval; + + pidfile = anon_inode_getfile("[pidfd]", &pidfd_fops, pid, + O_RDWR | O_CLOEXEC); + if (IS_ERR(pidfile)) { + put_unused_fd(pidfd); + retval = PTR_ERR(pidfile); + goto bad_fork_free_pid; + } + get_pid(pid); /* held by pidfile now */ + retval = put_user(pidfd, parent_tidptr); if (retval) goto bad_fork_put_pidfd; @@ -2180,6 +2158,9 @@ static __latent_entropy struct task_struct *copy_process( goto bad_fork_cancel_cgroup; } + /* past the last point of failure */ + if (pidfile) + fd_install(pidfd, pidfile); init_task_pid_links(p); if (likely(p->pid)) { @@ -2246,8 +2227,10 @@ bad_fork_cancel_cgroup: bad_fork_cgroup_threadgroup_change_end: cgroup_threadgroup_change_end(current); bad_fork_put_pidfd: - if (clone_flags & CLONE_PIDFD) - ksys_close(pidfd); + if (clone_flags & CLONE_PIDFD) { + fput(pidfile); + put_unused_fd(pidfd); + } bad_fork_free_pid: if (pid != &init_struct_pid) free_pid(pid); diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 9505101ed2bc..096211299c07 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -493,6 +493,9 @@ int suspend_devices_and_enter(suspend_state_t state) pm_suspend_target_state = state; + if (state == PM_SUSPEND_TO_IDLE) + pm_set_suspend_no_platform(); + error = platform_suspend_begin(state); if (error) goto Close; diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 8456b6e2205f..705887f63288 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -79,9 +79,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent, */ static void ptrace_link(struct task_struct *child, struct task_struct *new_parent) { - rcu_read_lock(); - __ptrace_link(child, new_parent, __task_cred(new_parent)); - rcu_read_unlock(); + __ptrace_link(child, new_parent, current_cred()); } /** diff --git a/kernel/signal.c b/kernel/signal.c index d622eac9d169..edf8915ddd54 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2912,7 +2912,8 @@ EXPORT_SYMBOL(set_compat_user_sigmask); * This is useful for syscalls such as ppoll, pselect, io_pgetevents and * epoll_pwait where a new sigmask is passed in from userland for the syscalls. */ -void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved) +void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved, + bool interrupted) { if (!usigmask) @@ -2922,7 +2923,7 @@ void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved) * Restoring sigmask here can lead to delivering signals that the above * syscalls are intended to block because of the sigmask passed in. */ - if (signal_pending(current)) { + if (interrupted) { current->saved_sigmask = *sigsaved; set_restore_sigmask(); return; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 38277af44f5c..576c41644e77 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -34,7 +34,6 @@ #include <linux/hash.h> #include <linux/rcupdate.h> #include <linux/kprobes.h> -#include <linux/memory.h> #include <trace/events/sched.h> @@ -2611,12 +2610,10 @@ static void ftrace_run_update_code(int command) { int ret; - mutex_lock(&text_mutex); - ret = ftrace_arch_code_modify_prepare(); FTRACE_WARN_ON(ret); if (ret) - goto out_unlock; + return; /* * By default we use stop_machine() to modify the code. 
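
Pulling together the suspend-to-idle pieces spread across the hunks above (the new PM_SUSPEND_FLAG_NO_PLATFORM bit in suspend.h, pm_set_suspend_no_platform() called from suspend_devices_and_enter(), and the pm_suspend_no_platform() test in the earlier PCI PM hunks), here is a condensed userspace rendering of how the flag is set and queried; the local skip_bus_pm variable stands in for pci_dev->skip_bus_pm:

#include <stdio.h>

#define BIT(n)                       (1u << (n))
#define PM_SUSPEND_FLAG_FW_SUSPEND   BIT(0)
#define PM_SUSPEND_FLAG_FW_RESUME    BIT(1)
#define PM_SUSPEND_FLAG_NO_PLATFORM  BIT(2)

static unsigned int pm_suspend_global_flags;

static void pm_set_suspend_no_platform(void)
{
    pm_suspend_global_flags |= PM_SUSPEND_FLAG_NO_PLATFORM;
}

static int pm_suspend_no_platform(void)
{
    return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_NO_PLATFORM);
}

int main(void)
{
    int skip_bus_pm = 1;             /* stand-in for pci_dev->skip_bus_pm */

    /* the suspend-to-idle entry path sets the flag ... */
    pm_set_suspend_no_platform();

    /* ... so the noirq callbacks may leave device power states alone */
    if (skip_bus_pm && pm_suspend_no_platform())
        printf("PCI PM: Skipped\n");
    return 0;
}
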
@@ -2628,9 +2625,6 @@ static void ftrace_run_update_code(int command) ret = ftrace_arch_code_modify_post_process(); FTRACE_WARN_ON(ret); - -out_unlock: - mutex_unlock(&text_mutex); } static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, @@ -5784,7 +5778,6 @@ void ftrace_module_enable(struct module *mod) struct ftrace_page *pg; mutex_lock(&ftrace_lock); - mutex_lock(&text_mutex); if (ftrace_disabled) goto out_unlock; @@ -5846,7 +5839,6 @@ void ftrace_module_enable(struct module *mod) ftrace_arch_code_modify_post_process(); out_unlock: - mutex_unlock(&text_mutex); mutex_unlock(&ftrace_lock); process_cached_mods(mod->name); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 83e08b78dbee..c3aabb576fe5 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -6719,11 +6719,13 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, break; } #endif - if (!tr->allocated_snapshot) { + if (tr->allocated_snapshot) + ret = resize_buffer_duplicate_size(&tr->max_buffer, + &tr->trace_buffer, iter->cpu_file); + else ret = tracing_alloc_snapshot_instance(tr); - if (ret < 0) - break; - } + if (ret < 0) + break; local_irq_disable(); /* Now, we're going to swap */ if (iter->cpu_file == RING_BUFFER_ALL_CPUS) @@ -7126,12 +7128,24 @@ static ssize_t tracing_err_log_write(struct file *file, return count; } +static int tracing_err_log_release(struct inode *inode, struct file *file) +{ + struct trace_array *tr = inode->i_private; + + trace_array_put(tr); + + if (file->f_mode & FMODE_READ) + seq_release(inode, file); + + return 0; +} + static const struct file_operations tracing_err_log_fops = { .open = tracing_err_log_open, .write = tracing_err_log_write, .read = seq_read, .llseek = seq_lseek, - .release = tracing_release_generic_tr, + .release = tracing_err_log_release, }; static int tracing_buffers_open(struct inode *inode, struct file *filp) diff --git a/lib/devres.c b/lib/devres.c index 69bed2f38306..6a0e9bd6524a 100644 --- a/lib/devres.c +++ b/lib/devres.c @@ -131,7 +131,8 @@ EXPORT_SYMBOL(devm_iounmap); * if (IS_ERR(base)) * return PTR_ERR(base); */ -void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res) +void __iomem *devm_ioremap_resource(struct device *dev, + const struct resource *res) { resource_size_t size; void __iomem *dest_ptr; diff --git a/lib/idr.c b/lib/idr.c index c34e256d2f01..66a374892482 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -228,11 +228,21 @@ void *idr_get_next(struct idr *idr, int *nextid) { struct radix_tree_iter iter; void __rcu **slot; + void *entry = NULL; unsigned long base = idr->idr_base; unsigned long id = *nextid; id = (id < base) ? 
0 : id - base; - slot = radix_tree_iter_find(&idr->idr_rt, &iter, id); + radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, id) { + entry = rcu_dereference_raw(*slot); + if (!entry) + continue; + if (!xa_is_internal(entry)) + break; + if (slot != &idr->idr_rt.xa_head && !xa_is_retry(entry)) + break; + slot = radix_tree_iter_retry(&iter); + } if (!slot) return NULL; id = iter.index + base; @@ -241,7 +251,7 @@ void *idr_get_next(struct idr *idr, int *nextid) return NULL; *nextid = id; - return rcu_dereference_raw(*slot); + return entry; } EXPORT_SYMBOL(idr_get_next); diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c index 82b19e4f1189..2fd7a46d55ec 100644 --- a/lib/mpi/mpi-pow.c +++ b/lib/mpi/mpi-pow.c @@ -24,6 +24,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) { mpi_ptr_t mp_marker = NULL, bp_marker = NULL, ep_marker = NULL; + struct karatsuba_ctx karactx = {}; mpi_ptr_t xp_marker = NULL; mpi_ptr_t tspace = NULL; mpi_ptr_t rp, ep, mp, bp; @@ -150,13 +151,11 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) int c; mpi_limb_t e; mpi_limb_t carry_limb; - struct karatsuba_ctx karactx; xp = xp_marker = mpi_alloc_limb_space(2 * (msize + 1)); if (!xp) goto enomem; - memset(&karactx, 0, sizeof karactx); negative_result = (ep[0] & 1) && base->sign; i = esize - 1; @@ -281,8 +280,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) if (mod_shift_cnt) mpihelp_rshift(rp, rp, rsize, mod_shift_cnt); MPN_NORMALIZE(rp, rsize); - - mpihelp_release_karatsuba_ctx(&karactx); } if (negative_result && rsize) { @@ -299,6 +296,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) leave: rc = 0; enomem: + mpihelp_release_karatsuba_ctx(&karactx); if (assign_rp) mpi_assign_limb_space(res, rp, size); if (mp_marker) diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 5d4bad8bd96a..9d631a7b6a70 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -38,6 +38,12 @@ static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp) return xa_store(xa, index, xa_mk_index(index), gfp); } +static void xa_insert_index(struct xarray *xa, unsigned long index) +{ + XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index), + GFP_KERNEL) != 0); +} + static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) { u32 id; @@ -338,6 +344,37 @@ static noinline void check_xa_shrink(struct xarray *xa) } } +static noinline void check_insert(struct xarray *xa) +{ + unsigned long i; + + for (i = 0; i < 1024; i++) { + xa_insert_index(xa, i); + XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL); + XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL); + xa_erase_index(xa, i); + } + + for (i = 10; i < BITS_PER_LONG; i++) { + xa_insert_index(xa, 1UL << i); + XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL); + XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL); + xa_erase_index(xa, 1UL << i); + + xa_insert_index(xa, (1UL << i) - 1); + XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL); + XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL); + xa_erase_index(xa, (1UL << i) - 1); + } + + xa_insert_index(xa, ~0UL); + XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL); + XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL); + xa_erase_index(xa, ~0UL); + + XA_BUG_ON(xa, !xa_empty(xa)); +} + static noinline void check_cmpxchg(struct xarray *xa) { void *FIVE = xa_mk_value(5); @@ -1527,6 +1564,7 @@ static int xarray_checks(void) check_xa_mark(&array); check_xa_shrink(&array); check_xas_erase(&array); + check_insert(&array); check_cmpxchg(&array); check_reserve(&array); check_reserve(&xa0); diff --git a/lib/xarray.c b/lib/xarray.c index 
6be3acbb861f..446b956c9188 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -298,6 +298,8 @@ bool xas_nomem(struct xa_state *xas, gfp_t gfp) xas_destroy(xas); return false; } + if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT) + gfp |= __GFP_ACCOUNT; xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); if (!xas->xa_alloc) return false; @@ -325,6 +327,8 @@ static bool __xas_nomem(struct xa_state *xas, gfp_t gfp) xas_destroy(xas); return false; } + if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT) + gfp |= __GFP_ACCOUNT; if (gfpflags_allow_blocking(gfp)) { xas_unlock_type(xas, lock_type); xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); @@ -358,8 +362,12 @@ static void *xas_alloc(struct xa_state *xas, unsigned int shift) if (node) { xas->xa_alloc = NULL; } else { - node = kmem_cache_alloc(radix_tree_node_cachep, - GFP_NOWAIT | __GFP_NOWARN); + gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; + + if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT) + gfp |= __GFP_ACCOUNT; + + node = kmem_cache_alloc(radix_tree_node_cachep, gfp); if (!node) { xas_set_err(xas, -ENOMEM); return NULL; diff --git a/mm/filemap.c b/mm/filemap.c index df2006ba0cfa..6dd9a2274c80 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -281,11 +281,11 @@ EXPORT_SYMBOL(delete_from_page_cache); * @pvec: pagevec with pages to delete * * The function walks over mapping->i_pages and removes pages passed in @pvec - * from the mapping. The function expects @pvec to be sorted by page index - * and is optimised for it to be dense. + * from the mapping. The function expects @pvec to be sorted by page index. * It tolerates holes in @pvec (mapping entries at those indices are not * modified). The function expects only THP head pages to be present in the - * @pvec. + * @pvec and takes care to delete all corresponding tail pages from the + * mapping as well. * * The function expects the i_pages lock to be held. */ @@ -294,44 +294,40 @@ static void page_cache_delete_batch(struct address_space *mapping, { XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index); int total_pages = 0; - int i = 0; + int i = 0, tail_pages = 0; struct page *page; mapping_set_update(&xas, mapping); xas_for_each(&xas, page, ULONG_MAX) { - if (i >= pagevec_count(pvec)) + if (i >= pagevec_count(pvec) && !tail_pages) break; - - /* A swap/dax/shadow entry got inserted? Skip it. */ if (xa_is_value(page)) continue; - /* - * A page got inserted in our range? Skip it. We have our - * pages locked so they are protected from being removed. - * If we see a page whose index is higher than ours, it - * means our page has been removed, which shouldn't be - * possible because we're holding the PageLock. - */ - if (page != pvec->pages[i]) { - VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index, - page); - continue; - } - - WARN_ON_ONCE(!PageLocked(page)); - - if (page->index == xas.xa_index) + if (!tail_pages) { + /* + * Some page got inserted in our range? Skip it. We + * have our pages locked so they are protected from + * being removed. + */ + if (page != pvec->pages[i]) { + VM_BUG_ON_PAGE(page->index > + pvec->pages[i]->index, page); + continue; + } + WARN_ON_ONCE(!PageLocked(page)); + if (PageTransHuge(page) && !PageHuge(page)) + tail_pages = HPAGE_PMD_NR - 1; page->mapping = NULL; - /* Leave page->index set: truncation lookup relies on it */ - - /* - * Move to the next page in the vector if this is a regular - * page or the index is of the last sub-page of this compound - * page. 
- */ - if (page->index + (1UL << compound_order(page)) - 1 == - xas.xa_index) + /* + * Leave page->index set: truncation lookup relies + * upon it + */ i++; + } else { + VM_BUG_ON_PAGE(page->index + HPAGE_PMD_NR - tail_pages + != pvec->pages[i]->index, page); + tail_pages--; + } xas_store(&xas, NULL); total_pages++; } @@ -1498,7 +1494,7 @@ EXPORT_SYMBOL(page_cache_prev_miss); struct page *find_get_entry(struct address_space *mapping, pgoff_t offset) { XA_STATE(xas, &mapping->i_pages, offset); - struct page *page; + struct page *head, *page; rcu_read_lock(); repeat: @@ -1513,19 +1509,25 @@ repeat: if (!page || xa_is_value(page)) goto out; - if (!page_cache_get_speculative(page)) + head = compound_head(page); + if (!page_cache_get_speculative(head)) + goto repeat; + + /* The page was split under us? */ + if (compound_head(page) != head) { + put_page(head); goto repeat; + } /* - * Has the page moved or been split? + * Has the page moved? * This is part of the lockless pagecache protocol. See * include/linux/pagemap.h for details. */ if (unlikely(page != xas_reload(&xas))) { - put_page(page); + put_page(head); goto repeat; } - page = find_subpage(page, offset); out: rcu_read_unlock(); @@ -1707,6 +1709,7 @@ unsigned find_get_entries(struct address_space *mapping, rcu_read_lock(); xas_for_each(&xas, page, ULONG_MAX) { + struct page *head; if (xas_retry(&xas, page)) continue; /* @@ -1717,13 +1720,17 @@ unsigned find_get_entries(struct address_space *mapping, if (xa_is_value(page)) goto export; - if (!page_cache_get_speculative(page)) + head = compound_head(page); + if (!page_cache_get_speculative(head)) goto retry; - /* Has the page moved or been split? */ + /* The page was split under us? */ + if (compound_head(page) != head) + goto put_page; + + /* Has the page moved? */ if (unlikely(page != xas_reload(&xas))) goto put_page; - page = find_subpage(page, xas.xa_index); export: indices[ret] = xas.xa_index; @@ -1732,7 +1739,7 @@ export: break; continue; put_page: - put_page(page); + put_page(head); retry: xas_reset(&xas); } @@ -1774,27 +1781,33 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, rcu_read_lock(); xas_for_each(&xas, page, end) { + struct page *head; if (xas_retry(&xas, page)) continue; /* Skip over shadow, swap and DAX entries */ if (xa_is_value(page)) continue; - if (!page_cache_get_speculative(page)) + head = compound_head(page); + if (!page_cache_get_speculative(head)) goto retry; - /* Has the page moved or been split? */ + /* The page was split under us? */ + if (compound_head(page) != head) + goto put_page; + + /* Has the page moved? */ if (unlikely(page != xas_reload(&xas))) goto put_page; - pages[ret] = find_subpage(page, xas.xa_index); + pages[ret] = page; if (++ret == nr_pages) { *start = xas.xa_index + 1; goto out; } continue; put_page: - put_page(page); + put_page(head); retry: xas_reset(&xas); } @@ -1839,6 +1852,7 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, rcu_read_lock(); for (page = xas_load(&xas); page; page = xas_next(&xas)) { + struct page *head; if (xas_retry(&xas, page)) continue; /* @@ -1848,19 +1862,24 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, if (xa_is_value(page)) break; - if (!page_cache_get_speculative(page)) + head = compound_head(page); + if (!page_cache_get_speculative(head)) goto retry; - /* Has the page moved or been split? */ + /* The page was split under us? */ + if (compound_head(page) != head) + goto put_page; + + /* Has the page moved? 
*/ if (unlikely(page != xas_reload(&xas))) goto put_page; - pages[ret] = find_subpage(page, xas.xa_index); + pages[ret] = page; if (++ret == nr_pages) break; continue; put_page: - put_page(page); + put_page(head); retry: xas_reset(&xas); } @@ -1896,6 +1915,7 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, rcu_read_lock(); xas_for_each_marked(&xas, page, end, tag) { + struct page *head; if (xas_retry(&xas, page)) continue; /* @@ -1906,21 +1926,26 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, if (xa_is_value(page)) continue; - if (!page_cache_get_speculative(page)) + head = compound_head(page); + if (!page_cache_get_speculative(head)) goto retry; - /* Has the page moved or been split? */ + /* The page was split under us? */ + if (compound_head(page) != head) + goto put_page; + + /* Has the page moved? */ if (unlikely(page != xas_reload(&xas))) goto put_page; - pages[ret] = find_subpage(page, xas.xa_index); + pages[ret] = page; if (++ret == nr_pages) { *index = xas.xa_index + 1; goto out; } continue; put_page: - put_page(page); + put_page(head); retry: xas_reset(&xas); } @@ -2603,7 +2628,7 @@ void filemap_map_pages(struct vm_fault *vmf, pgoff_t last_pgoff = start_pgoff; unsigned long max_idx; XA_STATE(xas, &mapping->i_pages, start_pgoff); - struct page *page; + struct page *head, *page; rcu_read_lock(); xas_for_each(&xas, page, end_pgoff) { @@ -2612,19 +2637,24 @@ void filemap_map_pages(struct vm_fault *vmf, if (xa_is_value(page)) goto next; + head = compound_head(page); + /* * Check for a locked page first, as a speculative * reference may adversely influence page migration. */ - if (PageLocked(page)) + if (PageLocked(head)) goto next; - if (!page_cache_get_speculative(page)) + if (!page_cache_get_speculative(head)) goto next; - /* Has the page moved or been split? */ + /* The page was split under us? */ + if (compound_head(page) != head) + goto skip; + + /* Has the page moved? */ if (unlikely(page != xas_reload(&xas))) goto skip; - page = find_subpage(page, xas.xa_index); if (!PageUptodate(page) || PageReadahead(page) || diff --git a/mm/huge_memory.c b/mm/huge_memory.c index bb8b617e34ed..885642c82aaa 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2496,9 +2496,6 @@ static void __split_huge_page(struct page *page, struct list_head *list, if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) shmem_uncharge(head->mapping->host, 1); put_page(head + i); - } else if (!PageAnon(page)) { - __xa_store(&head->mapping->i_pages, head[i].index, - head + i, 0); } } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index ac843d32b019..ede7e7f5d1ab 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1510,16 +1510,29 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, /* * Dissolve a given free hugepage into free buddy pages. This function does - * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the - * dissolution fails because a give page is not a free hugepage, or because - * free hugepages are fully reserved. + * nothing for in-use hugepages and non-hugepages. + * This function returns values like below: + * + * -EBUSY: failed to dissolved free hugepages or the hugepage is in-use + * (allocated or reserved.) 
+ * 0: successfully dissolved free hugepages or the page is not a + * hugepage (considered as already dissolved) */ int dissolve_free_huge_page(struct page *page) { int rc = -EBUSY; + /* Not to disrupt normal path by vainly holding hugetlb_lock */ + if (!PageHuge(page)) + return 0; + spin_lock(&hugetlb_lock); - if (PageHuge(page) && !page_count(page)) { + if (!PageHuge(page)) { + rc = 0; + goto out; + } + + if (!page_count(page)) { struct page *head = compound_head(page); struct hstate *h = page_hstate(head); int nid = page_to_nid(head); @@ -1564,11 +1577,9 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn) for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) { page = pfn_to_page(pfn); - if (PageHuge(page) && !page_count(page)) { - rc = dissolve_free_huge_page(page); - if (rc) - break; - } + rc = dissolve_free_huge_page(page); + if (rc) + break; } return rc; diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 0f7419938008..eaaa21b23215 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1378,7 +1378,7 @@ static void collapse_shmem(struct mm_struct *mm, result = SCAN_FAIL; goto xa_locked; } - xas_store(&xas, new_page); + xas_store(&xas, new_page + (index % HPAGE_PMD_NR)); nr_none++; continue; } @@ -1454,7 +1454,7 @@ static void collapse_shmem(struct mm_struct *mm, list_add_tail(&page->lru, &pagelist); /* Finally, replace with the new page. */ - xas_store(&xas, new_page); + xas_store(&xas, new_page + (index % HPAGE_PMD_NR)); continue; out_unlock: unlock_page(page); diff --git a/mm/memfd.c b/mm/memfd.c index 2647c898990c..650e65a46b9c 100644 --- a/mm/memfd.c +++ b/mm/memfd.c @@ -39,7 +39,6 @@ static void memfd_tag_pins(struct xa_state *xas) xas_for_each(xas, page, ULONG_MAX) { if (xa_is_value(page)) continue; - page = find_subpage(page, xas->xa_index); if (page_count(page) - page_mapcount(page) > 1) xas_set_mark(xas, MEMFD_TAG_PINNED); @@ -89,7 +88,6 @@ static int memfd_wait_for_pins(struct address_space *mapping) bool clear = true; if (xa_is_value(page)) continue; - page = find_subpage(page, xas.xa_index); if (page_count(page) - page_mapcount(page) != 1) { /* * On the last scan, we clean up all those tags diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 8da0334b9ca0..d9cc6606f409 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1730,6 +1730,8 @@ static int soft_offline_huge_page(struct page *page, int flags) if (!ret) { if (set_hwpoison_free_buddy_page(page)) num_poisoned_pages_inc(); + else + ret = -EBUSY; } } return ret; @@ -1854,11 +1856,8 @@ static int soft_offline_in_use_page(struct page *page, int flags) static int soft_offline_free_page(struct page *page) { - int rc = 0; - struct page *head = compound_head(page); + int rc = dissolve_free_huge_page(page); - if (PageHuge(head)) - rc = dissolve_free_huge_page(page); if (!rc) { if (set_hwpoison_free_buddy_page(page)) num_poisoned_pages_inc(); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 01600d80ae01..fdcb73536319 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -306,7 +306,7 @@ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes) else { nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed, *nodes); - pol->w.cpuset_mems_allowed = tmp; + pol->w.cpuset_mems_allowed = *nodes; } if (nodes_empty(tmp)) diff --git a/mm/migrate.c b/mm/migrate.c index f2ecc2855a12..e9594bc0d406 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -463,7 +463,7 @@ int migrate_page_move_mapping(struct address_space *mapping, for (i = 1; i < HPAGE_PMD_NR; i++) { 
xas_next(&xas); - xas_store(&xas, newpage); + xas_store(&xas, newpage + i); } } diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 5a58778c91d4..f719b64741d6 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -987,8 +987,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message) /* * Determines whether the kernel must panic because of the panic_on_oom sysctl. */ -static void check_panic_on_oom(struct oom_control *oc, - enum oom_constraint constraint) +static void check_panic_on_oom(struct oom_control *oc) { if (likely(!sysctl_panic_on_oom)) return; @@ -998,7 +997,7 @@ static void check_panic_on_oom(struct oom_control *oc, * does not panic for cpuset, mempolicy, or memcg allocation * failures. */ - if (constraint != CONSTRAINT_NONE) + if (oc->constraint != CONSTRAINT_NONE) return; } /* Do not panic for oom kills triggered by sysrq */ @@ -1035,7 +1034,6 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier); bool out_of_memory(struct oom_control *oc) { unsigned long freed = 0; - enum oom_constraint constraint = CONSTRAINT_NONE; if (oom_killer_disabled) return false; @@ -1071,10 +1069,10 @@ bool out_of_memory(struct oom_control *oc) * Check if there were limitations on the allocation (only relevant for * NUMA and memcg) that may require different handling. */ - constraint = constrained_alloc(oc); - if (constraint != CONSTRAINT_MEMORY_POLICY) + oc->constraint = constrained_alloc(oc); + if (oc->constraint != CONSTRAINT_MEMORY_POLICY) oc->nodemask = NULL; - check_panic_on_oom(oc, constraint); + check_panic_on_oom(oc); if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task && current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) && diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d66bc8abe0af..8e3bc949ebcc 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1826,7 +1826,8 @@ deferred_grow_zone(struct zone *zone, unsigned int order) first_deferred_pfn)) { pgdat->first_deferred_pfn = ULONG_MAX; pgdat_resize_unlock(pgdat, &flags); - return true; + /* Retry only once. 
*/ + return first_deferred_pfn != ULONG_MAX; } /* diff --git a/mm/page_idle.c b/mm/page_idle.c index 0b39ec0c945c..295512465065 100644 --- a/mm/page_idle.c +++ b/mm/page_idle.c @@ -136,7 +136,7 @@ static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj, end_pfn = pfn + count * BITS_PER_BYTE; if (end_pfn > max_pfn) - end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS); + end_pfn = max_pfn; for (; pfn < end_pfn; pfn++) { bit = pfn % BITMAP_CHUNK_BITS; @@ -181,7 +181,7 @@ static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj, end_pfn = pfn + count * BITS_PER_BYTE; if (end_pfn > max_pfn) - end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS); + end_pfn = max_pfn; for (; pfn < end_pfn; pfn++) { bit = pfn % BITMAP_CHUNK_BITS; diff --git a/mm/page_io.c b/mm/page_io.c index 2e8019d0e048..a39aac2f8c8d 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -29,10 +29,9 @@ static struct bio *get_swap_bio(gfp_t gfp_flags, struct page *page, bio_end_io_t end_io) { - int i, nr = hpage_nr_pages(page); struct bio *bio; - bio = bio_alloc(gfp_flags, nr); + bio = bio_alloc(gfp_flags, 1); if (bio) { struct block_device *bdev; @@ -41,9 +40,7 @@ static struct bio *get_swap_bio(gfp_t gfp_flags, bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9; bio->bi_end_io = end_io; - for (i = 0; i < nr; i++) - bio_add_page(bio, page + i, PAGE_SIZE, 0); - VM_BUG_ON(bio->bi_iter.bi_size != PAGE_SIZE * nr); + bio_add_page(bio, page, PAGE_SIZE * hpage_nr_pages(page), 0); } return bio; } @@ -140,8 +137,10 @@ out: unlock_page(page); WRITE_ONCE(bio->bi_private, NULL); bio_put(bio); - blk_wake_io_task(waiter); - put_task_struct(waiter); + if (waiter) { + blk_wake_io_task(waiter); + put_task_struct(waiter); + } } int generic_swapfile_activate(struct swap_info_struct *sis, @@ -398,11 +397,12 @@ int swap_readpage(struct page *page, bool synchronous) * Keep this task valid during swap readpage because the oom killer may * attempt to access it in the page fault retry time check. 
*/ - get_task_struct(current); - bio->bi_private = current; bio_set_op_attrs(bio, REQ_OP_READ, 0); - if (synchronous) + if (synchronous) { bio->bi_opf |= REQ_HIPRI; + get_task_struct(current); + bio->bi_private = current; + } count_vm_event(PSWPIN); bio_get(bio); qc = submit_bio(bio); diff --git a/mm/shmem.c b/mm/shmem.c index 1bb3b8dc8bb2..f4dce9c8670d 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -614,7 +614,7 @@ static int shmem_add_to_page_cache(struct page *page, if (xas_error(&xas)) goto unlock; next: - xas_store(&xas, page); + xas_store(&xas, page + i); if (++i < nr) { xas_next(&xas); goto next; diff --git a/mm/swap_state.c b/mm/swap_state.c index eb714165afd2..85245fdec8d9 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -132,7 +132,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp) for (i = 0; i < nr; i++) { VM_BUG_ON_PAGE(xas.xa_index != idx + i, page); set_page_private(page + i, entry.val + i); - xas_store(&xas, page); + xas_store(&xas, page + i); xas_next(&xas); } address_space->nrpages += nr; @@ -167,7 +167,7 @@ void __delete_from_swap_cache(struct page *page, swp_entry_t entry) for (i = 0; i < nr; i++) { void *entry = xas_store(&xas, NULL); - VM_BUG_ON_PAGE(entry != page, entry); + VM_BUG_ON_PAGE(entry != page + i, entry); set_page_private(page + i, 0); xas_next(&xas); } diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 4c9e150e5ad3..0f76cca32a1c 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -913,7 +913,7 @@ adjust_va_to_fit_type(struct vmap_area *va, unsigned long nva_start_addr, unsigned long size, enum fit_type type) { - struct vmap_area *lva; + struct vmap_area *lva = NULL; if (type == FL_FIT_TYPE) { /* @@ -972,7 +972,7 @@ adjust_va_to_fit_type(struct vmap_area *va, if (type != FL_FIT_TYPE) { augment_tree_propagate_from(va); - if (type == NE_FIT_TYPE) + if (lva) /* type == NE_FIT_TYPE */ insert_vmap_area_augment(lva, &va->rb_node, &free_vmap_area_root, &free_vmap_area_list); } diff --git a/mm/vmscan.c b/mm/vmscan.c index 7889f583ced9..910e02c793ff 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3644,19 +3644,18 @@ out: } /* - * pgdat->kswapd_classzone_idx is the highest zone index that a recent - * allocation request woke kswapd for. When kswapd has not woken recently, - * the value is MAX_NR_ZONES which is not a valid index. This compares a - * given classzone and returns it or the highest classzone index kswapd - * was recently woke for. + * The pgdat->kswapd_classzone_idx is used to pass the highest zone index to be + * reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES which is not + * a valid index then either kswapd runs for first time or kswapd couldn't sleep + * after previous reclaim attempt (node is still unbalanced). In that case + * return the zone index of the previous kswapd reclaim cycle. 
*/ static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat, - enum zone_type classzone_idx) + enum zone_type prev_classzone_idx) { if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES) - return classzone_idx; - - return max(pgdat->kswapd_classzone_idx, classzone_idx); + return prev_classzone_idx; + return pgdat->kswapd_classzone_idx; } static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, @@ -3797,7 +3796,7 @@ kswapd_try_sleep: /* Read the new order and classzone_idx */ alloc_order = reclaim_order = pgdat->kswapd_order; - classzone_idx = kswapd_classzone_idx(pgdat, 0); + classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); pgdat->kswapd_order = 0; pgdat->kswapd_classzone_idx = MAX_NR_ZONES; @@ -3851,8 +3850,12 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, if (!cpuset_zone_allowed(zone, gfp_flags)) return; pgdat = zone->zone_pgdat; - pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, - classzone_idx); + + if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES) + pgdat->kswapd_classzone_idx = classzone_idx; + else + pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx, + classzone_idx); pgdat->kswapd_order = max(pgdat->kswapd_order, order); if (!waitqueue_active(&pgdat->kswapd_wait)) return; diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index 19d27bee285e..1555b0c6f7ec 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -160,10 +160,10 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev, struct in6_addr *daddr, struct sk_buff *skb) { - struct lowpan_peer *peer; - struct in6_addr *nexthop; struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); int count = atomic_read(&dev->peer_count); + const struct in6_addr *nexthop; + struct lowpan_peer *peer; BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 9f77432dbe38..5406d7cd46ad 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -1353,7 +1353,7 @@ static bool l2cap_check_enc_key_size(struct hci_conn *hcon) * actually encrypted before enforcing a key size. */ return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) || - hcon->enc_key_size > HCI_MIN_ENC_KEY_SIZE); + hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE); } static void l2cap_do_start(struct l2cap_chan *chan) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 16f9159234a2..8c2ec35b6512 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -318,6 +318,7 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *sk static int ip_mc_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) { + struct rtable *new_rt; int ret; ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb); @@ -326,6 +327,17 @@ static int ip_mc_finish_output(struct net *net, struct sock *sk, return ret; } + /* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting + * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten, + * see ipv4_pktinfo_prepare(). 
+ */ + new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb)); + if (new_rt) { + new_rt->rt_iif = 0; + skb_dst_drop(skb); + skb_dst_set(skb, &new_rt->dst); + } + return dev_loopback_xmit(net, sk, skb); } diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 0b8e06ca75d6..40a6abbc9cf6 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -197,7 +197,7 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash) } sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol, iph->saddr, iph->daddr, - skb->dev->ifindex, sdif); + dif, sdif); } out: read_unlock(&raw_v4_hashinfo.lock); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 6cb7cff22db9..8ea0735a6754 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1647,6 +1647,39 @@ struct rtable *rt_dst_alloc(struct net_device *dev, } EXPORT_SYMBOL(rt_dst_alloc); +struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt) +{ + struct rtable *new_rt; + + new_rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK, + rt->dst.flags); + + if (new_rt) { + new_rt->rt_genid = rt_genid_ipv4(dev_net(dev)); + new_rt->rt_flags = rt->rt_flags; + new_rt->rt_type = rt->rt_type; + new_rt->rt_is_input = rt->rt_is_input; + new_rt->rt_iif = rt->rt_iif; + new_rt->rt_pmtu = rt->rt_pmtu; + new_rt->rt_mtu_locked = rt->rt_mtu_locked; + new_rt->rt_gw_family = rt->rt_gw_family; + if (rt->rt_gw_family == AF_INET) + new_rt->rt_gw4 = rt->rt_gw4; + else if (rt->rt_gw_family == AF_INET6) + new_rt->rt_gw6 = rt->rt_gw6; + INIT_LIST_HEAD(&new_rt->rt_uncached); + + new_rt->dst.flags |= DST_HOST; + new_rt->dst.input = rt->dst.input; + new_rt->dst.output = rt->dst.output; + new_rt->dst.error = rt->dst.error; + new_rt->dst.lastuse = jiffies; + new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate); + } + return new_rt; +} +EXPORT_SYMBOL(rt_dst_clone); + /* called in rcu_read_lock() section */ int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, u8 tos, struct net_device *dev, diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 834475717110..21efcd02f337 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -59,8 +59,8 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff * { struct dst_entry *dst = skb_dst(skb); struct net_device *dev = dst->dev; + const struct in6_addr *nexthop; struct neighbour *neigh; - struct in6_addr *nexthop; int ret; if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 11ad62effd56..97a843cf164c 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -218,7 +218,8 @@ static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst, { const struct rt6_info *rt = container_of(dst, struct rt6_info, dst); - return ip6_neigh_lookup(&rt->rt6i_gateway, dst->dev, skb, daddr); + return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any), + dst->dev, skb, daddr); } static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr) @@ -5281,7 +5282,7 @@ static struct ctl_table ipv6_route_table_template[] = { .data = &init_net.ipv6.sysctl.skip_notify_on_dev_down, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c index 241317473114..cdfc33517e85 100644 --- a/net/netfilter/nf_flow_table_ip.c +++ b/net/netfilter/nf_flow_table_ip.c @@ -439,9 +439,9 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, struct 
nf_flowtable *flow_table = priv; struct flow_offload_tuple tuple = {}; enum flow_offload_tuple_dir dir; + const struct in6_addr *nexthop; struct flow_offload *flow; struct net_device *outdev; - struct in6_addr *nexthop; struct ipv6hdr *ip6h; struct rt6_info *rt; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index a29d66da7394..5f78df080573 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2401,6 +2401,9 @@ static void tpacket_destruct_skb(struct sk_buff *skb) ts = __packet_set_timestamp(po, ph, skb); __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); + + if (!packet_read_pending(&po->tx_ring)) + complete(&po->skb_completion); } sock_wfree(skb); @@ -2585,7 +2588,7 @@ static int tpacket_parse_header(struct packet_sock *po, void *frame, static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) { - struct sk_buff *skb; + struct sk_buff *skb = NULL; struct net_device *dev; struct virtio_net_hdr *vnet_hdr = NULL; struct sockcm_cookie sockc; @@ -2600,6 +2603,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) int len_sum = 0; int status = TP_STATUS_AVAILABLE; int hlen, tlen, copylen = 0; + long timeo = 0; mutex_lock(&po->pg_vec_lock); @@ -2646,12 +2650,21 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr) size_max = dev->mtu + reserve + VLAN_HLEN; + reinit_completion(&po->skb_completion); + do { ph = packet_current_frame(po, &po->tx_ring, TP_STATUS_SEND_REQUEST); if (unlikely(ph == NULL)) { - if (need_wait && need_resched()) - schedule(); + if (need_wait && skb) { + timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT); + timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo); + if (timeo <= 0) { + err = !timeo ? 
-ETIMEDOUT : -ERESTARTSYS; + goto out_put; + } + } + /* check for additional frames */ continue; } @@ -3207,6 +3220,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol, sock_init_data(sock, sk); po = pkt_sk(sk); + init_completion(&po->skb_completion); sk->sk_family = PF_PACKET; po->num = proto; po->xmit = dev_queue_xmit; @@ -4314,7 +4328,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, req3->tp_sizeof_priv || req3->tp_feature_req_word) { err = -EINVAL; - goto out; + goto out_free_pg_vec; } } break; @@ -4378,6 +4392,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, prb_shutdown_retire_blk_timer(po, rb_queue); } +out_free_pg_vec: if (pg_vec) free_pg_vec(pg_vec, order, req->tp_block_nr); out: diff --git a/net/packet/internal.h b/net/packet/internal.h index 3bb7c5fb3bff..c70a2794456f 100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h @@ -128,6 +128,7 @@ struct packet_sock { unsigned int tp_hdrlen; unsigned int tp_reserve; unsigned int tp_tstamp; + struct completion skb_completion; struct net_device __rcu *cached_dev; int (*xmit)(struct sk_buff *skb); struct packet_type prot_hook ____cacheline_aligned_in_smp; diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c index e16a3d37d2bc..732e109c3055 100644 --- a/net/sched/sch_cbs.c +++ b/net/sched/sch_cbs.c @@ -549,12 +549,17 @@ static struct notifier_block cbs_device_notifier = { static int __init cbs_module_init(void) { - int err = register_netdevice_notifier(&cbs_device_notifier); + int err; + err = register_netdevice_notifier(&cbs_device_notifier); if (err) return err; - return register_qdisc(&cbs_qdisc_ops); + err = register_qdisc(&cbs_qdisc_ops); + if (err) + unregister_netdevice_notifier(&cbs_device_notifier); + + return err; } static void __exit cbs_module_exit(void) diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index e358437ba29b..69cebb2c998b 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -118,10 +118,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, /* Initialize the bind addr area */ sctp_bind_addr_init(&ep->base.bind_addr, 0); - /* Remember who we are attached to. */ - ep->base.sk = sk; - sock_hold(ep->base.sk); - /* Create the lists of associations. */ INIT_LIST_HEAD(&ep->asocs); @@ -154,6 +150,10 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, ep->prsctp_enable = net->sctp.prsctp_enable; ep->reconf_enable = net->sctp.reconf_enable; + /* Remember who we are attached to. 
*/ + ep->base.sk = sk; + sock_hold(ep->base.sk); + return ep; nomem_shkey: diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 0c874e996f85..7621ec2f539c 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -2029,7 +2029,7 @@ static int __init smc_init(void) rc = smc_pnet_init(); if (rc) - return rc; + goto out_pernet_subsys; rc = smc_llc_init(); if (rc) { @@ -2080,6 +2080,9 @@ out_proto: proto_unregister(&smc_proto); out_pnet: smc_pnet_exit(); +out_pernet_subsys: + unregister_pernet_subsys(&smc_net_ops); + return rc; } diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 2d2850adc2a3..4ca50ddf8d16 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -652,7 +652,10 @@ create: rc = smc_lgr_create(smc, ini); if (rc) goto out; + lgr = conn->lgr; + write_lock_bh(&lgr->conns_lock); smc_lgr_register_conn(conn); /* add smc conn to lgr */ + write_unlock_bh(&lgr->conns_lock); } conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE; conn->local_tx_ctrl.len = SMC_WR_TX_SIZE; diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 027a3b07d329..0004535c0188 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -211,9 +211,14 @@ static void handle_connect_req(struct rdma_cm_id *new_cma_id, /* Save client advertised inbound read limit for use later in accept. */ newxprt->sc_ord = param->initiator_depth; - /* Set the local and remote addresses in the transport */ sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr; svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa)); + /* The remote port is arbitrary and not under the control of the + * client ULP. Set it to a fixed value so that the DRC continues + * to be effective after a reconnect. + */ + rpc_set_port((struct sockaddr *)&newxprt->sc_xprt.xpt_remote, 0); + sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr; svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa)); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index c69951ed2ebc..36652352a38c 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -950,6 +950,8 @@ static int xs_local_send_request(struct rpc_rqst *req) struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); struct xdr_buf *xdr = &req->rq_snd_buf; + rpc_fraghdr rm = xs_stream_record_marker(xdr); + unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen; int status; int sent = 0; @@ -964,9 +966,7 @@ static int xs_local_send_request(struct rpc_rqst *req) req->rq_xtime = ktime_get(); status = xs_sendpages(transport->sock, NULL, 0, xdr, - transport->xmit.offset, - xs_stream_record_marker(xdr), - &sent); + transport->xmit.offset, rm, &sent); dprintk("RPC: %s(%u) = %d\n", __func__, xdr->len - transport->xmit.offset, status); @@ -976,7 +976,7 @@ static int xs_local_send_request(struct rpc_rqst *req) if (likely(sent > 0) || status == 0) { transport->xmit.offset += sent; req->rq_bytes_sent = transport->xmit.offset; - if (likely(req->rq_bytes_sent >= req->rq_slen)) { + if (likely(req->rq_bytes_sent >= msglen)) { req->rq_xmit_bytes_sent += transport->xmit.offset; transport->xmit.offset = 0; return 0; @@ -1097,6 +1097,8 @@ static int xs_tcp_send_request(struct rpc_rqst *req) struct rpc_xprt *xprt = req->rq_xprt; struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); struct xdr_buf *xdr = &req->rq_snd_buf; + rpc_fraghdr rm = xs_stream_record_marker(xdr); + unsigned int msglen = rm ? 
req->rq_slen + sizeof(rm) : req->rq_slen; bool vm_wait = false; int status; int sent; @@ -1122,9 +1124,7 @@ static int xs_tcp_send_request(struct rpc_rqst *req) while (1) { sent = 0; status = xs_sendpages(transport->sock, NULL, 0, xdr, - transport->xmit.offset, - xs_stream_record_marker(xdr), - &sent); + transport->xmit.offset, rm, &sent); dprintk("RPC: xs_tcp_send_request(%u) = %d\n", xdr->len - transport->xmit.offset, status); @@ -1133,7 +1133,7 @@ static int xs_tcp_send_request(struct rpc_rqst *req) * reset the count of bytes sent. */ transport->xmit.offset += sent; req->rq_bytes_sent = transport->xmit.offset; - if (likely(req->rq_bytes_sent >= req->rq_slen)) { + if (likely(req->rq_bytes_sent >= msglen)) { req->rq_xmit_bytes_sent += transport->xmit.offset; transport->xmit.offset = 0; return 0; diff --git a/net/tipc/core.c b/net/tipc/core.c index ed536c05252a..c8370722f0bb 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c @@ -134,7 +134,7 @@ static int __init tipc_init(void) if (err) goto out_sysctl; - err = register_pernet_subsys(&tipc_net_ops); + err = register_pernet_device(&tipc_net_ops); if (err) goto out_pernet; @@ -142,7 +142,7 @@ static int __init tipc_init(void) if (err) goto out_socket; - err = register_pernet_subsys(&tipc_topsrv_net_ops); + err = register_pernet_device(&tipc_topsrv_net_ops); if (err) goto out_pernet_topsrv; @@ -153,11 +153,11 @@ static int __init tipc_init(void) pr_info("Started in single node mode\n"); return 0; out_bearer: - unregister_pernet_subsys(&tipc_topsrv_net_ops); + unregister_pernet_device(&tipc_topsrv_net_ops); out_pernet_topsrv: tipc_socket_stop(); out_socket: - unregister_pernet_subsys(&tipc_net_ops); + unregister_pernet_device(&tipc_net_ops); out_pernet: tipc_unregister_sysctl(); out_sysctl: @@ -172,9 +172,9 @@ out_netlink: static void __exit tipc_exit(void) { tipc_bearer_cleanup(); - unregister_pernet_subsys(&tipc_topsrv_net_ops); + unregister_pernet_device(&tipc_topsrv_net_ops); tipc_socket_stop(); - unregister_pernet_subsys(&tipc_net_ops); + unregister_pernet_device(&tipc_net_ops); tipc_netlink_stop(); tipc_netlink_compat_stop(); tipc_unregister_sysctl(); diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index c6a04c09d075..cf155061c472 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -445,7 +445,11 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd, if (!bearer) return -EMSGSIZE; - len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME); + len = TLV_GET_DATA_LEN(msg->req); + if (len <= 0) + return -EINVAL; + + len = min_t(int, len, TIPC_MAX_BEARER_NAME); if (!string_is_valid(name, len)) return -EINVAL; @@ -539,7 +543,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg, name = (char *)TLV_DATA(msg->req); - len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME); + len = TLV_GET_DATA_LEN(msg->req); + if (len <= 0) + return -EINVAL; + + len = min_t(int, len, TIPC_MAX_BEARER_NAME); if (!string_is_valid(name, len)) return -EINVAL; @@ -817,7 +825,11 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd, if (!link) return -EMSGSIZE; - len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME); + len = TLV_GET_DATA_LEN(msg->req); + if (len <= 0) + return -EINVAL; + + len = min_t(int, len, TIPC_MAX_BEARER_NAME); if (!string_is_valid(name, len)) return -EINVAL; diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index fc81ae18cc44..e2b69e805d46 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c 
@@ -279,7 +279,8 @@ static void tls_sk_proto_close(struct sock *sk, long timeout) goto skip_tx_cleanup; } - if (!tls_complete_pending_work(sk, ctx, 0, &timeo)) + if (unlikely(sk->sk_write_pending) && + !wait_on_pending_writer(sk, &timeo)) tls_handle_open_record(sk, 0); /* We need these for tls_sw_fallback handling of other packets */ diff --git a/samples/pidfd/pidfd-metadata.c b/samples/pidfd/pidfd-metadata.c index 14b454448429..c459155daf9a 100644 --- a/samples/pidfd/pidfd-metadata.c +++ b/samples/pidfd/pidfd-metadata.c @@ -83,7 +83,7 @@ static int pidfd_metadata_fd(pid_t pid, int pidfd) int main(int argc, char *argv[]) { - int pidfd = 0, ret = EXIT_FAILURE; + int pidfd = -1, ret = EXIT_FAILURE; char buf[4096] = { 0 }; pid_t pid; int procfd, statusfd; @@ -91,7 +91,11 @@ int main(int argc, char *argv[]) pid = pidfd_clone(CLONE_PIDFD, &pidfd); if (pid < 0) - exit(ret); + err(ret, "CLONE_PIDFD"); + if (pidfd == -1) { + warnx("CLONE_PIDFD is not supported by the kernel"); + goto out; + } procfd = pidfd_metadata_fd(pid, pidfd); close(pidfd); diff --git a/sound/core/seq/oss/seq_oss_ioctl.c b/sound/core/seq/oss/seq_oss_ioctl.c index 96ad01fb668c..ccf682689ec9 100644 --- a/sound/core/seq/oss/seq_oss_ioctl.c +++ b/sound/core/seq/oss/seq_oss_ioctl.c @@ -49,7 +49,7 @@ static int snd_seq_oss_oob_user(struct seq_oss_devinfo *dp, void __user *arg) if (copy_from_user(ev, arg, 8)) return -EFAULT; memset(&tmpev, 0, sizeof(tmpev)); - snd_seq_oss_fill_addr(dp, &tmpev, dp->addr.port, dp->addr.client); + snd_seq_oss_fill_addr(dp, &tmpev, dp->addr.client, dp->addr.port); tmpev.time.tick = 0; if (! snd_seq_oss_process_event(dp, (union evrec *)ev, &tmpev)) { snd_seq_oss_dispatch(dp, &tmpev, 0, 0); diff --git a/sound/core/seq/oss/seq_oss_rw.c b/sound/core/seq/oss/seq_oss_rw.c index 79ef430e56e1..537d5f423e20 100644 --- a/sound/core/seq/oss/seq_oss_rw.c +++ b/sound/core/seq/oss/seq_oss_rw.c @@ -161,7 +161,7 @@ insert_queue(struct seq_oss_devinfo *dp, union evrec *rec, struct file *opt) memset(&event, 0, sizeof(event)); /* set dummy -- to be sure */ event.type = SNDRV_SEQ_EVENT_NOTEOFF; - snd_seq_oss_fill_addr(dp, &event, dp->addr.port, dp->addr.client); + snd_seq_oss_fill_addr(dp, &event, dp->addr.client, dp->addr.port); if (snd_seq_oss_process_event(dp, rec, &event)) return 0; /* invalid event - no need to insert queue */ diff --git a/sound/firewire/amdtp-am824.c b/sound/firewire/amdtp-am824.c index cc6eb30f03a2..71168728940a 100644 --- a/sound/firewire/amdtp-am824.c +++ b/sound/firewire/amdtp-am824.c @@ -320,7 +320,7 @@ static void read_midi_messages(struct amdtp_stream *s, u8 *b; for (f = 0; f < frames; f++) { - port = (s->data_block_counter + f) % 8; + port = (8 - s->tx_first_dbc + s->data_block_counter + f) % 8; b = (u8 *)&buffer[p->midi_position]; len = b[0] - 0x80; diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c index 6907dbefd08c..3842f9d34b7c 100644 --- a/sound/hda/hdac_device.c +++ b/sound/hda/hdac_device.c @@ -400,27 +400,33 @@ static void setup_fg_nodes(struct hdac_device *codec) int snd_hdac_refresh_widgets(struct hdac_device *codec, bool sysfs) { hda_nid_t start_nid; - int nums, err; + int nums, err = 0; + /* + * Serialize against multiple threads trying to update the sysfs + * widgets array. 
+ */ + mutex_lock(&codec->widget_lock); nums = snd_hdac_get_sub_nodes(codec, codec->afg, &start_nid); if (!start_nid || nums <= 0 || nums >= 0xff) { dev_err(&codec->dev, "cannot read sub nodes for FG 0x%02x\n", codec->afg); - return -EINVAL; + err = -EINVAL; + goto unlock; } if (sysfs) { - mutex_lock(&codec->widget_lock); err = hda_widget_sysfs_reinit(codec, start_nid, nums); - mutex_unlock(&codec->widget_lock); if (err < 0) - return err; + goto unlock; } codec->num_nodes = nums; codec->start_nid = start_nid; codec->end_nid = start_nid + nums; - return 0; +unlock: + mutex_unlock(&codec->widget_lock); + return err; } EXPORT_SYMBOL_GPL(snd_hdac_refresh_widgets); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 5b3c26991f26..6f3a35949cdd 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2448,9 +2448,10 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950), - SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS), - SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS), - SND_PCI_QUIRK(0x1558, 0x65d1, "Tuxedo Book XC1509", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x96e1, "Clevo P960[ER][CDFN]-K", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1558, 0x97e1, "Clevo P970[ER][CDFN]", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), @@ -7074,6 +7075,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), + SND_PCI_QUIRK(0x17aa, 0x3111, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c index 21127e4958b2..2c03e0f6bf72 100644 --- a/sound/usb/line6/pcm.c +++ b/sound/usb/line6/pcm.c @@ -556,6 +556,11 @@ int line6_init_pcm(struct usb_line6 *line6, line6pcm->max_packet_size_out = usb_maxpacket(line6->usbdev, usb_sndisocpipe(line6->usbdev, ep_write), 1); + if (!line6pcm->max_packet_size_in || !line6pcm->max_packet_size_out) { + dev_err(line6pcm->line6->ifcdev, + "cannot get proper max packet size\n"); + return -EINVAL; + } spin_lock_init(&line6pcm->out.lock); spin_lock_init(&line6pcm->in.lock); diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 1f6011f36bb0..199fa157a411 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -741,7 +741,7 @@ static int snd_ni_control_init_val(struct usb_mixer_interface *mixer, return err; } - kctl->private_value |= (value 
<< 24); + kctl->private_value |= ((unsigned int)value << 24); return 0; } @@ -902,7 +902,7 @@ static int snd_ftu_eff_switch_init(struct usb_mixer_interface *mixer, if (err < 0) return err; - kctl->private_value |= value[0] << 24; + kctl->private_value |= (unsigned int)value[0] << 24; return 0; } diff --git a/tools/arch/x86/include/uapi/asm/perf_regs.h b/tools/arch/x86/include/uapi/asm/perf_regs.h index ac67bbea10ca..7c9d2bb3833b 100644 --- a/tools/arch/x86/include/uapi/asm/perf_regs.h +++ b/tools/arch/x86/include/uapi/asm/perf_regs.h @@ -52,4 +52,7 @@ enum perf_event_x86_regs { /* These include both GPRs and XMMX registers */ PERF_REG_X86_XMM_MAX = PERF_REG_X86_XMM15 + 2, }; + +#define PERF_REG_EXTENDED_MASK (~((1ULL << PERF_REG_X86_XMM0) - 1)) + #endif /* _ASM_X86_PERF_REGS_H */ diff --git a/tools/perf/arch/x86/include/perf_regs.h b/tools/perf/arch/x86/include/perf_regs.h index b7cd91a9014f..b7321337d100 100644 --- a/tools/perf/arch/x86/include/perf_regs.h +++ b/tools/perf/arch/x86/include/perf_regs.h @@ -9,7 +9,6 @@ void perf_regs_load(u64 *regs); #define PERF_REGS_MAX PERF_REG_X86_XMM_MAX -#define PERF_XMM_REGS_MASK (~((1ULL << PERF_REG_X86_XMM0) - 1)) #ifndef HAVE_ARCH_X86_64_SUPPORT #define PERF_REGS_MASK ((1ULL << PERF_REG_X86_32_MAX) - 1) #define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_32 diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c index 7886ca5263e3..3666c0076df9 100644 --- a/tools/perf/arch/x86/util/perf_regs.c +++ b/tools/perf/arch/x86/util/perf_regs.c @@ -277,7 +277,7 @@ uint64_t arch__intr_reg_mask(void) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES, .sample_type = PERF_SAMPLE_REGS_INTR, - .sample_regs_intr = PERF_XMM_REGS_MASK, + .sample_regs_intr = PERF_REG_EXTENDED_MASK, .precise_ip = 1, .disabled = 1, .exclude_kernel = 1, @@ -293,7 +293,7 @@ uint64_t arch__intr_reg_mask(void) fd = sys_perf_event_open(&attr, 0, -1, -1, 0); if (fd != -1) { close(fd); - return (PERF_XMM_REGS_MASK | PERF_REGS_MASK); + return (PERF_REG_EXTENDED_MASK | PERF_REGS_MASK); } return PERF_REGS_MASK; diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c index 698c08f851b8..8995092d541e 100644 --- a/tools/testing/radix-tree/idr-test.c +++ b/tools/testing/radix-tree/idr-test.c @@ -279,6 +279,51 @@ static void idr_align_test(struct idr *idr) } } +DEFINE_IDR(find_idr); + +static void *idr_throbber(void *arg) +{ + time_t start = time(NULL); + int id = *(int *)arg; + + rcu_register_thread(); + do { + idr_alloc(&find_idr, xa_mk_value(id), id, id + 1, GFP_KERNEL); + idr_remove(&find_idr, id); + } while (time(NULL) < start + 10); + rcu_unregister_thread(); + + return NULL; +} + +void idr_find_test_1(int anchor_id, int throbber_id) +{ + pthread_t throbber; + time_t start = time(NULL); + + pthread_create(&throbber, NULL, idr_throbber, &throbber_id); + + BUG_ON(idr_alloc(&find_idr, xa_mk_value(anchor_id), anchor_id, + anchor_id + 1, GFP_KERNEL) != anchor_id); + + do { + int id = 0; + void *entry = idr_get_next(&find_idr, &id); + BUG_ON(entry != xa_mk_value(id)); + } while (time(NULL) < start + 11); + + pthread_join(throbber, NULL); + + idr_remove(&find_idr, anchor_id); + BUG_ON(!idr_is_empty(&find_idr)); +} + +void idr_find_test(void) +{ + idr_find_test_1(100000, 0); + idr_find_test_1(0, 100000); +} + void idr_checks(void) { unsigned long i; @@ -360,6 +405,7 @@ void idr_checks(void) idr_u32_test(1); idr_u32_test(0); idr_align_test(&idr); + idr_find_test(); } #define module_init(x) diff --git 
a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c index b38260e29775..241919ef1eac 100644 --- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c +++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c @@ -146,6 +146,7 @@ int main(int argc, char *argv[]) kvm_vm_restart(vm, O_RDWR); vm_vcpu_add(vm, VCPU_ID, 0, 0); vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); + vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); vcpu_load_state(vm, VCPU_ID, state); run = vcpu_state(vm, VCPU_ID); free(state); diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore index ba919308fe30..d503b8764a8e 100644 --- a/tools/testing/selftests/powerpc/mm/.gitignore +++ b/tools/testing/selftests/powerpc/mm/.gitignore @@ -3,4 +3,5 @@ subpage_prot tempfile prot_sao segv_errors -wild_bctr
\ No newline at end of file
+wild_bctr
+large_vm_fork_separation
\ No newline at end of file diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile index 43d68420e363..f1fbc15800c4 100644 --- a/tools/testing/selftests/powerpc/mm/Makefile +++ b/tools/testing/selftests/powerpc/mm/Makefile @@ -2,7 +2,8 @@ noarg: $(MAKE) -C ../ -TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr +TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \ + large_vm_fork_separation TEST_GEN_FILES := tempfile top_srcdir = ../../../../.. @@ -13,6 +14,7 @@ $(TEST_GEN_PROGS): ../harness.c $(OUTPUT)/prot_sao: ../utils.c $(OUTPUT)/wild_bctr: CFLAGS += -m64 +$(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64 $(OUTPUT)/tempfile: dd if=/dev/zero of=$@ bs=64k count=1 diff --git a/tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c b/tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c new file mode 100644 index 000000000000..2363a7f3ab0d --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// Copyright 2019, Michael Ellerman, IBM Corp. +// +// Test that allocating memory beyond the memory limit and then forking is +// handled correctly, ie. the child is able to access the mappings beyond the +// memory limit and the child's writes are not visible to the parent. + +#include <stdio.h> +#include <stdlib.h> +#include <sys/mman.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <unistd.h> + +#include "utils.h" + + +#ifndef MAP_FIXED_NOREPLACE +#define MAP_FIXED_NOREPLACE MAP_FIXED // "Should be safe" above 512TB +#endif + + +static int test(void) +{ + int p2c[2], c2p[2], rc, status, c, *p; + unsigned long page_size; + pid_t pid; + + page_size = sysconf(_SC_PAGESIZE); + SKIP_IF(page_size != 65536); + + // Create a mapping at 512TB to allocate an extended_id + p = mmap((void *)(512ul << 40), page_size, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0); + if (p == MAP_FAILED) { + perror("mmap"); + printf("Error: couldn't mmap(), confirm kernel has 4TB support?\n"); + return 1; + } + + printf("parent writing %p = 1\n", p); + *p = 1; + + FAIL_IF(pipe(p2c) == -1 || pipe(c2p) == -1); + + pid = fork(); + if (pid == 0) { + FAIL_IF(read(p2c[0], &c, 1) != 1); + + pid = getpid(); + printf("child writing %p = %d\n", p, pid); + *p = pid; + + FAIL_IF(write(c2p[1], &c, 1) != 1); + FAIL_IF(read(p2c[0], &c, 1) != 1); + exit(0); + } + + c = 0; + FAIL_IF(write(p2c[1], &c, 1) != 1); + FAIL_IF(read(c2p[0], &c, 1) != 1); + + // Prevent compiler optimisation + barrier(); + + rc = 0; + printf("parent reading %p = %d\n", p, *p); + if (*p != 1) { + printf("Error: BUG! parent saw child's write! *p = %d\n", *p); + rc = 1; + } + + FAIL_IF(write(p2c[1], &c, 1) != 1); + FAIL_IF(waitpid(pid, &status, 0) == -1); + FAIL_IF(!WIFEXITED(status) || WEXITSTATUS(status)); + + if (rc == 0) + printf("success: test completed OK\n"); + + return rc; +} + +int main(void) +{ + return test_harness(test, "large_vm_fork_separation"); +} |
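A minimal userspace sketch (not part of the selftest; all names and the chosen mapping length are illustrative) of the behaviour that the MAP_FIXED_NOREPLACE fallback in the new test glosses over: with MAP_FIXED_NOREPLACE an overlapping fixed mapping fails with EEXIST instead of silently replacing the existing one, which is why falling back to plain MAP_FIXED is only treated as safe at a 512TB address that nothing else is expected to occupy.

/* Illustrative only -- shows MAP_FIXED_NOREPLACE refusing to clobber a mapping. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MAP_FIXED_NOREPLACE
#define MAP_FIXED_NOREPLACE 0x100000	/* uapi value, available since Linux 4.17 */
#endif

int main(void)
{
	size_t len = 64 * 1024;
	void *a, *b;

	/* Let the kernel pick an address, so the range is guaranteed to be in use. */
	a = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (a == MAP_FAILED)
		return 1;

	/* A fixed mapping over the same range must now fail, not replace it. */
	b = mmap(a, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
	printf("overlapping mmap: %s (EEXIST expected)\n",
	       b == MAP_FAILED ? strerror(errno) : "succeeded (flag not enforced)");

	munmap(a, len);
	return 0;
}

On kernels that predate MAP_FIXED_NOREPLACE the unknown flag is ignored and the second mmap() simply lands elsewhere, which is the situation the selftest's fallback macro accepts at 512TB.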