298 files changed, 3282 insertions, 1637 deletions
diff --git a/Documentation/devicetree/bindings/mtd/tango-nand.txt b/Documentation/devicetree/bindings/mtd/tango-nand.txt index ad5a02f2ac8c..cd1bf2ac9055 100644 --- a/Documentation/devicetree/bindings/mtd/tango-nand.txt +++ b/Documentation/devicetree/bindings/mtd/tango-nand.txt @@ -5,7 +5,7 @@ Required properties: - compatible: "sigma,smp8758-nand" - reg: address/size of nfc_reg, nfc_mem, and pbus_reg - dmas: reference to the DMA channel used by the controller -- dma-names: "nfc_sbox" +- dma-names: "rxtx" - clocks: reference to the system clock - #address-cells: <1> - #size-cells: <0> @@ -17,9 +17,9 @@ Example: nandc: nand-controller@2c000 { compatible = "sigma,smp8758-nand"; - reg = <0x2c000 0x30 0x2d000 0x800 0x20000 0x1000>; + reg = <0x2c000 0x30>, <0x2d000 0x800>, <0x20000 0x1000>; dmas = <&dma0 3>; - dma-names = "nfc_sbox"; + dma-names = "rxtx"; clocks = <&clkgen SYS_CLK>; #address-cells = <1>; #size-cells = <0>; diff --git a/Documentation/devicetree/bindings/net/ti,dp83867.txt b/Documentation/devicetree/bindings/net/ti,dp83867.txt index 85bf945b898f..afe9630a5e7d 100644 --- a/Documentation/devicetree/bindings/net/ti,dp83867.txt +++ b/Documentation/devicetree/bindings/net/ti,dp83867.txt @@ -3,9 +3,11 @@ Required properties: - reg - The ID number for the phy, usually a small integer - ti,rx-internal-delay - RGMII Receive Clock Delay - see dt-bindings/net/ti-dp83867.h - for applicable values + for applicable values. Required only if interface type is + PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_RXID - ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h - for applicable values + for applicable values. Required only if interface type is + PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_TXID - ti,fifo-depth - Transmitt FIFO depth- see dt-bindings/net/ti-dp83867.h for applicable values diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt index da6614c63796..dc975064fa27 100644 --- a/Documentation/devicetree/bindings/spi/sh-msiof.txt +++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt @@ -1,17 +1,23 @@ Renesas MSIOF spi controller Required properties: -- compatible : "renesas,msiof-<soctype>" for SoCs, - "renesas,sh-msiof" for SuperH, or - "renesas,sh-mobile-msiof" for SH Mobile series. - Examples with soctypes are: - "renesas,msiof-r8a7790" (R-Car H2) +- compatible : "renesas,msiof-r8a7790" (R-Car H2) "renesas,msiof-r8a7791" (R-Car M2-W) "renesas,msiof-r8a7792" (R-Car V2H) "renesas,msiof-r8a7793" (R-Car M2-N) "renesas,msiof-r8a7794" (R-Car E2) "renesas,msiof-r8a7796" (R-Car M3-W) "renesas,msiof-sh73a0" (SH-Mobile AG5) + "renesas,sh-mobile-msiof" (generic SH-Mobile compatibile device) + "renesas,rcar-gen2-msiof" (generic R-Car Gen2 compatible device) + "renesas,rcar-gen3-msiof" (generic R-Car Gen3 compatible device) + "renesas,sh-msiof" (deprecated) + + When compatible with the generic version, nodes + must list the SoC-specific version corresponding + to the platform first followed by the generic + version. + - reg : A list of offsets and lengths of the register sets for the device. If only one register set is present, it is to be used @@ -61,7 +67,8 @@ Documentation/devicetree/bindings/pinctrl/renesas,*. 
Example: msiof0: spi@e6e20000 { - compatible = "renesas,msiof-r8a7791"; + compatible = "renesas,msiof-r8a7791", + "renesas,rcar-gen2-msiof"; reg = <0 0xe6e20000 0 0x0064>; interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>; clocks = <&mstp0_clks R8A7791_CLK_MSIOF0>; diff --git a/Documentation/devicetree/bindings/usb/mt8173-mtu3.txt b/Documentation/devicetree/bindings/usb/mt8173-mtu3.txt index 718386a63b5c..1d7c3bc677f7 100644 --- a/Documentation/devicetree/bindings/usb/mt8173-mtu3.txt +++ b/Documentation/devicetree/bindings/usb/mt8173-mtu3.txt @@ -10,7 +10,7 @@ Required properties: - vusb33-supply : regulator of USB avdd3.3v - clocks : a list of phandle + clock-specifier pairs, one for each entry in clock-names - - clock-names : must contain "sys_ck" for clock of controller; + - clock-names : must contain "sys_ck" and "ref_ck" for clock of controller; "wakeup_deb_p0" and "wakeup_deb_p1" are optional, they are depends on "mediatek,enable-wakeup" - phys : a list of phandle + phy specifier pairs @@ -56,10 +56,10 @@ ssusb: usb@11271000 { phys = <&phy_port0 PHY_TYPE_USB3>, <&phy_port1 PHY_TYPE_USB2>; power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>; - clocks = <&topckgen CLK_TOP_USB30_SEL>, + clocks = <&topckgen CLK_TOP_USB30_SEL>, <&clk26m>, <&pericfg CLK_PERI_USB0>, <&pericfg CLK_PERI_USB1>; - clock-names = "sys_ck", + clock-names = "sys_ck", "ref_ck", "wakeup_deb_p0", "wakeup_deb_p1"; vusb33-supply = <&mt6397_vusb_reg>; @@ -79,8 +79,8 @@ ssusb: usb@11271000 { reg-names = "mac"; interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>; power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>; - clocks = <&topckgen CLK_TOP_USB30_SEL>; - clock-names = "sys_ck"; + clocks = <&topckgen CLK_TOP_USB30_SEL>, <&clk26m>; + clock-names = "sys_ck", "ref_ck"; vusb33-supply = <&mt6397_vusb_reg>; status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/usb/mt8173-xhci.txt b/Documentation/devicetree/bindings/usb/mt8173-xhci.txt index 7ea39710621e..0acfc8acbea1 100644 --- a/Documentation/devicetree/bindings/usb/mt8173-xhci.txt +++ b/Documentation/devicetree/bindings/usb/mt8173-xhci.txt @@ -23,6 +23,7 @@ Required properties: entry in clock-names - clock-names : must contain "sys_ck": for clock of xHCI MAC + "ref_ck": for reference clock of xHCI MAC "wakeup_deb_p0": for USB wakeup debounce clock of port0 "wakeup_deb_p1": for USB wakeup debounce clock of port1 @@ -47,10 +48,10 @@ usb30: usb@11270000 { reg-names = "mac", "ippc"; interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>; power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>; - clocks = <&topckgen CLK_TOP_USB30_SEL>, + clocks = <&topckgen CLK_TOP_USB30_SEL>, <&clk26m>, <&pericfg CLK_PERI_USB0>, <&pericfg CLK_PERI_USB1>; - clock-names = "sys_ck", + clock-names = "sys_ck", "ref_ck", "wakeup_deb_p0", "wakeup_deb_p1"; phys = <&phy_port0 PHY_TYPE_USB3>, @@ -82,6 +83,7 @@ Required properties: entry in clock-names - clock-names : must be "sys_ck": for clock of xHCI MAC + "ref_ck": for reference clock of xHCI MAC Optional properties: - vbus-supply : reference to the VBUS regulator; @@ -94,8 +96,8 @@ usb30: usb@11270000 { reg-names = "mac"; interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>; power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>; - clocks = <&topckgen CLK_TOP_USB30_SEL>; - clock-names = "sys_ck"; + clocks = <&topckgen CLK_TOP_USB30_SEL>, <&clk26m>; + clock-names = "sys_ck", "ref_ck"; vusb33-supply = <&mt6397_vusb_reg>; usb3-lpm-capable; }; diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt index 
0b7d8576001c..2d80b60eeabe 100644 --- a/Documentation/devicetree/bindings/usb/usb-xhci.txt +++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt @@ -27,6 +27,7 @@ Required properties: Optional properties: - clocks: reference to a clock - usb3-lpm-capable: determines if platform is USB3 LPM capable + - quirk-broken-port-ped: set if the controller has broken port disable mechanism Example: usb@f0931000 { diff --git a/MAINTAINERS b/MAINTAINERS index c36976d3bd1a..26edd832c64e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -976,6 +976,7 @@ M: Russell King <linux@armlinux.org.uk> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) W: http://www.armlinux.org.uk/ S: Maintained +T: git git://git.armlinux.org.uk/~rmk/linux-arm.git F: arch/arm/ ARM SUB-ARCHITECTURES @@ -1153,6 +1154,7 @@ ARM/CLKDEV SUPPORT M: Russell King <linux@armlinux.org.uk> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained +T: git git://git.armlinux.org.uk/~rmk/linux-arm.git clkdev F: arch/arm/include/asm/clkdev.h F: drivers/clk/clkdev.c @@ -1688,6 +1690,7 @@ M: Krzysztof Kozlowski <krzk@kernel.org> R: Javier Martinez Canillas <javier@osg.samsung.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) +Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/ S: Maintained F: arch/arm/boot/dts/s3c* F: arch/arm/boot/dts/s5p* @@ -7697,8 +7700,10 @@ F: drivers/net/dsa/mv88e6xxx/ F: Documentation/devicetree/bindings/net/dsa/marvell.txt MARVELL ARMADA DRM SUPPORT -M: Russell King <rmk+kernel@armlinux.org.uk> +M: Russell King <linux@armlinux.org.uk> S: Maintained +T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-armada-devel +T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-armada-fixes F: drivers/gpu/drm/armada/ F: include/uapi/drm/armada_drm.h F: Documentation/devicetree/bindings/display/armada/ @@ -8903,8 +8908,10 @@ S: Supported F: drivers/nfc/nxp-nci NXP TDA998X DRM DRIVER -M: Russell King <rmk+kernel@armlinux.org.uk> +M: Russell King <linux@armlinux.org.uk> S: Supported +T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel +T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes F: drivers/gpu/drm/i2c/tda998x_drv.c F: include/drm/i2c/tda998x.h @@ -1,8 +1,8 @@ VERSION = 4 PATCHLEVEL = 10 SUBLEVEL = 0 -EXTRAVERSION = -rc4 -NAME = Roaring Lionus +EXTRAVERSION = -rc5 +NAME = Anniversary Edition # *DOCUMENTATION* # To see a list of typical targets execute "make help" diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index c75d29077e4a..283099c9560a 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -29,7 +29,7 @@ config ARC select HAVE_KPROBES select HAVE_KRETPROBES select HAVE_MEMBLOCK - select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND + select HAVE_MOD_ARCH_SPECIFIC select HAVE_OPROFILE select HAVE_PERF_EVENTS select HANDLE_DOMAIN_IRQ diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h index b3410ff6a62d..5008021fba98 100644 --- a/arch/arc/include/asm/cache.h +++ b/arch/arc/include/asm/cache.h @@ -67,7 +67,7 @@ extern unsigned long perip_base, perip_end; #define ARC_REG_IC_PTAG_HI 0x1F /* Bit val in IC_CTRL */ -#define IC_CTRL_CACHE_DISABLE 0x1 +#define IC_CTRL_DIS 0x1 /* Data cache related Auxiliary registers */ #define ARC_REG_DC_BCR 0x72 /* Build Config reg */ @@ -80,8 +80,9 @@ extern unsigned long perip_base, perip_end; #define ARC_REG_DC_PTAG_HI 0x5F /* Bit val in DC_CTRL */ -#define DC_CTRL_INV_MODE_FLUSH 
0x40 -#define DC_CTRL_FLUSH_STATUS 0x100 +#define DC_CTRL_DIS 0x001 +#define DC_CTRL_INV_MODE_FLUSH 0x040 +#define DC_CTRL_FLUSH_STATUS 0x100 /*System-level cache (L2 cache) related Auxiliary registers */ #define ARC_REG_SLC_CFG 0x901 @@ -92,8 +93,8 @@ extern unsigned long perip_base, perip_end; #define ARC_REG_SLC_RGN_END 0x916 /* Bit val in SLC_CONTROL */ +#define SLC_CTRL_DIS 0x001 #define SLC_CTRL_IM 0x040 -#define SLC_CTRL_DISABLE 0x001 #define SLC_CTRL_BUSY 0x100 #define SLC_CTRL_RGN_OP_INV 0x200 diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h index b5ff87e6f4b7..aee1a77934cf 100644 --- a/arch/arc/include/asm/entry-arcv2.h +++ b/arch/arc/include/asm/entry-arcv2.h @@ -16,6 +16,7 @@ ; ; Now manually save: r12, sp, fp, gp, r25 + PUSH r30 PUSH r12 ; Saving pt_regs->sp correctly requires some extra work due to the way @@ -72,6 +73,7 @@ POPAX AUX_USER_SP 1: POP r12 + POP r30 .endm diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h index 6e91d8b339c3..567590ea8f6c 100644 --- a/arch/arc/include/asm/module.h +++ b/arch/arc/include/asm/module.h @@ -14,13 +14,13 @@ #include <asm-generic/module.h> -#ifdef CONFIG_ARC_DW2_UNWIND struct mod_arch_specific { +#ifdef CONFIG_ARC_DW2_UNWIND void *unw_info; int unw_sec_idx; +#endif const char *secstr; }; -#endif #define MODULE_PROC_FAMILY "ARC700" diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h index 69095da1fcfd..47111d565a95 100644 --- a/arch/arc/include/asm/ptrace.h +++ b/arch/arc/include/asm/ptrace.h @@ -84,7 +84,7 @@ struct pt_regs { unsigned long fp; unsigned long sp; /* user/kernel sp depending on where we came from */ - unsigned long r12; + unsigned long r12, r30; /*------- Below list auto saved by h/w -----------*/ unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11; diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h index cb954cdab070..c568a9df82b1 100644 --- a/arch/arc/include/asm/setup.h +++ b/arch/arc/include/asm/setup.h @@ -31,6 +31,7 @@ extern int root_mountflags, end_mem; void setup_processor(void); void __init setup_arch_memory(void); +long __init arc_get_mem_sz(void); /* Helpers used in arc_*_mumbojumbo routines */ #define IS_AVAIL1(v, s) ((v) ? 
s : "") diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c index 994dca7014db..ecef0fb0b66c 100644 --- a/arch/arc/kernel/intc-arcv2.c +++ b/arch/arc/kernel/intc-arcv2.c @@ -77,20 +77,20 @@ void arc_init_IRQ(void) static void arcv2_irq_mask(struct irq_data *data) { - write_aux_reg(AUX_IRQ_SELECT, data->irq); + write_aux_reg(AUX_IRQ_SELECT, data->hwirq); write_aux_reg(AUX_IRQ_ENABLE, 0); } static void arcv2_irq_unmask(struct irq_data *data) { - write_aux_reg(AUX_IRQ_SELECT, data->irq); + write_aux_reg(AUX_IRQ_SELECT, data->hwirq); write_aux_reg(AUX_IRQ_ENABLE, 1); } void arcv2_irq_enable(struct irq_data *data) { /* set default priority */ - write_aux_reg(AUX_IRQ_SELECT, data->irq); + write_aux_reg(AUX_IRQ_SELECT, data->hwirq); write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO); /* diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c index ce9deb953ca9..8c1fd5c00782 100644 --- a/arch/arc/kernel/intc-compact.c +++ b/arch/arc/kernel/intc-compact.c @@ -57,7 +57,7 @@ static void arc_irq_mask(struct irq_data *data) unsigned int ienb; ienb = read_aux_reg(AUX_IENABLE); - ienb &= ~(1 << data->irq); + ienb &= ~(1 << data->hwirq); write_aux_reg(AUX_IENABLE, ienb); } @@ -66,7 +66,7 @@ static void arc_irq_unmask(struct irq_data *data) unsigned int ienb; ienb = read_aux_reg(AUX_IENABLE); - ienb |= (1 << data->irq); + ienb |= (1 << data->hwirq); write_aux_reg(AUX_IENABLE, ienb); } diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c index 560c4afc2af4..9274f8ade8c7 100644 --- a/arch/arc/kernel/mcip.c +++ b/arch/arc/kernel/mcip.c @@ -10,6 +10,7 @@ #include <linux/smp.h> #include <linux/irq.h> +#include <linux/irqchip/chained_irq.h> #include <linux/spinlock.h> #include <soc/arc/mcip.h> #include <asm/irqflags-arcv2.h> @@ -221,10 +222,13 @@ static irq_hw_number_t idu_first_hwirq; static void idu_cascade_isr(struct irq_desc *desc) { struct irq_domain *idu_domain = irq_desc_get_handler_data(desc); + struct irq_chip *core_chip = irq_desc_get_chip(desc); irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc)); irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq; + chained_irq_enter(core_chip, desc); generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq)); + chained_irq_exit(core_chip, desc); } static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq) diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c index 42e964db2967..3d99a6091332 100644 --- a/arch/arc/kernel/module.c +++ b/arch/arc/kernel/module.c @@ -32,8 +32,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, #ifdef CONFIG_ARC_DW2_UNWIND mod->arch.unw_sec_idx = 0; mod->arch.unw_info = NULL; - mod->arch.secstr = secstr; #endif + mod->arch.secstr = secstr; return 0; } @@ -113,8 +113,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, } +#ifdef CONFIG_ARC_DW2_UNWIND if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0) module->arch.unw_sec_idx = tgtsec; +#endif return 0; diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index ec86ac0e3321..d408fa21a07c 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c @@ -23,7 +23,7 @@ static int l2_line_sz; static int ioc_exists; -int slc_enable = 1, ioc_enable = 0; +int slc_enable = 1, ioc_enable = 1; unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */ unsigned long perip_end = 0xFFFFFFFF; /* legacy value */ @@ -271,7 +271,11 @@ void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr, /* * For ARC700 MMUv3 I-cache and 
D-cache flushes - * Also reused for HS38 aliasing I-cache configuration + * - ARC700 programming model requires paddr and vaddr be passed in seperate + * AUX registers (*_IV*L and *_PTAG respectively) irrespective of whether the + * caches actually alias or not. + * - For HS38, only the aliasing I-cache configuration uses the PTAG reg + * (non aliasing I-cache version doesn't; while D-cache can't possibly alias) */ static inline void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr, @@ -458,6 +462,21 @@ static inline void __dc_entire_op(const int op) __after_dc_op(op); } +static inline void __dc_disable(void) +{ + const int r = ARC_REG_DC_CTRL; + + __dc_entire_op(OP_FLUSH_N_INV); + write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS); +} + +static void __dc_enable(void) +{ + const int r = ARC_REG_DC_CTRL; + + write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS); +} + /* For kernel mappings cache operation: index is same as paddr */ #define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op) @@ -483,6 +502,8 @@ static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr, #else #define __dc_entire_op(op) +#define __dc_disable() +#define __dc_enable() #define __dc_line_op(paddr, vaddr, sz, op) #define __dc_line_op_k(paddr, sz, op) @@ -597,6 +618,40 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op) #endif } +noinline static void slc_entire_op(const int op) +{ + unsigned int ctrl, r = ARC_REG_SLC_CTRL; + + ctrl = read_aux_reg(r); + + if (!(op & OP_FLUSH)) /* i.e. OP_INV */ + ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */ + else + ctrl |= SLC_CTRL_IM; + + write_aux_reg(r, ctrl); + + write_aux_reg(ARC_REG_SLC_INVALIDATE, 1); + + /* Important to wait for flush to complete */ + while (read_aux_reg(r) & SLC_CTRL_BUSY); +} + +static inline void arc_slc_disable(void) +{ + const int r = ARC_REG_SLC_CTRL; + + slc_entire_op(OP_FLUSH_N_INV); + write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS); +} + +static inline void arc_slc_enable(void) +{ + const int r = ARC_REG_SLC_CTRL; + + write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS); +} + /*********************************************************** * Exported APIs */ @@ -923,21 +978,54 @@ SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags) return 0; } -void arc_cache_init(void) +/* + * IO-Coherency (IOC) setup rules: + * + * 1. Needs to be at system level, so only once by Master core + * Non-Masters need not be accessing caches at that time + * - They are either HALT_ON_RESET and kick started much later or + * - if run on reset, need to ensure that arc_platform_smp_wait_to_boot() + * doesn't perturb caches or coherency unit + * + * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC, + * otherwise any straggler data might behave strangely post IOC enabling + * + * 3. 
All Caches need to be disabled when setting up IOC to elide any in-flight + * Coherency transactions + */ +noinline void __init arc_ioc_setup(void) { - unsigned int __maybe_unused cpu = smp_processor_id(); - char str[256]; + unsigned int ap_sz; - printk(arc_cache_mumbojumbo(0, str, sizeof(str))); + /* Flush + invalidate + disable L1 dcache */ + __dc_disable(); + + /* Flush + invalidate SLC */ + if (read_aux_reg(ARC_REG_SLC_BCR)) + slc_entire_op(OP_FLUSH_N_INV); + + /* IOC Aperture start: TDB: handle non default CONFIG_LINUX_LINK_BASE */ + write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000); /* - * Only master CPU needs to execute rest of function: - * - Assume SMP so all cores will have same cache config so - * any geomtry checks will be same for all - * - IOC setup / dma callbacks only need to be setup once + * IOC Aperture size: + * decoded as 2 ^ (SIZE + 2) KB: so setting 0x11 implies 512M + * TBD: fix for PGU + 1GB of low mem + * TBD: fix for PAE */ - if (cpu) - return; + ap_sz = order_base_2(arc_get_mem_sz()/1024) - 2; + write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, ap_sz); + + write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1); + write_aux_reg(ARC_REG_IO_COH_ENABLE, 1); + + /* Re-enable L1 dcache */ + __dc_enable(); +} + +void __init arc_cache_init_master(void) +{ + unsigned int __maybe_unused cpu = smp_processor_id(); if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) { struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache; @@ -985,30 +1073,14 @@ void arc_cache_init(void) } } - if (is_isa_arcv2() && l2_line_sz && !slc_enable) { - - /* IM set : flush before invalidate */ - write_aux_reg(ARC_REG_SLC_CTRL, - read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM); + /* Note that SLC disable not formally supported till HS 3.0 */ + if (is_isa_arcv2() && l2_line_sz && !slc_enable) + arc_slc_disable(); - write_aux_reg(ARC_REG_SLC_INVALIDATE, 1); - - /* Important to wait for flush to complete */ - while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY); - write_aux_reg(ARC_REG_SLC_CTRL, - read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE); - } + if (is_isa_arcv2() && ioc_enable) + arc_ioc_setup(); if (is_isa_arcv2() && ioc_enable) { - /* IO coherency base - 0x8z */ - write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000); - /* IO coherency aperture size - 512Mb: 0x8z-0xAz */ - write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11); - /* Enable partial writes */ - write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1); - /* Enable IO coherency */ - write_aux_reg(ARC_REG_IO_COH_ENABLE, 1); - __dma_cache_wback_inv = __dma_cache_wback_inv_ioc; __dma_cache_inv = __dma_cache_inv_ioc; __dma_cache_wback = __dma_cache_wback_ioc; @@ -1022,3 +1094,20 @@ void arc_cache_init(void) __dma_cache_wback = __dma_cache_wback_l1; } } + +void __ref arc_cache_init(void) +{ + unsigned int __maybe_unused cpu = smp_processor_id(); + char str[256]; + + printk(arc_cache_mumbojumbo(0, str, sizeof(str))); + + /* + * Only master CPU needs to execute rest of function: + * - Assume SMP so all cores will have same cache config so + * any geomtry checks will be same for all + * - IOC setup / dma callbacks only need to be setup once + */ + if (!cpu) + arc_cache_init_master(); +} diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index 399e2f223d25..8c9415ed6280 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -40,6 +40,11 @@ struct pglist_data node_data[MAX_NUMNODES] __read_mostly; EXPORT_SYMBOL(node_data); #endif +long __init arc_get_mem_sz(void) +{ + return low_mem_sz; +} + /* User can over-ride above with "mem=nnn[KkMm]" in cmdline */ static int __init setup_mem_sz(char *str) { 
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index 7327250f0bb6..f10fe8526239 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -846,6 +846,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \ sun8i-a83t-allwinner-h8homlet-v2.dtb \ sun8i-a83t-cubietruck-plus.dtb \ sun8i-h3-bananapi-m2-plus.dtb \ + sun8i-h3-nanopi-m1.dtb \ sun8i-h3-nanopi-neo.dtb \ sun8i-h3-orangepi-2.dtb \ sun8i-h3-orangepi-lite.dtb \ diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts index 1463df3b5b19..8ed46f9d79b7 100644 --- a/arch/arm/boot/dts/am335x-icev2.dts +++ b/arch/arm/boot/dts/am335x-icev2.dts @@ -170,7 +170,6 @@ AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* (G16) mmc0_dat0.mmc0_dat0 */ AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* (G17) mmc0_clk.mmc0_clk */ AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* (G18) mmc0_cmd.mmc0_cmd */ - AM33XX_IOPAD(0x960, PIN_INPUT_PULLUP | MUX_MODE5) /* (C15) spi0_cs1.mmc0_sdcd */ >; }; diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi index b6142bda661e..15f07f9af3b3 100644 --- a/arch/arm/boot/dts/bcm-nsp.dtsi +++ b/arch/arm/boot/dts/bcm-nsp.dtsi @@ -160,7 +160,7 @@ axi { compatible = "simple-bus"; - ranges = <0x00000000 0x18000000 0x0011c40a>; + ranges = <0x00000000 0x18000000 0x0011c40c>; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts index 41de15fe15a2..78492a0bbbab 100644 --- a/arch/arm/boot/dts/da850-evm.dts +++ b/arch/arm/boot/dts/da850-evm.dts @@ -99,6 +99,7 @@ #size-cells = <1>; compatible = "m25p64"; spi-max-frequency = <30000000>; + m25p,fast-read; reg = <0>; partition@0 { label = "U-Boot-SPL"; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 1faf24acd521..5ba161679e01 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -1378,6 +1378,7 @@ phy-names = "sata-phy"; clocks = <&sata_ref_clk>; ti,hwmods = "sata"; + ports-implemented = <0x1>; }; rtc: rtc@48838000 { diff --git a/arch/arm/boot/dts/dra72-evm-revc.dts b/arch/arm/boot/dts/dra72-evm-revc.dts index c3d939c9666c..3f808a47df03 100644 --- a/arch/arm/boot/dts/dra72-evm-revc.dts +++ b/arch/arm/boot/dts/dra72-evm-revc.dts @@ -75,6 +75,6 @@ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>; ti,tx-internal-delay = <DP83867_RGMIIDCTL_250_PS>; ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>; - ti,min-output-imepdance; + ti,min-output-impedance; }; }; diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi index 34887a10c5f1..47ba97229a48 100644 --- a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi +++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi @@ -319,8 +319,6 @@ compatible = "fsl,imx6q-nitrogen6_max-sgtl5000", "fsl,imx-audio-sgtl5000"; model = "imx6q-nitrogen6_max-sgtl5000"; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_sgtl5000>; ssi-controller = <&ssi1>; audio-codec = <&codec>; audio-routing = @@ -402,6 +400,8 @@ codec: sgtl5000@0a { compatible = "fsl,sgtl5000"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_sgtl5000>; reg = <0x0a>; clocks = <&clks IMX6QDL_CLK_CKO>; VDDA-supply = <®_2p5v>; diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi index d80f21abea62..31d4cc62dbc7 100644 --- a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi +++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi @@ -250,8 +250,6 @@ compatible = "fsl,imx6q-nitrogen6_som2-sgtl5000", 
"fsl,imx-audio-sgtl5000"; model = "imx6q-nitrogen6_som2-sgtl5000"; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_sgtl5000>; ssi-controller = <&ssi1>; audio-codec = <&codec>; audio-routing = @@ -320,6 +318,8 @@ codec: sgtl5000@0a { compatible = "fsl,sgtl5000"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_sgtl5000>; reg = <0x0a>; clocks = <&clks IMX6QDL_CLK_CKO>; VDDA-supply = <®_2p5v>; diff --git a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts index da8598402ab8..38faa90007d7 100644 --- a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts +++ b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts @@ -158,7 +158,7 @@ &mmc1 { interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>; pinctrl-names = "default"; - pinctrl-0 = <&mmc1_pins &mmc1_cd>; + pinctrl-0 = <&mmc1_pins>; wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */ cd-gpios = <&gpio4 14 IRQ_TYPE_LEVEL_LOW>; /* gpio_110 */ vmmc-supply = <&vmmc1>; @@ -193,7 +193,8 @@ OMAP3_CORE1_IOPAD(0x214a, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */ OMAP3_CORE1_IOPAD(0x214c, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */ OMAP3_CORE1_IOPAD(0x214e, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */ - OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 sdmmc1_wp*/ + OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 */ + OMAP3_CORE1_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */ >; }; @@ -242,12 +243,6 @@ OMAP3_WKUP_IOPAD(0x2a16, PIN_OUTPUT | PIN_OFF_OUTPUT_LOW | MUX_MODE4) /* sys_boot6.gpio_8 */ >; }; - - mmc1_cd: pinmux_mmc1_cd { - pinctrl-single,pins = < - OMAP3_WKUP_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */ - >; - }; }; diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index 7cd92babc41a..0844737b72b2 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi @@ -988,6 +988,7 @@ phy-names = "sata-phy"; clocks = <&sata_ref_clk>; ti,hwmods = "sata"; + ports-implemented = <0x1>; }; dss: dss@58000000 { diff --git a/arch/arm/boot/dts/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom-mdm9615.dtsi index 5ae4ec59e6ea..c852b69229c9 100644 --- a/arch/arm/boot/dts/qcom-mdm9615.dtsi +++ b/arch/arm/boot/dts/qcom-mdm9615.dtsi @@ -357,7 +357,7 @@ }; amba { - compatible = "arm,amba-bus"; + compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; ranges; diff --git a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts index 735914f6ae44..7cae328398b1 100644 --- a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts +++ b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts @@ -140,6 +140,10 @@ cpu-supply = <®_dcdc3>; }; +&de { + status = "okay"; +}; + &ehci0 { status = "okay"; }; diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index 2b26175d55d1..e78faaf9243c 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi @@ -234,6 +234,7 @@ de: display-engine { compatible = "allwinner,sun6i-a31-display-engine"; allwinner,pipelines = <&fe0>; + status = "disabled"; }; soc@01c00000 { diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts index 5ea4915f6d75..10d307408f23 100644 --- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts +++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts @@ -56,7 +56,7 @@ }; &pio { - mmc2_pins_nrst: mmc2@0 { + mmc2_pins_nrst: mmc2-rst-pin { allwinner,pins = "PC16"; 
allwinner,function = "gpio_out"; allwinner,drive = <SUN4I_PINCTRL_10_MA>; diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index b01a43851294..028d2b70e3b5 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -471,7 +471,7 @@ CONFIG_MESON_WATCHDOG=y CONFIG_DW_WATCHDOG=y CONFIG_DIGICOLOR_WATCHDOG=y CONFIG_BCM2835_WDT=y -CONFIG_BCM47XX_WATCHDOG=y +CONFIG_BCM47XX_WDT=y CONFIG_BCM7038_WDT=m CONFIG_BCM_KONA_WDT=y CONFIG_MFD_ACT8945A=y @@ -893,7 +893,7 @@ CONFIG_BCM2835_MBOX=y CONFIG_RASPBERRYPI_FIRMWARE=y CONFIG_EFI_VARS=m CONFIG_EFI_CAPSULE_LOADER=m -CONFIG_CONFIG_BCM47XX_NVRAM=y +CONFIG_BCM47XX_NVRAM=y CONFIG_BCM47XX_SPROM=y CONFIG_EXT4_FS=y CONFIG_AUTOFS4_FS=y diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig index 4364040ed696..1e6c48dd7b11 100644 --- a/arch/arm/configs/s3c2410_defconfig +++ b/arch/arm/configs/s3c2410_defconfig @@ -86,9 +86,9 @@ CONFIG_IPV6_TUNNEL=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CT_PROTO_DCCP=m -CONFIG_NF_CT_PROTO_SCTP=m -CONFIG_NF_CT_PROTO_UDPLITE=m +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index 522b5feb4eaa..b62eaeb147aa 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -94,6 +94,9 @@ #define ARM_CPU_XSCALE_ARCH_V2 0x4000 #define ARM_CPU_XSCALE_ARCH_V3 0x6000 +/* Qualcomm implemented cores */ +#define ARM_CPU_PART_SCORPION 0x510002d0 + extern unsigned int processor_id; #ifdef CONFIG_CPU_CP15 diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h index bfe2a2f5a644..22b73112b75f 100644 --- a/arch/arm/include/asm/ftrace.h +++ b/arch/arm/include/asm/ftrace.h @@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level) #define ftrace_return_address(n) return_address(n) +#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME + +static inline bool arch_syscall_match_sym_name(const char *sym, + const char *name) +{ + if (!strcmp(sym, "sys_mmap2")) + sym = "sys_mmap_pgoff"; + else if (!strcmp(sym, "sys_statfs64_wrapper")) + sym = "sys_statfs64"; + else if (!strcmp(sym, "sys_fstatfs64_wrapper")) + sym = "sys_fstatfs64"; + else if (!strcmp(sym, "sys_arm_fadvise64_64")) + sym = "sys_fadvise64_64"; + + /* Ignore case since sym may start with "SyS" instead of "sys" */ + return !strcasecmp(sym, name); +} + #endif /* ifndef __ASSEMBLY__ */ #endif /* _ASM_ARM_FTRACE */ diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h index a2e75b84e2ae..6dae1956c74d 100644 --- a/arch/arm/include/asm/virt.h +++ b/arch/arm/include/asm/virt.h @@ -80,6 +80,11 @@ static inline bool is_kernel_in_hyp_mode(void) return false; } +static inline bool has_vhe(void) +{ + return false; +} + /* The section containing the hypervisor idmap text */ extern char __hyp_idmap_text_start[]; extern char __hyp_idmap_text_end[]; diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/uapi/asm/types.h index a53cdb8f068c..9435a42f575e 100644 --- a/arch/arm/include/asm/types.h +++ b/arch/arm/include/uapi/asm/types.h @@ -1,5 +1,5 @@ -#ifndef _ASM_TYPES_H -#define _ASM_TYPES_H +#ifndef _UAPI_ASM_TYPES_H +#define _UAPI_ASM_TYPES_H #include <asm-generic/int-ll64.h> @@ -37,4 +37,4 @@ #define __UINTPTR_TYPE__ unsigned long #endif -#endif /* _ASM_TYPES_H */ +#endif /* _UAPI_ASM_TYPES_H */ diff --git 
a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 188180b5523d..be3b3fbd382f 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -1063,6 +1063,22 @@ static int __init arch_hw_breakpoint_init(void) return 0; } + /* + * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD + * whenever a WFI is issued, even if the core is not powered down, in + * violation of the architecture. When DBGPRSR.SPD is set, accesses to + * breakpoint and watchpoint registers are treated as undefined, so + * this results in boot time and runtime failures when these are + * accessed and we unexpectedly take a trap. + * + * It's not clear if/how this can be worked around, so we blacklist + * Scorpion CPUs to avoid these issues. + */ + if (read_cpuid_part() == ARM_CPU_PART_SCORPION) { + pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n"); + return 0; + } + has_ossr = core_has_os_save_restore(); /* Determine how many BRPs/WRPs are available. */ diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c index 22313cb53362..9af0701f7094 100644 --- a/arch/arm/kernel/smp_tlb.c +++ b/arch/arm/kernel/smp_tlb.c @@ -9,6 +9,7 @@ */ #include <linux/preempt.h> #include <linux/smp.h> +#include <linux/uaccess.h> #include <asm/smp_plat.h> #include <asm/tlbflush.h> @@ -40,8 +41,11 @@ static inline void ipi_flush_tlb_mm(void *arg) static inline void ipi_flush_tlb_page(void *arg) { struct tlb_args *ta = (struct tlb_args *)arg; + unsigned int __ua_flags = uaccess_save_and_enable(); local_flush_tlb_page(ta->ta_vma, ta->ta_start); + + uaccess_restore(__ua_flags); } static inline void ipi_flush_tlb_kernel_page(void *arg) @@ -54,8 +58,11 @@ static inline void ipi_flush_tlb_kernel_page(void *arg) static inline void ipi_flush_tlb_range(void *arg) { struct tlb_args *ta = (struct tlb_args *)arg; + unsigned int __ua_flags = uaccess_save_and_enable(); local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); + + uaccess_restore(__ua_flags); } static inline void ipi_flush_tlb_kernel_range(void *arg) diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 11676787ad49..9d7446456e0c 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -1099,6 +1099,9 @@ static void cpu_init_hyp_mode(void *dummy) __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr); __cpu_init_stage2(); + if (is_kernel_in_hyp_mode()) + kvm_timer_init_vhe(); + kvm_arm_init_debug(); } diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c index f6ba589cd312..c821c1d5610e 100644 --- a/arch/arm/mach-omap1/dma.c +++ b/arch/arm/mach-omap1/dma.c @@ -32,7 +32,6 @@ #include "soc.h" #define OMAP1_DMA_BASE (0xfffed800) -#define OMAP1_LOGICAL_DMA_CH_COUNT 17 static u32 enable_1510_mode; @@ -348,8 +347,6 @@ static int __init omap1_system_dma_init(void) goto exit_iounmap; } - d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT; - /* Valid attributes for omap1 plus processors */ if (cpu_is_omap15xx()) d->dev_caps = ENABLE_1510_MODE; @@ -366,13 +363,14 @@ static int __init omap1_system_dma_init(void) d->dev_caps |= CLEAR_CSR_ON_READ; d->dev_caps |= IS_WORD_16; - if (cpu_is_omap15xx()) - d->chan_count = 9; - else if (cpu_is_omap16xx() || cpu_is_omap7xx()) { - if (!(d->dev_caps & ENABLE_1510_MODE)) - d->chan_count = 16; + /* available logical channels */ + if (cpu_is_omap15xx()) { + d->lch_count = 9; + } else { + if (d->dev_caps & ENABLE_1510_MODE) + d->lch_count = 9; else - d->chan_count = 9; + d->lch_count = 16; } p = dma_plat_info; diff --git 
a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 477910a48448..70c004794880 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -161,7 +161,7 @@ static struct ti_st_plat_data wilink7_pdata = { .nshutdown_gpio = 162, .dev_name = "/dev/ttyO1", .flow_cntrl = 1, - .baud_rate = 300000, + .baud_rate = 3000000, }; static struct platform_device wl128x_device = { diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c index 8538910db202..a970e7fcba9e 100644 --- a/arch/arm/mach-ux500/pm.c +++ b/arch/arm/mach-ux500/pm.c @@ -134,8 +134,8 @@ bool prcmu_pending_irq(void) */ bool prcmu_is_cpu_in_wfi(int cpu) { - return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : - PRCM_ARM_WFI_STANDBY_WFI0; + return readl(PRCM_ARM_WFI_STANDBY) & + (cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0); } /* diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts index 238fbeacd330..5d28e1cdc998 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts @@ -137,6 +137,10 @@ }; }; +&scpi_clocks { + status = "disabled"; +}; + &uart_AO { status = "okay"; pinctrl-0 = <&uart_ao_a_pins>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi index 596240c38a9c..b35307321b63 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi @@ -55,7 +55,7 @@ mboxes = <&mailbox 1 &mailbox 2>; shmem = <&cpu_scp_lpri &cpu_scp_hpri>; - clocks { + scpi_clocks: clocks { compatible = "arm,scpi-clocks"; scpi_dvfs: scpi_clocks@0 { diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi index 64226d5ae471..135890cd8a85 100644 --- a/arch/arm64/boot/dts/exynos/exynos5433.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi @@ -1367,7 +1367,7 @@ }; amba { - compatible = "arm,amba-bus"; + compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; ranges; diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts index 358089687a69..ef1b9e573af0 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts +++ b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts @@ -27,7 +27,7 @@ stdout-path = "serial0:115200n8"; }; - memory { + memory@0 { device_type = "memory"; reg = <0x0 0x0 0x0 0x40000000>; }; diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi index 68a908334c7b..54dc28351c8c 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi +++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi @@ -72,7 +72,7 @@ <1 10 0xf08>; }; - amba_apu { + amba_apu: amba_apu@0 { compatible = "simple-bus"; #address-cells = <2>; #size-cells = <1>; @@ -175,7 +175,7 @@ }; i2c0: i2c@ff020000 { - compatible = "cdns,i2c-r1p10"; + compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10"; status = "disabled"; interrupt-parent = <&gic>; interrupts = <0 17 4>; @@ -185,7 +185,7 @@ }; i2c1: i2c@ff030000 { - compatible = "cdns,i2c-r1p10"; + compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10"; status = "disabled"; interrupt-parent = <&gic>; interrupts = <0 18 4>; diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index bfe632808d77..90c39a662379 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -222,7 +222,7 @@ static inline void *phys_to_virt(phys_addr_t x) #define _virt_addr_valid(kaddr) 
pfn_valid(__pa(kaddr) >> PAGE_SHIFT) #else #define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page)) -#define __page_to_voff(page) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page)) +#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page)) #define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET)) #define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START)) diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index fea10736b11f..439f6b5d31f6 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -47,6 +47,7 @@ #include <asm/ptrace.h> #include <asm/sections.h> #include <asm/sysreg.h> +#include <asm/cpufeature.h> /* * __boot_cpu_mode records what mode CPUs were booted in. @@ -80,6 +81,14 @@ static inline bool is_kernel_in_hyp_mode(void) return read_sysreg(CurrentEL) == CurrentEL_EL2; } +static inline bool has_vhe(void) +{ + if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN)) + return true; + + return false; +} + #ifdef CONFIG_ARM64_VHE extern void verify_cpu_run_el(void); #else diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index b5c3933ed441..d1ff83dfe5de 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -77,6 +77,7 @@ struct user_fpsimd_state { __uint128_t vregs[32]; __u32 fpsr; __u32 fpcr; + __u32 __reserved[2]; }; struct user_hwdebug_state { diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 923841ffe4a9..43512d4d7df2 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -683,7 +683,7 @@ el0_inv: mov x0, sp mov x1, #BAD_SYNC mov x2, x25 - bl bad_mode + bl bad_el0_sync b ret_to_user ENDPROC(el0_sync) diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index fc35e06ccaac..a22161ccf447 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -551,6 +551,8 @@ static int hw_break_set(struct task_struct *target, /* (address, ctrl) registers */ limit = regset->n * regset->size; while (count && offset < limit) { + if (count < PTRACE_HBP_ADDR_SZ) + return -EINVAL; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr, offset, offset + PTRACE_HBP_ADDR_SZ); if (ret) @@ -560,6 +562,8 @@ static int hw_break_set(struct task_struct *target, return ret; offset += PTRACE_HBP_ADDR_SZ; + if (!count) + break; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, offset, offset + PTRACE_HBP_CTRL_SZ); if (ret) @@ -596,7 +600,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset, const void *kbuf, const void __user *ubuf) { int ret; - struct user_pt_regs newregs; + struct user_pt_regs newregs = task_pt_regs(target)->user_regs; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1); if (ret) @@ -626,7 +630,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, const void *kbuf, const void __user *ubuf) { int ret; - struct user_fpsimd_state newstate; + struct user_fpsimd_state newstate = + target->thread.fpsimd_state.user_fpsimd; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1); if (ret) @@ -650,7 +655,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset, const void *kbuf, const void __user *ubuf) { int ret; - unsigned long tls; + unsigned long tls = target->thread.tp_value; ret = user_regset_copyin(&pos, &count, &kbuf, 
&ubuf, &tls, 0, -1); if (ret) @@ -676,7 +681,8 @@ static int system_call_set(struct task_struct *target, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - int syscallno, ret; + int syscallno = task_pt_regs(target)->syscallno; + int ret; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1); if (ret) @@ -948,7 +954,7 @@ static int compat_tls_set(struct task_struct *target, const void __user *ubuf) { int ret; - compat_ulong_t tls; + compat_ulong_t tls = target->thread.tp_value; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); if (ret) diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 5b830be79c01..659b2e6b6cf7 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -604,17 +604,34 @@ const char *esr_get_class_string(u32 esr) } /* - * bad_mode handles the impossible case in the exception vector. + * bad_mode handles the impossible case in the exception vector. This is always + * fatal. */ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) { - siginfo_t info; - void __user *pc = (void __user *)instruction_pointer(regs); console_verbose(); pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n", handler[reason], smp_processor_id(), esr, esr_get_class_string(esr)); + + die("Oops - bad mode", regs, 0); + local_irq_disable(); + panic("bad mode"); +} + +/* + * bad_el0_sync handles unexpected, but potentially recoverable synchronous + * exceptions taken from EL0. Unlike bad_mode, this returns. + */ +asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) +{ + siginfo_t info; + void __user *pc = (void __user *)instruction_pointer(regs); + console_verbose(); + + pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n", + smp_processor_id(), esr, esr_get_class_string(esr)); __show_regs(regs); info.si_signo = SIGILL; @@ -622,7 +639,10 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) info.si_code = ILL_ILLOPC; info.si_addr = pc; - arm64_notify_die("Oops - bad mode", regs, &info, 0); + current->thread.fault_address = 0; + current->thread.fault_code = 0; + + force_sig_info(info.si_signo, &info, current); } void __pte_error(const char *file, int line, unsigned long val) diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 716d1226ba69..380ebe705093 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -404,6 +404,8 @@ void __init mem_init(void) if (swiotlb_force == SWIOTLB_FORCE || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT)) swiotlb_init(1); + else + swiotlb_force = SWIOTLB_NO_FORCE; set_max_mapnr(pfn_to_page(max_pfn) - mem_map); diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index 1c64bc6330bc..0c4e470571ca 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -36,12 +36,13 @@ #ifdef CONFIG_HUGETLB_PAGE static inline int hash__hugepd_ok(hugepd_t hpd) { + unsigned long hpdval = hpd_val(hpd); /* * if it is not a pte and have hugepd shift mask * set, then it is a hugepd directory pointer */ - if (!(hpd.pd & _PAGE_PTE) && - ((hpd.pd & HUGEPD_SHIFT_MASK) != 0)) + if (!(hpdval & _PAGE_PTE) && + ((hpdval & HUGEPD_SHIFT_MASK) != 0)) return true; return false; } diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h index f61cad3de4e6..4c935f7504f7 100644 --- a/arch/powerpc/include/asm/book3s/64/hash.h +++ 
b/arch/powerpc/include/asm/book3s/64/hash.h @@ -201,6 +201,10 @@ extern int __meminit hash__vmemmap_create_mapping(unsigned long start, unsigned long phys); extern void hash__vmemmap_remove_mapping(unsigned long start, unsigned long page_size); + +int hash__create_section_mapping(unsigned long start, unsigned long end); +int hash__remove_section_mapping(unsigned long start, unsigned long end); + #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */ diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index ede215167d1a..7f4025a6c69e 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -21,12 +21,12 @@ static inline pte_t *hugepd_page(hugepd_t hpd) * We have only four bits to encode, MMU page size */ BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf); - return __va(hpd.pd & HUGEPD_ADDR_MASK); + return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK); } static inline unsigned int hugepd_mmu_psize(hugepd_t hpd) { - return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2; + return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2; } static inline unsigned int hugepd_shift(hugepd_t hpd) @@ -52,18 +52,20 @@ static inline pte_t *hugepd_page(hugepd_t hpd) { BUG_ON(!hugepd_ok(hpd)); #ifdef CONFIG_PPC_8xx - return (pte_t *)__va(hpd.pd & ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK)); + return (pte_t *)__va(hpd_val(hpd) & + ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK)); #else - return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE); + return (pte_t *)((hpd_val(hpd) & + ~HUGEPD_SHIFT_MASK) | PD_HUGE); #endif } static inline unsigned int hugepd_shift(hugepd_t hpd) { #ifdef CONFIG_PPC_8xx - return ((hpd.pd & _PMD_PAGE_MASK) >> 1) + 17; + return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17; #else - return hpd.pd & HUGEPD_SHIFT_MASK; + return hpd_val(hpd) & HUGEPD_SHIFT_MASK; #endif } diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index 172849727054..0cd8a3852763 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -227,9 +227,10 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, static inline int hugepd_ok(hugepd_t hpd) { #ifdef CONFIG_PPC_8xx - return ((hpd.pd & 0x4) != 0); + return ((hpd_val(hpd) & 0x4) != 0); #else - return (hpd.pd > 0); + /* We clear the top bit to indicate hugepd */ + return ((hpd_val(hpd) & PD_HUGE) == 0); #endif } diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 56398e7e6100..47120bf2670c 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -294,15 +294,12 @@ extern long long virt_phys_offset; #include <asm/pgtable-types.h> #endif -typedef struct { signed long pd; } hugepd_t; #ifndef CONFIG_HUGETLB_PAGE #define is_hugepd(pdep) (0) #define pgd_huge(pgd) (0) #endif /* CONFIG_HUGETLB_PAGE */ -#define __hugepd(x) ((hugepd_t) { (x) }) - struct page; extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg); extern void copy_user_page(void *to, void *from, unsigned long vaddr, diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index e157489ee7a1..ae0a23091a9b 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -65,6 +65,7 @@ struct power_pmu { #define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */ #define PPMU_HAS_SIER 0x00000040 /* Has SIER */ #define PPMU_ARCH_207S 0x00000080 /* PMC is 
architecture v2.07S */ +#define PPMU_NO_SIAR 0x00000100 /* Do not use SIAR */ /* * Values for flags to get_alternatives() diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h index 49c0a5a80efa..9c0f5db5cf46 100644 --- a/arch/powerpc/include/asm/pgtable-be-types.h +++ b/arch/powerpc/include/asm/pgtable-be-types.h @@ -104,4 +104,12 @@ static inline bool pmd_xchg(pmd_t *pmdp, pmd_t old, pmd_t new) return pmd_raw(old) == prev; } +typedef struct { __be64 pdbe; } hugepd_t; +#define __hugepd(x) ((hugepd_t) { cpu_to_be64(x) }) + +static inline unsigned long hpd_val(hugepd_t x) +{ + return be64_to_cpu(x.pdbe); +} + #endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */ diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h index e7f4f3e0fcde..8bd3b13fe2fb 100644 --- a/arch/powerpc/include/asm/pgtable-types.h +++ b/arch/powerpc/include/asm/pgtable-types.h @@ -66,4 +66,11 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new) } #endif +typedef struct { unsigned long pd; } hugepd_t; +#define __hugepd(x) ((hugepd_t) { (x) }) +static inline unsigned long hpd_val(hugepd_t x) +{ + return x.pd; +} + #endif /* _ASM_POWERPC_PGTABLE_TYPES_H */ diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index c56ea8c84abb..c4ced1d01d57 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -157,7 +157,7 @@ #define PPC_INST_MCRXR 0x7c000400 #define PPC_INST_MCRXR_MASK 0xfc0007fe #define PPC_INST_MFSPR_PVR 0x7c1f42a6 -#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff +#define PPC_INST_MFSPR_PVR_MASK 0xfc1ffffe #define PPC_INST_MFTMR 0x7c0002dc #define PPC_INST_MSGSND 0x7c00019c #define PPC_INST_MSGCLR 0x7c0001dc @@ -174,13 +174,13 @@ #define PPC_INST_RFDI 0x4c00004e #define PPC_INST_RFMCI 0x4c00004c #define PPC_INST_MFSPR_DSCR 0x7c1102a6 -#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff +#define PPC_INST_MFSPR_DSCR_MASK 0xfc1ffffe #define PPC_INST_MTSPR_DSCR 0x7c1103a6 -#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff +#define PPC_INST_MTSPR_DSCR_MASK 0xfc1ffffe #define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6 -#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff +#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1ffffe #define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6 -#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff +#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1ffffe #define PPC_INST_MFVSRD 0x7c000066 #define PPC_INST_MTVSRD 0x7c000166 #define PPC_INST_SLBFEE 0x7c0007a7 diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 8180bfd7ab93..9de7f79e702b 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -298,9 +298,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity) * * For pHyp, we have to enable IO for log retrieval. Otherwise, * 0xFF's is always returned from PCI config space. + * + * When the @severity is EEH_LOG_PERM, the PE is going to be + * removed. Prior to that, the drivers for devices included in + * the PE will be closed. The drivers rely on working IO path + * to bring the devices to quiet state. Otherwise, PCI traffic + * from those devices after they are removed is like to cause + * another unexpected EEH error. 
*/ if (!(pe->type & EEH_PE_PHB)) { - if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG)) + if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) || + severity == EEH_LOG_PERM) eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); /* diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index e4744ff38a17..925a4ef90559 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -463,6 +463,10 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, flush_fp_to_thread(target); + for (i = 0; i < 32 ; i++) + buf[i] = target->thread.TS_FPR(i); + buf[32] = target->thread.fp_state.fpscr; + /* copy to local buffer then write that out */ i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1); if (i) @@ -672,6 +676,9 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset, flush_altivec_to_thread(target); flush_vsx_to_thread(target); + for (i = 0; i < 32 ; i++) + buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET]; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, 32 * sizeof(double)); if (!ret) @@ -1019,6 +1026,10 @@ static int tm_cfpr_set(struct task_struct *target, flush_fp_to_thread(target); flush_altivec_to_thread(target); + for (i = 0; i < 32; i++) + buf[i] = target->thread.TS_CKFPR(i); + buf[32] = target->thread.ckfp_state.fpscr; + /* copy to local buffer then write that out */ i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1); if (i) @@ -1283,6 +1294,9 @@ static int tm_cvsx_set(struct task_struct *target, flush_altivec_to_thread(target); flush_vsx_to_thread(target); + for (i = 0; i < 32 ; i++) + buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET]; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, 32 * sizeof(double)); if (!ret) diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 80334937e14f..67e19a0821be 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -747,7 +747,7 @@ static unsigned long __init htab_get_table_size(void) } #ifdef CONFIG_MEMORY_HOTPLUG -int create_section_mapping(unsigned long start, unsigned long end) +int hash__create_section_mapping(unsigned long start, unsigned long end) { int rc = htab_bolt_mapping(start, end, __pa(start), pgprot_val(PAGE_KERNEL), mmu_linear_psize, @@ -761,7 +761,7 @@ int create_section_mapping(unsigned long start, unsigned long end) return rc; } -int remove_section_mapping(unsigned long start, unsigned long end) +int hash__remove_section_mapping(unsigned long start, unsigned long end) { int rc = htab_remove_mapping(start, end, mmu_linear_psize, mmu_kernel_ssize); diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c index d5026f3800b6..37b5f91e381b 100644 --- a/arch/powerpc/mm/hugetlbpage-hash64.c +++ b/arch/powerpc/mm/hugetlbpage-hash64.c @@ -125,11 +125,14 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, int hugepd_ok(hugepd_t hpd) { bool is_hugepd; + unsigned long hpdval; + + hpdval = hpd_val(hpd); /* * We should not find this format in page directory, warn otherwise. 
*/ - is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0)); + is_hugepd = (((hpdval & 0x3) == 0x0) && ((hpdval & HUGEPD_SHIFT_MASK) != 0)); WARN(is_hugepd, "Found wrong page directory format\n"); return 0; } diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 289df38fb7e0..8c3389cbcd12 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -53,7 +53,7 @@ static u64 gpage_freearray[MAX_NUMBER_GPAGES]; static unsigned nr_gpages; #endif -#define hugepd_none(hpd) ((hpd).pd == 0) +#define hugepd_none(hpd) (hpd_val(hpd) == 0) pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) { @@ -103,24 +103,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, for (i = 0; i < num_hugepd; i++, hpdp++) { if (unlikely(!hugepd_none(*hpdp))) break; - else + else { #ifdef CONFIG_PPC_BOOK3S_64 - hpdp->pd = __pa(new) | - (shift_to_mmu_psize(pshift) << 2); + *hpdp = __hugepd(__pa(new) | + (shift_to_mmu_psize(pshift) << 2)); #elif defined(CONFIG_PPC_8xx) - hpdp->pd = __pa(new) | - (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M : - _PMD_PAGE_512K) | - _PMD_PRESENT; + *hpdp = __hugepd(__pa(new) | + (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M : + _PMD_PAGE_512K) | _PMD_PRESENT); #else /* We use the old format for PPC_FSL_BOOK3E */ - hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; + *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift); #endif + } } /* If we bailed from the for loop early, an error occurred, clean up */ if (i < num_hugepd) { for (i = i - 1 ; i >= 0; i--, hpdp--) - hpdp->pd = 0; + *hpdp = __hugepd(0); kmem_cache_free(cachep, new); } spin_unlock(&mm->page_table_lock); @@ -454,7 +454,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif return; for (i = 0; i < num_hugepd; i++, hpdp++) - hpdp->pd = 0; + *hpdp = __hugepd(0); if (shift >= pdshift) hugepd_free(tlb, hugepte); @@ -810,12 +810,8 @@ static int __init hugetlbpage_init(void) * if we have pdshift and shift value same, we don't * use pgt cache for hugepd. 
*/ - if (pdshift > shift) { + if (pdshift > shift) pgtable_cache_add(pdshift - shift, NULL); - if (!PGT_CACHE(pdshift - shift)) - panic("hugetlbpage_init(): could not create " - "pgtable cache for %d bit pagesize\n", shift); - } #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) else if (!hugepte_cache) { /* @@ -852,9 +848,6 @@ static int __init hugetlbpage_init(void) else if (mmu_psize_defs[MMU_PAGE_2M].shift) HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift; #endif - else - panic("%s: Unable to set default huge page size\n", __func__); - return 0; } diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c index a175cd82ae8c..f2108c40e697 100644 --- a/arch/powerpc/mm/init-common.c +++ b/arch/powerpc/mm/init-common.c @@ -78,8 +78,12 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *)) align = max_t(unsigned long, align, minalign); name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift); new = kmem_cache_create(name, table_size, align, 0, ctor); + if (!new) + panic("Could not allocate pgtable cache for order %d", shift); + kfree(name); pgtable_cache[shift - 1] = new; + pr_debug("Allocated pgtable cache for order %d\n", shift); } @@ -88,7 +92,7 @@ void pgtable_cache_init(void) { pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor); - if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE)) + if (PMD_CACHE_INDEX && !PGT_CACHE(PMD_CACHE_INDEX)) pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor); /* * In all current configs, when the PUD index exists it's the @@ -97,11 +101,4 @@ void pgtable_cache_init(void) */ if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE)) pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor); - - if (!PGT_CACHE(PGD_INDEX_SIZE)) - panic("Couldn't allocate pgd cache"); - if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE)) - panic("Couldn't allocate pmd pgtable caches"); - if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE)) - panic("Couldn't allocate pud pgtable caches"); } diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index ebf9782bacf9..653ff6c74ebe 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -126,3 +126,21 @@ void mmu_cleanup_all(void) else if (mmu_hash_ops.hpte_clear_all) mmu_hash_ops.hpte_clear_all(); } + +#ifdef CONFIG_MEMORY_HOTPLUG +int create_section_mapping(unsigned long start, unsigned long end) +{ + if (radix_enabled()) + return -ENODEV; + + return hash__create_section_mapping(start, end); +} + +int remove_section_mapping(unsigned long start, unsigned long end) +{ + if (radix_enabled()) + return -ENODEV; + + return hash__remove_section_mapping(start, end); +} +#endif /* CONFIG_MEMORY_HOTPLUG */ diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index fd3e4034c04d..270eb9b74e2e 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -295,6 +295,8 @@ static inline void perf_read_regs(struct pt_regs *regs) */ if (TRAP(regs) != 0xf00) use_siar = 0; + else if ((ppmu->flags & PPMU_NO_SIAR)) + use_siar = 0; else if (marked) use_siar = 1; else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING)) diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h index 6447dc1c3d89..929b56d47ad9 100644 --- a/arch/powerpc/perf/power9-events-list.h +++ b/arch/powerpc/perf/power9-events-list.h @@ -16,7 +16,7 @@ EVENT(PM_CYC, 0x0001e) EVENT(PM_ICT_NOSLOT_CYC, 0x100f8) EVENT(PM_CMPLU_STALL, 0x1e054) EVENT(PM_INST_CMPL, 0x00002) -EVENT(PM_BRU_CMPL, 0x40060) +EVENT(PM_BRU_CMPL, 0x10012) EVENT(PM_BR_MPRED_CMPL, 0x400f6) /* 
All L1 D cache load references counted at finish, gated by reject */ diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c index 346010e8d463..7332634e18c9 100644 --- a/arch/powerpc/perf/power9-pmu.c +++ b/arch/powerpc/perf/power9-pmu.c @@ -384,7 +384,7 @@ static struct power_pmu power9_isa207_pmu = { .bhrb_filter_map = power9_bhrb_filter_map, .get_constraint = isa207_get_constraint, .disable_pmc = isa207_disable_pmc, - .flags = PPMU_HAS_SIER | PPMU_ARCH_207S, + .flags = PPMU_NO_SIAR | PPMU_ARCH_207S, .n_generic = ARRAY_SIZE(power9_generic_events), .generic_events = power9_generic_events, .cache_events = &power9_cache_events, diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c index d38e86fd5720..60c57657c772 100644 --- a/arch/powerpc/sysdev/xics/icp-opal.c +++ b/arch/powerpc/sysdev/xics/icp-opal.c @@ -20,6 +20,7 @@ #include <asm/xics.h> #include <asm/io.h> #include <asm/opal.h> +#include <asm/kvm_ppc.h> static void icp_opal_teardown_cpu(void) { @@ -39,7 +40,26 @@ static void icp_opal_flush_ipi(void) * Should we be flagging idle loop instead? * Or creating some task to be scheduled? */ - opal_int_eoi((0x00 << 24) | XICS_IPI); + if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0) + force_external_irq_replay(); +} + +static unsigned int icp_opal_get_xirr(void) +{ + unsigned int kvm_xirr; + __be32 hw_xirr; + int64_t rc; + + /* Handle an interrupt latched by KVM first */ + kvm_xirr = kvmppc_get_xics_latch(); + if (kvm_xirr) + return kvm_xirr; + + /* Then ask OPAL */ + rc = opal_int_get_xirr(&hw_xirr, false); + if (rc < 0) + return 0; + return be32_to_cpu(hw_xirr); } static unsigned int icp_opal_get_irq(void) @@ -47,12 +67,8 @@ static unsigned int icp_opal_get_irq(void) unsigned int xirr; unsigned int vec; unsigned int irq; - int64_t rc; - rc = opal_int_get_xirr(&xirr, false); - if (rc < 0) - return 0; - xirr = be32_to_cpu(xirr); + xirr = icp_opal_get_xirr(); vec = xirr & 0x00ffffff; if (vec == XICS_IRQ_SPURIOUS) return 0; @@ -67,7 +83,8 @@ static unsigned int icp_opal_get_irq(void) xics_mask_unknown_vec(vec); /* We might learn about it later, so EOI it */ - opal_int_eoi(xirr); + if (opal_int_eoi(xirr) > 0) + force_external_irq_replay(); return 0; } diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index e659daffe368..e00975361fec 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig @@ -69,7 +69,7 @@ CONFIG_CMA=y CONFIG_CMA_DEBUG=y CONFIG_CMA_DEBUGFS=y CONFIG_MEM_SOFT_DIRTY=y -CONFIG_ZPOOL=m +CONFIG_ZSWAP=y CONFIG_ZBUD=m CONFIG_ZSMALLOC=m CONFIG_ZSMALLOC_STAT=y @@ -141,8 +141,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y CONFIG_NF_CONNTRACK_TIMEOUT=y CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CT_PROTO_DCCP=m -CONFIG_NF_CT_PROTO_UDPLITE=m CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m @@ -159,13 +157,12 @@ CONFIG_NF_TABLES=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m -CONFIG_NFT_RBTREE=m -CONFIG_NFT_HASH=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m CONFIG_NFT_NAT=m CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_AUDIT=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m @@ -219,7 +216,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m 
CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -258,7 +254,6 @@ CONFIG_IP_VS_NQ=m CONFIG_IP_VS_FTP=m CONFIG_IP_VS_PE_SIP=m CONFIG_NF_CONNTRACK_IPV4=m -# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set CONFIG_NF_TABLES_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NF_TABLES_ARP=m @@ -436,7 +431,6 @@ CONFIG_EQUALIZER=m CONFIG_IFB=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m -CONFIG_IPVLAN=m CONFIG_VXLAN=m CONFIG_TUN=m CONFIG_VETH=m @@ -480,6 +474,7 @@ CONFIG_VIRTIO_BALLOON=m CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y CONFIG_JBD2_DEBUG=y CONFIG_JFS_FS=m CONFIG_JFS_POSIX_ACL=y @@ -592,14 +587,12 @@ CONFIG_LOCK_STAT=y CONFIG_DEBUG_LOCKDEP=y CONFIG_DEBUG_ATOMIC_SLEEP=y CONFIG_DEBUG_LOCKING_API_SELFTESTS=y -CONFIG_DEBUG_LIST=y CONFIG_DEBUG_SG=y CONFIG_DEBUG_NOTIFIERS=y CONFIG_DEBUG_CREDENTIALS=y CONFIG_RCU_TORTURE_TEST=m CONFIG_RCU_CPU_STALL_TIMEOUT=300 CONFIG_NOTIFIER_ERROR_INJECTION=m -CONFIG_CPU_NOTIFIER_ERROR_INJECT=m CONFIG_PM_NOTIFIER_ERROR_INJECT=m CONFIG_FAULT_INJECTION=y CONFIG_FAILSLAB=y @@ -618,6 +611,7 @@ CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_UPROBE_EVENT=y CONFIG_FUNCTION_PROFILER=y +CONFIG_HIST_TRIGGERS=y CONFIG_TRACE_ENUM_MAP_FILE=y CONFIG_LKDTM=m CONFIG_TEST_LIST_SORT=y @@ -630,6 +624,7 @@ CONFIG_TEST_STRING_HELPERS=y CONFIG_TEST_KSTRTOX=y CONFIG_DMA_API_DEBUG=y CONFIG_TEST_BPF=m +CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_S390_PTDUMP=y CONFIG_ENCRYPTED_KEYS=m CONFIG_SECURITY=y @@ -640,16 +635,18 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_IMA=y CONFIG_IMA_APPRAISE=y +CONFIG_CRYPTO_RSA=m +CONFIG_CRYPTO_DH=m +CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_USER=m -# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m -CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_KEYWRAP=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_CRC32=m @@ -673,11 +670,13 @@ CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m +CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m CONFIG_ZCRYPT=m CONFIG_CRYPTO_SHA1_S390=m CONFIG_CRYPTO_SHA256_S390=m diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index 95ceac50bc65..f05d2d6e1087 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig @@ -12,6 +12,7 @@ CONFIG_TASK_IO_ACCOUNTING=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_NUMA_BALANCING=y +# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y CONFIG_BLK_CGROUP=y @@ -54,8 +55,9 @@ CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_DEFAULT_DEADLINE=y +CONFIG_LIVEPATCH=y CONFIG_TUNE_ZEC12=y -CONFIG_NR_CPUS=256 +CONFIG_NR_CPUS=512 CONFIG_NUMA=y CONFIG_HZ_100=y CONFIG_MEMORY_HOTPLUG=y @@ -65,6 +67,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_CLEANCACHE=y CONFIG_FRONTSWAP=y CONFIG_CMA=y +CONFIG_MEM_SOFT_DIRTY=y CONFIG_ZSWAP=y CONFIG_ZBUD=m CONFIG_ZSMALLOC=m @@ -136,8 +139,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y CONFIG_NF_CONNTRACK_TIMEOUT=y CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CT_PROTO_DCCP=m -CONFIG_NF_CT_PROTO_UDPLITE=m CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m @@ -154,13 +155,12 @@ 
CONFIG_NF_TABLES=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m -CONFIG_NFT_RBTREE=m -CONFIG_NFT_HASH=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m CONFIG_NFT_NAT=m CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_AUDIT=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m @@ -214,7 +214,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -253,7 +252,6 @@ CONFIG_IP_VS_NQ=m CONFIG_IP_VS_FTP=m CONFIG_IP_VS_PE_SIP=m CONFIG_NF_CONNTRACK_IPV4=m -# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set CONFIG_NF_TABLES_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NF_TABLES_ARP=m @@ -430,7 +428,6 @@ CONFIG_EQUALIZER=m CONFIG_IFB=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m -CONFIG_IPVLAN=m CONFIG_VXLAN=m CONFIG_TUN=m CONFIG_VETH=m @@ -460,6 +457,7 @@ CONFIG_HW_RANDOM_VIRTIO=m CONFIG_RAW_DRIVER=m CONFIG_HANGCHECK_TIMER=m CONFIG_TN3270_FS=y +# CONFIG_HWMON is not set CONFIG_WATCHDOG=y CONFIG_WATCHDOG_NOWAYOUT=y CONFIG_SOFT_WATCHDOG=m @@ -473,6 +471,7 @@ CONFIG_VIRTIO_BALLOON=m CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y CONFIG_JBD2_DEBUG=y CONFIG_JFS_FS=m CONFIG_JFS_POSIX_ACL=y @@ -495,6 +494,7 @@ CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=y CONFIG_CUSE=m CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y CONFIG_FSCACHE=m CONFIG_CACHEFILES=m CONFIG_ISO9660_FS=y @@ -551,25 +551,27 @@ CONFIG_FRAME_WARN=1024 CONFIG_UNUSED_SYMBOLS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m CONFIG_PANIC_ON_OOPS=y CONFIG_TIMER_STATS=y CONFIG_RCU_TORTURE_TEST=m CONFIG_RCU_CPU_STALL_TIMEOUT=60 -CONFIG_NOTIFIER_ERROR_INJECTION=m -CONFIG_CPU_NOTIFIER_ERROR_INJECT=m -CONFIG_PM_NOTIFIER_ERROR_INJECT=m CONFIG_LATENCYTOP=y +CONFIG_SCHED_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y -# CONFIG_KPROBE_EVENT is not set +CONFIG_UPROBE_EVENT=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_HIST_TRIGGERS=y CONFIG_TRACE_ENUM_MAP_FILE=y CONFIG_LKDTM=m -CONFIG_RBTREE_TEST=m -CONFIG_INTERVAL_TREE_TEST=m CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y CONFIG_TEST_BPF=m +CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_S390_PTDUMP=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y CONFIG_ENCRYPTED_KEYS=m CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y @@ -577,18 +579,25 @@ CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y CONFIG_IMA=y +CONFIG_IMA_WRITE_POLICY=y CONFIG_IMA_APPRAISE=y +CONFIG_CRYPTO_DH=m +CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m -CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_KEYWRAP=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_CRC32=m @@ -598,6 +607,7 @@ CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m @@ -612,10 +622,13 @@ CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m 
CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m +CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m CONFIG_ZCRYPT=m CONFIG_CRYPTO_SHA1_S390=m CONFIG_CRYPTO_SHA256_S390=m @@ -624,9 +637,6 @@ CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_GHASH_S390=m CONFIG_CRYPTO_CRC32_S390=y -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m -CONFIG_X509_CERTIFICATE_PARSER=m CONFIG_CRC7=m CONFIG_CRC8=m CONFIG_CORDIC=m diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index bc7b176f5795..2cf87343b590 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig @@ -65,6 +65,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_CLEANCACHE=y CONFIG_FRONTSWAP=y CONFIG_CMA=y +CONFIG_MEM_SOFT_DIRTY=y CONFIG_ZSWAP=y CONFIG_ZBUD=m CONFIG_ZSMALLOC=m @@ -136,8 +137,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y CONFIG_NF_CONNTRACK_TIMEOUT=y CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CT_PROTO_DCCP=m -CONFIG_NF_CT_PROTO_UDPLITE=m CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m @@ -154,13 +153,12 @@ CONFIG_NF_TABLES=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m -CONFIG_NFT_RBTREE=m -CONFIG_NFT_HASH=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m CONFIG_NFT_NAT=m CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_AUDIT=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m @@ -214,7 +212,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -253,7 +250,6 @@ CONFIG_IP_VS_NQ=m CONFIG_IP_VS_FTP=m CONFIG_IP_VS_PE_SIP=m CONFIG_NF_CONNTRACK_IPV4=m -# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set CONFIG_NF_TABLES_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NF_TABLES_ARP=m @@ -430,7 +426,6 @@ CONFIG_EQUALIZER=m CONFIG_IFB=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m -CONFIG_IPVLAN=m CONFIG_VXLAN=m CONFIG_TUN=m CONFIG_VETH=m @@ -474,6 +469,7 @@ CONFIG_VIRTIO_BALLOON=m CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y CONFIG_JBD2_DEBUG=y CONFIG_JFS_FS=m CONFIG_JFS_POSIX_ACL=y @@ -496,6 +492,7 @@ CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=y CONFIG_CUSE=m CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y CONFIG_FSCACHE=m CONFIG_CACHEFILES=m CONFIG_ISO9660_FS=y @@ -563,12 +560,16 @@ CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_UPROBE_EVENT=y CONFIG_FUNCTION_PROFILER=y +CONFIG_HIST_TRIGGERS=y CONFIG_TRACE_ENUM_MAP_FILE=y CONFIG_LKDTM=m CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y CONFIG_TEST_BPF=m +CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_S390_PTDUMP=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y CONFIG_ENCRYPTED_KEYS=m CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y @@ -576,18 +577,25 @@ CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y CONFIG_IMA=y +CONFIG_IMA_WRITE_POLICY=y CONFIG_IMA_APPRAISE=y +CONFIG_CRYPTO_DH=m +CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m 
CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m -CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_KEYWRAP=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_CRC32=m @@ -597,6 +605,7 @@ CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m @@ -611,10 +620,13 @@ CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m +CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m CONFIG_ZCRYPT=m CONFIG_CRYPTO_SHA1_S390=m CONFIG_CRYPTO_SHA256_S390=m @@ -623,9 +635,6 @@ CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_GHASH_S390=m CONFIG_CRYPTO_CRC32_S390=y -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m -CONFIG_X509_CERTIFICATE_PARSER=m CONFIG_CRC7=m CONFIG_CRC8=m CONFIG_CORDIC=m diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 2d40ef0a6295..d00e368fb5e6 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -38,7 +38,6 @@ CONFIG_JUMP_LABEL=y CONFIG_STATIC_KEYS_SELFTEST=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_MODVERSIONS=y CONFIG_BLK_DEV_INTEGRITY=y CONFIG_PARTITION_ADVANCED=y CONFIG_IBM_PARTITION=y @@ -130,8 +129,11 @@ CONFIG_DUMMY=m CONFIG_EQUALIZER=m CONFIG_TUN=m CONFIG_VIRTIO_NET=y +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_INPUT is not set # CONFIG_SERIO is not set +CONFIG_DEVKMEM=y CONFIG_RAW_DRIVER=m CONFIG_VIRTIO_BALLOON=y CONFIG_EXT4_FS=y @@ -183,7 +185,6 @@ CONFIG_TRACE_ENUM_MAP_FILE=y CONFIG_KPROBES_SANITY_TEST=y CONFIG_S390_PTDUMP=y CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_AUTHENC=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h index d7697ab802f6..8e136b88cdf4 100644 --- a/arch/s390/include/asm/ctl_reg.h +++ b/arch/s390/include/asm/ctl_reg.h @@ -15,7 +15,9 @@ BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ asm volatile( \ " lctlg %1,%2,%0\n" \ - : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\ + : \ + : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \ + : "memory"); \ } #define __ctl_store(array, low, high) { \ diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index bec71e902be3..6484a250021e 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -916,7 +916,7 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, S390_ARCH_FAC_LIST_SIZE_BYTE); memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, - S390_ARCH_FAC_LIST_SIZE_BYTE); + sizeof(S390_lowcore.stfle_fac_list)); if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) ret = -EFAULT; kfree(mach); @@ -1437,7 +1437,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) /* Populate the facility mask initially. 
*/ memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list, - S390_ARCH_FAC_LIST_SIZE_BYTE); + sizeof(S390_lowcore.stfle_fac_list)); for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { if (i < kvm_s390_fac_list_mask_size()) kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i]; diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index 05612a2529c8..496e60391fac 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -1010,7 +1010,7 @@ static __init int amd_ibs_init(void) * all online cpus. */ cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING, - "perf/x86/amd/ibs:STARTING", + "perf/x86/amd/ibs:starting", x86_pmu_amd_ibs_starting_cpu, x86_pmu_amd_ibs_dying_cpu); diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index d611cab214a6..eb1484c86bb4 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3176,13 +3176,16 @@ static void intel_pmu_cpu_starting(int cpu) if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { for_each_cpu(i, topology_sibling_cpumask(cpu)) { + struct cpu_hw_events *sibling; struct intel_excl_cntrs *c; - c = per_cpu(cpu_hw_events, i).excl_cntrs; + sibling = &per_cpu(cpu_hw_events, i); + c = sibling->excl_cntrs; if (c && c->core_id == core_id) { cpuc->kfree_on_online[1] = cpuc->excl_cntrs; cpuc->excl_cntrs = c; - cpuc->excl_thread_id = 1; + if (!sibling->excl_thread_id) + cpuc->excl_thread_id = 1; break; } } diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 945e512a112a..1e35dd06b090 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1875,6 +1875,7 @@ static struct irq_chip ioapic_chip __read_mostly = { .irq_ack = irq_chip_ack_parent, .irq_eoi = ioapic_ack_level, .irq_set_affinity = ioapic_set_affinity, + .irq_retrigger = irq_chip_retrigger_hierarchy, .flags = IRQCHIP_SKIP_SET_WAKE, }; @@ -1886,6 +1887,7 @@ static struct irq_chip ioapic_ir_chip __read_mostly = { .irq_ack = irq_chip_ack_parent, .irq_eoi = ioapic_ir_ack_level, .irq_set_affinity = ioapic_set_affinity, + .irq_retrigger = irq_chip_retrigger_hierarchy, .flags = IRQCHIP_SKIP_SET_WAKE, }; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 57d8a856cdc5..d153be8929a6 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6171,7 +6171,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) kvm_x86_ops->patch_hypercall(vcpu, instruction); - return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); + return emulator_write_emulated(ctxt, rip, instruction, 3, + &ctxt->exception); } static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 3cd69832d7f4..3961103e9176 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -114,6 +114,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = { DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"), }, }, + /* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */ + { + .callback = set_nouse_crs, + .ident = "Supermicro X8DTH", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), + DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"), + DMI_MATCH(DMI_BIOS_VERSION, "2.0a"), + }, + }, /* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */ { diff --git a/block/blk-mq.c b/block/blk-mq.c index a8e67a155d04..c3400b5444a7 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -912,7 +912,6 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx) 
{ LIST_HEAD(rq_list); - LIST_HEAD(driver_list); if (unlikely(blk_mq_hctx_stopped(hctx))) return; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 50a2020b5b72..9fd06eeb1a17 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -271,7 +271,7 @@ static inline int sock_send_bvec(struct nbd_device *nbd, int index, static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) { struct request *req = blk_mq_rq_from_pdu(cmd); - int result, flags; + int result; struct nbd_request request; unsigned long size = blk_rq_bytes(req); struct bio *bio; @@ -310,7 +310,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) if (type != NBD_CMD_WRITE) return 0; - flags = 0; bio = req->bio; while (bio) { struct bio *next = bio->bi_next; @@ -319,9 +318,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) bio_for_each_segment(bvec, bio, iter) { bool is_last = !next && bio_iter_last(bvec, iter); + int flags = is_last ? 0 : MSG_MORE; - if (is_last) - flags = MSG_MORE; dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", cmd, bvec.bv_len); result = sock_send_bvec(nbd, index, &bvec, flags); diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 8b00e79c2683..17857beb4892 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1862,7 +1862,7 @@ static void config_work_handler(struct work_struct *work) { struct ports_device *portdev; - portdev = container_of(work, struct ports_device, control_work); + portdev = container_of(work, struct ports_device, config_work); if (!use_multiport(portdev)) { struct virtio_device *vdev; struct port *port; diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 8c8b495cbf0d..cdc092a1d9ef 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -586,7 +586,7 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = { GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam", GATE_BUS_TOP, 24, 0, 0), GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler", - GATE_BUS_TOP, 27, 0, 0), + GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0), }; static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = { @@ -956,20 +956,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0), GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys", - GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0), + GATE_BUS_FSYS0, 9, CLK_IS_CRITICAL, 0), GATE(0, "aclk200_fsys2", "mout_user_aclk200_fsys2", GATE_BUS_FSYS0, 10, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk333_g2d", "mout_user_aclk333_g2d", GATE_BUS_TOP, 0, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk266_g2d", "mout_user_aclk266_g2d", - GATE_BUS_TOP, 1, CLK_IGNORE_UNUSED, 0), + GATE_BUS_TOP, 1, CLK_IS_CRITICAL, 0), GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg", GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0", GATE_BUS_TOP, 5, 0, 0), GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl", - GATE_BUS_TOP, 6, CLK_IGNORE_UNUSED, 0), + GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0), GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl", GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp", @@ -983,20 +983,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { GATE(0, "aclk166", "mout_user_aclk166", GATE_BUS_TOP, 14, 
CLK_IGNORE_UNUSED, 0), GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333", - GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0), + GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0), GATE(0, "aclk400_isp", "mout_user_aclk400_isp", GATE_BUS_TOP, 16, 0, 0), GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl", GATE_BUS_TOP, 17, 0, 0), GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1", - GATE_BUS_TOP, 18, 0, 0), + GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0), GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24", GATE_BUS_TOP, 28, 0, 0), GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m", GATE_BUS_TOP, 29, 0, 0), GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1", - SRC_MASK_TOP2, 24, 0, 0), + SRC_MASK_TOP2, 24, CLK_IS_CRITICAL, 0), GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk", SRC_MASK_TOP7, 20, 0, 0), diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 4da1dc2278bd..670ff0f25b67 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -495,6 +495,7 @@ static int exynos4_mct_dying_cpu(unsigned int cpu) if (mct_int_type == MCT_INT_SPI) { if (evt->irq != -1) disable_irq_nosync(evt->irq); + exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); } else { disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); } diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c index 717704e9ae07..c0303f61c26a 100644 --- a/drivers/hid/hid-corsair.c +++ b/drivers/hid/hid-corsair.c @@ -148,26 +148,36 @@ static enum led_brightness k90_backlight_get(struct led_classdev *led_cdev) struct usb_interface *usbif = to_usb_interface(dev->parent); struct usb_device *usbdev = interface_to_usbdev(usbif); int brightness; - char data[8]; + char *data; + + data = kmalloc(8, GFP_KERNEL); + if (!data) + return -ENOMEM; ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), K90_REQUEST_STATUS, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, data, 8, USB_CTRL_SET_TIMEOUT); - if (ret < 0) { + if (ret < 5) { dev_warn(dev, "Failed to get K90 initial state (error %d).\n", ret); - return -EIO; + ret = -EIO; + goto out; } brightness = data[4]; if (brightness < 0 || brightness > 3) { dev_warn(dev, "Read invalid backlight brightness: %02hhx.\n", data[4]); - return -EIO; + ret = -EIO; + goto out; } - return brightness; + ret = brightness; +out: + kfree(data); + + return ret; } static enum led_brightness k90_record_led_get(struct led_classdev *led_cdev) @@ -253,17 +263,22 @@ static ssize_t k90_show_macro_mode(struct device *dev, struct usb_interface *usbif = to_usb_interface(dev->parent); struct usb_device *usbdev = interface_to_usbdev(usbif); const char *macro_mode; - char data[8]; + char *data; + + data = kmalloc(2, GFP_KERNEL); + if (!data) + return -ENOMEM; ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), K90_REQUEST_GET_MODE, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, data, 2, USB_CTRL_SET_TIMEOUT); - if (ret < 0) { + if (ret < 1) { dev_warn(dev, "Failed to get K90 initial mode (error %d).\n", ret); - return -EIO; + ret = -EIO; + goto out; } switch (data[0]) { @@ -277,10 +292,15 @@ static ssize_t k90_show_macro_mode(struct device *dev, default: dev_warn(dev, "K90 in unknown mode: %02hhx.\n", data[0]); - return -EIO; + ret = -EIO; + goto out; } - return snprintf(buf, PAGE_SIZE, "%s\n", macro_mode); + ret = snprintf(buf, PAGE_SIZE, "%s\n", macro_mode); +out: + kfree(data); + + return ret; } static ssize_t k90_store_macro_mode(struct device *dev, @@ -320,26 +340,36 @@ static ssize_t k90_show_current_profile(struct device *dev, struct 
usb_interface *usbif = to_usb_interface(dev->parent); struct usb_device *usbdev = interface_to_usbdev(usbif); int current_profile; - char data[8]; + char *data; + + data = kmalloc(8, GFP_KERNEL); + if (!data) + return -ENOMEM; ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), K90_REQUEST_STATUS, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, data, 8, USB_CTRL_SET_TIMEOUT); - if (ret < 0) { + if (ret < 8) { dev_warn(dev, "Failed to get K90 initial state (error %d).\n", ret); - return -EIO; + ret = -EIO; + goto out; } current_profile = data[7]; if (current_profile < 1 || current_profile > 3) { dev_warn(dev, "Read invalid current profile: %02hhx.\n", data[7]); - return -EIO; + ret = -EIO; + goto out; } - return snprintf(buf, PAGE_SIZE, "%d\n", current_profile); + ret = snprintf(buf, PAGE_SIZE, "%d\n", current_profile); +out: + kfree(data); + + return ret; } static ssize_t k90_store_current_profile(struct device *dev, diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index b9779bcbd140..8aeca038cc73 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -740,6 +740,11 @@ static int wacom_add_shared_data(struct hid_device *hdev) return retval; } + if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH) + wacom_wac->shared->touch = hdev; + else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN) + wacom_wac->shared->pen = hdev; + out: mutex_unlock(&wacom_udev_list_lock); return retval; @@ -2036,10 +2041,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless) if (error) goto fail; - error = wacom_add_shared_data(hdev); - if (error) - goto fail; - /* * Bamboo Pad has a generic hid handling for the Pen, and we switch it * into debug mode for the touch part. @@ -2080,10 +2081,9 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless) wacom_update_name(wacom, wireless ? " (WL)" : ""); - if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH) - wacom_wac->shared->touch = hdev; - else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN) - wacom_wac->shared->pen = hdev; + error = wacom_add_shared_data(hdev); + if (error) + goto fail; if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) && (features->quirks & WACOM_QUIRK_BATTERY)) { diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index b1a9a3ca6d56..0884dc9554fd 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2187,6 +2187,16 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report) wacom_report_events(hdev, report); + /* + * Non-input reports may be sent prior to the device being + * completely initialized. Since only their events need + * to be processed, exit after 'wacom_report_events' has + * been called to prevent potential crashes in the report- + * processing functions. 
+ */ + if (report->type != HID_INPUT_REPORT) + return; + if (WACOM_PAD_FIELD(field)) { wacom_wac_pad_battery_report(hdev, report); if (wacom->wacom_wac.pad_input) diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index b11c3455b040..e6ea8503f40c 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -506,9 +506,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, } } while (busy); - if (host->ops->card_busy && send_status) - return mmc_switch_status(card); - return 0; } @@ -577,24 +574,26 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, if (!use_busy_signal) goto out; - /* Switch to new timing before poll and check switch status. */ - if (timing) - mmc_set_timing(host, timing); - /*If SPI or used HW busy detection above, then we don't need to poll. */ if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) || - mmc_host_is_spi(host)) { - if (send_status) - err = mmc_switch_status(card); + mmc_host_is_spi(host)) goto out_tim; - } /* Let's try to poll to find out when the command is completed. */ err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err); + if (err) + goto out; out_tim: - if (err && timing) - mmc_set_timing(host, old_timing); + /* Switch to new timing before check switch status. */ + if (timing) + mmc_set_timing(host, timing); + + if (send_status) { + err = mmc_switch_status(card); + if (err && timing) + mmc_set_timing(host, old_timing); + } out: mmc_retune_release(host); diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index b352760c041e..09739352834c 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -578,13 +578,15 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id) { struct meson_host *host = dev_id; struct mmc_request *mrq; - struct mmc_command *cmd = host->cmd; + struct mmc_command *cmd; u32 irq_en, status, raw_status; irqreturn_t ret = IRQ_HANDLED; if (WARN_ON(!host)) return IRQ_NONE; + cmd = host->cmd; + mrq = host->mrq; if (WARN_ON(!mrq)) @@ -670,10 +672,10 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id) int ret = IRQ_HANDLED; if (WARN_ON(!mrq)) - ret = IRQ_NONE; + return IRQ_NONE; if (WARN_ON(!cmd)) - ret = IRQ_NONE; + return IRQ_NONE; data = cmd->data; if (data) { diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index 44ecebd1ea8c..c8b8ac66ff7e 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -309,6 +309,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host) cmd0 = BF_SSP(cmd->opcode, CMD0_CMD); cmd1 = cmd->arg; + if (cmd->opcode == MMC_STOP_TRANSMISSION) + cmd0 |= BM_SSP_CMD0_APPEND_8CYC; + if (host->sdio_irq_en) { ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; @@ -417,8 +420,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) ssp->base + HW_SSP_BLOCK_SIZE); } - if ((cmd->opcode == MMC_STOP_TRANSMISSION) || - (cmd->opcode == SD_IO_RW_EXTENDED)) + if (cmd->opcode == SD_IO_RW_EXTENDED) cmd0 |= BM_SSP_CMD0_APPEND_8CYC; cmd1 = cmd->arg; diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 160f695cc09c..278a5a435ab7 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -395,7 +395,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev) /* Power on the SDHCI controller and its children */ acpi_device_fix_up_power(device); list_for_each_entry(child, &device->children, node) - acpi_device_fix_up_power(child); 
+ if (child->status.present && child->status.enabled) + acpi_device_fix_up_power(child); if (acpi_bus_get_status(device) || !device->status.present) return -ENODEV; diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 353a9ddf6b97..9ce5dcb4abd0 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -426,6 +426,7 @@ config MTD_NAND_ORION config MTD_NAND_OXNAS tristate "NAND Flash support for Oxford Semiconductor SoC" + depends on HAS_IOMEM help This enables the NAND flash controller on Oxford Semiconductor SoCs. @@ -540,7 +541,7 @@ config MTD_NAND_FSMC Flexible Static Memory Controller (FSMC) config MTD_NAND_XWAY - tristate "Support for NAND on Lantiq XWAY SoC" + bool "Support for NAND on Lantiq XWAY SoC" depends on LANTIQ && SOC_TYPE_XWAY help Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c index 5553a5d9efd1..846a66c1b133 100644 --- a/drivers/mtd/nand/lpc32xx_mlc.c +++ b/drivers/mtd/nand/lpc32xx_mlc.c @@ -775,7 +775,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) init_completion(&host->comp_controller); host->irq = platform_get_irq(pdev, 0); - if ((host->irq < 0) || (host->irq >= NR_IRQS)) { + if (host->irq < 0) { dev_err(&pdev->dev, "failed to get platform irq\n"); res = -EINVAL; goto err_exit3; diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c index 28c7f474be77..4a5e948c62df 100644 --- a/drivers/mtd/nand/tango_nand.c +++ b/drivers/mtd/nand/tango_nand.c @@ -632,11 +632,13 @@ static int tango_nand_probe(struct platform_device *pdev) if (IS_ERR(nfc->pbus_base)) return PTR_ERR(nfc->pbus_base); + writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE); + clk = clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) return PTR_ERR(clk); - nfc->chan = dma_request_chan(&pdev->dev, "nfc_sbox"); + nfc->chan = dma_request_chan(&pdev->dev, "rxtx"); if (IS_ERR(nfc->chan)) return PTR_ERR(nfc->chan); diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c index 1f2948c0c458..895101a5e686 100644 --- a/drivers/mtd/nand/xway_nand.c +++ b/drivers/mtd/nand/xway_nand.c @@ -232,7 +232,6 @@ static const struct of_device_id xway_nand_match[] = { { .compatible = "lantiq,nand-xway" }, {}, }; -MODULE_DEVICE_TABLE(of, xway_nand_match); static struct platform_driver xway_nand_driver = { .probe = xway_nand_probe, @@ -243,6 +242,4 @@ static struct platform_driver xway_nand_driver = { }, }; -module_platform_driver(xway_nand_driver); - -MODULE_LICENSE("GPL"); +builtin_platform_driver(xway_nand_driver); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 7e8cf213fd81..744ed6ddaf37 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -710,11 +710,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs; unsigned int pkts_compl = 0, bytes_compl = 0; struct bcm_sysport_cb *cb; - struct netdev_queue *txq; u32 hw_ind; - txq = netdev_get_tx_queue(ndev, ring->index); - /* Compute how many descriptors have been processed since last call */ hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; @@ -745,9 +742,6 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, ring->c_index = c_index; - if (netif_tx_queue_stopped(txq) && pkts_compl) - 
netif_tx_wake_queue(txq); - netif_dbg(priv, tx_done, ndev, "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n", ring->index, ring->c_index, pkts_compl, bytes_compl); @@ -759,16 +753,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, struct bcm_sysport_tx_ring *ring) { + struct netdev_queue *txq; unsigned int released; unsigned long flags; + txq = netdev_get_tx_queue(priv->netdev, ring->index); + spin_lock_irqsave(&ring->lock, flags); released = __bcm_sysport_tx_reclaim(priv, ring); + if (released) + netif_tx_wake_queue(txq); + spin_unlock_irqrestore(&ring->lock, flags); return released; } +/* Locked version of the per-ring TX reclaim, but does not wake the queue */ +static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv, + struct bcm_sysport_tx_ring *ring) +{ + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + __bcm_sysport_tx_reclaim(priv, ring); + spin_unlock_irqrestore(&ring->lock, flags); +} + static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget) { struct bcm_sysport_tx_ring *ring = @@ -1252,7 +1263,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv, napi_disable(&ring->napi); netif_napi_del(&ring->napi); - bcm_sysport_tx_reclaim(priv, ring); + bcm_sysport_tx_clean(priv, ring); kfree(ring->cbs); ring->cbs = NULL; diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 9211c750e064..2f85b64f01fa 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -47,8 +47,9 @@ struct lmac { struct bgx { u8 bgx_id; struct lmac lmac[MAX_LMAC_PER_BGX]; - int lmac_count; + u8 lmac_count; u8 max_lmac; + u8 acpi_lmac_idx; void __iomem *reg_base; struct pci_dev *pdev; bool is_dlm; @@ -1143,13 +1144,13 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle, if (acpi_bus_get_device(handle, &adev)) goto out; - acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac); + acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac); - SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev); + SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev); - bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count; + bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx; + bgx->acpi_lmac_idx++; /* move to next LMAC */ out: - bgx->lmac_count++; return AE_OK; } diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 0e74529a4209..30e855004c57 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1118,7 +1118,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, err: mutex_unlock(&adapter->mcc_lock); - if (status == MCC_STATUS_UNAUTHORIZED_REQUEST) + if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST) status = -EPERM; return status; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index ec010ced6c99..1a7f8ad7b9c6 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -318,6 +318,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) if (ether_addr_equal(addr->sa_data, adapter->dev_mac)) return 0; + /* BE3 VFs without FILTMGMT privilege are not allowed to set its MAC + * address + */ + if (BEx_chip(adapter) && be_virtfn(adapter) && + !check_privilege(adapter, 
BE_PRIV_FILTMGMT)) + return -EPERM; + /* if device is not running, copy MAC to netdev->dev_addr */ if (!netif_running(netdev)) goto done; @@ -3609,7 +3616,11 @@ static void be_rx_qs_destroy(struct be_adapter *adapter) static void be_disable_if_filters(struct be_adapter *adapter) { - be_dev_mac_del(adapter, adapter->pmac_id[0]); + /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */ + if (!BEx_chip(adapter) || !be_virtfn(adapter) || + check_privilege(adapter, BE_PRIV_FILTMGMT)) + be_dev_mac_del(adapter, adapter->pmac_id[0]); + be_clear_uc_list(adapter); be_clear_mc_list(adapter); @@ -3762,8 +3773,9 @@ static int be_enable_if_filters(struct be_adapter *adapter) if (status) return status; - /* For BE3 VFs, the PF programs the initial MAC address */ - if (!(BEx_chip(adapter) && be_virtfn(adapter))) { + /* Don't add MAC on BE3 VFs without FILTMGMT privilege */ + if (!BEx_chip(adapter) || !be_virtfn(adapter) || + check_privilege(adapter, BE_PRIV_FILTMGMT)) { status = be_dev_mac_add(adapter, adapter->netdev->dev_addr); if (status) return status; diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index a849da92f857..6b8635378f1f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -101,13 +101,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn) { struct mlx4_cq *cq; + rcu_read_lock(); cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree, cqn & (dev->caps.num_cqs - 1)); + rcu_read_unlock(); + if (!cq) { mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn); return; } + /* Accessing the CQ outside of rcu_read_lock is safe, because + * the CQ is freed only after interrupt handling is completed. + */ ++cq->arm_sn; cq->comp(cq); @@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type) struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; struct mlx4_cq *cq; - spin_lock(&cq_table->lock); - + rcu_read_lock(); cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1)); - if (cq) - atomic_inc(&cq->refcount); - - spin_unlock(&cq_table->lock); + rcu_read_unlock(); if (!cq) { - mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn); + mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn); return; } + /* Accessing the CQ outside of rcu_read_lock is safe, because + * the CQ is freed only after interrupt handling is completed. 
+ */ cq->event(cq, event_type); - - if (atomic_dec_and_test(&cq->refcount)) - complete(&cq->free); } static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, @@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, if (err) return err; - spin_lock_irq(&cq_table->lock); + spin_lock(&cq_table->lock); err = radix_tree_insert(&cq_table->tree, cq->cqn, cq); - spin_unlock_irq(&cq_table->lock); + spin_unlock(&cq_table->lock); if (err) goto err_icm; @@ -349,9 +351,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, return 0; err_radix: - spin_lock_irq(&cq_table->lock); + spin_lock(&cq_table->lock); radix_tree_delete(&cq_table->tree, cq->cqn); - spin_unlock_irq(&cq_table->lock); + spin_unlock(&cq_table->lock); err_icm: mlx4_cq_free_icm(dev, cq->cqn); @@ -370,15 +372,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) if (err) mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn); + spin_lock(&cq_table->lock); + radix_tree_delete(&cq_table->tree, cq->cqn); + spin_unlock(&cq_table->lock); + synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq); if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq != priv->eq_table.eq[MLX4_EQ_ASYNC].irq) synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq); - spin_lock_irq(&cq_table->lock); - radix_tree_delete(&cq_table->tree, cq->cqn); - spin_unlock_irq(&cq_table->lock); - if (atomic_dec_and_test(&cq->refcount)) complete(&cq->free); wait_for_completion(&cq->free); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 4910d9af1933..761f8b12399c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1748,8 +1748,11 @@ int mlx4_en_start_port(struct net_device *dev) /* Process all completions if exist to prevent * the queues freezing if they are full */ - for (i = 0; i < priv->rx_ring_num; i++) + for (i = 0; i < priv->rx_ring_num; i++) { + local_bh_disable(); napi_schedule(&priv->rx_cq[i]->napi); + local_bh_enable(); + } netif_tx_start_all_queues(dev); netif_device_attach(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index cd3638e6fe25..0509996957d9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -554,8 +554,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) break; case MLX4_EVENT_TYPE_SRQ_LIMIT: - mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", - __func__); + mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. 
srq_no=0x%x, eq 0x%x\n", + __func__, be32_to_cpu(eqe->event.srq.srqn), + eq->eqn); case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: if (mlx4_is_master(dev)) { /* forward only to slave owning the SRQ */ @@ -570,15 +571,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) eq->eqn, eq->cons_index, ret); break; } - mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", - __func__, slave, - be32_to_cpu(eqe->event.srq.srqn), - eqe->type, eqe->subtype); + if (eqe->type == + MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) + mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", + __func__, slave, + be32_to_cpu(eqe->event.srq.srqn), + eqe->type, eqe->subtype); if (!ret && slave != dev->caps.function) { - mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", - __func__, eqe->type, - eqe->subtype, slave); + if (eqe->type == + MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) + mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", + __func__, eqe->type, + eqe->subtype, slave); mlx4_slave_event(dev, slave, eqe); break; } diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 56185a0b827d..1822382212ee 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -2980,6 +2980,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, put_res(dev, slave, srqn, RES_SRQ); qp->srq = srq; } + + /* Save param3 for dynamic changes from VST back to VGT */ + qp->param3 = qpc->param3; put_res(dev, slave, rcqn, RES_CQ); put_res(dev, slave, mtt_base, RES_MTT); res_end_move(dev, slave, RES_QP, qpn); @@ -3772,7 +3775,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, int qpn = vhcr->in_modifier & 0x7fffff; struct res_qp *qp; u8 orig_sched_queue; - __be32 orig_param3 = qpc->param3; u8 orig_vlan_control = qpc->pri_path.vlan_control; u8 orig_fvl_rx = qpc->pri_path.fvl_rx; u8 orig_pri_path_fl = qpc->pri_path.fl; @@ -3814,7 +3816,6 @@ out: */ if (!err) { qp->sched_queue = orig_sched_queue; - qp->param3 = orig_param3; qp->vlan_control = orig_vlan_control; qp->fvl_rx = orig_fvl_rx; qp->pri_path_fl = orig_pri_path_fl; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 118cea5e5489..46bef6a26a8c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -668,9 +668,12 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, int ttl; #if IS_ENABLED(CONFIG_INET) + int ret; + rt = ip_route_output_key(dev_net(mirred_dev), fl4); - if (IS_ERR(rt)) - return PTR_ERR(rt); + ret = PTR_ERR_OR_ZERO(rt); + if (ret) + return ret; #else return -EOPNOTSUPP; #endif @@ -741,8 +744,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, struct flowi4 fl4 = {}; char *encap_header; int encap_size; - __be32 saddr = 0; - int ttl = 0; + __be32 saddr; + int ttl; int err; encap_header = kzalloc(max_encap_size, GFP_KERNEL); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index d147ddd97997..0af3338bfcb4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -209,21 +209,21 @@ MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1); /* pci_eqe_cmd_token * Command completion event - token */ -MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16); +MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16); /* pci_eqe_cmd_status * Command completion event - 
status */ -MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8); +MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8); /* pci_eqe_cmd_out_param_h * Command completion event - output parameter - higher part */ -MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32); +MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32); /* pci_eqe_cmd_out_param_l * Command completion event - output parameter - lower part */ -MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32); +MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index d768c7b6c6d6..003093abb170 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -684,6 +684,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, dev_kfree_skb_any(skb_orig); return NETDEV_TX_OK; } + dev_consume_skb_any(skb_orig); } if (eth_skb_pad(skb)) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 150ccf5192a9..2e88115e8735 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -345,6 +345,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb, dev_kfree_skb_any(skb_orig); return NETDEV_TX_OK; } + dev_consume_skb_any(skb_orig); } mlxsw_sx_txhdr_construct(skb, &tx_info); /* TX header is consumed by HW on the way so we shouldn't count its diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c index 99a14df28b96..2851b4c56570 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c @@ -201,6 +201,13 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt) else adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr); + /* of_phy_find_device() claims a reference to the phydev, + * so we do that here manually as well. When the driver + * later unloads, it can unilaterally drop the reference + * without worrying about ACPI vs DT. 
+ */ + if (adpt->phydev) + get_device(&adpt->phydev->mdio.dev); } else { struct device_node *phy_np; diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 422289c232bc..f46d300bd585 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -719,8 +719,7 @@ static int emac_probe(struct platform_device *pdev) err_undo_napi: netif_napi_del(&adpt->rx_q.napi); err_undo_mdiobus: - if (!has_acpi_companion(&pdev->dev)) - put_device(&adpt->phydev->mdio.dev); + put_device(&adpt->phydev->mdio.dev); mdiobus_unregister(adpt->mii_bus); err_undo_clocks: emac_clks_teardown(adpt); @@ -740,8 +739,7 @@ static int emac_remove(struct platform_device *pdev) emac_clks_teardown(adpt); - if (!has_acpi_companion(&pdev->dev)) - put_device(&adpt->phydev->mdio.dev); + put_device(&adpt->phydev->mdio.dev); mdiobus_unregister(adpt->mii_bus); free_netdev(netdev); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 92d7692c840d..89ac1e3f6175 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -926,14 +926,10 @@ static int ravb_poll(struct napi_struct *napi, int budget) /* Receive error message handling */ priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; - if (priv->rx_over_errors != ndev->stats.rx_over_errors) { + if (priv->rx_over_errors != ndev->stats.rx_over_errors) ndev->stats.rx_over_errors = priv->rx_over_errors; - netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n"); - } - if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) { + if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; - netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n"); - } out: return budget - quota; } @@ -1508,6 +1504,19 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + entry / NUM_TX_DESC * DPTR_ALIGN; len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; + /* Zero length DMA descriptors are problematic as they seem to + * terminate DMA transfers. Avoid them by simply using a length of + * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN. + * + * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of + * data by the call to skb_put_padto() above this is safe with + * respect to both the length of the first DMA descriptor (len) + * overflowing the available data and the length of the second DMA + * descriptor (skb->len - len) being negative. 
+ */ + if (len == 0) + len = DPTR_ALIGN; + memcpy(buffer, skb->data, len); dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); if (dma_mapping_error(ndev->dev.parent, dma_addr)) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index a276a32d57f2..e3f6389e1b01 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3326,9 +3326,9 @@ int stmmac_dvr_probe(struct device *device, (priv->plat->maxmtu >= ndev->min_mtu)) ndev->max_mtu = priv->plat->maxmtu; else if (priv->plat->maxmtu < ndev->min_mtu) - netdev_warn(priv->dev, - "%s: warning: maxmtu having invalid value (%d)\n", - __func__, priv->plat->maxmtu); + dev_warn(priv->device, + "%s: warning: maxmtu having invalid value (%d)\n", + __func__, priv->plat->maxmtu); if (flow_ctrl) priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ @@ -3340,7 +3340,8 @@ int stmmac_dvr_probe(struct device *device, */ if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) { priv->use_riwt = 1; - netdev_info(priv->dev, "Enable RX Mitigation via HW Watchdog Timer\n"); + dev_info(priv->device, + "Enable RX Mitigation via HW Watchdog Timer\n"); } netif_napi_add(ndev, &priv->napi, stmmac_poll, 64); @@ -3366,17 +3367,17 @@ int stmmac_dvr_probe(struct device *device, /* MDIO bus Registration */ ret = stmmac_mdio_register(ndev); if (ret < 0) { - netdev_err(priv->dev, - "%s: MDIO bus (id: %d) registration failed", - __func__, priv->plat->bus_id); + dev_err(priv->device, + "%s: MDIO bus (id: %d) registration failed", + __func__, priv->plat->bus_id); goto error_mdio_register; } } ret = register_netdev(ndev); if (ret) { - netdev_err(priv->dev, "%s: ERROR %i registering the device\n", - __func__, ret); + dev_err(priv->device, "%s: ERROR %i registering the device\n", + __func__, ret); goto error_netdev_register; } diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index 77c88fcf2b86..9b8a30bf939b 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -1210,7 +1210,7 @@ int cpmac_init(void) goto fail_alloc; } -#warning FIXME: unhardcode gpio&reset bits + /* FIXME: unhardcode gpio&reset bits */ ar7_gpio_disable(26); ar7_gpio_disable(27); ar7_device_reset(AR7_RESET_BIT_CPMAC_LO); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index c9414c054852..fcab8019dda0 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -659,6 +659,7 @@ int netvsc_recv_callback(struct hv_device *device_obj, * policy filters on the host). Deliver these via the VF * interface in the guest. */ + rcu_read_lock(); vf_netdev = rcu_dereference(net_device_ctx->vf_netdev); if (vf_netdev && (vf_netdev->flags & IFF_UP)) net = vf_netdev; @@ -667,6 +668,7 @@ int netvsc_recv_callback(struct hv_device *device_obj, skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci); if (unlikely(!skb)) { ++net->stats.rx_dropped; + rcu_read_unlock(); return NVSP_STAT_FAIL; } @@ -696,6 +698,7 @@ int netvsc_recv_callback(struct hv_device *device_obj, * TODO - use NAPI? 
*/ netif_rx(skb); + rcu_read_unlock(); return 0; } diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 46d53a6c8cf8..76ba7ecfe142 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -1715,9 +1715,9 @@ static int at86rf230_probe(struct spi_device *spi) /* Reset */ if (gpio_is_valid(rstn)) { udelay(1); - gpio_set_value(rstn, 0); + gpio_set_value_cansleep(rstn, 0); udelay(1); - gpio_set_value(rstn, 1); + gpio_set_value_cansleep(rstn, 1); usleep_range(120, 240); } diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 1253f864737a..ef688518ad77 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -117,13 +117,26 @@ static int atusb_read_reg(struct atusb *atusb, uint8_t reg) { struct usb_device *usb_dev = atusb->usb_dev; int ret; + uint8_t *buffer; uint8_t value; + buffer = kmalloc(1, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg); ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), ATUSB_REG_READ, ATUSB_REQ_FROM_DEV, - 0, reg, &value, 1, 1000); - return ret >= 0 ? value : ret; + 0, reg, buffer, 1, 1000); + + if (ret >= 0) { + value = buffer[0]; + kfree(buffer); + return value; + } else { + kfree(buffer); + return ret; + } } static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask, @@ -549,13 +562,6 @@ static int atusb_set_frame_retries(struct ieee802154_hw *hw, s8 retries) { struct atusb *atusb = hw->priv; - struct device *dev = &atusb->usb_dev->dev; - - if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 3) { - dev_info(dev, "Automatic frame retransmission is only available from " - "firmware version 0.3. Please update if you want this feature."); - return -EINVAL; - } return atusb_write_subreg(atusb, SR_MAX_FRAME_RETRIES, retries); } @@ -608,9 +614,13 @@ static const struct ieee802154_ops atusb_ops = { static int atusb_get_and_show_revision(struct atusb *atusb) { struct usb_device *usb_dev = atusb->usb_dev; - unsigned char buffer[3]; + unsigned char *buffer; int ret; + buffer = kmalloc(3, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + /* Get a couple of the ATMega Firmware values */ ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0, @@ -631,15 +641,20 @@ static int atusb_get_and_show_revision(struct atusb *atusb) dev_info(&usb_dev->dev, "Please update to version 0.2 or newer"); } + kfree(buffer); return ret; } static int atusb_get_and_show_build(struct atusb *atusb) { struct usb_device *usb_dev = atusb->usb_dev; - char build[ATUSB_BUILD_SIZE + 1]; + char *build; int ret; + build = kmalloc(ATUSB_BUILD_SIZE + 1, GFP_KERNEL); + if (!build) + return -ENOMEM; + ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0, build, ATUSB_BUILD_SIZE, 1000); @@ -648,6 +663,7 @@ static int atusb_get_and_show_build(struct atusb *atusb) dev_info(&usb_dev->dev, "Firmware: build %s\n", build); } + kfree(build); return ret; } @@ -698,7 +714,7 @@ fail: static int atusb_set_extended_addr(struct atusb *atusb) { struct usb_device *usb_dev = atusb->usb_dev; - unsigned char buffer[IEEE802154_EXTENDED_ADDR_LEN]; + unsigned char *buffer; __le64 extended_addr; u64 addr; int ret; @@ -710,12 +726,20 @@ static int atusb_set_extended_addr(struct atusb *atusb) return 0; } + buffer = kmalloc(IEEE802154_EXTENDED_ADDR_LEN, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + /* Firmware is new enough so we fetch the address from EEPROM 
*/ ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0, buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000); - if (ret < 0) - dev_err(&usb_dev->dev, "failed to fetch extended address\n"); + if (ret < 0) { + dev_err(&usb_dev->dev, "failed to fetch extended address, random address set\n"); + ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr); + kfree(buffer); + return ret; + } memcpy(&extended_addr, buffer, IEEE802154_EXTENDED_ADDR_LEN); /* Check if read address is not empty and the unicast bit is set correctly */ @@ -729,6 +753,7 @@ static int atusb_set_extended_addr(struct atusb *atusb) &addr); } + kfree(buffer); return ret; } @@ -770,8 +795,7 @@ static int atusb_probe(struct usb_interface *interface, hw->parent = &usb_dev->dev; hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | - IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS | - IEEE802154_HW_FRAME_RETRIES; + IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS; hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL | WPAN_PHY_FLAG_CCA_MODE; @@ -800,6 +824,9 @@ static int atusb_probe(struct usb_interface *interface, atusb_get_and_show_build(atusb); atusb_set_extended_addr(atusb); + if (atusb->fw_ver_maj >= 0 && atusb->fw_ver_min >= 3) + hw->flags |= IEEE802154_HW_FRAME_RETRIES; + ret = atusb_get_and_clear_error(atusb); if (ret) { dev_err(&atusb->usb_dev->dev, diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index e84ae084e259..ca1b462bf7b2 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -132,12 +132,16 @@ static int dp83867_of_init(struct phy_device *phydev) ret = of_property_read_u32(of_node, "ti,rx-internal-delay", &dp83867->rx_id_delay); - if (ret) + if (ret && + (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)) return ret; ret = of_property_read_u32(of_node, "ti,tx-internal-delay", &dp83867->tx_id_delay); - if (ret) + if (ret && + (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) return ret; return of_property_read_u32(of_node, "ti,fifo-depth", diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index be418563cb18..f3b48ad90865 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc) u8 checksum = CHECKSUM_NONE; u32 opts2, opts3; - if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02) + if (!(tp->netdev->features & NETIF_F_RXCSUM)) goto return_result; opts2 = le32_to_cpu(rx_desc->opts2); @@ -4356,6 +4356,11 @@ static int rtl8152_probe(struct usb_interface *intf, NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM | NETIF_F_TSO6; + if (tp->version == RTL_VER_01) { + netdev->features &= ~NETIF_F_RXCSUM; + netdev->hw_features &= ~NETIF_F_RXCSUM; + } + netdev->ethtool_ops = &ops; netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index bb70dd5723b5..ca7196c40060 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1798,7 +1798,7 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev, struct vxlan_sock *sock4, struct sk_buff *skb, int oif, u8 tos, - __be32 daddr, __be32 *saddr, + __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport, struct dst_cache *dst_cache, const struct ip_tunnel_info *info) { 
@@ -1824,6 +1824,8 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device fl4.flowi4_proto = IPPROTO_UDP; fl4.daddr = daddr; fl4.saddr = *saddr; + fl4.fl4_dport = dport; + fl4.fl4_sport = sport; rt = ip_route_output_key(vxlan->net, &fl4); if (likely(!IS_ERR(rt))) { @@ -1851,6 +1853,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, __be32 label, const struct in6_addr *daddr, struct in6_addr *saddr, + __be16 dport, __be16 sport, struct dst_cache *dst_cache, const struct ip_tunnel_info *info) { @@ -1877,6 +1880,8 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); fl6.flowi6_mark = skb->mark; fl6.flowi6_proto = IPPROTO_UDP; + fl6.fl6_dport = dport; + fl6.fl6_sport = sport; err = ipv6_stub->ipv6_dst_lookup(vxlan->net, sock6->sock->sk, @@ -2068,6 +2073,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, rdst ? rdst->remote_ifindex : 0, tos, dst->sin.sin_addr.s_addr, &src->sin.sin_addr.s_addr, + dst_port, src_port, dst_cache, info); if (IS_ERR(rt)) { err = PTR_ERR(rt); @@ -2104,6 +2110,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, rdst ? rdst->remote_ifindex : 0, tos, label, &dst->sin6.sin6_addr, &src->sin6.sin6_addr, + dst_port, src_port, dst_cache, info); if (IS_ERR(ndst)) { err = PTR_ERR(ndst); @@ -2430,7 +2437,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos, info->key.u.ipv4.dst, - &info->key.u.ipv4.src, NULL, info); + &info->key.u.ipv4.src, dport, sport, NULL, info); if (IS_ERR(rt)) return PTR_ERR(rt); ip_rt_put(rt); @@ -2441,7 +2448,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos, info->key.label, &info->key.u.ipv6.dst, - &info->key.u.ipv6.src, NULL, info); + &info->key.u.ipv6.src, dport, sport, NULL, info); if (IS_ERR(ndst)) return PTR_ERR(ndst); dst_release(ndst); diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 6307088b375f..a518cb1b59d4 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -957,6 +957,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) { resource_size_t allocated = 0, available = 0; struct nd_region *nd_region = to_nd_region(dev->parent); + struct nd_namespace_common *ndns = to_ndns(dev); struct nd_mapping *nd_mapping; struct nvdimm_drvdata *ndd; struct nd_label_id label_id; @@ -964,7 +965,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) u8 *uuid = NULL; int rc, i; - if (dev->driver || to_ndns(dev)->claim) + if (dev->driver || ndns->claim) return -EBUSY; if (is_namespace_pmem(dev)) { @@ -1034,20 +1035,16 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) nd_namespace_pmem_set_resource(nd_region, nspm, val * nd_region->ndr_mappings); - } else if (is_namespace_blk(dev)) { - struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); - - /* - * Try to delete the namespace if we deleted all of its - * allocation, this is not the seed device for the - * region, and it is not actively claimed by a btt - * instance. 
- */ - if (val == 0 && nd_region->ns_seed != dev - && !nsblk->common.claim) - nd_device_unregister(dev, ND_ASYNC); } + /* + * Try to delete the namespace if we deleted all of its + * allocation, this is not the seed device for the region, and + * it is not actively claimed by a btt instance. + */ + if (val == 0 && nd_region->ns_seed != dev && !ndns->claim) + nd_device_unregister(dev, ND_ASYNC); + return rc; } diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 7282d7495bf1..5b536be5a12e 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -90,7 +90,9 @@ static int read_pmem(struct page *page, unsigned int off, rc = memcpy_from_pmem(mem + off, pmem_addr, len); kunmap_atomic(mem); - return rc; + if (rc) + return -EIO; + return 0; } static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c index 1f38d0836751..f1b633bce525 100644 --- a/drivers/pci/host/pci-xgene-msi.c +++ b/drivers/pci/host/pci-xgene-msi.c @@ -517,7 +517,7 @@ static int xgene_msi_probe(struct platform_device *pdev) rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online", xgene_msi_hwirq_alloc, NULL); - if (rc) + if (rc < 0) goto err_cpuhp; pci_xgene_online = rc; rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL, diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index bed19994c1e9..af8f6e92e885 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -807,11 +807,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp) { u32 val; - /* get iATU unroll support */ - pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp); - dev_dbg(pp->dev, "iATU unroll: %s\n", - pp->iatu_unroll_enabled ? "enabled" : "disabled"); - /* set the number of lanes */ val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL); val &= ~PORT_LINK_MODE_MASK; @@ -882,6 +877,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp) * we should not program the ATU here. */ if (!pp->ops->rd_other_conf) { + /* get iATU unroll support */ + pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp); + dev_dbg(pp->dev, "iATU unroll: %s\n", + pp->iatu_unroll_enabled ? "enabled" : "disabled"); + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, PCIE_ATU_TYPE_MEM, pp->mem_base, pp->mem_bus_addr, pp->mem_size); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index e164b5c9f0f0..204960e70333 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1169,6 +1169,7 @@ void set_pcie_port_type(struct pci_dev *pdev) pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); if (!pos) return; + pdev->pcie_cap = pos; pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); pdev->pcie_flags_reg = reg16; @@ -1176,13 +1177,14 @@ void set_pcie_port_type(struct pci_dev *pdev) pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; /* - * A Root Port is always the upstream end of a Link. No PCIe - * component has two Links. Two Links are connected by a Switch - * that has a Port on each Link and internal logic to connect the - * two Ports. + * A Root Port or a PCI-to-PCIe bridge is always the upstream end + * of a Link. No PCIe component has two Links. Two Links are + * connected by a Switch that has a Port on each Link and internal + * logic to connect the two Ports. 
*/ type = pci_pcie_type(pdev); - if (type == PCI_EXP_TYPE_ROOT_PORT) + if (type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_PCIE_BRIDGE) pdev->has_secondary_link = 1; else if (type == PCI_EXP_TYPE_UPSTREAM || type == PCI_EXP_TYPE_DOWNSTREAM) { diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 639ed4e6afd1..070c4da95f48 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -145,6 +145,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS]; #define CCW_CMD_WRITE_CONF 0x21 #define CCW_CMD_WRITE_STATUS 0x31 #define CCW_CMD_READ_VQ_CONF 0x32 +#define CCW_CMD_READ_STATUS 0x72 #define CCW_CMD_SET_IND_ADAPTER 0x73 #define CCW_CMD_SET_VIRTIO_REV 0x83 @@ -160,6 +161,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS]; #define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000 #define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000 #define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000 +#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000 #define VIRTIO_CCW_INTPARM_MASK 0xffff0000 static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev) @@ -452,7 +454,7 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw) * This may happen on device detach. */ if (ret && (ret != -ENODEV)) - dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d", + dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n", ret, index); vring_del_virtqueue(vq); @@ -892,6 +894,28 @@ out_free: static u8 virtio_ccw_get_status(struct virtio_device *vdev) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); + u8 old_status = *vcdev->status; + struct ccw1 *ccw; + + if (vcdev->revision < 1) + return *vcdev->status; + + ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + if (!ccw) + return old_status; + + ccw->cmd_code = CCW_CMD_READ_STATUS; + ccw->flags = 0; + ccw->count = sizeof(*vcdev->status); + ccw->cda = (__u32)(unsigned long)vcdev->status; + ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS); +/* + * If the channel program failed (should only happen if the device + * was hotunplugged, and then we clean up via the machine check + * handler anyway), vcdev->status was not overwritten and we just + * return the old status, which is fine. 
+*/ + kfree(ccw); return *vcdev->status; } @@ -920,7 +944,7 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status) kfree(ccw); } -static struct virtio_config_ops virtio_ccw_config_ops = { +static const struct virtio_config_ops virtio_ccw_config_ops = { .get_features = virtio_ccw_get_features, .finalize_features = virtio_ccw_finalize_features, .get = virtio_ccw_get_config, @@ -987,6 +1011,7 @@ static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev, case VIRTIO_CCW_DOING_READ_CONFIG: case VIRTIO_CCW_DOING_WRITE_CONFIG: case VIRTIO_CCW_DOING_WRITE_STATUS: + case VIRTIO_CCW_DOING_READ_STATUS: case VIRTIO_CCW_DOING_SET_VQ: case VIRTIO_CCW_DOING_SET_IND: case VIRTIO_CCW_DOING_SET_CONF_IND: diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index a9a00169ad91..b2e8c0dfc79c 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -3363,7 +3363,7 @@ bfad_im_bsg_els_ct_request(struct bsg_job *job) struct bfad_fcxp *drv_fcxp; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_rport_s *fcs_rport; - struct fc_bsg_request *bsg_request = bsg_request; + struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; uint32_t command_type = bsg_request->msgcode; unsigned long flags; diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index 8fb5c54c7dd3..99b747cedbeb 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -46,6 +46,7 @@ #define INITIAL_SRP_LIMIT 800 #define DEFAULT_MAX_SECTORS 256 +#define MAX_TXU 1024 * 1024 static uint max_vdma_size = MAX_H_COPY_RDMA; @@ -1391,7 +1392,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi, } info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token, - GFP_KERNEL); + GFP_ATOMIC); if (!info) { dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", iue->target); @@ -1443,7 +1444,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi, info->mad_version = cpu_to_be32(MAD_VERSION_1); info->os_type = cpu_to_be32(LINUX); memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu)); - info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE); + info->port_max_txu[0] = cpu_to_be32(MAX_TXU); dma_wmb(); rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn, @@ -1509,7 +1510,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue) } cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token, - GFP_KERNEL); + GFP_ATOMIC); if (!cap) { dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", iue->target); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 236e4e51d161..7b6bd8ed0d0b 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -3590,12 +3590,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) } else { buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; lpfc_els_free_data(phba, buf_ptr1); + elsiocb->context2 = NULL; } } if (elsiocb->context3) { buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; lpfc_els_free_bpl(phba, buf_ptr); + elsiocb->context3 = NULL; } lpfc_sli_release_iocbq(phba, elsiocb); return 0; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 4faa7672fc1d..a78a3df68f67 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -5954,18 +5954,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) free_vfi_bmask: kfree(phba->sli4_hba.vfi_bmask); + 
phba->sli4_hba.vfi_bmask = NULL; free_xri_ids: kfree(phba->sli4_hba.xri_ids); + phba->sli4_hba.xri_ids = NULL; free_xri_bmask: kfree(phba->sli4_hba.xri_bmask); + phba->sli4_hba.xri_bmask = NULL; free_vpi_ids: kfree(phba->vpi_ids); + phba->vpi_ids = NULL; free_vpi_bmask: kfree(phba->vpi_bmask); + phba->vpi_bmask = NULL; free_rpi_ids: kfree(phba->sli4_hba.rpi_ids); + phba->sli4_hba.rpi_ids = NULL; free_rpi_bmask: kfree(phba->sli4_hba.rpi_bmask); + phba->sli4_hba.rpi_bmask = NULL; err_exit: return rc; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 394fe1338d09..dcb33f4fa687 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -393,6 +393,7 @@ struct MPT3SAS_TARGET { * @eedp_enable: eedp support enable bit * @eedp_type: 0(type_1), 1(type_2), 2(type_3) * @eedp_block_length: block size + * @ata_command_pending: SATL passthrough outstanding for device */ struct MPT3SAS_DEVICE { struct MPT3SAS_TARGET *sas_target; @@ -404,6 +405,17 @@ struct MPT3SAS_DEVICE { u8 ignore_delay_remove; /* Iopriority Command Handling */ u8 ncq_prio_enable; + /* + * Bug workaround for SATL handling: the mpt2/3sas firmware + * doesn't return BUSY or TASK_SET_FULL for subsequent + * commands while a SATL pass through is in operation as the + * spec requires, it simply does nothing with them until the + * pass through completes, causing them possibly to timeout if + * the passthrough is a long executing command (like format or + * secure erase). This variable allows us to do the right + * thing while a SATL command is pending. + */ + unsigned long ata_command_pending; }; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index b5c966e319d3..75f3fce1c867 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -3899,9 +3899,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, } } -static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd) +static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending) { - return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16); + struct MPT3SAS_DEVICE *priv = scmd->device->hostdata; + + if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16) + return 0; + + if (pending) + return test_and_set_bit(0, &priv->ata_command_pending); + + clear_bit(0, &priv->ata_command_pending); + return 0; } /** @@ -3925,9 +3934,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) if (!scmd) continue; count++; - if (ata_12_16_cmd(scmd)) - scsi_internal_device_unblock(scmd->device, - SDEV_RUNNING); + _scsih_set_satl_pending(scmd, false); mpt3sas_base_free_smid(ioc, smid); scsi_dma_unmap(scmd); if (ioc->pci_error_recovery) @@ -4063,13 +4070,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) if (ioc->logging_level & MPT_DEBUG_SCSI) scsi_print_command(scmd); - /* - * Lock the device for any subsequent command until command is - * done. - */ - if (ata_12_16_cmd(scmd)) - scsi_internal_device_block(scmd->device); - sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { scmd->result = DID_NO_CONNECT << 16; @@ -4083,6 +4083,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) return 0; } + /* + * Bug work around for firmware SATL handling. 
The loop + * is based on atomic operations and ensures consistency + * since we're lockless at this point + */ + do { + if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { + scmd->result = SAM_STAT_BUSY; + scmd->scsi_done(scmd); + return 0; + } + } while (_scsih_set_satl_pending(scmd, true)); + sas_target_priv_data = sas_device_priv_data->sas_target; /* invalid device handle */ @@ -4650,8 +4663,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) if (scmd == NULL) return 1; - if (ata_12_16_cmd(scmd)) - scsi_internal_device_unblock(scmd->device, SDEV_RUNNING); + _scsih_set_satl_pending(scmd, false); mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 47eb4d545d13..f201f4099620 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -243,12 +243,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj, struct qla_hw_data *ha = vha->hw; ssize_t rval = 0; + mutex_lock(&ha->optrom_mutex); + if (ha->optrom_state != QLA_SREADING) - return 0; + goto out; - mutex_lock(&ha->optrom_mutex); rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer, ha->optrom_region_size); + +out: mutex_unlock(&ha->optrom_mutex); return rval; @@ -263,14 +266,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; - if (ha->optrom_state != QLA_SWRITING) + mutex_lock(&ha->optrom_mutex); + + if (ha->optrom_state != QLA_SWRITING) { + mutex_unlock(&ha->optrom_mutex); return -EINVAL; - if (off > ha->optrom_region_size) + } + if (off > ha->optrom_region_size) { + mutex_unlock(&ha->optrom_mutex); return -ERANGE; + } if (off + count > ha->optrom_region_size) count = ha->optrom_region_size - off; - mutex_lock(&ha->optrom_mutex); memcpy(&ha->optrom_buffer[off], buf, count); mutex_unlock(&ha->optrom_mutex); @@ -753,7 +761,6 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj, struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); int type; - int rval = 0; port_id_t did; type = simple_strtol(buf, NULL, 10); @@ -767,7 +774,7 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj, ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type); - rval = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did); + qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did); return count; } diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index f7df01b76714..5b1287a63c49 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -1556,7 +1556,8 @@ typedef struct { struct atio { uint8_t entry_type; /* Entry type. */ uint8_t entry_count; /* Entry count. 
*/ - uint8_t data[58]; + __le16 attr_n_length; + uint8_t data[56]; uint32_t signature; #define ATIO_PROCESSED 0xDEADDEAD /* Signature */ }; @@ -2732,7 +2733,7 @@ struct isp_operations { #define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7) #define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1) -#define QLA_MSIX_DEFAULT 0x00 +#define QLA_BASE_VECTORS 2 /* default + RSP */ #define QLA_MSIX_RSP_Q 0x01 #define QLA_ATIO_VECTOR 0x02 #define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03 @@ -2754,7 +2755,6 @@ struct qla_msix_entry { uint16_t entry; char name[30]; void *handle; - struct irq_affinity_notify irq_notify; int cpuid; }; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 632d5f30386a..7b6317c8c2e9 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1191,7 +1191,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) /* Wait for soft-reset to complete. */ RD_REG_DWORD(&reg->ctrl_status); - for (cnt = 0; cnt < 6000000; cnt++) { + for (cnt = 0; cnt < 60; cnt++) { barrier(); if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET) == 0) @@ -1234,7 +1234,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) RD_REG_DWORD(&reg->hccr); RD_REG_WORD(&reg->mailbox0); - for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 && + for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 && rval == QLA_SUCCESS; cnt--) { barrier(); if (cnt) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 5093ca9b02ec..dc88a09f9043 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -19,10 +19,6 @@ static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, sts_entry_t *); -static void qla_irq_affinity_notify(struct irq_affinity_notify *, - const cpumask_t *); -static void qla_irq_affinity_release(struct kref *); - /** * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. @@ -2496,6 +2492,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) if (pkt->entry_status & RF_BUSY) res = DID_BUS_BUSY << 16; + if (pkt->entry_type == NOTIFY_ACK_TYPE && + pkt->handle == QLA_TGT_SKIP_HANDLE) + return; + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (sp) { sp->done(ha, sp, res); @@ -2572,14 +2572,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, if (!vha->flags.online) return; - if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) { - /* if kernel does not notify qla of IRQ's CPU change, - * then set it here. 
- */ - rsp->msix->cpuid = smp_processor_id(); - ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid; - } - while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { pkt = (struct sts_entry_24xx *)rsp->ring_ptr; @@ -3018,13 +3010,20 @@ static struct qla_init_msix_entry qla82xx_msix_entries[] = { static int qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) { -#define MIN_MSIX_COUNT 2 int i, ret; struct qla_msix_entry *qentry; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + struct irq_affinity desc = { + .pre_vectors = QLA_BASE_VECTORS, + }; + + if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) + desc.pre_vectors++; + + ret = pci_alloc_irq_vectors_affinity(ha->pdev, QLA_BASE_VECTORS, + ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, + &desc); - ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count, - PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); if (ret < 0) { ql_log(ql_log_fatal, vha, 0x00c7, "MSI-X: Failed to enable support, " @@ -3069,13 +3068,10 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) qentry->have_irq = 0; qentry->in_use = 0; qentry->handle = NULL; - qentry->irq_notify.notify = qla_irq_affinity_notify; - qentry->irq_notify.release = qla_irq_affinity_release; - qentry->cpuid = -1; } /* Enable MSI-X vectors for the base queue */ - for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) { + for (i = 0; i < QLA_BASE_VECTORS; i++) { qentry = &ha->msix_entries[i]; qentry->handle = rsp; rsp->msix = qentry; @@ -3093,18 +3089,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) goto msix_register_fail; qentry->have_irq = 1; qentry->in_use = 1; - - /* Register for CPU affinity notification. */ - irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify); - - /* Schedule work (ie. trigger a notification) to read cpu - * mask for this specific irq. - * kref_get is required because - * irq_affinity_notify() will do - * kref_put(). 
- */ - kref_get(&qentry->irq_notify.kref); - schedule_work(&qentry->irq_notify.work); } /* @@ -3301,49 +3285,3 @@ int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair, msix->handle = qpair; return ret; } - - -/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */ -static void qla_irq_affinity_notify(struct irq_affinity_notify *notify, - const cpumask_t *mask) -{ - struct qla_msix_entry *e = - container_of(notify, struct qla_msix_entry, irq_notify); - struct qla_hw_data *ha; - struct scsi_qla_host *base_vha; - struct rsp_que *rsp = e->handle; - - /* user is recommended to set mask to just 1 cpu */ - e->cpuid = cpumask_first(mask); - - ha = rsp->hw; - base_vha = pci_get_drvdata(ha->pdev); - - ql_dbg(ql_dbg_init, base_vha, 0xffff, - "%s: host %ld : vector %d cpu %d \n", __func__, - base_vha->host_no, e->vector, e->cpuid); - - if (e->have_irq) { - if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && - (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) { - ha->tgt.rspq_vector_cpuid = e->cpuid; - ql_dbg(ql_dbg_init, base_vha, 0xffff, - "%s: host%ld: rspq vector %d cpu %d runtime change\n", - __func__, base_vha->host_no, e->vector, e->cpuid); - } - } -} - -static void qla_irq_affinity_release(struct kref *ref) -{ - struct irq_affinity_notify *notify = - container_of(ref, struct irq_affinity_notify, kref); - struct qla_msix_entry *e = - container_of(notify, struct qla_msix_entry, irq_notify); - struct rsp_que *rsp = e->handle; - struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev); - - ql_dbg(ql_dbg_init, base_vha, 0xffff, - "%s: host%ld: vector %d cpu %d\n", __func__, - base_vha->host_no, e->vector, e->cpuid); -} diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 2819ceb96041..67f64db390b0 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -10,7 +10,7 @@ #include <linux/delay.h> #include <linux/gfp.h> -struct rom_cmd { +static struct rom_cmd { uint16_t cmd; } rom_cmds[] = { { MBC_LOAD_RAM }, @@ -101,12 +101,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) return QLA_FUNCTION_TIMEOUT; } - /* if PCI error, then avoid mbx processing.*/ - if (test_bit(PCI_ERR, &base_vha->dpc_flags)) { + /* if PCI error, then avoid mbx processing.*/ + if (test_bit(PCI_ERR, &base_vha->dpc_flags)) { ql_log(ql_log_warn, vha, 0x1191, "PCI error, exiting.\n"); return QLA_FUNCTION_TIMEOUT; - } + } reg = ha->iobase; io_lock_on = base_vha->flags.init_done; @@ -323,20 +323,33 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) } } else { - uint16_t mb0; - uint32_t ictrl; + uint16_t mb[8]; + uint32_t ictrl, host_status, hccr; uint16_t w; if (IS_FWI2_CAPABLE(ha)) { - mb0 = RD_REG_WORD(&reg->isp24.mailbox0); + mb[0] = RD_REG_WORD(&reg->isp24.mailbox0); + mb[1] = RD_REG_WORD(&reg->isp24.mailbox1); + mb[2] = RD_REG_WORD(&reg->isp24.mailbox2); + mb[3] = RD_REG_WORD(&reg->isp24.mailbox3); + mb[7] = RD_REG_WORD(&reg->isp24.mailbox7); ictrl = RD_REG_DWORD(&reg->isp24.ictrl); + host_status = RD_REG_DWORD(&reg->isp24.host_status); + hccr = RD_REG_DWORD(&reg->isp24.hccr); + + ql_log(ql_log_warn, vha, 0x1119, + "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " + "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n", + command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3], + mb[7], host_status, hccr); + } else { - mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0); + mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0); ictrl = RD_REG_WORD(&reg->isp.ictrl); + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, + "MBX Command timeout for cmd %x, 
iocontrol=%x jiffies=%lx " + "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]); } - ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, - "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " - "mb[0]=0x%x\n", command, ictrl, jiffies, mb0); ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); /* Capture FW dump only, if PCI device active */ @@ -684,7 +697,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - int configured_count; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a, "Entered %s.\n", __func__); @@ -707,7 +719,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr) /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval); } else { - configured_count = mcp->mb[11]; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, "Done %s.\n", __func__); } diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 54380b434b30..0a1723cc08cf 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -42,6 +42,11 @@ static int qla82xx_crb_table_initialized; (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \ QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20) +const int MD_MIU_TEST_AGT_RDDATA[] = { + 0x410000A8, 0x410000AC, + 0x410000B8, 0x410000BC +}; + static void qla82xx_crb_addr_transform_setup(void) { qla82xx_crb_addr_transform(XDMA); diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index 6201dce3553b..77624eac95a4 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h @@ -1176,8 +1176,7 @@ struct qla82xx_md_entry_queue { #define MD_MIU_TEST_AGT_ADDR_LO 0x41000094 #define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 -static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC, - 0x410000B8, 0x410000BC }; +extern const int MD_MIU_TEST_AGT_RDDATA[4]; #define CRB_NIU_XG_PAUSE_CTL_P0 0x1 #define CRB_NIU_XG_PAUSE_CTL_P1 0x8 diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c index 007192d7bad8..dc1ec9b61027 100644 --- a/drivers/scsi/qla2xxx/qla_nx2.c +++ b/drivers/scsi/qla2xxx/qla_nx2.c @@ -15,6 +15,23 @@ #define TIMEOUT_100_MS 100 +static const uint32_t qla8044_reg_tbl[] = { + QLA8044_PEG_HALT_STATUS1, + QLA8044_PEG_HALT_STATUS2, + QLA8044_PEG_ALIVE_COUNTER, + QLA8044_CRB_DRV_ACTIVE, + QLA8044_CRB_DEV_STATE, + QLA8044_CRB_DRV_STATE, + QLA8044_CRB_DRV_SCRATCH, + QLA8044_CRB_DEV_PART_INFO1, + QLA8044_CRB_IDC_VER_MAJOR, + QLA8044_FW_VER_MAJOR, + QLA8044_FW_VER_MINOR, + QLA8044_FW_VER_SUB, + QLA8044_CMDPEG_STATE, + QLA8044_ASIC_TEMP, +}; + /* 8044 Flash Read/Write functions */ uint32_t qla8044_rd_reg(struct qla_hw_data *ha, ulong addr) diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h index 02fe3c4cdf55..83c1b7e17c80 100644 --- a/drivers/scsi/qla2xxx/qla_nx2.h +++ b/drivers/scsi/qla2xxx/qla_nx2.h @@ -535,23 +535,6 @@ enum qla_regs { #define CRB_CMDPEG_CHECK_RETRY_COUNT 60 #define CRB_CMDPEG_CHECK_DELAY 500 -static const uint32_t qla8044_reg_tbl[] = { - QLA8044_PEG_HALT_STATUS1, - QLA8044_PEG_HALT_STATUS2, - QLA8044_PEG_ALIVE_COUNTER, - QLA8044_CRB_DRV_ACTIVE, - QLA8044_CRB_DEV_STATE, - QLA8044_CRB_DRV_STATE, - QLA8044_CRB_DRV_SCRATCH, - QLA8044_CRB_DEV_PART_INFO1, - QLA8044_CRB_IDC_VER_MAJOR, - QLA8044_FW_VER_MAJOR, - QLA8044_FW_VER_MINOR, - QLA8044_FW_VER_SUB, - QLA8044_CMDPEG_STATE, - QLA8044_ASIC_TEMP, -}; - /* MiniDump Structures */ /* Driver_code is for driver to write some info about the entry diff --git a/drivers/scsi/qla2xxx/qla_os.c 
b/drivers/scsi/qla2xxx/qla_os.c index 8521cfe302e9..0a000ecf0881 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -466,7 +466,7 @@ static void qla2x00_free_queues(struct qla_hw_data *ha) continue; rsp = ha->rsp_q_map[cnt]; - clear_bit(cnt, ha->req_qid_map); + clear_bit(cnt, ha->rsp_qid_map); ha->rsp_q_map[cnt] = NULL; spin_unlock_irqrestore(&ha->hardware_lock, flags); qla2x00_free_rsp_que(ha, rsp); @@ -3662,7 +3662,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, sizeof(struct ct6_dsd), 0, SLAB_HWCACHE_ALIGN, NULL); if (!ctx_cachep) - goto fail_free_gid_list; + goto fail_free_srb_mempool; } ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ, ctx_cachep); @@ -3815,7 +3815,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long), GFP_KERNEL); if (!ha->loop_id_map) - goto fail_async_pd; + goto fail_loop_id_map; else { qla2x00_set_reserved_loop_ids(ha); ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, @@ -3824,6 +3824,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, return 0; +fail_loop_id_map: + dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); fail_async_pd: dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); fail_ex_init_cb: @@ -3851,6 +3853,10 @@ fail_free_ms_iocb: dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); ha->ms_iocb = NULL; ha->ms_iocb_dma = 0; + + if (ha->sns_cmd) + dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), + ha->sns_cmd, ha->sns_cmd_dma); fail_dma_pool: if (IS_QLA82XX(ha) || ql2xenabledif) { dma_pool_destroy(ha->fcp_cmnd_dma_pool); @@ -3868,10 +3874,12 @@ fail_free_nvram: kfree(ha->nvram); ha->nvram = NULL; fail_free_ctx_mempool: - mempool_destroy(ha->ctx_mempool); + if (ha->ctx_mempool) + mempool_destroy(ha->ctx_mempool); ha->ctx_mempool = NULL; fail_free_srb_mempool: - mempool_destroy(ha->srb_mempool); + if (ha->srb_mempool) + mempool_destroy(ha->srb_mempool); ha->srb_mempool = NULL; fail_free_gid_list: dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index bff9689f5ca9..e4fda84b959e 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -668,11 +668,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) { struct qla_hw_data *ha = vha->hw; struct qla_tgt_sess *sess = NULL; - uint32_t unpacked_lun, lun = 0; uint16_t loop_id; int res = 0; struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; - struct atio_from_isp *a = (struct atio_from_isp *)iocb; unsigned long flags; loop_id = le16_to_cpu(n->u.isp24.nport_handle); @@ -725,11 +723,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) "loop_id %d)\n", vha->host_no, sess, sess->port_name, mcmd, loop_id); - lun = a->u.isp24.fcp_cmnd.lun; - unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); - - return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd, - iocb, QLA24XX_MGMT_SEND_NACK); + return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK); } /* ha->tgt.sess_lock supposed to be held on entry */ @@ -3067,7 +3061,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, pkt->entry_type = NOTIFY_ACK_TYPE; pkt->entry_count = 1; - pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; + pkt->handle = QLA_TGT_SKIP_HANDLE; nack = (struct nack_to_isp *)pkt; nack->ox_id = 
ntfy->ox_id; @@ -3110,6 +3104,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, #if 0 /* Todo */ if (rc == -ENOMEM) qlt_alloc_qfull_cmd(vha, imm, 0, 0); +#else + if (rc) { + } #endif goto done; } @@ -6457,12 +6454,29 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) if (!vha->flags.online) return; - while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) { + while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) || + fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) { pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; cnt = pkt->u.raw.entry_count; - qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt, - ha_locked); + if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) { + /* + * This packet is corrupted. The header + payload + * can not be trusted. There is no point in passing + * it further up. + */ + ql_log(ql_log_warn, vha, 0xffff, + "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n", + pkt->u.isp24.fcp_hdr.s_id, + be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id), + le32_to_cpu(pkt->u.isp24.exchange_addr), pkt); + + adjust_corrupted_atio(pkt); + qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0); + } else { + qlt_24xx_atio_pkt_all_vps(vha, + (struct atio_from_isp *)pkt, ha_locked); + } for (i = 0; i < cnt; i++) { ha->tgt.atio_ring_index++; @@ -6545,6 +6559,13 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) /* Disable Full Login after LIP */ nv->host_p &= cpu_to_le32(~BIT_10); + + /* + * clear BIT 15 explicitly as we have seen at least + * a couple of instances where this was set and this + * was causing the firmware to not be initialized. + */ + nv->firmware_options_1 &= cpu_to_le32(~BIT_15); /* Enable target PRLI control */ nv->firmware_options_2 |= cpu_to_le32(BIT_14); } else { @@ -6560,9 +6581,6 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) return; } - /* out-of-order frames reassembly */ - nv->firmware_options_3 |= BIT_6|BIT_9; - if (ha->tgt.enable_class_2) { if (vha->flags.init_done) fc_host_supported_classes(vha->host) = @@ -6629,11 +6647,17 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) /* Disable ini mode, if requested */ if (!qla_ini_mode_enabled(vha)) nv->firmware_options_1 |= cpu_to_le32(BIT_5); - /* Disable Full Login after LIP */ nv->firmware_options_1 &= cpu_to_le32(~BIT_13); /* Enable initial LIP */ nv->firmware_options_1 &= cpu_to_le32(~BIT_9); + /* + * clear BIT 15 explicitly as we have seen at + * least a couple of instances where this was set + * and this was causing the firmware to not be + * initialized. + */ + nv->firmware_options_1 &= cpu_to_le32(~BIT_15); if (ql2xtgt_tape_enable) /* Enable FC tape support */ nv->firmware_options_2 |= cpu_to_le32(BIT_12); @@ -6658,9 +6682,6 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) return; } - /* out-of-order frames reassembly */ - nv->firmware_options_3 |= BIT_6|BIT_9; - if (ha->tgt.enable_class_2) { if (vha->flags.init_done) fc_host_supported_classes(vha->host) = diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index f26c5f60eedd..0824a8164a24 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -427,13 +427,33 @@ struct atio_from_isp { struct { uint8_t entry_type; /* Entry type. */ uint8_t entry_count; /* Entry count. 
*/ - uint8_t data[58]; + __le16 attr_n_length; +#define FCP_CMD_LENGTH_MASK 0x0fff +#define FCP_CMD_LENGTH_MIN 0x38 + uint8_t data[56]; uint32_t signature; #define ATIO_PROCESSED 0xDEADDEAD /* Signature */ } raw; } u; } __packed; +static inline int fcpcmd_is_corrupted(struct atio *atio) +{ + if (atio->entry_type == ATIO_TYPE7 && + (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) < + FCP_CMD_LENGTH_MIN)) + return 1; + else + return 0; +} + +/* adjust corrupted atio so we won't trip over the same entry again. */ +static inline void adjust_corrupted_atio(struct atio_from_isp *atio) +{ + atio->u.raw.attr_n_length = cpu_to_le16(FCP_CMD_LENGTH_MIN); + atio->u.isp24.fcp_cmnd.add_cdb_len = 0; +} + #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ /* diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index 36935c9ed669..8a58ef3adab4 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -433,6 +433,18 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, count++; } } + } else if (QLA_TGT_MODE_ENABLED() && + ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) { + struct qla_hw_data *ha = vha->hw; + struct atio *atr = ha->tgt.atio_ring; + + if (atr || !buf) { + length = ha->tgt.atio_q_length; + qla27xx_insert16(0, buf, len); + qla27xx_insert16(length, buf, len); + qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len); + count++; + } } else { ql_dbg(ql_dbg_misc, vha, 0xd026, "%s: unknown queue %x\n", __func__, ent->t263.queue_type); @@ -676,6 +688,18 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, count++; } } + } else if (QLA_TGT_MODE_ENABLED() && + ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) { + struct qla_hw_data *ha = vha->hw; + struct atio *atr = ha->tgt.atio_ring_ptr; + + if (atr || !buf) { + qla27xx_insert16(0, buf, len); + qla27xx_insert16(1, buf, len); + qla27xx_insert32(ha->tgt.atio_q_in ? 
+ readl(ha->tgt.atio_q_in) : 0, buf, len); + count++; + } } else { ql_dbg(ql_dbg_misc, vha, 0xd02f, "%s: unknown queue %x\n", __func__, ent->t274.queue_type); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 6643f6fc7795..d925910be761 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -1800,7 +1800,7 @@ static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item, { return sprintf(page, "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on " - UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, + UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname, utsname()->machine); } @@ -1906,7 +1906,7 @@ static int tcm_qla2xxx_register_configfs(void) int ret; pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on " - UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, + UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname, utsname()->machine); ret = target_register_template(&tcm_qla2xxx_ops); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h index 37e026a4823d..cf8430be183b 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h @@ -1,7 +1,6 @@ #include <target/target_core_base.h> #include <linux/btree.h> -#define TCM_QLA2XXX_VERSION "v0.1" /* length of ASCII WWPNs including pad */ #define TCM_QLA2XXX_NAMELEN 32 /* diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 1fbb1ecf49f2..0b09638fa39b 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2585,7 +2585,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) if (sdp->broken_fua) { sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); sdkp->DPOFUA = 0; - } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { + } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw && + !sdkp->device->use_16_for_rw) { sd_first_printk(KERN_NOTICE, sdkp, "Uses READ/WRITE(6), disabling FUA\n"); sdkp->DPOFUA = 0; @@ -2768,13 +2769,21 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); } - sdkp->zoned = (buffer[8] >> 4) & 3; - if (sdkp->zoned == 1) - q->limits.zoned = BLK_ZONED_HA; - else if (sdkp->device->type == TYPE_ZBC) + if (sdkp->device->type == TYPE_ZBC) { + /* Host-managed */ q->limits.zoned = BLK_ZONED_HM; - else - q->limits.zoned = BLK_ZONED_NONE; + } else { + sdkp->zoned = (buffer[8] >> 4) & 3; + if (sdkp->zoned == 1) + /* Host-aware */ + q->limits.zoned = BLK_ZONED_HA; + else + /* + * Treat drive-managed devices as + * regular block devices. + */ + q->limits.zoned = BLK_ZONED_NONE; + } if (blk_queue_is_zoned(q) && sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", q->limits.zoned == BLK_ZONED_HM ? 
"managed" : "aware"); diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 8c9a35c91705..50adabbb5808 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev, ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); - if (scsi_is_sas_rphy(&sdev->sdev_gendev)) + if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent)) efd.addr = sas_get_address(sdev); if (efd.addr) { diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c index 8823cc81ae45..5bb376009d98 100644 --- a/drivers/soc/ti/wkup_m3_ipc.c +++ b/drivers/soc/ti/wkup_m3_ipc.c @@ -459,6 +459,7 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev) if (IS_ERR(task)) { dev_err(dev, "can't create rproc_boot thread\n"); + ret = PTR_ERR(task); goto err_put_rproc; } diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index ec4aa252d6e8..2922a9908302 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -378,6 +378,7 @@ config SPI_FSL_SPI config SPI_FSL_DSPI tristate "Freescale DSPI controller" select REGMAP_MMIO + depends on HAS_DMA depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST help This enables support for the Freescale DSPI controller in master diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c index e89da0af45d2..0314c6b9e044 100644 --- a/drivers/spi/spi-armada-3700.c +++ b/drivers/spi/spi-armada-3700.c @@ -800,7 +800,7 @@ static int a3700_spi_probe(struct platform_device *pdev) struct spi_master *master; struct a3700_spi *spi; u32 num_cs = 0; - int ret = 0; + int irq, ret = 0; master = spi_alloc_master(dev, sizeof(*spi)); if (!master) { @@ -825,7 +825,7 @@ static int a3700_spi_probe(struct platform_device *pdev) master->unprepare_message = a3700_spi_unprepare_message; master->set_cs = a3700_spi_set_cs; master->flags = SPI_MASTER_HALF_DUPLEX; - master->mode_bits |= (SPI_RX_DUAL | SPI_RX_DUAL | + master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL | SPI_RX_QUAD | SPI_TX_QUAD); platform_set_drvdata(pdev, master); @@ -846,12 +846,13 @@ static int a3700_spi_probe(struct platform_device *pdev) goto error; } - spi->irq = platform_get_irq(pdev, 0); - if (spi->irq < 0) { - dev_err(dev, "could not get irq: %d\n", spi->irq); + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "could not get irq: %d\n", irq); ret = -ENXIO; goto error; } + spi->irq = irq; init_completion(&spi->done); diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c index 319225d7e761..6ab4c7700228 100644 --- a/drivers/spi/spi-axi-spi-engine.c +++ b/drivers/spi/spi-axi-spi-engine.c @@ -494,7 +494,8 @@ static int spi_engine_probe(struct platform_device *pdev) SPI_ENGINE_VERSION_MAJOR(version), SPI_ENGINE_VERSION_MINOR(version), SPI_ENGINE_VERSION_PATCH(version)); - return -ENODEV; + ret = -ENODEV; + goto err_put_master; } spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk"); diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index d36c11b73a35..02fb96797ac8 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -646,7 +646,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) buf = t->rx_buf; t->rx_dma = dma_map_single(&spi->dev, buf, t->len, DMA_FROM_DEVICE); - if (!t->rx_dma) { + if (dma_mapping_error(&spi->dev, !t->rx_dma)) { ret = -EFAULT; goto err_rx_map; } @@ -660,7 +660,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) buf = (void *)t->tx_buf; t->tx_dma = 
dma_map_single(&spi->dev, buf, t->len, DMA_TO_DEVICE); - if (!t->tx_dma) { + if (dma_mapping_error(&spi->dev, t->tx_dma)) { ret = -EFAULT; goto err_tx_map; } diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index e31971f91475..837cb8d0bac6 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c @@ -274,11 +274,11 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer) static void mid_spi_dma_stop(struct dw_spi *dws) { if (test_bit(TX_BUSY, &dws->dma_chan_busy)) { - dmaengine_terminate_all(dws->txchan); + dmaengine_terminate_sync(dws->txchan); clear_bit(TX_BUSY, &dws->dma_chan_busy); } if (test_bit(RX_BUSY, &dws->dma_chan_busy)) { - dmaengine_terminate_all(dws->rxchan); + dmaengine_terminate_sync(dws->rxchan); clear_bit(RX_BUSY, &dws->dma_chan_busy); } } diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index b715a26a9148..054012f87567 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -107,7 +107,10 @@ static const struct file_operations dw_spi_regs_ops = { static int dw_spi_debugfs_init(struct dw_spi *dws) { - dws->debugfs = debugfs_create_dir("dw_spi", NULL); + char name[128]; + + snprintf(name, 128, "dw_spi-%s", dev_name(&dws->master->dev)); + dws->debugfs = debugfs_create_dir(name, NULL); if (!dws->debugfs) return -ENOMEM; diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index dd7b5b47291d..d6239fa718be 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1690,6 +1690,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) pxa2xx_spi_write(drv_data, SSCR1, tmp); tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8); pxa2xx_spi_write(drv_data, SSCR0, tmp); + break; default: tmp = SSCR1_RxTresh(RX_THRESH_DFLT) | SSCR1_TxTresh(TX_THRESH_DFLT); diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 0012ad02e569..1f00eeb0b5a3 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -973,14 +973,16 @@ static const struct sh_msiof_chipdata r8a779x_data = { }; static const struct of_device_id sh_msiof_match[] = { - { .compatible = "renesas,sh-msiof", .data = &sh_data }, { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data }, { .compatible = "renesas,msiof-r8a7790", .data = &r8a779x_data }, { .compatible = "renesas,msiof-r8a7791", .data = &r8a779x_data }, { .compatible = "renesas,msiof-r8a7792", .data = &r8a779x_data }, { .compatible = "renesas,msiof-r8a7793", .data = &r8a779x_data }, { .compatible = "renesas,msiof-r8a7794", .data = &r8a779x_data }, + { .compatible = "renesas,rcar-gen2-msiof", .data = &r8a779x_data }, { .compatible = "renesas,msiof-r8a7796", .data = &r8a779x_data }, + { .compatible = "renesas,rcar-gen3-msiof", .data = &r8a779x_data }, + { .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */ {}, }; MODULE_DEVICE_TABLE(of, sh_msiof_match); diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c index b811b0fb61b1..4c7796512453 100644 --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c @@ -118,12 +118,12 @@ struct rockchip_tsadc_chip { void (*control)(void __iomem *reg, bool on); /* Per-sensor methods */ - int (*get_temp)(struct chip_tsadc_table table, + int (*get_temp)(const struct chip_tsadc_table *table, int chn, void __iomem *reg, int *temp); - void (*set_alarm_temp)(struct chip_tsadc_table table, - int chn, void __iomem *reg, int temp); - void (*set_tshut_temp)(struct chip_tsadc_table table, - int chn, void __iomem *reg, int temp); + int 
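
The spi-davinci hunks above switch from a NULL test on the returned handle to dma_mapping_error(); a dma_addr_t of zero can be a perfectly valid bus address, so the DMA API provides its own error check. A minimal kernel-style sketch of the pattern follows; my_map_tx(), buf and len are illustrative names, not the driver's own.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Map a CPU buffer for a memory-to-device transfer and report failure
 * the way the DMA API expects: via dma_mapping_error(), never by
 * comparing the handle against 0.
 */
static int my_map_tx(struct device *dev, void *buf, size_t len,
                     dma_addr_t *handle)
{
        dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, dma))
                return -EFAULT;

        *handle = dma;
        return 0;
}

/* Undo with dma_unmap_single(dev, *handle, len, DMA_TO_DEVICE). */

Note that dma_mapping_error() takes the returned dma_addr_t itself, as the tx half of the hunk does, not a truth value derived from it.
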
(*set_alarm_temp)(const struct chip_tsadc_table *table, + int chn, void __iomem *reg, int temp); + int (*set_tshut_temp)(const struct chip_tsadc_table *table, + int chn, void __iomem *reg, int temp); void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m); /* Per-table methods */ @@ -317,6 +317,7 @@ static const struct tsadc_table rk3288_code_table[] = { {3452, 115000}, {3437, 120000}, {3421, 125000}, + {0, 125000}, }; static const struct tsadc_table rk3368_code_table[] = { @@ -397,59 +398,80 @@ static const struct tsadc_table rk3399_code_table[] = { {TSADCV3_DATA_MASK, 125000}, }; -static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table, +static u32 rk_tsadcv2_temp_to_code(const struct chip_tsadc_table *table, int temp) { int high, low, mid; - u32 error = 0; + unsigned long num; + unsigned int denom; + u32 error = table->data_mask; low = 0; - high = table.length - 1; + high = (table->length - 1) - 1; /* ignore the last check for table */ mid = (high + low) / 2; /* Return mask code data when the temp is over table range */ - if (temp < table.id[low].temp || temp > table.id[high].temp) { - error = table.data_mask; + if (temp < table->id[low].temp || temp > table->id[high].temp) goto exit; - } while (low <= high) { - if (temp == table.id[mid].temp) - return table.id[mid].code; - else if (temp < table.id[mid].temp) + if (temp == table->id[mid].temp) + return table->id[mid].code; + else if (temp < table->id[mid].temp) high = mid - 1; else low = mid + 1; mid = (low + high) / 2; } + /* + * The conversion code granularity provided by the table. Let's + * assume that the relationship between temperature and + * analog value between 2 table entries is linear and interpolate + * to produce less granular result. + */ + num = abs(table->id[mid + 1].code - table->id[mid].code); + num *= temp - table->id[mid].temp; + denom = table->id[mid + 1].temp - table->id[mid].temp; + + switch (table->mode) { + case ADC_DECREMENT: + return table->id[mid].code - (num / denom); + case ADC_INCREMENT: + return table->id[mid].code + (num / denom); + default: + pr_err("%s: unknown table mode: %d\n", __func__, table->mode); + return error; + } + exit: - pr_err("Invalid the conversion, error=%d\n", error); + pr_err("%s: invalid temperature, temp=%d error=%d\n", + __func__, temp, error); return error; } -static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, - int *temp) +static int rk_tsadcv2_code_to_temp(const struct chip_tsadc_table *table, + u32 code, int *temp) { unsigned int low = 1; - unsigned int high = table.length - 1; + unsigned int high = table->length - 1; unsigned int mid = (low + high) / 2; unsigned int num; unsigned long denom; - WARN_ON(table.length < 2); + WARN_ON(table->length < 2); - switch (table.mode) { + switch (table->mode) { case ADC_DECREMENT: - code &= table.data_mask; - if (code < table.id[high].code) + code &= table->data_mask; + if (code <= table->id[high].code) return -EAGAIN; /* Incorrect reading */ while (low <= high) { - if (code >= table.id[mid].code && - code < table.id[mid - 1].code) + if (code >= table->id[mid].code && + code < table->id[mid - 1].code) break; - else if (code < table.id[mid].code) + else if (code < table->id[mid].code) low = mid + 1; else high = mid - 1; @@ -458,15 +480,15 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, } break; case ADC_INCREMENT: - code &= table.data_mask; - if (code < table.id[low].code) + code &= table->data_mask; + if (code < table->id[low].code) return -EAGAIN; /* Incorrect 
reading */ while (low <= high) { - if (code <= table.id[mid].code && - code > table.id[mid - 1].code) + if (code <= table->id[mid].code && + code > table->id[mid - 1].code) break; - else if (code > table.id[mid].code) + else if (code > table->id[mid].code) low = mid + 1; else high = mid - 1; @@ -475,7 +497,8 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, } break; default: - pr_err("Invalid the conversion table\n"); + pr_err("%s: unknown table mode: %d\n", __func__, table->mode); + return -EINVAL; } /* @@ -484,10 +507,10 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, * temperature between 2 table entries is linear and interpolate * to produce less granular result. */ - num = table.id[mid].temp - table.id[mid - 1].temp; - num *= abs(table.id[mid - 1].code - code); - denom = abs(table.id[mid - 1].code - table.id[mid].code); - *temp = table.id[mid - 1].temp + (num / denom); + num = table->id[mid].temp - table->id[mid - 1].temp; + num *= abs(table->id[mid - 1].code - code); + denom = abs(table->id[mid - 1].code - table->id[mid].code); + *temp = table->id[mid - 1].temp + (num / denom); return 0; } @@ -638,7 +661,7 @@ static void rk_tsadcv3_control(void __iomem *regs, bool enable) writel_relaxed(val, regs + TSADCV2_AUTO_CON); } -static int rk_tsadcv2_get_temp(struct chip_tsadc_table table, +static int rk_tsadcv2_get_temp(const struct chip_tsadc_table *table, int chn, void __iomem *regs, int *temp) { u32 val; @@ -648,39 +671,57 @@ static int rk_tsadcv2_get_temp(struct chip_tsadc_table table, return rk_tsadcv2_code_to_temp(table, val, temp); } -static void rk_tsadcv2_alarm_temp(struct chip_tsadc_table table, - int chn, void __iomem *regs, int temp) +static int rk_tsadcv2_alarm_temp(const struct chip_tsadc_table *table, + int chn, void __iomem *regs, int temp) { - u32 alarm_value, int_en; + u32 alarm_value; + u32 int_en, int_clr; + + /* + * In some cases, some sensors didn't need the trip points, the + * set_trips will pass {-INT_MAX, INT_MAX} to trigger tsadc alarm + * in the end, ignore this case and disable the high temperature + * interrupt. 
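
The rockchip_thermal changes above switch the conversion helpers to take the calibration table by const pointer and add linear interpolation between neighbouring entries to the temperature-to-code path, matching what the code-to-temperature path already did. The same idea in plain C, as a self-contained sketch with a made-up four-entry table (the real driver's tables, data mask and ADC modes differ):

#include <stdio.h>

struct entry { unsigned int code; int temp_mc; };   /* temp in millicelsius */

/* Hypothetical, monotonically increasing calibration table. */
static const struct entry table[] = {
        { 100, 25000 }, { 120, 50000 }, { 140, 75000 }, { 160, 100000 },
};

/*
 * Convert a temperature to an ADC code by binary search plus linear
 * interpolation between the two surrounding table entries.
 */
static long temp_to_code(int temp_mc)
{
        size_t lo = 0, hi = sizeof(table) / sizeof(table[0]) - 1;

        if (temp_mc < table[lo].temp_mc || temp_mc > table[hi].temp_mc)
                return -1;      /* out of range, caller treats as error */

        while (hi - lo > 1) {
                size_t mid = (lo + hi) / 2;

                if (temp_mc < table[mid].temp_mc)
                        hi = mid;
                else
                        lo = mid;
        }

        return table[lo].code +
               (long)(table[hi].code - table[lo].code) *
               (temp_mc - table[lo].temp_mc) /
               (table[hi].temp_mc - table[lo].temp_mc);
}

int main(void)
{
        printf("%ld\n", temp_to_code(62500));   /* halfway: prints 130 */
        return 0;
}

Interpolating keeps trip-point granularity finer than the table step without growing the table.
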
+ */ + if (temp == INT_MAX) { + int_clr = readl_relaxed(regs + TSADCV2_INT_EN); + int_clr &= ~TSADCV2_INT_SRC_EN(chn); + writel_relaxed(int_clr, regs + TSADCV2_INT_EN); + return 0; + } /* Make sure the value is valid */ alarm_value = rk_tsadcv2_temp_to_code(table, temp); - if (alarm_value == table.data_mask) - return; + if (alarm_value == table->data_mask) + return -ERANGE; - writel_relaxed(alarm_value & table.data_mask, + writel_relaxed(alarm_value & table->data_mask, regs + TSADCV2_COMP_INT(chn)); int_en = readl_relaxed(regs + TSADCV2_INT_EN); int_en |= TSADCV2_INT_SRC_EN(chn); writel_relaxed(int_en, regs + TSADCV2_INT_EN); + + return 0; } -static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table, - int chn, void __iomem *regs, int temp) +static int rk_tsadcv2_tshut_temp(const struct chip_tsadc_table *table, + int chn, void __iomem *regs, int temp) { u32 tshut_value, val; /* Make sure the value is valid */ tshut_value = rk_tsadcv2_temp_to_code(table, temp); - if (tshut_value == table.data_mask) - return; + if (tshut_value == table->data_mask) + return -ERANGE; writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn)); /* TSHUT will be valid */ val = readl_relaxed(regs + TSADCV2_AUTO_CON); writel_relaxed(val | TSADCV2_AUTO_SRC_EN(chn), regs + TSADCV2_AUTO_CON); + + return 0; } static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs, @@ -883,10 +924,8 @@ static int rockchip_thermal_set_trips(void *_sensor, int low, int high) dev_dbg(&thermal->pdev->dev, "%s: sensor %d: low: %d, high %d\n", __func__, sensor->id, low, high); - tsadc->set_alarm_temp(tsadc->table, - sensor->id, thermal->regs, high); - - return 0; + return tsadc->set_alarm_temp(&tsadc->table, + sensor->id, thermal->regs, high); } static int rockchip_thermal_get_temp(void *_sensor, int *out_temp) @@ -896,7 +935,7 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp) const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip; int retval; - retval = tsadc->get_temp(tsadc->table, + retval = tsadc->get_temp(&tsadc->table, sensor->id, thermal->regs, out_temp); dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n", sensor->id, *out_temp, retval); @@ -982,8 +1021,12 @@ rockchip_thermal_register_sensor(struct platform_device *pdev, int error; tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode); - tsadc->set_tshut_temp(tsadc->table, id, thermal->regs, + + error = tsadc->set_tshut_temp(&tsadc->table, id, thermal->regs, thermal->tshut_temp); + if (error) + dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n", + __func__, thermal->tshut_temp, error); sensor->thermal = thermal; sensor->id = id; @@ -1196,9 +1239,13 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev) thermal->chip->set_tshut_mode(id, thermal->regs, thermal->tshut_mode); - thermal->chip->set_tshut_temp(thermal->chip->table, + + error = thermal->chip->set_tshut_temp(&thermal->chip->table, id, thermal->regs, thermal->tshut_temp); + if (error) + dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n", + __func__, thermal->tshut_temp, error); } thermal->chip->control(thermal->regs, true); diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 641faab6e24b..655591316a88 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -799,6 +799,11 @@ static void thermal_release(struct device *dev) if (!strncmp(dev_name(dev), "thermal_zone", sizeof("thermal_zone") - 1)) { tz = to_thermal_zone(dev); + kfree(tz->trip_type_attrs); + kfree(tz->trip_temp_attrs); + 
kfree(tz->trip_hyst_attrs); + kfree(tz->trips_attribute_group.attrs); + kfree(tz->device.groups); kfree(tz); } else if (!strncmp(dev_name(dev), "cooling_device", sizeof("cooling_device") - 1)) { @@ -1305,10 +1310,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) thermal_zone_device_set_polling(tz, 0); - kfree(tz->trip_type_attrs); - kfree(tz->trip_temp_attrs); - kfree(tz->trip_hyst_attrs); - kfree(tz->trips_attribute_group.attrs); thermal_set_governor(tz, NULL); thermal_remove_hwmon_sysfs(tz); @@ -1316,7 +1317,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) idr_destroy(&tz->idr); mutex_destroy(&tz->lock); device_unregister(&tz->device); - kfree(tz->device.groups); } EXPORT_SYMBOL_GPL(thermal_zone_device_unregister); diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c index 541af5946203..c4a508a124dc 100644 --- a/drivers/thermal/thermal_hwmon.c +++ b/drivers/thermal/thermal_hwmon.c @@ -59,14 +59,6 @@ static LIST_HEAD(thermal_hwmon_list); static DEFINE_MUTEX(thermal_hwmon_list_lock); static ssize_t -name_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct thermal_hwmon_device *hwmon = dev_get_drvdata(dev); - return sprintf(buf, "%s\n", hwmon->type); -} -static DEVICE_ATTR_RO(name); - -static ssize_t temp_input_show(struct device *dev, struct device_attribute *attr, char *buf) { int temperature; @@ -165,15 +157,12 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) INIT_LIST_HEAD(&hwmon->tz_list); strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH); - hwmon->device = hwmon_device_register(NULL); + hwmon->device = hwmon_device_register_with_info(NULL, hwmon->type, + hwmon, NULL, NULL); if (IS_ERR(hwmon->device)) { result = PTR_ERR(hwmon->device); goto free_mem; } - dev_set_drvdata(hwmon->device, hwmon); - result = device_create_file(hwmon->device, &dev_attr_name); - if (result) - goto free_mem; register_sys_interface: temp = kzalloc(sizeof(*temp), GFP_KERNEL); @@ -222,10 +211,8 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) free_temp_mem: kfree(temp); unregister_name: - if (new_hwmon_device) { - device_remove_file(hwmon->device, &dev_attr_name); + if (new_hwmon_device) hwmon_device_unregister(hwmon->device); - } free_mem: if (new_hwmon_device) kfree(hwmon); @@ -267,7 +254,6 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) list_del(&hwmon->node); mutex_unlock(&thermal_hwmon_list_lock); - device_remove_file(hwmon->device, &dev_attr_name); hwmon_device_unregister(hwmon->device); kfree(hwmon); } diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 4016dae7433b..52747b6ac89a 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -134,42 +134,35 @@ enum snoop_when { #define USB_DEVICE_DEV MKDEV(USB_DEVICE_MAJOR, 0) /* Limit on the total amount of memory we can allocate for transfers */ -static unsigned usbfs_memory_mb = 16; +static u32 usbfs_memory_mb = 16; module_param(usbfs_memory_mb, uint, 0644); MODULE_PARM_DESC(usbfs_memory_mb, "maximum MB allowed for usbfs buffers (0 = no limit)"); -/* Hard limit, necessary to avoid arithmetic overflow */ -#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000) - -static atomic_t usbfs_memory_usage; /* Total memory currently allocated */ +static atomic64_t usbfs_memory_usage; /* Total memory currently allocated */ /* Check whether it's okay to allocate more memory for a transfer */ -static int usbfs_increase_memory_usage(unsigned amount) +static int usbfs_increase_memory_usage(u64 amount) 
{ - unsigned lim; + u64 lim; - /* - * Convert usbfs_memory_mb to bytes, avoiding overflows. - * 0 means use the hard limit (effectively unlimited). - */ lim = ACCESS_ONCE(usbfs_memory_mb); - if (lim == 0 || lim > (USBFS_XFER_MAX >> 20)) - lim = USBFS_XFER_MAX; - else - lim <<= 20; + lim <<= 20; - atomic_add(amount, &usbfs_memory_usage); - if (atomic_read(&usbfs_memory_usage) <= lim) - return 0; - atomic_sub(amount, &usbfs_memory_usage); - return -ENOMEM; + atomic64_add(amount, &usbfs_memory_usage); + + if (lim > 0 && atomic64_read(&usbfs_memory_usage) > lim) { + atomic64_sub(amount, &usbfs_memory_usage); + return -ENOMEM; + } + + return 0; } /* Memory for a transfer is being deallocated */ -static void usbfs_decrease_memory_usage(unsigned amount) +static void usbfs_decrease_memory_usage(u64 amount) { - atomic_sub(amount, &usbfs_memory_usage); + atomic64_sub(amount, &usbfs_memory_usage); } static int connected(struct usb_dev_state *ps) @@ -1191,7 +1184,7 @@ static int proc_bulk(struct usb_dev_state *ps, void __user *arg) if (!usb_maxpacket(dev, pipe, !(bulk.ep & USB_DIR_IN))) return -EINVAL; len1 = bulk.len; - if (len1 >= USBFS_XFER_MAX) + if (len1 >= (INT_MAX - sizeof(struct urb))) return -EINVAL; ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb)); if (ret) @@ -1584,10 +1577,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb return -EINVAL; } - if (uurb->buffer_length >= USBFS_XFER_MAX) { - ret = -EINVAL; - goto error; - } if (uurb->buffer_length > 0 && !access_ok(is_in ? VERIFY_WRITE : VERIFY_READ, uurb->buffer, uurb->buffer_length)) { diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 479e223f9cff..612fab6e54fb 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -3017,6 +3017,7 @@ void usb_remove_hcd(struct usb_hcd *hcd) } usb_put_invalidate_rhdev(hcd); + hcd->flags = 0; } EXPORT_SYMBOL_GPL(usb_remove_hcd); diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index dea55914d641..2184ef40a82a 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -122,12 +122,11 @@ static int usb_internal_control_msg(struct usb_device *usb_dev, * This function sends a simple control message to a specified endpoint and * waits for the message to complete, or timeout. * - * Don't use this function from within an interrupt context, like a bottom half - * handler. If you need an asynchronous message, or need to send a message - * from within interrupt context, use usb_submit_urb(). - * If a thread in your driver uses this call, make sure your disconnect() - * method can wait for it to complete. Since you don't have a handle on the - * URB used, you can't cancel the request. + * Don't use this function from within an interrupt context. If you need + * an asynchronous message, or need to send a message from within interrupt + * context, use usb_submit_urb(). If a thread in your driver uses this call, + * make sure your disconnect() method can wait for it to complete. Since you + * don't have a handle on the URB used, you can't cancel the request. * * Return: If successful, the number of bytes transferred. Otherwise, a negative * error number. @@ -173,12 +172,11 @@ EXPORT_SYMBOL_GPL(usb_control_msg); * This function sends a simple interrupt message to a specified endpoint and * waits for the message to complete, or timeout. * - * Don't use this function from within an interrupt context, like a bottom half - * handler. 
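
The devio.c changes above widen the usbfs accounting to 64 bits so that the usual "add, check against the limit, roll back on failure" scheme cannot overflow even with a very large usbfs_memory_mb setting. The pattern itself, sketched in user-space C11; increase_usage()/limit are illustrative names, and the kernel version uses atomic64_t with ACCESS_ONCE() on the module parameter rather than these exact calls.

#include <stdatomic.h>
#include <stdint.h>
#include <errno.h>

static _Atomic int64_t usage;           /* bytes currently accounted */
static int64_t limit = 16 << 20;        /* 0 would mean "no limit" */

/* Reserve @amount bytes; undo the reservation if the limit is exceeded. */
static int increase_usage(int64_t amount)
{
        int64_t now = atomic_fetch_add(&usage, amount) + amount;

        if (limit > 0 && now > limit) {
                atomic_fetch_sub(&usage, amount);
                return -ENOMEM;
        }
        return 0;
}

static void decrease_usage(int64_t amount)
{
        atomic_fetch_sub(&usage, amount);
}
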
If you need an asynchronous message, or need to send a message - * from within interrupt context, use usb_submit_urb() If a thread in your - * driver uses this call, make sure your disconnect() method can wait for it to - * complete. Since you don't have a handle on the URB used, you can't cancel - * the request. + * Don't use this function from within an interrupt context. If you need + * an asynchronous message, or need to send a message from within interrupt + * context, use usb_submit_urb() If a thread in your driver uses this call, + * make sure your disconnect() method can wait for it to complete. Since you + * don't have a handle on the URB used, you can't cancel the request. * * Return: * If successful, 0. Otherwise a negative error number. The number of actual @@ -207,12 +205,11 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg); * This function sends a simple bulk message to a specified endpoint * and waits for the message to complete, or timeout. * - * Don't use this function from within an interrupt context, like a bottom half - * handler. If you need an asynchronous message, or need to send a message - * from within interrupt context, use usb_submit_urb() If a thread in your - * driver uses this call, make sure your disconnect() method can wait for it to - * complete. Since you don't have a handle on the URB used, you can't cancel - * the request. + * Don't use this function from within an interrupt context. If you need + * an asynchronous message, or need to send a message from within interrupt + * context, use usb_submit_urb() If a thread in your driver uses this call, + * make sure your disconnect() method can wait for it to complete. Since you + * don't have a handle on the URB used, you can't cancel the request. * * Because there is no usb_interrupt_msg() and no USBDEVFS_INTERRUPT ioctl, * users are forced to abuse this routine by using it to submit URBs for diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index eb579b6f68f0..bc3b3fda5000 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -3268,7 +3268,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, /* keep other bits untouched (so e.g. 
forced modes are not lost) */ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | - GUSBCFG_HNPCAP); + GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK); if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS && (hsotg->params.speed == DWC2_SPEED_PARAM_FULL || @@ -3847,8 +3847,8 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, __func__, epctrl, epctrl_reg); /* Allocate DMA descriptor chain for non-ctrl endpoints */ - if (using_desc_dma(hsotg)) { - hs_ep->desc_list = dma_alloc_coherent(hsotg->dev, + if (using_desc_dma(hsotg) && !hs_ep->desc_list) { + hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * sizeof(struct dwc2_dma_desc), &hs_ep->desc_list_dma, GFP_ATOMIC); @@ -3971,7 +3971,7 @@ error1: error2: if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) { - dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * + dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * sizeof(struct dwc2_dma_desc), hs_ep->desc_list, hs_ep->desc_list_dma); hs_ep->desc_list = NULL; @@ -4001,14 +4001,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep) return -EINVAL; } - /* Remove DMA memory allocated for non-control Endpoints */ - if (using_desc_dma(hsotg)) { - dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * - sizeof(struct dwc2_dma_desc), - hs_ep->desc_list, hs_ep->desc_list_dma); - hs_ep->desc_list = NULL; - } - epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index); spin_lock_irqsave(&hsotg->lock, flags); @@ -4229,7 +4221,7 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg) /* keep other bits untouched (so e.g. forced modes are not lost) */ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | - GUSBCFG_HNPCAP); + GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK); /* set the PLL on, remove the HNP/SRP and set the PHY */ trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 
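
The dwc2 gadget hunks above allocate the DMA descriptor chain with dmam_alloc_coherent() and only once per endpoint, relying on devres to release it when the device is unbound, which is why the explicit free in ep_disable goes away. A kernel-style sketch of that managed-allocation pattern, with a hypothetical my_ep structure rather than the dwc2 types:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct my_ep {
        void *desc;
        dma_addr_t desc_dma;
};

/* Allocate once; devres frees it when the underlying device goes away. */
static int my_ep_alloc_descs(struct device *dev, struct my_ep *ep, size_t size)
{
        if (ep->desc)
                return 0;       /* already allocated on a previous enable */

        ep->desc = dmam_alloc_coherent(dev, size, &ep->desc_dma, GFP_KERNEL);
        return ep->desc ? 0 : -ENOMEM;
}
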
9 : 5; diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index d89dbfacf0a4..a73722e27d07 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -4380,6 +4380,9 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd) if (!HCD_HW_ACCESSIBLE(hcd)) goto unlock; + if (hsotg->op_state == OTG_STATE_B_PERIPHERAL) + goto unlock; + if (!hsotg->params.hibernation) goto skip_power_saving; @@ -4502,8 +4505,8 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb, { #ifdef VERBOSE_DEBUG struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); - char *pipetype; - char *speed; + char *pipetype = NULL; + char *speed = NULL; dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb); dev_vdbg(hsotg->dev, " Device address: %d\n", diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index 84229fc8eb77..1515d45ebcec 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c @@ -136,7 +136,8 @@ static int dwc3_exynos_probe(struct platform_device *pdev) exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk"); if (IS_ERR(exynos->axius_clk)) { dev_err(dev, "no AXI UpScaler clk specified\n"); - return -ENODEV; + ret = -ENODEV; + goto axius_clk_err; } clk_prepare_enable(exynos->axius_clk); } else { @@ -194,6 +195,7 @@ err3: regulator_disable(exynos->vdd33); err2: clk_disable_unprepare(exynos->axius_clk); +axius_clk_err: clk_disable_unprepare(exynos->susp_clk); clk_disable_unprepare(exynos->clk); return ret; diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 002822d98fda..49d685ad0da9 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -2147,7 +2147,7 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev, cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL); if (!cdev->os_desc_req->buf) { ret = -ENOMEM; - kfree(cdev->os_desc_req); + usb_ep_free_request(ep0, cdev->os_desc_req); goto end; } cdev->os_desc_req->context = cdev; diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index f5d6bf527aac..c46666364c87 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1806,7 +1806,7 @@ static void ffs_func_eps_disable(struct ffs_function *func) unsigned long flags; spin_lock_irqsave(&func->ffs->eps_lock, flags); - do { + while (count--) { /* pending requests get nuked */ if (likely(ep->ep)) usb_ep_disable(ep->ep); @@ -1817,7 +1817,7 @@ static void ffs_func_eps_disable(struct ffs_function *func) __ffs_epfile_read_buffer_free(epfile); ++epfile; } - } while (--count); + } spin_unlock_irqrestore(&func->ffs->eps_lock, flags); } @@ -1831,7 +1831,7 @@ static int ffs_func_eps_enable(struct ffs_function *func) int ret = 0; spin_lock_irqsave(&func->ffs->eps_lock, flags); - do { + while(count--) { struct usb_endpoint_descriptor *ds; int desc_idx; @@ -1867,7 +1867,7 @@ static int ffs_func_eps_enable(struct ffs_function *func) ++ep; ++epfile; - } while (--count); + } spin_unlock_irqrestore(&func->ffs->eps_lock, flags); return ret; @@ -3448,12 +3448,12 @@ static void ffs_func_unbind(struct usb_configuration *c, /* cleanup after autoconfig */ spin_lock_irqsave(&func->ffs->eps_lock, flags); - do { + while (count--) { if (ep->ep && ep->req) usb_ep_free_request(ep->ep, ep->req); ep->req = NULL; ++ep; - } while (--count); + } spin_unlock_irqrestore(&func->ffs->eps_lock, flags); kfree(func->eps); func->eps = NULL; diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 
f35949d3e8ba..11bbce28bc23 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -2135,7 +2135,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev, dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret); goto err; } - ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index); + sprintf(ep->name, "ep%d", ep->index); + ep->ep.name = ep->name; ep->ep_regs = udc->regs + USBA_EPT_BASE(i); ep->dma_regs = udc->regs + USBA_DMA_BASE(i); diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h index a186cdde1f58..9551b704bfd3 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.h +++ b/drivers/usb/gadget/udc/atmel_usba_udc.h @@ -286,6 +286,7 @@ struct usba_ep { void __iomem *ep_regs; void __iomem *dma_regs; void __iomem *fifo; + char name[8]; struct usb_ep ep; struct usba_udc *udc; diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 6361fc739306..407d947b34ea 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -45,9 +45,9 @@ config USB_XHCI_PLATFORM If unsure, say N. config USB_XHCI_MTK - tristate "xHCI support for Mediatek MT65xx" + tristate "xHCI support for Mediatek MT65xx/MT7621" select MFD_SYSCON - depends on ARCH_MEDIATEK || COMPILE_TEST + depends on (MIPS && SOC_MT7621) || ARCH_MEDIATEK || COMPILE_TEST ---help--- Say 'Y' to enable the support for the xHCI host controller found in Mediatek MT65xx SoCs. diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c index 42e5b66353ef..7a603f66a9bc 100644 --- a/drivers/usb/host/ehci-exynos.c +++ b/drivers/usb/host/ehci-exynos.c @@ -77,10 +77,12 @@ static int exynos_ehci_get_phy(struct device *dev, if (IS_ERR(phy)) { ret = PTR_ERR(phy); if (ret == -EPROBE_DEFER) { + of_node_put(child); return ret; } else if (ret != -ENOSYS && ret != -ENODEV) { dev_err(dev, "Error retrieving usb2 phy: %d\n", ret); + of_node_put(child); return ret; } } diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c index 91701cc68082..3733aab46efe 100644 --- a/drivers/usb/host/ehci-fsl.c +++ b/drivers/usb/host/ehci-fsl.c @@ -600,7 +600,7 @@ static int ehci_fsl_drv_restore(struct device *dev) return 0; } -static struct dev_pm_ops ehci_fsl_pm_ops = { +static const struct dev_pm_ops ehci_fsl_pm_ops = { .suspend = ehci_fsl_drv_suspend, .resume = ehci_fsl_drv_resume, .restore = ehci_fsl_drv_restore, diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c index 2cd105be7319..6865b919403f 100644 --- a/drivers/usb/host/ohci-exynos.c +++ b/drivers/usb/host/ohci-exynos.c @@ -66,10 +66,12 @@ static int exynos_ohci_get_phy(struct device *dev, if (IS_ERR(phy)) { ret = PTR_ERR(phy); if (ret == -EPROBE_DEFER) { + of_node_put(child); return ret; } else if (ret != -ENOSYS && ret != -ENODEV) { dev_err(dev, "Error retrieving usb2 phy: %d\n", ret); + of_node_put(child); return ret; } } diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c index b08e385399b9..a4d814b7f380 100644 --- a/drivers/usb/host/ohci-omap.c +++ b/drivers/usb/host/ohci-omap.c @@ -227,8 +227,7 @@ static int ohci_omap_reset(struct usb_hcd *hcd) return status; } } else { - dev_err(hcd->self.controller, "can't find phy\n"); - return -ENODEV; + return -EPROBE_DEFER; } ohci->start_hnp = start_hnp; } diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c index 4e4d601af35c..bcf531c44c70 100644 --- a/drivers/usb/host/oxu210hp-hcd.c +++ b/drivers/usb/host/oxu210hp-hcd.c @@ -2288,9 +2288,7 @@ 
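
The ehci-exynos and ohci-exynos fixes above add of_node_put() before the early returns, which sit inside a for_each_available_child_of_node() walk in the surrounding (unchanged) code. The iterator holds a reference on the current child that is normally dropped when it advances, so any early exit from the loop must drop it by hand. A kernel-style sketch of the rule; the helper name and the "hypothetical-bad-flag" property are made up.

#include <linux/errno.h>
#include <linux/of.h>

/* Count children, bailing out early on a node we cannot handle. */
static int count_usable_children(struct device_node *parent)
{
        struct device_node *child;
        int n = 0;

        for_each_available_child_of_node(parent, child) {
                if (of_property_read_bool(child, "hypothetical-bad-flag")) {
                        /*
                         * The iterator only drops this reference on the
                         * next pass, so an early return must drop it here.
                         */
                        of_node_put(child);
                        return -ENODEV;
                }
                n++;
        }

        return n;
}
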
restart: while (q.ptr != NULL) { union ehci_shadow temp; - int live; - live = HC_IS_RUNNING(oxu_to_hcd(oxu)->state); switch (type) { case Q_TYPE_QH: /* handle any completions */ diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index 74c42f722678..363d125300ea 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c @@ -37,10 +37,8 @@ void xhci_dbg_regs(struct xhci_hcd *xhci) &xhci->cap_regs->hc_capbase, temp); xhci_dbg(xhci, "// CAPLENGTH: 0x%x\n", (unsigned int) HC_LENGTH(temp)); -#if 0 xhci_dbg(xhci, "// HCIVERSION: 0x%x\n", (unsigned int) HC_VERSION(temp)); -#endif xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs); @@ -177,7 +175,7 @@ static void xhci_print_ports(struct xhci_hcd *xhci) ports = HCS_MAX_PORTS(xhci->hcs_params1); addr = &xhci->op_regs->port_status_base; for (i = 0; i < ports; i++) { - for (j = 0; j < NUM_PORT_REGS; ++j) { + for (j = 0; j < NUM_PORT_REGS; j++) { xhci_dbg(xhci, "%p port %s reg = 0x%x\n", addr, names[j], (unsigned int) readl(addr)); @@ -240,7 +238,7 @@ void xhci_print_run_regs(struct xhci_hcd *xhci) xhci_dbg(xhci, " %p: Microframe index = 0x%x\n", &xhci->run_regs->microframe_index, (unsigned int) temp); - for (i = 0; i < 7; ++i) { + for (i = 0; i < 7; i++) { temp = readl(&xhci->run_regs->rsvd[i]); if (temp != XHCI_INIT_VALUE) xhci_dbg(xhci, " WARN: %p: Rsvd[%i] = 0x%x\n", @@ -259,7 +257,7 @@ void xhci_print_registers(struct xhci_hcd *xhci) void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb) { int i; - for (i = 0; i < 4; ++i) + for (i = 0; i < 4; i++) xhci_dbg(xhci, "Offset 0x%x = 0x%x\n", i*4, trb->generic.field[i]); } @@ -332,7 +330,7 @@ void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg) u64 addr = seg->dma; union xhci_trb *trb = seg->trbs; - for (i = 0; i < TRBS_PER_SEGMENT; ++i) { + for (i = 0; i < TRBS_PER_SEGMENT; i++) { trb = &seg->trbs[i]; xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n", addr, lower_32_bits(le64_to_cpu(trb->link.segment_ptr)), @@ -413,7 +411,7 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) int i; struct xhci_erst_entry *entry; - for (i = 0; i < erst->num_entries; ++i) { + for (i = 0; i < erst->num_entries; i++) { entry = &erst->entries[i]; xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n", addr, @@ -440,7 +438,7 @@ void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci) static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma) { int i; - for (i = 0; i < 4; ++i) { + for (i = 0; i < 4; i++) { xhci_dbg(xhci, "@%p (virt) @%08llx " "(dma) %#08llx - rsvd64[%d]\n", &ctx[4 + i], (unsigned long long)dma, @@ -496,7 +494,7 @@ static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx * &slot_ctx->dev_state, (unsigned long long)dma, slot_ctx->dev_state); dma += field_size; - for (i = 0; i < 4; ++i) { + for (i = 0; i < 4; i++) { xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", &slot_ctx->reserved[i], (unsigned long long)dma, slot_ctx->reserved[i], i); @@ -519,7 +517,7 @@ static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci, if (last_ep < 31) last_ep_ctx = last_ep + 1; - for (i = 0; i < last_ep_ctx; ++i) { + for (i = 0; i < last_ep_ctx; i++) { unsigned int epaddr = xhci_get_endpoint_address(i); struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i); dma_addr_t dma = ctx->dma + @@ -544,7 +542,7 @@ static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci, &ep_ctx->tx_info, (unsigned long long)dma, ep_ctx->tx_info); dma += field_size; - for (j = 0; j < 3; ++j) { + for (j = 0; j < 3; j++) { 
xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", &ep_ctx->reserved[j], (unsigned long long)dma, @@ -583,7 +581,7 @@ void xhci_dbg_ctx(struct xhci_hcd *xhci, &ctrl_ctx->add_flags, (unsigned long long)dma, ctrl_ctx->add_flags); dma += field_size; - for (i = 0; i < 6; ++i) { + for (i = 0; i < 6; i++) { xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n", &ctrl_ctx->rsvd2[i], (unsigned long long)dma, ctrl_ctx->rsvd2[i], i); diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h index e0244fb3903d..28deea584884 100644 --- a/drivers/usb/host/xhci-ext-caps.h +++ b/drivers/usb/host/xhci-ext-caps.h @@ -117,7 +117,7 @@ static inline int xhci_find_next_ext_cap(void __iomem *base, u32 start, int id) offset = XHCI_HCC_EXT_CAPS(val) << 2; if (!offset) return 0; - }; + } do { val = readl(base + offset); if (val == ~0) diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 0ef16900efed..3bddeaa1e2d7 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -389,6 +389,8 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend) if (!virt_dev) return -ENODEV; + trace_xhci_stop_device(virt_dev); + cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); if (!cmd) { xhci_dbg(xhci, "Couldn't allocate command structure.\n"); @@ -418,7 +420,8 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend) /* Wait for last stop endpoint command to finish */ wait_for_completion(cmd->completion); - if (cmd->status == COMP_CMD_ABORT || cmd->status == COMP_CMD_STOP) { + if (cmd->status == COMP_COMMAND_ABORTED || + cmd->status == COMP_STOPPED) { xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n"); ret = -ETIME; } @@ -458,6 +461,12 @@ static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci, return; } + if (xhci->quirks & XHCI_BROKEN_PORT_PED) { + xhci_dbg(xhci, + "Broken Port Enabled/Disabled, ignoring port disable request.\n"); + return; + } + /* Write 1 to disable the port */ writel(port_status | PORT_PE, addr); port_status = readl(addr); @@ -990,8 +999,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, temp = readl(port_array[wIndex]); if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) || (temp & PORT_PLS_MASK) >= XDEV_U3) { - xhci_warn(xhci, "USB core suspending device " - "not in U0/U1/U2.\n"); + xhci_warn(xhci, "USB core suspending device not in U0/U1/U2.\n"); goto error; } diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 8414ed2a02de..ba1853f4e407 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -936,6 +936,9 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) return; dev = xhci->devs[slot_id]; + + trace_xhci_free_virt_device(dev); + xhci->dcbaa->dev_context_ptrs[slot_id] = 0; if (!dev) return; @@ -943,7 +946,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) if (dev->tt_info) old_active_eps = dev->tt_info->active_eps; - for (i = 0; i < 31; ++i) { + for (i = 0; i < 31; i++) { if (dev->eps[i].ring) xhci_ring_free(xhci, dev->eps[i].ring); if (dev->eps[i].stream_info) @@ -1075,6 +1078,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, &xhci->dcbaa->dev_context_ptrs[slot_id], le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id])); + trace_xhci_alloc_virt_device(dev); + return 1; fail: xhci_free_virt_device(xhci, slot_id); @@ -1249,6 +1254,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud ep0_ctx->deq = 
cpu_to_le64(dev->eps[0].ring->first_seg->dma | dev->eps[0].ring->cycle_state); + trace_xhci_setup_addressable_virt_device(dev); + /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ return 0; @@ -1414,14 +1421,16 @@ static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep) in = usb_endpoint_dir_in(&ep->desc); - if (usb_endpoint_xfer_control(&ep->desc)) + switch (usb_endpoint_type(&ep->desc)) { + case USB_ENDPOINT_XFER_CONTROL: return CTRL_EP; - if (usb_endpoint_xfer_bulk(&ep->desc)) + case USB_ENDPOINT_XFER_BULK: return in ? BULK_IN_EP : BULK_OUT_EP; - if (usb_endpoint_xfer_isoc(&ep->desc)) + case USB_ENDPOINT_XFER_ISOC: return in ? ISOC_IN_EP : ISOC_OUT_EP; - if (usb_endpoint_xfer_int(&ep->desc)) + case USB_ENDPOINT_XFER_INT: return in ? INT_IN_EP : INT_OUT_EP; + } return 0; } @@ -1587,7 +1596,7 @@ void xhci_update_bw_info(struct xhci_hcd *xhci, unsigned int ep_type; int i; - for (i = 1; i < 31; ++i) { + for (i = 1; i < 31; i++) { bw_info = &virt_dev->eps[i].bw_info; /* We can't tell what endpoint type is being dropped, but @@ -1808,10 +1817,7 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, void xhci_urb_free_priv(struct urb_priv *urb_priv) { - if (urb_priv) { - kfree(urb_priv->td[0]); - kfree(urb_priv); - } + kfree(urb_priv); } void xhci_free_command(struct xhci_hcd *xhci, @@ -2569,9 +2575,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) * something other than the default (~1ms minimum between interrupts). * See section 5.5.1.2. */ - for (i = 0; i < MAX_HC_SLOTS; ++i) + for (i = 0; i < MAX_HC_SLOTS; i++) xhci->devs[i] = NULL; - for (i = 0; i < USB_MAXCHILDREN; ++i) { + for (i = 0; i < USB_MAXCHILDREN; i++) { xhci->bus_state[0].resume_done[i] = 0; xhci->bus_state[1].resume_done[i] = 0; /* Only the USB 2.0 completions will ever be used. 
*/ diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index bac961cd24ad..b30806a0d37a 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -212,6 +212,12 @@ static int xhci_mtk_clks_enable(struct xhci_hcd_mtk *mtk) { int ret; + ret = clk_prepare_enable(mtk->ref_clk); + if (ret) { + dev_err(mtk->dev, "failed to enable ref_clk\n"); + goto ref_clk_err; + } + ret = clk_prepare_enable(mtk->sys_clk); if (ret) { dev_err(mtk->dev, "failed to enable sys_clk\n"); @@ -238,6 +244,8 @@ usb_p1_err: usb_p0_err: clk_disable_unprepare(mtk->sys_clk); sys_clk_err: + clk_disable_unprepare(mtk->ref_clk); +ref_clk_err: return -EINVAL; } @@ -248,6 +256,7 @@ static void xhci_mtk_clks_disable(struct xhci_hcd_mtk *mtk) clk_disable_unprepare(mtk->wk_deb_p0); } clk_disable_unprepare(mtk->sys_clk); + clk_disable_unprepare(mtk->ref_clk); } /* only clocks can be turn off for ip-sleep wakeup mode */ @@ -550,6 +559,12 @@ static int xhci_mtk_probe(struct platform_device *pdev) return PTR_ERR(mtk->sys_clk); } + mtk->ref_clk = devm_clk_get(dev, "ref_ck"); + if (IS_ERR(mtk->ref_clk)) { + dev_err(dev, "fail to get ref_ck\n"); + return PTR_ERR(mtk->ref_clk); + } + mtk->lpm_support = of_property_read_bool(node, "usb3-lpm-capable"); ret = usb_wakeup_of_property_parse(mtk, node); diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h index 2845c49efe1b..3aa5e1d25064 100644 --- a/drivers/usb/host/xhci-mtk.h +++ b/drivers/usb/host/xhci-mtk.h @@ -124,6 +124,7 @@ struct xhci_hcd_mtk { struct regulator *vusb33; struct regulator *vbus; struct clk *sys_clk; /* sys and mac clock */ + struct clk *ref_clk; struct clk *wk_deb_p0; /* port0's wakeup debounce clock */ struct clk *wk_deb_p1; struct regmap *pericfg; diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 954abfd5014d..fc99f51d12e1 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -242,11 +242,7 @@ static int xhci_pci_setup(struct usb_hcd *hcd) xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn); /* Find any debug ports */ - retval = xhci_pci_reinit(xhci, pdev); - if (!retval) - return retval; - - return retval; + return xhci_pci_reinit(xhci, pdev); } /* diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index ddfab301e366..6d33b42ffcf5 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -165,7 +165,7 @@ static int xhci_plat_probe(struct platform_device *pdev) return -ENODEV; /* Try to set 64-bit DMA first */ - if (WARN_ON(!pdev->dev.dma_mask)) + if (!pdev->dev.dma_mask) /* Platform did not initialize dma_mask */ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); @@ -232,8 +232,8 @@ static int xhci_plat_probe(struct platform_device *pdev) if (device_property_read_bool(&pdev->dev, "usb3-lpm-capable")) xhci->quirks |= XHCI_LPM_SUPPORT; - if (HCC_MAX_PSA(xhci->hcc_params) >= 4) - xhci->shared_hcd->can_do_streams = 1; + if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped")) + xhci->quirks |= XHCI_BROKEN_PORT_PED; hcd->usb_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0); if (IS_ERR(hcd->usb_phy)) { @@ -251,6 +251,9 @@ static int xhci_plat_probe(struct platform_device *pdev) if (ret) goto disable_usb_phy; + if (HCC_MAX_PSA(xhci->hcc_params) >= 4) + xhci->shared_hcd->can_do_streams = 1; + ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); if (ret) goto dealloc_usb2_hcd; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index e32029a31ca4..d9936c771fa0 
100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -119,14 +119,29 @@ static bool last_td_in_urb(struct xhci_td *td) { struct urb_priv *urb_priv = td->urb->hcpriv; - return urb_priv->td_cnt == urb_priv->length; + return urb_priv->num_tds_done == urb_priv->num_tds; } static void inc_td_cnt(struct urb *urb) { struct urb_priv *urb_priv = urb->hcpriv; - urb_priv->td_cnt++; + urb_priv->num_tds_done++; +} + +static void trb_to_noop(union xhci_trb *trb, u32 noop_type) +{ + if (trb_is_link(trb)) { + /* unchain chained link TRBs */ + trb->link.control &= cpu_to_le32(~TRB_CHAIN); + } else { + trb->generic.field[0] = 0; + trb->generic.field[1] = 0; + trb->generic.field[2] = 0; + /* Preserve only the cycle bit of this TRB */ + trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); + trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type)); + } } /* Updates trb to point to the next TRB in the ring, and updates seg if the next @@ -299,27 +314,19 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci, struct xhci_command *cur_cmd) { struct xhci_command *i_cmd; - u32 cycle_state; /* Turn all aborted commands in list to no-ops, then restart */ list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) { - if (i_cmd->status != COMP_CMD_ABORT) + if (i_cmd->status != COMP_COMMAND_ABORTED) continue; - i_cmd->status = COMP_CMD_STOP; + i_cmd->status = COMP_STOPPED; xhci_dbg(xhci, "Turn aborted command %p to no-op\n", i_cmd->command_trb); - /* get cycle state from the original cmd trb */ - cycle_state = le32_to_cpu( - i_cmd->command_trb->generic.field[3]) & TRB_CYCLE; - /* modify the command trb to no-op command */ - i_cmd->command_trb->generic.field[0] = 0; - i_cmd->command_trb->generic.field[1] = 0; - i_cmd->command_trb->generic.field[2] = 0; - i_cmd->command_trb->generic.field[3] = cpu_to_le32( - TRB_TYPE(TRB_CMD_NOOP) | cycle_state); + + trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP); /* * caller waiting for completion is called when command @@ -362,19 +369,11 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags) ret = xhci_handshake(&xhci->op_regs->cmd_ring, CMD_RING_RUNNING, 0, 5 * 1000 * 1000); if (ret < 0) { - /* we are about to kill xhci, give it one more chance */ - xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, - &xhci->op_regs->cmd_ring); - udelay(1000); - ret = xhci_handshake(&xhci->op_regs->cmd_ring, - CMD_RING_RUNNING, 0, 3 * 1000 * 1000); - if (ret < 0) { - xhci_err(xhci, "Stopped the command ring failed, " - "maybe the host is dead\n"); - xhci->xhc_state |= XHCI_STATE_DYING; - xhci_halt(xhci); - return -ESHUTDOWN; - } + xhci_err(xhci, + "Stop command ring failed, maybe the host is dead\n"); + xhci->xhc_state |= XHCI_STATE_DYING; + xhci_halt(xhci); + return -ESHUTDOWN; } /* * Writing the CMD_RING_ABORT bit should cause a cmd completion event, @@ -410,7 +409,7 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, * pointer command pending because the device can choose to start any * stream once the endpoint is on the HW schedule. 
*/ - if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) || + if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) || (ep_state & EP_HALTED)) return; writel(DB_VALUE(ep_index, stream_id), db_addr); @@ -600,18 +599,8 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, union xhci_trb *trb = td->first_trb; while (1) { - if (trb_is_link(trb)) { - /* unchain chained link TRBs */ - trb->link.control &= cpu_to_le32(~TRB_CHAIN); - } else { - trb->generic.field[0] = 0; - trb->generic.field[1] = 0; - trb->generic.field[2] = 0; - /* Preserve only the cycle bit of this TRB */ - trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); - trb->generic.field[3] |= cpu_to_le32( - TRB_TYPE(TRB_TR_NOOP)); - } + trb_to_noop(trb, TRB_TR_NOOP); + /* flip cycle if asked to */ if (flip_cycle && trb != td->first_trb && trb != td->last_trb) trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE); @@ -626,13 +615,9 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, struct xhci_virt_ep *ep) { - ep->ep_state &= ~EP_HALT_PENDING; - /* Can't del_timer_sync in interrupt, so we attempt to cancel. If the - * timer is running on another CPU, we don't decrement stop_cmds_pending - * (since we didn't successfully stop the watchdog timer). - */ - if (del_timer(&ep->stop_cmd_timer)) - ep->stop_cmds_pending--; + ep->ep_state &= ~EP_STOP_CMD_PENDING; + /* Can't del_timer_sync in interrupt */ + del_timer(&ep->stop_cmd_timer); } /* @@ -657,6 +642,7 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, usb_hcd_unlink_urb_from_ep(hcd, urb); spin_unlock(&xhci->lock); usb_hcd_giveback_urb(hcd, urb, status); + trace_xhci_urb_giveback(urb); spin_lock(&xhci->lock); } @@ -667,7 +653,7 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_segment *seg = td->bounce_seg; struct urb *urb = td->urb; - if (!seg || !urb) + if (!ring || !seg || !urb) return; if (usb_urb_dir_out(urb)) { @@ -701,7 +687,6 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, unsigned int ep_index; struct xhci_ring *ep_ring; struct xhci_virt_ep *ep; - struct list_head *entry; struct xhci_td *cur_td = NULL; struct xhci_td *last_unlinked_td; @@ -718,6 +703,8 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, memset(&deq_state, 0, sizeof(deq_state)); ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); ep = &xhci->devs[slot_id]->eps[ep_index]; + last_unlinked_td = list_last_entry(&ep->cancelled_td_list, + struct xhci_td, cancelled_td_list); if (list_empty(&ep->cancelled_td_list)) { xhci_stop_watchdog_timer_in_irq(xhci, ep); @@ -731,8 +718,7 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, * it. 
We're also in the event handler, so we can't get re-interrupted * if another Stop Endpoint command completes */ - list_for_each(entry, &ep->cancelled_td_list) { - cur_td = list_entry(entry, struct xhci_td, cancelled_td_list); + list_for_each_entry(cur_td, &ep->cancelled_td_list, cancelled_td_list) { xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Removing canceled TD starting at 0x%llx (dma).", (unsigned long long)xhci_trb_virt_to_dma( @@ -774,7 +760,7 @@ remove_finished_td: */ list_del_init(&cur_td->td_list); } - last_unlinked_td = cur_td; + xhci_stop_watchdog_timer_in_irq(xhci, ep); /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ @@ -796,7 +782,7 @@ remove_finished_td: * So stop when we've completed the URB for the last TD we unlinked. */ do { - cur_td = list_entry(ep->cancelled_td_list.next, + cur_td = list_first_entry(&ep->cancelled_td_list, struct xhci_td, cancelled_td_list); list_del_init(&cur_td->cancelled_td_list); @@ -805,8 +791,7 @@ remove_finished_td: * just overwrite it (because the URB has been unlinked). */ ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb); - if (ep_ring && cur_td->bounce_seg) - xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td); + xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td); inc_td_cnt(cur_td->urb); if (last_td_in_urb(cur_td)) xhci_giveback_urb_in_irq(xhci, cur_td, 0); @@ -824,16 +809,15 @@ remove_finished_td: static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring) { struct xhci_td *cur_td; + struct xhci_td *tmp; - while (!list_empty(&ring->td_list)) { - cur_td = list_first_entry(&ring->td_list, - struct xhci_td, td_list); + list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) { list_del_init(&cur_td->td_list); + if (!list_empty(&cur_td->cancelled_td_list)) list_del_init(&cur_td->cancelled_td_list); - if (cur_td->bounce_seg) - xhci_unmap_td_bounce_buffer(xhci, ring, cur_td); + xhci_unmap_td_bounce_buffer(xhci, ring, cur_td); inc_td_cnt(cur_td->urb); if (last_td_in_urb(cur_td)) @@ -845,6 +829,7 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci, int slot_id, int ep_index) { struct xhci_td *cur_td; + struct xhci_td *tmp; struct xhci_virt_ep *ep; struct xhci_ring *ring; @@ -870,12 +855,12 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci, slot_id, ep_index); xhci_kill_ring_urbs(xhci, ring); } - while (!list_empty(&ep->cancelled_td_list)) { - cur_td = list_first_entry(&ep->cancelled_td_list, - struct xhci_td, cancelled_td_list); - list_del_init(&cur_td->cancelled_td_list); + list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list, + cancelled_td_list) { + list_del_init(&cur_td->cancelled_td_list); inc_td_cnt(cur_td->urb); + if (last_td_in_urb(cur_td)) xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); } @@ -895,10 +880,8 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci, * simple flag to say whether there is a pending stop endpoint command for a * particular endpoint. * - * Instead we use a combination of that flag and a counter for the number of - * pending stop endpoint commands. If the timer is the tail end of the last - * stop endpoint command, and the endpoint's command is still pending, we assume - * the host is dying. + * Instead we use a combination of that flag and checking if a new timer is + * pending. 
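
Several xhci-ring hunks above replace open-coded "while (!list_empty()) { take the first entry, unlink it }" loops with list_for_each_entry_safe(), which keeps a pointer to the next node so the current one can be unlinked or freed from inside the loop body. The idiom in isolation, with an illustrative my_td structure rather than the xhci types:

#include <linux/list.h>
#include <linux/slab.h>

struct my_td {
        struct list_head list;
        int id;
};

/* Unlink and free every element; safe because @tmp caches the next node. */
static void flush_all(struct list_head *head)
{
        struct my_td *td, *tmp;

        list_for_each_entry_safe(td, tmp, head, list) {
                list_del_init(&td->list);
                kfree(td);
        }
}

Using list_del_init() rather than list_del() leaves the removed node self-pointing, so a later list_empty() check on that node, as the cancelled_td_list handling above relies on, stays well defined.
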
*/ void xhci_stop_endpoint_command_watchdog(unsigned long arg) { @@ -912,12 +895,11 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) spin_lock_irqsave(&xhci->lock, flags); - ep->stop_cmds_pending--; - if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "Stop EP timer ran, but no command pending, " - "exiting."); + /* bail out if cmd completed but raced with stop ep watchdog timer.*/ + if (!(ep->ep_state & EP_STOP_CMD_PENDING) || + timer_pending(&ep->stop_cmd_timer)) { spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg(xhci, "Stop EP timer raced with cmd completion, exit"); return; } @@ -926,7 +908,10 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) /* Oops, HC is dead or dying or at least not responding to the stop * endpoint command. */ + xhci->xhc_state |= XHCI_STATE_DYING; + ep->ep_state &= ~EP_STOP_CMD_PENDING; + /* Disable interrupts from the host controller and start halting it */ xhci_quiesce(xhci); spin_unlock_irqrestore(&xhci->lock, flags); @@ -1050,10 +1035,10 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, unsigned int slot_state; switch (cmd_comp_code) { - case COMP_TRB_ERR: + case COMP_TRB_ERROR: xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n"); break; - case COMP_CTX_STATE: + case COMP_CONTEXT_STATE_ERROR: xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n"); ep_state = GET_EP_CTX_STATE(ep_ctx); slot_state = le32_to_cpu(slot_ctx->dev_state); @@ -1062,7 +1047,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, "Slot state = %u, EP state = %u", slot_state, ep_state); break; - case COMP_EBADSLT: + case COMP_SLOT_NOT_ENABLED_ERROR: xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n", slot_id); break; @@ -1259,7 +1244,7 @@ void xhci_cleanup_command_queue(struct xhci_hcd *xhci) { struct xhci_command *cur_cmd, *tmp_cmd; list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list) - xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT); + xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED); } void xhci_handle_command_timeout(struct work_struct *work) @@ -1282,7 +1267,7 @@ void xhci_handle_command_timeout(struct work_struct *work) return; } /* mark this command to be cancelled */ - xhci->current_cmd->status = COMP_CMD_ABORT; + xhci->current_cmd->status = COMP_COMMAND_ABORTED; /* Make sure command ring is running before aborting it */ hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); @@ -1335,6 +1320,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, cmd_dma = le64_to_cpu(event->cmd_trb); cmd_trb = xhci->cmd_ring->dequeue; + + trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic); + cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, cmd_trb); /* @@ -1347,16 +1335,14 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, return; } - cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list); + cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list); cancel_delayed_work(&xhci->cmd_timer); - trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event); - cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); /* If CMD ring stopped we own the trbs between enqueue and dequeue */ - if (cmd_comp_code == COMP_CMD_STOP) { + if (cmd_comp_code == COMP_STOPPED) { complete_all(&xhci->cmd_ring_stop_completion); return; } @@ -1373,9 
+1359,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, * The command ring is stopped now, but the xHC will issue a Command * Ring Stopped event which will cause us to restart it. */ - if (cmd_comp_code == COMP_CMD_ABORT) { + if (cmd_comp_code == COMP_COMMAND_ABORTED) { xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; - if (cmd->status == COMP_CMD_ABORT) { + if (cmd->status == COMP_COMMAND_ABORTED) { if (xhci->current_cmd == cmd) xhci->current_cmd = NULL; goto event_handled; @@ -1411,8 +1397,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, break; case TRB_CMD_NOOP: /* Is this an aborted command turned to NO-OP? */ - if (cmd->status == COMP_CMD_STOP) - cmd_comp_code = COMP_CMD_STOP; + if (cmd->status == COMP_STOPPED) + cmd_comp_code = COMP_STOPPED; break; case TRB_RESET_EP: WARN_ON(slot_id != TRB_TO_SLOT_ID( @@ -1437,9 +1423,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, } /* restart timer if this wasn't the last command */ - if (cmd->cmd_list.next != &xhci->cmd_list) { - xhci->current_cmd = list_entry(cmd->cmd_list.next, - struct xhci_command, cmd_list); + if (!list_is_singular(&xhci->cmd_list)) { + xhci->current_cmd = list_first_entry(&cmd->cmd_list, + struct xhci_command, cmd_list); xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT); } else if (xhci->current_cmd == cmd) { xhci->current_cmd = NULL; @@ -1805,9 +1791,9 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, unsigned int trb_comp_code) { /* TRB completion codes that may require a manual halt cleanup */ - if (trb_comp_code == COMP_TX_ERR || - trb_comp_code == COMP_BABBLE || - trb_comp_code == COMP_SPLIT_ERR) + if (trb_comp_code == COMP_USB_TRANSACTION_ERROR || + trb_comp_code == COMP_BABBLE_DETECTED_ERROR || + trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR) /* The 0.95 spec says a babbling control endpoint * is not halted. The 0.96 spec says it is. Some HW * claims to be 0.95 compliant, but it halts the control @@ -1834,22 +1820,64 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) return 0; } -/* - * Finish the td processing, remove the td from td list; - * Return 1 if the urb can be given back. - */ +static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td, + struct xhci_ring *ep_ring, int *status) +{ + struct urb_priv *urb_priv; + struct urb *urb = NULL; + + /* Clean up the endpoint's TD list */ + urb = td->urb; + urb_priv = urb->hcpriv; + + /* if a bounce buffer was used to align this td then unmap it */ + xhci_unmap_td_bounce_buffer(xhci, ep_ring, td); + + /* Do one last check of the actual transfer length. + * If the host controller said we transferred more data than the buffer + * length, urb->actual_length will be a very big number (since it's + * unsigned). Play it safe and say we didn't transfer anything. + */ + if (urb->actual_length > urb->transfer_buffer_length) { + xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n", + urb->transfer_buffer_length, urb->actual_length); + urb->actual_length = 0; + *status = 0; + } + list_del_init(&td->td_list); + /* Was this TD slated to be cancelled but completed anyway? 
*/ + if (!list_empty(&td->cancelled_td_list)) + list_del_init(&td->cancelled_td_list); + + inc_td_cnt(urb); + /* Giveback the urb when all the tds are completed */ + if (last_td_in_urb(td)) { + if ((urb->actual_length != urb->transfer_buffer_length && + (urb->transfer_flags & URB_SHORT_NOT_OK)) || + (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc))) + xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n", + urb, urb->actual_length, + urb->transfer_buffer_length, *status); + + /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */ + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) + *status = 0; + xhci_giveback_urb_in_irq(xhci, td, *status); + } + + return 0; +} + static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, union xhci_trb *ep_trb, struct xhci_transfer_event *event, struct xhci_virt_ep *ep, int *status, bool skip) { struct xhci_virt_device *xdev; + struct xhci_ep_ctx *ep_ctx; struct xhci_ring *ep_ring; unsigned int slot_id; - int ep_index; - struct urb *urb = NULL; - struct xhci_ep_ctx *ep_ctx; - struct urb_priv *urb_priv; u32 trb_comp_code; + int ep_index; slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); xdev = xhci->devs[slot_id]; @@ -1861,9 +1889,9 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, if (skip) goto td_cleanup; - if (trb_comp_code == COMP_STOP_INVAL || - trb_comp_code == COMP_STOP || - trb_comp_code == COMP_STOP_SHORT) { + if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID || + trb_comp_code == COMP_STOPPED || + trb_comp_code == COMP_STOPPED_SHORT_PACKET) { /* The Endpoint Stop Command completion will take care of any * stopped TDs. A stopped TD may be restarted, so don't update * the ring dequeue pointer or take this TD off any lists yet. @@ -1871,7 +1899,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, ep->stopped_td = td; return 0; } - if (trb_comp_code == COMP_STALL || + if (trb_comp_code == COMP_STALL_ERROR || xhci_requires_manual_halt_cleanup(xhci, ep_ctx, trb_comp_code)) { /* Issue a reset endpoint command to clear the host side @@ -1889,46 +1917,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, } td_cleanup: - /* Clean up the endpoint's TD list */ - urb = td->urb; - urb_priv = urb->hcpriv; - - /* if a bounce buffer was used to align this td then unmap it */ - if (td->bounce_seg) - xhci_unmap_td_bounce_buffer(xhci, ep_ring, td); - - /* Do one last check of the actual transfer length. - * If the host controller said we transferred more data than the buffer - * length, urb->actual_length will be a very big number (since it's - * unsigned). Play it safe and say we didn't transfer anything. - */ - if (urb->actual_length > urb->transfer_buffer_length) { - xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n", - urb->transfer_buffer_length, urb->actual_length); - urb->actual_length = 0; - *status = 0; - } - list_del_init(&td->td_list); - /* Was this TD slated to be cancelled but completed anyway? 
*/ - if (!list_empty(&td->cancelled_td_list)) - list_del_init(&td->cancelled_td_list); - - inc_td_cnt(urb); - /* Giveback the urb when all the tds are completed */ - if (last_td_in_urb(td)) { - if ((urb->actual_length != urb->transfer_buffer_length && - (urb->transfer_flags & URB_SHORT_NOT_OK)) || - (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc))) - xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n", - urb, urb->actual_length, - urb->transfer_buffer_length, *status); - - /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */ - if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) - *status = 0; - xhci_giveback_urb_in_irq(xhci, td, *status); - } - return 0; + return xhci_td_cleanup(xhci, td, ep_ring, status); } /* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */ @@ -1982,16 +1971,16 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, } *status = 0; break; - case COMP_SHORT_TX: + case COMP_SHORT_PACKET: *status = 0; break; - case COMP_STOP_SHORT: + case COMP_STOPPED_SHORT_PACKET: if (trb_type == TRB_DATA || trb_type == TRB_NORMAL) td->urb->actual_length = remaining; else xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n"); goto finish_td; - case COMP_STOP: + case COMP_STOPPED: switch (trb_type) { case TRB_SETUP: td->urb->actual_length = 0; @@ -2005,7 +1994,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, trb_type); goto finish_td; } - case COMP_STOP_INVAL: + case COMP_STOPPED_LENGTH_INVALID: goto finish_td; default: if (!xhci_requires_manual_halt_cleanup(xhci, @@ -2014,7 +2003,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n", trb_comp_code, ep_index); /* else fall through */ - case COMP_STALL: + case COMP_STALL_ERROR: /* Did we transfer part of the data (middle) phase? 
*/ if (trb_type == TRB_DATA || trb_type == TRB_NORMAL) td->urb->actual_length = requested - remaining; @@ -2066,7 +2055,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); urb_priv = td->urb->hcpriv; - idx = urb_priv->td_cnt; + idx = urb_priv->num_tds_done; frame = &td->urb->iso_frame_desc[idx]; requested = frame->length; remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); @@ -2085,35 +2074,35 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, } frame->status = 0; break; - case COMP_SHORT_TX: + case COMP_SHORT_PACKET: frame->status = short_framestatus; sum_trbs_for_length = true; break; - case COMP_BW_OVER: + case COMP_BANDWIDTH_OVERRUN_ERROR: frame->status = -ECOMM; break; - case COMP_BUFF_OVER: - case COMP_BABBLE: + case COMP_ISOCH_BUFFER_OVERRUN: + case COMP_BABBLE_DETECTED_ERROR: frame->status = -EOVERFLOW; break; - case COMP_DEV_ERR: - case COMP_STALL: + case COMP_INCOMPATIBLE_DEVICE_ERROR: + case COMP_STALL_ERROR: frame->status = -EPROTO; break; - case COMP_TX_ERR: + case COMP_USB_TRANSACTION_ERROR: frame->status = -EPROTO; if (ep_trb != td->last_trb) return 0; break; - case COMP_STOP: + case COMP_STOPPED: sum_trbs_for_length = true; break; - case COMP_STOP_SHORT: + case COMP_STOPPED_SHORT_PACKET: /* field normally containing residue now contains tranferred */ frame->status = short_framestatus; requested = remaining; break; - case COMP_STOP_INVAL: + case COMP_STOPPED_LENGTH_INVALID: requested = 0; remaining = 0; break; @@ -2145,7 +2134,7 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); urb_priv = td->urb->hcpriv; - idx = urb_priv->td_cnt; + idx = urb_priv->num_tds_done; frame = &td->urb->iso_frame_desc[idx]; /* The transfer is partly done. 
*/ @@ -2190,16 +2179,16 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, } *status = 0; break; - case COMP_SHORT_TX: + case COMP_SHORT_PACKET: xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", td->urb->ep->desc.bEndpointAddress, requested, remaining); *status = 0; break; - case COMP_STOP_SHORT: + case COMP_STOPPED_SHORT_PACKET: td->urb->actual_length = remaining; goto finish_td; - case COMP_STOP_INVAL: + case COMP_STOPPED_LENGTH_INVALID: /* stopped on ep trb with invalid length, exclude it */ ep_trb_len = 0; remaining = 0; @@ -2231,8 +2220,6 @@ finish_td: */ static int handle_tx_event(struct xhci_hcd *xhci, struct xhci_transfer_event *event) - __releases(&xhci->lock) - __acquires(&xhci->lock) { struct xhci_virt_device *xdev; struct xhci_virt_ep *ep; @@ -2305,50 +2292,50 @@ static int handle_tx_event(struct xhci_hcd *xhci, if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) break; if (xhci->quirks & XHCI_TRUST_TX_LENGTH) - trb_comp_code = COMP_SHORT_TX; + trb_comp_code = COMP_SHORT_PACKET; else xhci_warn_ratelimited(xhci, "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n"); - case COMP_SHORT_TX: + case COMP_SHORT_PACKET: break; - case COMP_STOP: + case COMP_STOPPED: xhci_dbg(xhci, "Stopped on Transfer TRB\n"); break; - case COMP_STOP_INVAL: + case COMP_STOPPED_LENGTH_INVALID: xhci_dbg(xhci, "Stopped on No-op or Link TRB\n"); break; - case COMP_STOP_SHORT: + case COMP_STOPPED_SHORT_PACKET: xhci_dbg(xhci, "Stopped with short packet transfer detected\n"); break; - case COMP_STALL: + case COMP_STALL_ERROR: xhci_dbg(xhci, "Stalled endpoint\n"); ep->ep_state |= EP_HALTED; status = -EPIPE; break; - case COMP_TRB_ERR: + case COMP_TRB_ERROR: xhci_warn(xhci, "WARN: TRB error on endpoint\n"); status = -EILSEQ; break; - case COMP_SPLIT_ERR: - case COMP_TX_ERR: + case COMP_SPLIT_TRANSACTION_ERROR: + case COMP_USB_TRANSACTION_ERROR: xhci_dbg(xhci, "Transfer error on endpoint\n"); status = -EPROTO; break; - case COMP_BABBLE: + case COMP_BABBLE_DETECTED_ERROR: xhci_dbg(xhci, "Babble error on endpoint\n"); status = -EOVERFLOW; break; - case COMP_DB_ERR: + case COMP_DATA_BUFFER_ERROR: xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n"); status = -ENOSR; break; - case COMP_BW_OVER: + case COMP_BANDWIDTH_OVERRUN_ERROR: xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n"); break; - case COMP_BUFF_OVER: + case COMP_ISOCH_BUFFER_OVERRUN: xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n"); break; - case COMP_UNDERRUN: + case COMP_RING_UNDERRUN: /* * When the Isoch ring is empty, the xHC will generate * a Ring Overrun Event for IN Isoch endpoint or Ring @@ -2361,7 +2348,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), ep_index); goto cleanup; - case COMP_OVERRUN: + case COMP_RING_OVERRUN: xhci_dbg(xhci, "overrun event on endpoint\n"); if (!list_empty(&ep_ring->td_list)) xhci_dbg(xhci, "Overrun Event for slot %d ep %d " @@ -2369,11 +2356,11 @@ static int handle_tx_event(struct xhci_hcd *xhci, TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), ep_index); goto cleanup; - case COMP_DEV_ERR: + case COMP_INCOMPATIBLE_DEVICE_ERROR: xhci_warn(xhci, "WARN: detect an incompatible device"); status = -EPROTO; break; - case COMP_MISSED_INT: + case COMP_MISSED_SERVICE_ERROR: /* * When encounter missed service error, one or more isoc tds * may be missed by xHC. 
@@ -2383,7 +2370,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, ep->skip = true; xhci_dbg(xhci, "Miss service interval error, set skip flag\n"); goto cleanup; - case COMP_PING_ERR: + case COMP_NO_PING_RESPONSE_ERROR: ep->skip = true; xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n"); goto cleanup; @@ -2407,8 +2394,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, * event if the device was suspended. Don't print * warnings. */ - if (!(trb_comp_code == COMP_STOP || - trb_comp_code == COMP_STOP_INVAL)) { + if (!(trb_comp_code == COMP_STOPPED || + trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) { xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), ep_index); @@ -2433,7 +2420,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, goto cleanup; } - td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); + td = list_first_entry(&ep_ring->td_list, struct xhci_td, + td_list); if (ep->skip) td_num--; @@ -2449,8 +2437,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, * last TRB of the previous TD. The command completion handle * will take care the rest. */ - if (!ep_seg && (trb_comp_code == COMP_STOP || - trb_comp_code == COMP_STOP_INVAL)) { + if (!ep_seg && (trb_comp_code == COMP_STOPPED || + trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) { goto cleanup; } @@ -2481,7 +2469,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, skip_isoc_td(xhci, td, event, ep, &status); goto cleanup; } - if (trb_comp_code == COMP_SHORT_TX) + if (trb_comp_code == COMP_SHORT_PACKET) ep_ring->last_td_was_short = true; else ep_ring->last_td_was_short = false; @@ -2493,6 +2481,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / sizeof(*ep_trb)]; + + trace_xhci_handle_transfer(ep_ring, + (struct xhci_generic_trb *) ep_trb); + /* * No-op TRB should not trigger interrupts. * If ep_trb is a no-op TRB, it means the @@ -2514,8 +2506,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, &status); cleanup: handling_skipped_tds = ep->skip && - trb_comp_code != COMP_MISSED_INT && - trb_comp_code != COMP_PING_ERR; + trb_comp_code != COMP_MISSED_SERVICE_ERROR && + trb_comp_code != COMP_NO_PING_RESPONSE_ERROR; /* * Do not update event ring dequeue pointer if we're in a loop @@ -2559,6 +2551,8 @@ static int xhci_handle_event(struct xhci_hcd *xhci) xhci->event_ring->cycle_state) return 0; + trace_xhci_handle_event(xhci->event_ring, &event->generic); + /* * Barrier between reading the TRB_CYCLE (valid) flag above and any * speculative reads of the event's flags/data below. 
@@ -2617,27 +2611,28 @@ static int xhci_handle_event(struct xhci_hcd *xhci) irqreturn_t xhci_irq(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); - u32 status; - u64 temp_64; union xhci_trb *event_ring_deq; + irqreturn_t ret = IRQ_NONE; dma_addr_t deq; + u64 temp_64; + u32 status; spin_lock(&xhci->lock); /* Check if the xHC generated the interrupt, or the irq is shared */ status = readl(&xhci->op_regs->status); - if (status == 0xffffffff) - goto hw_died; - - if (!(status & STS_EINT)) { - spin_unlock(&xhci->lock); - return IRQ_NONE; + if (status == 0xffffffff) { + ret = IRQ_HANDLED; + goto out; } + + if (!(status & STS_EINT)) + goto out; + if (status & STS_FATAL) { xhci_warn(xhci, "WARNING: Host System Error\n"); xhci_halt(xhci); -hw_died: - spin_unlock(&xhci->lock); - return IRQ_HANDLED; + ret = IRQ_HANDLED; + goto out; } /* @@ -2668,9 +2663,8 @@ hw_died: temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue); - spin_unlock(&xhci->lock); - - return IRQ_HANDLED; + ret = IRQ_HANDLED; + goto out; } event_ring_deq = xhci->event_ring->dequeue; @@ -2695,10 +2689,12 @@ hw_died: /* Clear the event handler busy flag (RW1C); event ring is empty. */ temp_64 |= ERST_EHB; xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); + ret = IRQ_HANDLED; +out: spin_unlock(&xhci->lock); - return IRQ_HANDLED; + return ret; } irqreturn_t xhci_msi_irq(int irq, void *hcd) @@ -2726,6 +2722,9 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, trb->field[1] = cpu_to_le32(field2); trb->field[2] = cpu_to_le32(field3); trb->field[3] = cpu_to_le32(field4); + + trace_xhci_queue_trb(ring, trb); + inc_enq(xhci, ring, more_trbs_coming); } @@ -2839,7 +2838,7 @@ static int prepare_transfer(struct xhci_hcd *xhci, return ret; urb_priv = urb->hcpriv; - td = urb_priv->td[td_index]; + td = &urb_priv->td[td_index]; INIT_LIST_HEAD(&td->td_list); INIT_LIST_HEAD(&td->cancelled_td_list); @@ -2856,8 +2855,6 @@ static int prepare_transfer(struct xhci_hcd *xhci, td->start_seg = ep_ring->enq_seg; td->first_trb = ep_ring->enqueue; - urb_priv->td[td_index] = td; - return 0; } @@ -3134,10 +3131,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, urb_priv = urb->hcpriv; /* Deal with URB_ZERO_PACKET - need one more td/trb */ - if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->length > 1) + if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1) need_zero_pkt = true; - td = urb_priv->td[0]; + td = &urb_priv->td[0]; /* * Don't give the first TRB to the hardware (by toggling the cycle bit) @@ -3230,7 +3227,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, urb->stream_id, 1, urb, 1, mem_flags); - urb_priv->td[1]->last_trb = ring->enqueue; + urb_priv->td[1].last_trb = ring->enqueue; field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC; queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field); } @@ -3251,7 +3248,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct usb_ctrlrequest *setup; struct xhci_generic_trb *start_trb; int start_cycle; - u32 field, length_field, remainder; + u32 field; struct urb_priv *urb_priv; struct xhci_td *td; @@ -3282,7 +3279,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, return ret; urb_priv = urb->hcpriv; - td = urb_priv->td[0]; + td = &urb_priv->td[0]; /* * Don't give the first TRB to the hardware (by toggling the cycle bit) @@ -3324,16 +3321,16 @@ int 
xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, else field = TRB_TYPE(TRB_DATA); - remainder = xhci_td_remainder(xhci, 0, - urb->transfer_buffer_length, - urb->transfer_buffer_length, - urb, 1); - - length_field = TRB_LEN(urb->transfer_buffer_length) | - TRB_TD_SIZE(remainder) | - TRB_INTR_TARGET(0); - if (urb->transfer_buffer_length > 0) { + u32 length_field, remainder; + + remainder = xhci_td_remainder(xhci, 0, + urb->transfer_buffer_length, + urb->transfer_buffer_length, + urb, 1); + length_field = TRB_LEN(urb->transfer_buffer_length) | + TRB_TD_SIZE(remainder) | + TRB_INTR_TARGET(0); if (setup->bRequestType & USB_DIR_IN) field |= TRB_DIR_IN; queue_trb(xhci, ep_ring, true, @@ -3570,7 +3567,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, return ret; goto cleanup; } - td = urb_priv->td[i]; + td = &urb_priv->td[i]; /* use SIA as default, if frame id is used overwrite it */ sia_frame_id = TRB_SIA; @@ -3677,20 +3674,20 @@ cleanup: /* Clean up a partially enqueued isoc transfer. */ for (i--; i >= 0; i--) - list_del_init(&urb_priv->td[i]->td_list); + list_del_init(&urb_priv->td[i].td_list); /* Use the first TD as a temporary variable to turn the TDs we've queued * into No-ops with a software-owned cycle bit. That way the hardware * won't accidentally start executing bogus TDs when we partially * overwrite them. td->first_trb and td->start_seg are already set. */ - urb_priv->td[0]->last_trb = ep_ring->enqueue; + urb_priv->td[0].last_trb = ep_ring->enqueue; /* Every TRB except the first & last will have its cycle bit flipped. */ - td_to_noop(xhci, ep_ring, urb_priv->td[0], true); + td_to_noop(xhci, ep_ring, &urb_priv->td[0], true); /* Reset the ring enqueue back to the first TRB and its cycle bit. */ - ep_ring->enqueue = urb_priv->td[0]->first_trb; - ep_ring->enq_seg = urb_priv->td[0]->start_seg; + ep_ring->enqueue = urb_priv->td[0].first_trb; + ep_ring->enq_seg = urb_priv->td[0].start_seg; ep_ring->cycle_state = start_cycle; ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp; usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); @@ -3816,15 +3813,15 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd, } cmd->command_trb = xhci->cmd_ring->enqueue; - list_add_tail(&cmd->cmd_list, &xhci->cmd_list); /* if there are no other commands queued we start the timeout timer */ - if (xhci->cmd_list.next == &cmd->cmd_list && - !delayed_work_pending(&xhci->cmd_timer)) { + if (list_empty(&xhci->cmd_list)) { xhci->current_cmd = cmd; xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT); } + list_add_tail(&cmd->cmd_list, &xhci->cmd_list); + queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, field4 | xhci->cmd_ring->cycle_state); return 0; diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h index 59c05653b2ea..1ac2cdf8eece 100644 --- a/drivers/usb/host/xhci-trace.h +++ b/drivers/usb/host/xhci-trace.h @@ -103,7 +103,7 @@ DECLARE_EVENT_CLASS(xhci_log_ctx, ((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 32) * ((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1)); ), - TP_printk("\nctx_64=%d, ctx_type=%u, ctx_dma=@%llx, ctx_va=@%p", + TP_printk("ctx_64=%d, ctx_type=%u, ctx_dma=@%llx, ctx_va=@%p", __entry->ctx_64, __entry->ctx_type, (unsigned long long) __entry->ctx_dma, __entry->ctx_va ) @@ -115,34 +115,174 @@ DEFINE_EVENT(xhci_log_ctx, xhci_address_ctx, TP_ARGS(xhci, ctx, ep_num) ); -DECLARE_EVENT_CLASS(xhci_log_event, - TP_PROTO(void *trb_va, struct xhci_generic_trb *ev), - TP_ARGS(trb_va, ev), 
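
The xhci_irq() hunk a little earlier collapses three separate unlock-and-return exits (controller gone, interrupt not ours, event ring handled inline) into one out: label guarded by a single spin_unlock(), with irqreturn_t ret carrying the result. A minimal sketch of that single-exit interrupt-handler shape follows; the device type, register offset and status bits here are hypothetical placeholders, not xhci symbols, and only the control flow mirrors the patch.

/* Illustration only: "struct example_hcd", EX_STATUS and EX_STS_EINT are
 * made up for this sketch; the single-exit pattern is what matters. */
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/spinlock.h>

struct example_hcd {
	spinlock_t lock;
	void __iomem *regs;
};

#define EX_STATUS	0x04		/* hypothetical status register */
#define EX_STS_EINT	(1 << 3)	/* hypothetical "event pending" bit */

static irqreturn_t example_irq(int irq, void *dev_id)
{
	struct example_hcd *hcd = dev_id;
	irqreturn_t ret = IRQ_NONE;
	u32 status;

	spin_lock(&hcd->lock);
	status = readl(hcd->regs + EX_STATUS);
	if (status == 0xffffffff) {	/* controller died or was removed */
		ret = IRQ_HANDLED;
		goto out;
	}
	if (!(status & EX_STS_EINT))	/* shared IRQ line, not ours */
		goto out;

	/* ... acknowledge the interrupt and process pending events ... */
	ret = IRQ_HANDLED;
out:
	spin_unlock(&hcd->lock);
	return ret;
}

Keeping one unlock site is what lets the patch drop the old hw_died label that had to duplicate the spin_unlock() before returning.
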
+DECLARE_EVENT_CLASS(xhci_log_trb, + TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb), + TP_ARGS(ring, trb), TP_STRUCT__entry( - __field(void *, va) - __field(u64, dma) - __field(u32, status) - __field(u32, flags) - __dynamic_array(u8, trb, sizeof(struct xhci_generic_trb)) + __field(u32, type) + __field(u32, field0) + __field(u32, field1) + __field(u32, field2) + __field(u32, field3) ), TP_fast_assign( - __entry->va = trb_va; - __entry->dma = ((u64)le32_to_cpu(ev->field[1])) << 32 | - le32_to_cpu(ev->field[0]); - __entry->status = le32_to_cpu(ev->field[2]); - __entry->flags = le32_to_cpu(ev->field[3]); - memcpy(__get_dynamic_array(trb), trb_va, - sizeof(struct xhci_generic_trb)); + __entry->type = ring->type; + __entry->field0 = le32_to_cpu(trb->field[0]); + __entry->field1 = le32_to_cpu(trb->field[1]); + __entry->field2 = le32_to_cpu(trb->field[2]); + __entry->field3 = le32_to_cpu(trb->field[3]); ), - TP_printk("\ntrb_dma=@%llx, trb_va=@%p, status=%08x, flags=%08x", - (unsigned long long) __entry->dma, __entry->va, - __entry->status, __entry->flags + TP_printk("%s: %s", xhci_ring_type_string(__entry->type), + xhci_decode_trb(__entry->field0, __entry->field1, + __entry->field2, __entry->field3) ) ); -DEFINE_EVENT(xhci_log_event, xhci_cmd_completion, - TP_PROTO(void *trb_va, struct xhci_generic_trb *ev), - TP_ARGS(trb_va, ev) +DEFINE_EVENT(xhci_log_trb, xhci_handle_event, + TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb), + TP_ARGS(ring, trb) +); + +DEFINE_EVENT(xhci_log_trb, xhci_handle_command, + TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb), + TP_ARGS(ring, trb) +); + +DEFINE_EVENT(xhci_log_trb, xhci_handle_transfer, + TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb), + TP_ARGS(ring, trb) +); + +DEFINE_EVENT(xhci_log_trb, xhci_queue_trb, + TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb), + TP_ARGS(ring, trb) +); + +DECLARE_EVENT_CLASS(xhci_log_virt_dev, + TP_PROTO(struct xhci_virt_device *vdev), + TP_ARGS(vdev), + TP_STRUCT__entry( + __field(void *, vdev) + __field(unsigned long long, out_ctx) + __field(unsigned long long, in_ctx) + __field(int, devnum) + __field(int, state) + __field(int, speed) + __field(u8, portnum) + __field(u8, level) + __field(int, slot_id) + ), + TP_fast_assign( + __entry->vdev = vdev; + __entry->in_ctx = (unsigned long long) vdev->in_ctx->dma; + __entry->out_ctx = (unsigned long long) vdev->out_ctx->dma; + __entry->devnum = vdev->udev->devnum; + __entry->state = vdev->udev->state; + __entry->speed = vdev->udev->speed; + __entry->portnum = vdev->udev->portnum; + __entry->level = vdev->udev->level; + __entry->slot_id = vdev->udev->slot_id; + ), + TP_printk("vdev %p ctx %llx | %llx num %d state %d speed %d port %d level %d slot %d", + __entry->vdev, __entry->in_ctx, __entry->out_ctx, + __entry->devnum, __entry->state, __entry->speed, + __entry->portnum, __entry->level, __entry->slot_id + ) +); + +DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device, + TP_PROTO(struct xhci_virt_device *vdev), + TP_ARGS(vdev) +); + +DEFINE_EVENT(xhci_log_virt_dev, xhci_free_virt_device, + TP_PROTO(struct xhci_virt_device *vdev), + TP_ARGS(vdev) +); + +DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device, + TP_PROTO(struct xhci_virt_device *vdev), + TP_ARGS(vdev) +); + +DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_addressable_virt_device, + TP_PROTO(struct xhci_virt_device *vdev), + TP_ARGS(vdev) +); + +DEFINE_EVENT(xhci_log_virt_dev, xhci_stop_device, + TP_PROTO(struct xhci_virt_device *vdev), + TP_ARGS(vdev) +); 
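
The xhci_log_trb and xhci_log_virt_dev classes above, and the xhci_log_urb class that follows, replace the old xhci_log_event class; each DEFINE_EVENT reuses its class's fast-assign and print logic, and the costly string decoding (xhci_decode_trb(), added to xhci.h later in this series) only runs in TP_printk, i.e. when the trace buffer is read rather than in the interrupt path. A hedged, generic sketch of the same declare-then-define pattern is below; the event name and fields are hypothetical, and the TRACE_SYSTEM / TRACE_INCLUDE_FILE boilerplate a real trace header needs is omitted.

/* Sketch only: "example_log_xfer" is not part of this patch. */
#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(example_log_xfer,
	TP_PROTO(unsigned int slot_id, unsigned int len),
	TP_ARGS(slot_id, len),
	TP_STRUCT__entry(
		__field(unsigned int, slot_id)
		__field(unsigned int, len)
	),
	TP_fast_assign(
		__entry->slot_id = slot_id;
		__entry->len = len;
	),
	/* formatted at trace-read time, not in the hot path */
	TP_printk("slot %u len %u", __entry->slot_id, __entry->len)
);

DEFINE_EVENT(example_log_xfer, example_xfer_start,
	TP_PROTO(unsigned int slot_id, unsigned int len),
	TP_ARGS(slot_id, len)
);

At runtime the resulting events are switched on through tracefs, typically under events/<TRACE_SYSTEM>/<event>/enable; for this driver the trace system is xhci-hcd.
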
+ +DECLARE_EVENT_CLASS(xhci_log_urb, + TP_PROTO(struct urb *urb), + TP_ARGS(urb), + TP_STRUCT__entry( + __field(void *, urb) + __field(unsigned int, pipe) + __field(unsigned int, stream) + __field(int, status) + __field(unsigned int, flags) + __field(int, num_mapped_sgs) + __field(int, num_sgs) + __field(int, length) + __field(int, actual) + __field(int, epnum) + __field(int, dir_in) + __field(int, type) + ), + TP_fast_assign( + __entry->urb = urb; + __entry->pipe = urb->pipe; + __entry->stream = urb->stream_id; + __entry->status = urb->status; + __entry->flags = urb->transfer_flags; + __entry->num_mapped_sgs = urb->num_mapped_sgs; + __entry->num_sgs = urb->num_sgs; + __entry->length = urb->transfer_buffer_length; + __entry->actual = urb->actual_length; + __entry->epnum = usb_endpoint_num(&urb->ep->desc); + __entry->dir_in = usb_endpoint_dir_in(&urb->ep->desc); + __entry->type = usb_endpoint_type(&urb->ep->desc); + ), + TP_printk("ep%d%s-%s: urb %p pipe %u length %d/%d sgs %d/%d stream %d flags %08x", + __entry->epnum, __entry->dir_in ? "in" : "out", + ({ char *s; + switch (__entry->type) { + case USB_ENDPOINT_XFER_INT: + s = "intr"; + break; + case USB_ENDPOINT_XFER_CONTROL: + s = "control"; + break; + case USB_ENDPOINT_XFER_BULK: + s = "bulk"; + break; + case USB_ENDPOINT_XFER_ISOC: + s = "isoc"; + break; + default: + s = "UNKNOWN"; + } s; }), __entry->urb, __entry->pipe, __entry->actual, + __entry->length, __entry->num_mapped_sgs, + __entry->num_sgs, __entry->stream, __entry->flags + ) +); + +DEFINE_EVENT(xhci_log_urb, xhci_urb_enqueue, + TP_PROTO(struct urb *urb), + TP_ARGS(urb) +); + +DEFINE_EVENT(xhci_log_urb, xhci_urb_giveback, + TP_PROTO(struct urb *urb), + TP_ARGS(urb) +); + +DEFINE_EVENT(xhci_log_urb, xhci_urb_dequeue, + TP_PROTO(struct urb *urb), + TP_ARGS(urb) ); #endif /* __XHCI_TRACE_H */ diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 9a0ec116654a..6d6c46000e56 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -207,7 +207,7 @@ int xhci_reset(struct xhci_hcd *xhci) ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, 10 * 1000 * 1000); - for (i = 0; i < 2; ++i) { + for (i = 0; i < 2; i++) { xhci->bus_state[i].port_c_suspend = 0; xhci->bus_state[i].suspended_ports = 0; xhci->bus_state[i].resuming_ports = 0; @@ -1332,12 +1332,11 @@ command_cleanup: int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); - struct xhci_td *buffer; unsigned long flags; int ret = 0; - unsigned int slot_id, ep_index; + unsigned int slot_id, ep_index, ep_state; struct urb_priv *urb_priv; - int size, i; + int num_tds; if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, true, __func__) <= 0) @@ -1349,40 +1348,30 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) if (!HCD_HW_ACCESSIBLE(hcd)) { if (!in_interrupt()) xhci_dbg(xhci, "urb submitted during PCI suspend\n"); - ret = -ESHUTDOWN; - goto exit; + return -ESHUTDOWN; } if (usb_endpoint_xfer_isoc(&urb->ep->desc)) - size = urb->number_of_packets; + num_tds = urb->number_of_packets; else if (usb_endpoint_is_bulk_out(&urb->ep->desc) && urb->transfer_buffer_length > 0 && urb->transfer_flags & URB_ZERO_PACKET && !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc))) - size = 2; + num_tds = 2; else - size = 1; + num_tds = 1; urb_priv = kzalloc(sizeof(struct urb_priv) + - size * sizeof(struct xhci_td *), mem_flags); + num_tds * sizeof(struct xhci_td), mem_flags); if (!urb_priv) return -ENOMEM; - buffer = 
kzalloc(size * sizeof(struct xhci_td), mem_flags); - if (!buffer) { - kfree(urb_priv); - return -ENOMEM; - } - - for (i = 0; i < size; i++) { - urb_priv->td[i] = buffer; - buffer++; - } - - urb_priv->length = size; - urb_priv->td_cnt = 0; + urb_priv->num_tds = num_tds; + urb_priv->num_tds_done = 0; urb->hcpriv = urb_priv; + trace_xhci_urb_enqueue(urb); + if (usb_endpoint_xfer_control(&urb->ep->desc)) { /* Check to see if the max packet size for the default control * endpoint changed during FS device enumeration @@ -1396,69 +1385,51 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) return ret; } } + } - /* We have a spinlock and interrupts disabled, so we must pass - * atomic context to this function, which may allocate memory. - */ - spin_lock_irqsave(&xhci->lock, flags); - if (xhci->xhc_state & XHCI_STATE_DYING) - goto dying; + spin_lock_irqsave(&xhci->lock, flags); + + if (xhci->xhc_state & XHCI_STATE_DYING) { + xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n", + urb->ep->desc.bEndpointAddress, urb); + ret = -ESHUTDOWN; + goto free_priv; + } + + switch (usb_endpoint_type(&urb->ep->desc)) { + + case USB_ENDPOINT_XFER_CONTROL: ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, - slot_id, ep_index); - if (ret) - goto free_priv; - spin_unlock_irqrestore(&xhci->lock, flags); - } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { - spin_lock_irqsave(&xhci->lock, flags); - if (xhci->xhc_state & XHCI_STATE_DYING) - goto dying; - if (xhci->devs[slot_id]->eps[ep_index].ep_state & - EP_GETTING_STREAMS) { - xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep " - "is transitioning to using streams.\n"); - ret = -EINVAL; - } else if (xhci->devs[slot_id]->eps[ep_index].ep_state & - EP_GETTING_NO_STREAMS) { - xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep " - "is transitioning to " - "not having streams.\n"); + slot_id, ep_index); + break; + case USB_ENDPOINT_XFER_BULK: + ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; + if (ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) { + xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n", + ep_state); ret = -EINVAL; - } else { - ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, - slot_id, ep_index); + break; } - if (ret) - goto free_priv; - spin_unlock_irqrestore(&xhci->lock, flags); - } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { - spin_lock_irqsave(&xhci->lock, flags); - if (xhci->xhc_state & XHCI_STATE_DYING) - goto dying; + ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, + slot_id, ep_index); + break; + + + case USB_ENDPOINT_XFER_INT: ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index); - if (ret) - goto free_priv; - spin_unlock_irqrestore(&xhci->lock, flags); - } else { - spin_lock_irqsave(&xhci->lock, flags); - if (xhci->xhc_state & XHCI_STATE_DYING) - goto dying; + break; + + case USB_ENDPOINT_XFER_ISOC: ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, slot_id, ep_index); - if (ret) - goto free_priv; - spin_unlock_irqrestore(&xhci->lock, flags); } -exit: - return ret; -dying: - xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for " - "non-responsive xHCI host.\n", - urb->ep->desc.bEndpointAddress, urb); - ret = -ESHUTDOWN; + + if (ret) { free_priv: - xhci_urb_free_priv(urb_priv); - urb->hcpriv = NULL; + xhci_urb_free_priv(urb_priv); + urb->hcpriv = NULL; + } spin_unlock_irqrestore(&xhci->lock, flags); return ret; } @@ -1509,6 +1480,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) xhci = hcd_to_xhci(hcd); 
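
The xhci_urb_enqueue() rework above moves to a single allocation: struct urb_priv (see the xhci.h hunk later in this series) now ends in a trailing "struct xhci_td td[0]" flexible array, so the second kzalloc() and the per-TD pointer fixup loop disappear, and callers index &urb_priv->td[i] instead of dereferencing urb_priv->td[i]. A minimal sketch of that header-plus-array allocation, assuming the field names introduced by this patch:

/* Sketch of the one-shot allocation used above (fragment of an
 * enqueue-style path; num_tds and mem_flags as in the patch). */
struct urb_priv *urb_priv;

urb_priv = kzalloc(sizeof(*urb_priv) + num_tds * sizeof(urb_priv->td[0]),
		   mem_flags);
if (!urb_priv)
	return -ENOMEM;

urb_priv->num_tds = num_tds;
urb_priv->num_tds_done = 0;
urb->hcpriv = urb_priv;

/* individual TDs are then reached as &urb_priv->td[i] */

Later kernels also provide struct_size() for an overflow-checked form of the same size computation, though this patch predates it.
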
spin_lock_irqsave(&xhci->lock, flags); + + trace_xhci_urb_dequeue(urb); + /* Make sure the URB hasn't completed or been unlinked already */ ret = usb_hcd_check_unlink_urb(hcd, urb, status); if (ret || !urb->hcpriv) @@ -1518,10 +1492,10 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "HW died, freeing TD."); urb_priv = urb->hcpriv; - for (i = urb_priv->td_cnt; - i < urb_priv->length && xhci->devs[urb->dev->slot_id]; + for (i = urb_priv->num_tds_done; + i < urb_priv->num_tds && xhci->devs[urb->dev->slot_id]; i++) { - td = urb_priv->td[i]; + td = &urb_priv->td[i]; if (!list_empty(&td->td_list)) list_del_init(&td->td_list); if (!list_empty(&td->cancelled_td_list)) @@ -1544,33 +1518,32 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) } urb_priv = urb->hcpriv; - i = urb_priv->td_cnt; - if (i < urb_priv->length) + i = urb_priv->num_tds_done; + if (i < urb_priv->num_tds) xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Cancel URB %p, dev %s, ep 0x%x, " "starting at offset 0x%llx", urb, urb->dev->devpath, urb->ep->desc.bEndpointAddress, (unsigned long long) xhci_trb_virt_to_dma( - urb_priv->td[i]->start_seg, - urb_priv->td[i]->first_trb)); + urb_priv->td[i].start_seg, + urb_priv->td[i].first_trb)); - for (; i < urb_priv->length; i++) { - td = urb_priv->td[i]; + for (; i < urb_priv->num_tds; i++) { + td = &urb_priv->td[i]; list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); } /* Queue a stop endpoint command, but only if this is * the first cancellation to be handled. */ - if (!(ep->ep_state & EP_HALT_PENDING)) { + if (!(ep->ep_state & EP_STOP_CMD_PENDING)) { command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); if (!command) { ret = -ENOMEM; goto done; } - ep->ep_state |= EP_HALT_PENDING; - ep->stop_cmds_pending++; + ep->ep_state |= EP_STOP_CMD_PENDING; ep->stop_cmd_timer.expires = jiffies + XHCI_STOP_EP_CMD_TIMEOUT * HZ; add_timer(&ep->stop_cmd_timer); @@ -1809,7 +1782,7 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); /* Endpoint 0 is always valid */ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); - for (i = 1; i < 31; ++i) { + for (i = 1; i < 31; i++) { ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); ep_ctx->ep_info = 0; ep_ctx->ep_info2 = 0; @@ -1824,32 +1797,32 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, int ret; switch (*cmd_status) { - case COMP_CMD_ABORT: - case COMP_CMD_STOP: + case COMP_COMMAND_ABORTED: + case COMP_STOPPED: xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); ret = -ETIME; break; - case COMP_ENOMEM: + case COMP_RESOURCE_ERROR: dev_warn(&udev->dev, "Not enough host controller resources for new device state.\n"); ret = -ENOMEM; /* FIXME: can we allocate more resources for the HC? */ break; - case COMP_BW_ERR: - case COMP_2ND_BW_ERR: + case COMP_BANDWIDTH_ERROR: + case COMP_SECONDARY_BANDWIDTH_ERROR: dev_warn(&udev->dev, "Not enough bandwidth for new device state.\n"); ret = -ENOSPC; /* FIXME: can we go back to the old state? 
*/ break; - case COMP_TRB_ERR: + case COMP_TRB_ERROR: /* the HCD set up something wrong */ dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, " "add flag = 1, " "and endpoint is not disabled.\n"); ret = -EINVAL; break; - case COMP_DEV_ERR: + case COMP_INCOMPATIBLE_DEVICE_ERROR: dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint configure command.\n"); ret = -ENODEV; @@ -1875,33 +1848,33 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; switch (*cmd_status) { - case COMP_CMD_ABORT: - case COMP_CMD_STOP: + case COMP_COMMAND_ABORTED: + case COMP_STOPPED: xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); ret = -ETIME; break; - case COMP_EINVAL: + case COMP_PARAMETER_ERROR: dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate context command.\n"); ret = -EINVAL; break; - case COMP_EBADSLT: + case COMP_SLOT_NOT_ENABLED_ERROR: dev_warn(&udev->dev, "WARN: slot not enabled for evaluate context command.\n"); ret = -EINVAL; break; - case COMP_CTX_STATE: + case COMP_CONTEXT_STATE_ERROR: dev_warn(&udev->dev, "WARN: invalid context state for evaluate context command.\n"); xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1); ret = -EINVAL; break; - case COMP_DEV_ERR: + case COMP_INCOMPATIBLE_DEVICE_ERROR: dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate context command.\n"); ret = -ENODEV; break; - case COMP_MEL_ERR: + case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR: /* Max Exit Latency too large error */ dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n"); ret = -EINVAL; @@ -2781,7 +2754,7 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); /* Free any rings that were dropped, but not changed. */ - for (i = 1; i < 31; ++i) { + for (i = 1; i < 31; i++) { if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) { xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); @@ -2793,7 +2766,7 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) * Install any rings for completely new endpoints or changed endpoints, * and free or cache any old rings from changed endpoints. */ - for (i = 1; i < 31; ++i) { + for (i = 1; i < 31; i++) { if (!virt_dev->eps[i].new_ring) continue; /* Only cache or free the old ring if it exists. 
@@ -2827,7 +2800,7 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); virt_dev = xhci->devs[udev->slot_id]; /* Free any rings allocated for added endpoints */ - for (i = 0; i < 31; ++i) { + for (i = 0; i < 31; i++) { if (virt_dev->eps[i].new_ring) { xhci_ring_free(xhci, virt_dev->eps[i].new_ring); virt_dev->eps[i].new_ring = NULL; @@ -3497,13 +3470,13 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) */ ret = reset_device_cmd->status; switch (ret) { - case COMP_CMD_ABORT: - case COMP_CMD_STOP: + case COMP_COMMAND_ABORTED: + case COMP_STOPPED: xhci_warn(xhci, "Timeout waiting for reset device command\n"); ret = -ETIME; goto command_cleanup; - case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */ - case COMP_CTX_STATE: /* 0.96 completion code for same thing */ + case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */ + case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */ xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n", slot_id, xhci_get_slot_state(xhci, virt_dev->out_ctx)); @@ -3533,7 +3506,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) /* Everything but endpoint 0 is disabled, so free or cache the rings. */ last_freed_endpoint = 1; - for (i = 1; i < 31; ++i) { + for (i = 1; i < 31; i++) { struct xhci_virt_ep *ep = &virt_dev->eps[i]; if (ep->ep_state & EP_HAS_STREAMS) { @@ -3609,8 +3582,8 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) virt_dev = xhci->devs[udev->slot_id]; /* Stop any wayward timer functions (which may grab the lock) */ - for (i = 0; i < 31; ++i) { - virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING; + for (i = 0; i < 31; i++) { + virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING; del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); } @@ -3844,6 +3817,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, le32_to_cpu(slot_ctx->dev_info) >> 27); spin_lock_irqsave(&xhci->lock, flags); + trace_xhci_setup_device(virt_dev); ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, udev->slot_id, setup); if (ret) { @@ -3863,22 +3837,22 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, * command on a timeout. 
*/ switch (command->status) { - case COMP_CMD_ABORT: - case COMP_CMD_STOP: + case COMP_COMMAND_ABORTED: + case COMP_STOPPED: xhci_warn(xhci, "Timeout while waiting for setup device command\n"); ret = -ETIME; break; - case COMP_CTX_STATE: - case COMP_EBADSLT: + case COMP_CONTEXT_STATE_ERROR: + case COMP_SLOT_NOT_ENABLED_ERROR: xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n", act, udev->slot_id); ret = -EINVAL; break; - case COMP_TX_ERR: + case COMP_USB_TRANSACTION_ERROR: dev_warn(&udev->dev, "Device not responding to setup %s.\n", act); ret = -EPROTO; break; - case COMP_DEV_ERR: + case COMP_INCOMPATIBLE_DEVICE_ERROR: dev_warn(&udev->dev, "ERROR: Incompatible device for setup %s command\n", act); ret = -ENODEV; diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 2d7b6374b58d..da3eb695fe54 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -912,7 +912,7 @@ struct xhci_virt_ep { unsigned int ep_state; #define SET_DEQ_PENDING (1 << 0) #define EP_HALTED (1 << 1) /* For stall handling */ -#define EP_HALT_PENDING (1 << 2) /* For URB cancellation */ +#define EP_STOP_CMD_PENDING (1 << 2) /* For URB cancellation */ /* Transitioning the endpoint to using streams, don't enqueue URBs */ #define EP_GETTING_STREAMS (1 << 3) #define EP_HAS_STREAMS (1 << 4) @@ -924,7 +924,6 @@ struct xhci_virt_ep { unsigned int stopped_stream; /* Watchdog timer for stop endpoint command to cancel URBs */ struct timer_list stop_cmd_timer; - int stop_cmds_pending; struct xhci_hcd *xhci; /* Dequeue pointer and dequeue segment for a submitted Set TR Dequeue * command. We'll need to update the ring's dequeue segment and dequeue @@ -1061,76 +1060,122 @@ struct xhci_transfer_event { /* Completion Code - only applicable for some types of TRBs */ #define COMP_CODE_MASK (0xff << 24) #define GET_COMP_CODE(p) (((p) & COMP_CODE_MASK) >> 24) -#define COMP_SUCCESS 1 -/* Data Buffer Error */ -#define COMP_DB_ERR 2 -/* Babble Detected Error */ -#define COMP_BABBLE 3 -/* USB Transaction Error */ -#define COMP_TX_ERR 4 -/* TRB Error - some TRB field is invalid */ -#define COMP_TRB_ERR 5 -/* Stall Error - USB device is stalled */ -#define COMP_STALL 6 -/* Resource Error - HC doesn't have memory for that device configuration */ -#define COMP_ENOMEM 7 -/* Bandwidth Error - not enough room in schedule for this dev config */ -#define COMP_BW_ERR 8 -/* No Slots Available Error - HC ran out of device slots */ -#define COMP_ENOSLOTS 9 -/* Invalid Stream Type Error */ -#define COMP_STREAM_ERR 10 -/* Slot Not Enabled Error - doorbell rung for disabled device slot */ -#define COMP_EBADSLT 11 -/* Endpoint Not Enabled Error */ -#define COMP_EBADEP 12 -/* Short Packet */ -#define COMP_SHORT_TX 13 -/* Ring Underrun - doorbell rung for an empty isoc OUT ep ring */ -#define COMP_UNDERRUN 14 -/* Ring Overrun - isoc IN ep ring is empty when ep is scheduled to RX */ -#define COMP_OVERRUN 15 -/* Virtual Function Event Ring Full Error */ -#define COMP_VF_FULL 16 -/* Parameter Error - Context parameter is invalid */ -#define COMP_EINVAL 17 -/* Bandwidth Overrun Error - isoc ep exceeded its allocated bandwidth */ -#define COMP_BW_OVER 18 -/* Context State Error - illegal context state transition requested */ -#define COMP_CTX_STATE 19 -/* No Ping Response Error - HC didn't get PING_RESPONSE in time to TX */ -#define COMP_PING_ERR 20 -/* Event Ring is full */ -#define COMP_ER_FULL 21 -/* Incompatible Device Error */ -#define COMP_DEV_ERR 22 -/* Missed Service Error - HC couldn't service an isoc ep within interval */ 
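
The xhci.h block below drops the driver-local completion-code names (COMP_TX_ERR, COMP_STALL, COMP_EBADSLT, ...) in favour of the full set spelled as in the xHCI specification, plus a string decoder; the COMP_* renames scattered through the earlier hunks are the mechanical fallout of that. Condensed from the handle_tx_event() hunk earlier in this series, the errno mapping for a few of the renamed codes looks like the fragment below (illustration only, not a new helper added by the patch):

switch (trb_comp_code) {
case COMP_STALL_ERROR:			/* was COMP_STALL */
	status = -EPIPE;
	break;
case COMP_SPLIT_TRANSACTION_ERROR:	/* was COMP_SPLIT_ERR */
case COMP_USB_TRANSACTION_ERROR:	/* was COMP_TX_ERR */
	status = -EPROTO;
	break;
case COMP_BABBLE_DETECTED_ERROR:	/* was COMP_BABBLE */
	status = -EOVERFLOW;
	break;
case COMP_DATA_BUFFER_ERROR:		/* was COMP_DB_ERR */
	status = -ENOSR;
	break;
}
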
-#define COMP_MISSED_INT 23 -/* Successfully stopped command ring */ -#define COMP_CMD_STOP 24 -/* Successfully aborted current command and stopped command ring */ -#define COMP_CMD_ABORT 25 -/* Stopped - transfer was terminated by a stop endpoint command */ -#define COMP_STOP 26 -/* Same as COMP_EP_STOPPED, but the transferred length in the event is invalid */ -#define COMP_STOP_INVAL 27 -/* Same as COMP_EP_STOPPED, but a short packet detected */ -#define COMP_STOP_SHORT 28 -/* Max Exit Latency Too Large Error */ -#define COMP_MEL_ERR 29 -/* TRB type 30 reserved */ -/* Isoc Buffer Overrun - an isoc IN ep sent more data than could fit in TD */ -#define COMP_BUFF_OVER 31 -/* Event Lost Error - xHC has an "internal event overrun condition" */ -#define COMP_ISSUES 32 -/* Undefined Error - reported when other error codes don't apply */ -#define COMP_UNKNOWN 33 -/* Invalid Stream ID Error */ -#define COMP_STRID_ERR 34 -/* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */ -#define COMP_2ND_BW_ERR 35 -/* Split Transaction Error */ -#define COMP_SPLIT_ERR 36 +#define COMP_INVALID 0 +#define COMP_SUCCESS 1 +#define COMP_DATA_BUFFER_ERROR 2 +#define COMP_BABBLE_DETECTED_ERROR 3 +#define COMP_USB_TRANSACTION_ERROR 4 +#define COMP_TRB_ERROR 5 +#define COMP_STALL_ERROR 6 +#define COMP_RESOURCE_ERROR 7 +#define COMP_BANDWIDTH_ERROR 8 +#define COMP_NO_SLOTS_AVAILABLE_ERROR 9 +#define COMP_INVALID_STREAM_TYPE_ERROR 10 +#define COMP_SLOT_NOT_ENABLED_ERROR 11 +#define COMP_ENDPOINT_NOT_ENABLED_ERROR 12 +#define COMP_SHORT_PACKET 13 +#define COMP_RING_UNDERRUN 14 +#define COMP_RING_OVERRUN 15 +#define COMP_VF_EVENT_RING_FULL_ERROR 16 +#define COMP_PARAMETER_ERROR 17 +#define COMP_BANDWIDTH_OVERRUN_ERROR 18 +#define COMP_CONTEXT_STATE_ERROR 19 +#define COMP_NO_PING_RESPONSE_ERROR 20 +#define COMP_EVENT_RING_FULL_ERROR 21 +#define COMP_INCOMPATIBLE_DEVICE_ERROR 22 +#define COMP_MISSED_SERVICE_ERROR 23 +#define COMP_COMMAND_RING_STOPPED 24 +#define COMP_COMMAND_ABORTED 25 +#define COMP_STOPPED 26 +#define COMP_STOPPED_LENGTH_INVALID 27 +#define COMP_STOPPED_SHORT_PACKET 28 +#define COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR 29 +#define COMP_ISOCH_BUFFER_OVERRUN 31 +#define COMP_EVENT_LOST_ERROR 32 +#define COMP_UNDEFINED_ERROR 33 +#define COMP_INVALID_STREAM_ID_ERROR 34 +#define COMP_SECONDARY_BANDWIDTH_ERROR 35 +#define COMP_SPLIT_TRANSACTION_ERROR 36 + +static inline const char *xhci_trb_comp_code_string(u8 status) +{ + switch (status) { + case COMP_INVALID: + return "Invalid"; + case COMP_SUCCESS: + return "Success"; + case COMP_DATA_BUFFER_ERROR: + return "Data Buffer Error"; + case COMP_BABBLE_DETECTED_ERROR: + return "Babble Detected"; + case COMP_USB_TRANSACTION_ERROR: + return "USB Transaction Error"; + case COMP_TRB_ERROR: + return "TRB Error"; + case COMP_STALL_ERROR: + return "Stall Error"; + case COMP_RESOURCE_ERROR: + return "Resource Error"; + case COMP_BANDWIDTH_ERROR: + return "Bandwidth Error"; + case COMP_NO_SLOTS_AVAILABLE_ERROR: + return "No Slots Available Error"; + case COMP_INVALID_STREAM_TYPE_ERROR: + return "Invalid Stream Type Error"; + case COMP_SLOT_NOT_ENABLED_ERROR: + return "Slot Not Enabled Error"; + case COMP_ENDPOINT_NOT_ENABLED_ERROR: + return "Endpoint Not Enabled Error"; + case COMP_SHORT_PACKET: + return "Short Packet"; + case COMP_RING_UNDERRUN: + return "Ring Underrun"; + case COMP_RING_OVERRUN: + return "Ring Overrun"; + case COMP_VF_EVENT_RING_FULL_ERROR: + return "VF Event Ring Full Error"; + case COMP_PARAMETER_ERROR: + return "Parameter Error"; + 
case COMP_BANDWIDTH_OVERRUN_ERROR: + return "Bandwidth Overrun Error"; + case COMP_CONTEXT_STATE_ERROR: + return "Context State Error"; + case COMP_NO_PING_RESPONSE_ERROR: + return "No Ping Response Error"; + case COMP_EVENT_RING_FULL_ERROR: + return "Event Ring Full Error"; + case COMP_INCOMPATIBLE_DEVICE_ERROR: + return "Incompatible Device Error"; + case COMP_MISSED_SERVICE_ERROR: + return "Missed Service Error"; + case COMP_COMMAND_RING_STOPPED: + return "Command Ring Stopped"; + case COMP_COMMAND_ABORTED: + return "Command Aborted"; + case COMP_STOPPED: + return "Stopped"; + case COMP_STOPPED_LENGTH_INVALID: + return "Stopped - Length Invalid"; + case COMP_STOPPED_SHORT_PACKET: + return "Stopped - Short Packet"; + case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR: + return "Max Exit Latency Too Large Error"; + case COMP_ISOCH_BUFFER_OVERRUN: + return "Isoch Buffer Overrun"; + case COMP_EVENT_LOST_ERROR: + return "Event Lost Error"; + case COMP_UNDEFINED_ERROR: + return "Undefined Error"; + case COMP_INVALID_STREAM_ID_ERROR: + return "Invalid Stream ID Error"; + case COMP_SECONDARY_BANDWIDTH_ERROR: + return "Secondary Bandwidth Error"; + case COMP_SPLIT_TRANSACTION_ERROR: + return "Split Transaction Error"; + default: + return "Unknown!!"; + } +} struct xhci_link_trb { /* 64-bit segment pointer*/ @@ -1154,6 +1199,27 @@ struct xhci_event_cmd { /* Address device - disable SetAddress */ #define TRB_BSR (1<<9) + +/* Configure Endpoint - Deconfigure */ +#define TRB_DC (1<<9) + +/* Stop Ring - Transfer State Preserve */ +#define TRB_TSP (1<<9) + +/* Force Event */ +#define TRB_TO_VF_INTR_TARGET(p) (((p) & (0x3ff << 22)) >> 22) +#define TRB_TO_VF_ID(p) (((p) & (0xff << 16)) >> 16) + +/* Set Latency Tolerance Value */ +#define TRB_TO_BELT(p) (((p) & (0xfff << 16)) >> 16) + +/* Get Port Bandwidth */ +#define TRB_TO_DEV_SPEED(p) (((p) & (0xf << 16)) >> 16) + +/* Force Header */ +#define TRB_TO_PACKET_TYPE(p) ((p) & 0x1f) +#define TRB_TO_ROOTHUB_PORT(p) (((p) & (0xff << 24)) >> 24) + enum xhci_setup_dev { SETUP_CONTEXT_ONLY, SETUP_CONTEXT_ADDRESS, @@ -1177,16 +1243,21 @@ enum xhci_setup_dev { #define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16) #define SCT_FOR_TRB(p) (((p) << 1) & 0x7) +/* Link TRB specific fields */ +#define TRB_TC (1<<1) /* Port Status Change Event TRB fields */ /* Port ID - bits 31:24 */ #define GET_PORT_ID(p) (((p) & (0xff << 24)) >> 24) +#define EVENT_DATA (1 << 2) + /* Normal TRB fields */ /* transfer_len bitmasks - bits 0:16 */ #define TRB_LEN(p) ((p) & 0x1ffff) /* TD Size, packets remaining in this TD, bits 21:17 (5 bits, so max 31) */ #define TRB_TD_SIZE(p) (min((p), (u32)31) << 17) +#define GET_TD_SIZE(p) (((p) & 0x3e0000) >> 17) /* xhci 1.1 uses the TD_SIZE field for TBC if Extended TBC is enabled (ETE) */ #define TRB_TD_SIZE_TBC(p) (min((p), (u32)31) << 17) /* Interrupter Target - which MSI-X vector to target the completion event at */ @@ -1314,6 +1385,80 @@ union xhci_trb { /* Get NEC firmware revision. 
*/ #define TRB_NEC_GET_FW 49 +static inline const char *xhci_trb_type_string(u8 type) +{ + switch (type) { + case TRB_NORMAL: + return "Normal"; + case TRB_SETUP: + return "Setup Stage"; + case TRB_DATA: + return "Data Stage"; + case TRB_STATUS: + return "Status Stage"; + case TRB_ISOC: + return "Isoch"; + case TRB_LINK: + return "Link"; + case TRB_EVENT_DATA: + return "Event Data"; + case TRB_TR_NOOP: + return "No-Op"; + case TRB_ENABLE_SLOT: + return "Enable Slot Command"; + case TRB_DISABLE_SLOT: + return "Disable Slot Command"; + case TRB_ADDR_DEV: + return "Address Device Command"; + case TRB_CONFIG_EP: + return "Configure Endpoint Command"; + case TRB_EVAL_CONTEXT: + return "Evaluate Context Command"; + case TRB_RESET_EP: + return "Reset Endpoint Command"; + case TRB_STOP_RING: + return "Stop Ring Command"; + case TRB_SET_DEQ: + return "Set TR Dequeue Pointer Command"; + case TRB_RESET_DEV: + return "Reset Device Command"; + case TRB_FORCE_EVENT: + return "Force Event Command"; + case TRB_NEG_BANDWIDTH: + return "Negotiate Bandwidth Command"; + case TRB_SET_LT: + return "Set Latency Tolerance Value Command"; + case TRB_GET_BW: + return "Get Port Bandwidth Command"; + case TRB_FORCE_HEADER: + return "Force Header Command"; + case TRB_CMD_NOOP: + return "No-Op Command"; + case TRB_TRANSFER: + return "Transfer Event"; + case TRB_COMPLETION: + return "Command Completion Event"; + case TRB_PORT_STATUS: + return "Port Status Change Event"; + case TRB_BANDWIDTH_EVENT: + return "Bandwidth Request Event"; + case TRB_DOORBELL: + return "Doorbell Event"; + case TRB_HC_EVENT: + return "Host Controller Event"; + case TRB_DEV_NOTE: + return "Device Notification Event"; + case TRB_MFINDEX_WRAP: + return "MFINDEX Wrap Event"; + case TRB_NEC_CMD_COMP: + return "NEC Command Completion Event"; + case TRB_NEC_GET_FW: + return "NET Get Firmware Revision Command"; + default: + return "UNKNOWN"; + } +} + #define TRB_TYPE_LINK(x) (((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)) /* Above, but for __le32 types -- can avoid work by swapping constants: */ #define TRB_TYPE_LINK_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \ @@ -1390,6 +1535,28 @@ enum xhci_ring_type { TYPE_EVENT, }; +static inline const char *xhci_ring_type_string(enum xhci_ring_type type) +{ + switch (type) { + case TYPE_CTRL: + return "CTRL"; + case TYPE_ISOC: + return "ISOC"; + case TYPE_BULK: + return "BULK"; + case TYPE_INTR: + return "INTR"; + case TYPE_STREAM: + return "STREAM"; + case TYPE_COMMAND: + return "CMD"; + case TYPE_EVENT: + return "EVENT"; + } + + return "UNKNOWN"; +} + struct xhci_ring { struct xhci_segment *first_seg; struct xhci_segment *last_seg; @@ -1441,9 +1608,9 @@ struct xhci_scratchpad { }; struct urb_priv { - int length; - int td_cnt; - struct xhci_td *td[0]; + int num_tds; + int num_tds_done; + struct xhci_td td[0]; }; /* @@ -1549,7 +1716,6 @@ struct xhci_hcd { u8 max_ports; u8 isoc_threshold; int event_ring_max; - int addr_64; /* 4KB min, 128MB max */ int page_size; /* Valid values are 12 to 20, inclusive */ @@ -1650,6 +1816,9 @@ struct xhci_hcd { #define XHCI_SSIC_PORT_UNUSED (1 << 22) #define XHCI_NO_64BIT_SUPPORT (1 << 23) #define XHCI_MISSING_CAS (1 << 24) +/* For controller with a broken Port Disable implementation */ +#define XHCI_BROKEN_PORT_PED (1 << 25) + unsigned int num_active_eps; unsigned int limit_active_eps; /* There are two roothubs to keep track of bus suspend info for */ @@ -1984,4 +2153,211 @@ static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, urb->stream_id); } 
+static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2, + u32 field3) +{ + static char str[256]; + int type = TRB_FIELD_TO_TYPE(field3); + + switch (type) { + case TRB_LINK: + sprintf(str, + "TRB %08x%08x status '%s' len %d slot %d ep %d type '%s' flags %c:%c", + field1, field0, + xhci_trb_comp_code_string(GET_COMP_CODE(field2)), + EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3), + /* Macro decrements 1, maybe it shouldn't?!? */ + TRB_TO_EP_INDEX(field3) + 1, + xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)), + field3 & EVENT_DATA ? 'E' : 'e', + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_TRANSFER: + case TRB_COMPLETION: + case TRB_PORT_STATUS: + case TRB_BANDWIDTH_EVENT: + case TRB_DOORBELL: + case TRB_HC_EVENT: + case TRB_DEV_NOTE: + case TRB_MFINDEX_WRAP: + sprintf(str, + "TRB %08x%08x status '%s' len %d slot %d ep %d type '%s' flags %c:%c", + field1, field0, + xhci_trb_comp_code_string(GET_COMP_CODE(field2)), + EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3), + /* Macro decrements 1, maybe it shouldn't?!? */ + TRB_TO_EP_INDEX(field3) + 1, + xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)), + field3 & EVENT_DATA ? 'E' : 'e', + field3 & TRB_CYCLE ? 'C' : 'c'); + + break; + case TRB_SETUP: + sprintf(str, + "bRequestType %02x bRequest %02x wValue %02x%02x wIndex %02x%02x wLength %d length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c:%c:%c:%c:%c", + field0 & 0xff, + (field0 & 0xff00) >> 8, + (field0 & 0xff000000) >> 24, + (field0 & 0xff0000) >> 16, + (field1 & 0xff00) >> 8, + field1 & 0xff, + (field1 & 0xff000000) >> 16 | + (field1 & 0xff0000) >> 16, + TRB_LEN(field2), GET_TD_SIZE(field2), + GET_INTR_TARGET(field2), + xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)), + field3 & TRB_BEI ? 'B' : 'b', + field3 & TRB_IDT ? 'I' : 'i', + field3 & TRB_IOC ? 'I' : 'i', + field3 & TRB_CHAIN ? 'C' : 'c', + field3 & TRB_NO_SNOOP ? 'S' : 's', + field3 & TRB_ISP ? 'I' : 'i', + field3 & TRB_ENT ? 'E' : 'e', + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_NORMAL: + case TRB_DATA: + case TRB_STATUS: + case TRB_ISOC: + case TRB_EVENT_DATA: + case TRB_TR_NOOP: + sprintf(str, + "Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c:%c:%c:%c:%c", + field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2), + GET_INTR_TARGET(field2), + xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)), + field3 & TRB_BEI ? 'B' : 'b', + field3 & TRB_IDT ? 'I' : 'i', + field3 & TRB_IOC ? 'I' : 'i', + field3 & TRB_CHAIN ? 'C' : 'c', + field3 & TRB_NO_SNOOP ? 'S' : 's', + field3 & TRB_ISP ? 'I' : 'i', + field3 & TRB_ENT ? 'E' : 'e', + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + + case TRB_CMD_NOOP: + case TRB_ENABLE_SLOT: + sprintf(str, + "%s: flags %c", + xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_DISABLE_SLOT: + case TRB_NEG_BANDWIDTH: + sprintf(str, + "%s: slot %d flags %c", + xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)), + TRB_TO_SLOT_ID(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_ADDR_DEV: + sprintf(str, + "%s: ctx %08x%08x slot %d flags %c:%c", + xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)), + field1, field0, + TRB_TO_SLOT_ID(field3), + field3 & TRB_BSR ? 'B' : 'b', + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_CONFIG_EP: + sprintf(str, + "%s: ctx %08x%08x slot %d flags %c:%c", + xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)), + field1, field0, + TRB_TO_SLOT_ID(field3), + field3 & TRB_DC ? 'D' : 'd', + field3 & TRB_CYCLE ? 
'C' : 'c'); + break; + case TRB_EVAL_CONTEXT: + sprintf(str, + "%s: ctx %08x%08x slot %d flags %c", + xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)), + field1, field0, + TRB_TO_SLOT_ID(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_RESET_EP: + sprintf(str, + "%s: ctx %08x%08x slot %d ep %d flags %c", + xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)), + field1, field0, + TRB_TO_SLOT_ID(field3), + /* Macro decrements 1, maybe it shouldn't?!? */ + TRB_TO_EP_INDEX(field3) + 1, + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_STOP_RING: + sprintf(str, + "%s: slot %d sp %d ep %d flags %c", + xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)), + TRB_TO_SLOT_ID(field3), + TRB_TO_SUSPEND_PORT(field3), + /* Macro decrements 1, maybe it shouldn't?!? */ + TRB_TO_EP_INDEX(field3) + 1, + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_SET_DEQ: + sprintf(str, + "%s: deq %08x%08x stream %d slot %d ep %d flags %c", + xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)), + field1, field0, + TRB_TO_STREAM_ID(field2), + TRB_TO_SLOT_ID(field3), + /* Macro decrements 1, maybe it shouldn't?!? */ + TRB_TO_EP_INDEX(field3) + 1, + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_RESET_DEV: + sprintf(str, + "%s: slot %d flags %c", + xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)), + TRB_TO_SLOT_ID(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_FORCE_EVENT: + sprintf(str, + "%s: event %08x%08x vf intr %d vf id %d flags %c", + xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)), + field1, field0, + TRB_TO_VF_INTR_TARGET(field2), + TRB_TO_VF_ID(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_SET_LT: + sprintf(str, + "%s: belt %d flags %c", + xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)), + TRB_TO_BELT(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_GET_BW: + sprintf(str, + "%s: ctx %08x%08x slot %d speed %d flags %c", + xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)), + field1, field0, + TRB_TO_SLOT_ID(field3), + TRB_TO_DEV_SPEED(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_FORCE_HEADER: + sprintf(str, + "%s: info %08x%08x%08x pkt type %d roothub port %d flags %c", + xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)), + field2, field1, field0 & 0xffffffe0, + TRB_TO_PACKET_TYPE(field0), + TRB_TO_ROOTHUB_PORT(field3), + field3 & TRB_CYCLE ? 
'C' : 'c'); + break; + default: + sprintf(str, + "type '%s' -> raw %08x %08x %08x %08x", + xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)), + field0, field1, field2, field3); + } + + return str; +} + + #endif /* __LINUX_XHCI_HCD_H */ diff --git a/drivers/usb/isp1760/isp1760-udc.c b/drivers/usb/isp1760/isp1760-udc.c index 1c3d0fd658fa..69400f3da886 100644 --- a/drivers/usb/isp1760/isp1760-udc.c +++ b/drivers/usb/isp1760/isp1760-udc.c @@ -1250,7 +1250,7 @@ static int isp1760_udc_stop(struct usb_gadget *gadget) return 0; } -static struct usb_gadget_ops isp1760_udc_ops = { +static const struct usb_gadget_ops isp1760_udc_ops = { .get_frame = isp1760_udc_get_frame, .wakeup = isp1760_udc_wakeup, .set_selfpowered = isp1760_udc_set_selfpowered, diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h index ba9df719f363..aa6fd6a51221 100644 --- a/drivers/usb/mtu3/mtu3.h +++ b/drivers/usb/mtu3/mtu3.h @@ -225,6 +225,7 @@ struct ssusb_mtk { /* common power & clock */ struct regulator *vusb33; struct clk *sys_clk; + struct clk *ref_clk; /* otg */ struct otg_switch_mtk otg_switch; enum usb_dr_mode dr_mode; diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c index 783367805c99..19a345d6687d 100644 --- a/drivers/usb/mtu3/mtu3_plat.c +++ b/drivers/usb/mtu3/mtu3_plat.c @@ -123,7 +123,13 @@ static int ssusb_rscs_init(struct ssusb_mtk *ssusb) ret = clk_prepare_enable(ssusb->sys_clk); if (ret) { dev_err(ssusb->dev, "failed to enable sys_clk\n"); - goto clk_err; + goto sys_clk_err; + } + + ret = clk_prepare_enable(ssusb->ref_clk); + if (ret) { + dev_err(ssusb->dev, "failed to enable ref_clk\n"); + goto ref_clk_err; } ret = ssusb_phy_init(ssusb); @@ -143,8 +149,10 @@ static int ssusb_rscs_init(struct ssusb_mtk *ssusb) phy_err: ssusb_phy_exit(ssusb); phy_init_err: + clk_disable_unprepare(ssusb->ref_clk); +ref_clk_err: clk_disable_unprepare(ssusb->sys_clk); -clk_err: +sys_clk_err: regulator_disable(ssusb->vusb33); vusb33_err: @@ -154,6 +162,7 @@ vusb33_err: static void ssusb_rscs_exit(struct ssusb_mtk *ssusb) { clk_disable_unprepare(ssusb->sys_clk); + clk_disable_unprepare(ssusb->ref_clk); regulator_disable(ssusb->vusb33); ssusb_phy_power_off(ssusb); ssusb_phy_exit(ssusb); @@ -204,6 +213,24 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb) int i; int ret; + ssusb->vusb33 = devm_regulator_get(&pdev->dev, "vusb33"); + if (IS_ERR(ssusb->vusb33)) { + dev_err(dev, "failed to get vusb33\n"); + return PTR_ERR(ssusb->vusb33); + } + + ssusb->sys_clk = devm_clk_get(dev, "sys_ck"); + if (IS_ERR(ssusb->sys_clk)) { + dev_err(dev, "failed to get sys clock\n"); + return PTR_ERR(ssusb->sys_clk); + } + + ssusb->ref_clk = devm_clk_get(dev, "ref_ck"); + if (IS_ERR(ssusb->ref_clk)) { + dev_err(dev, "failed to get ref clock\n"); + return PTR_ERR(ssusb->ref_clk); + } + ssusb->num_phys = of_count_phandle_with_args(node, "phys", "#phy-cells"); if (ssusb->num_phys > 0) { @@ -230,18 +257,6 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb) return PTR_ERR(ssusb->ippc_base); } - ssusb->vusb33 = devm_regulator_get(&pdev->dev, "vusb33"); - if (IS_ERR(ssusb->vusb33)) { - dev_err(dev, "failed to get vusb33\n"); - return PTR_ERR(ssusb->vusb33); - } - - ssusb->sys_clk = devm_clk_get(dev, "sys_ck"); - if (IS_ERR(ssusb->sys_clk)) { - dev_err(dev, "failed to get sys clock\n"); - return PTR_ERR(ssusb->sys_clk); - } - ssusb->dr_mode = usb_get_dr_mode(dev); if (ssusb->dr_mode == USB_DR_MODE_UNKNOWN) { dev_err(dev, "dr_mode is error\n"); @@ -428,6 +443,7 @@ 
static int __maybe_unused mtu3_suspend(struct device *dev) ssusb_host_disable(ssusb, true); ssusb_phy_power_off(ssusb); clk_disable_unprepare(ssusb->sys_clk); + clk_disable_unprepare(ssusb->ref_clk); ssusb_wakeup_enable(ssusb); return 0; @@ -445,6 +461,7 @@ static int __maybe_unused mtu3_resume(struct device *dev) ssusb_wakeup_disable(ssusb); clk_prepare_enable(ssusb->sys_clk); + clk_prepare_enable(ssusb->ref_clk); ssusb_phy_power_on(ssusb); ssusb_host_enable(ssusb); diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c index bc8889956d17..40c68c23d553 100644 --- a/drivers/usb/musb/jz4740.c +++ b/drivers/usb/musb/jz4740.c @@ -63,7 +63,7 @@ static struct musb_fifo_cfg jz4740_musb_fifo_cfg[] = { { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 64, }, }; -static struct musb_hdrc_config jz4740_musb_config = { +static const struct musb_hdrc_config jz4740_musb_config = { /* Silicon does not implement USB OTG. */ .multipoint = 0, /* Max EPs scanned, driver will decide which EP can be used. */ diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c index d0be0eadd0d9..64545de4871f 100644 --- a/drivers/usb/musb/sunxi.c +++ b/drivers/usb/musb/sunxi.c @@ -645,7 +645,7 @@ static struct musb_fifo_cfg sunxi_musb_mode_cfg[] = { MUSB_EP_FIFO_SINGLE(5, FIFO_RX, 512), }; -static struct musb_hdrc_config sunxi_musb_hdrc_config = { +static const struct musb_hdrc_config sunxi_musb_hdrc_config = { .fifo_cfg = sunxi_musb_mode_cfg, .fifo_cfg_size = ARRAY_SIZE(sunxi_musb_mode_cfg), .multipoint = true, diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c index 3eaa4ba6867d..5a572500c418 100644 --- a/drivers/usb/musb/ux500.c +++ b/drivers/usb/musb/ux500.c @@ -30,7 +30,7 @@ #include "musb_core.h" -static struct musb_hdrc_config ux500_musb_hdrc_config = { +static const struct musb_hdrc_config ux500_musb_hdrc_config = { .multipoint = true, .dyn_fifo = true, .num_eps = 16, diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c index 02bdaa912164..369f3c24815a 100644 --- a/drivers/usb/storage/ene_ub6250.c +++ b/drivers/usb/storage/ene_ub6250.c @@ -1364,7 +1364,6 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock, static int ms_libsearch_block_from_physical(struct us_data *us, u16 phyblk) { - u16 Newblk; u16 blk; struct ms_lib_type_extdat extdat; /* need check */ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; @@ -1377,7 +1376,6 @@ static int ms_libsearch_block_from_physical(struct us_data *us, u16 phyblk) if ((blk & MS_PHYSICAL_BLOCKS_PER_SEGMENT_MASK) == 0) blk -= MS_PHYSICAL_BLOCKS_PER_SEGMENT; - Newblk = info->MS_Lib.Phy2LogMap[blk]; if (info->MS_Lib.Phy2LogMap[blk] == MS_LB_NOT_USED_ERASED) { return blk; } else if (info->MS_Lib.Phy2LogMap[blk] == MS_LB_NOT_USED) { diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c index 3aeaa536c44f..44f8ffccd031 100644 --- a/drivers/usb/storage/sddr09.c +++ b/drivers/usb/storage/sddr09.c @@ -870,13 +870,12 @@ sddr09_write_lba(struct us_data *us, unsigned int lba, unsigned int pagelen; unsigned char *bptr, *cptr, *xptr; unsigned char ecc[3]; - int i, result, isnew; + int i, result; lbap = ((lba % 1000) << 1) | 0x1000; if (parity[MSB_of(lbap) ^ LSB_of(lbap)]) lbap ^= 1; pba = info->lba_to_pba[lba]; - isnew = 0; if (pba == UNDEF) { pba = sddr09_find_unused_pba(info, lba); @@ -887,7 +886,6 @@ sddr09_write_lba(struct us_data *us, unsigned int lba, } info->pba_to_lba[pba] = lba; info->lba_to_pba[lba] = pba; - isnew = 1; } if (pba == 1) { diff --git a/drivers/vhost/scsi.c 
b/drivers/vhost/scsi.c index 253310cdaaca..fd6c8b66f06f 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -843,7 +843,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) struct iov_iter out_iter, in_iter, prot_iter, data_iter; u64 tag; u32 exp_data_len, data_direction; - unsigned out, in; + unsigned int out = 0, in = 0; int head, ret, prot_bytes; size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp); size_t out_size, in_size; @@ -2087,7 +2087,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = { NULL, }; -static struct target_core_fabric_ops vhost_scsi_ops = { +static const struct target_core_fabric_ops vhost_scsi_ops = { .module = THIS_MODULE, .name = "vhost", .get_fabric_name = vhost_scsi_get_fabric_name, diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c index 112ce422dc22..2a165cc8a43c 100644 --- a/drivers/xen/platform-pci.c +++ b/drivers/xen/platform-pci.c @@ -42,6 +42,7 @@ static unsigned long platform_mmio; static unsigned long platform_mmio_alloc; static unsigned long platform_mmiolen; +static uint64_t callback_via; static unsigned long alloc_xen_mmio(unsigned long len) { @@ -54,6 +55,51 @@ static unsigned long alloc_xen_mmio(unsigned long len) return addr; } +static uint64_t get_callback_via(struct pci_dev *pdev) +{ + u8 pin; + int irq; + + irq = pdev->irq; + if (irq < 16) + return irq; /* ISA IRQ */ + + pin = pdev->pin; + + /* We don't know the GSI. Specify the PCI INTx line instead. */ + return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) | /* PCI INTx identifier */ + ((uint64_t)pci_domain_nr(pdev->bus) << 32) | + ((uint64_t)pdev->bus->number << 16) | + ((uint64_t)(pdev->devfn & 0xff) << 8) | + ((uint64_t)(pin - 1) & 3); +} + +static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id) +{ + xen_hvm_evtchn_do_upcall(); + return IRQ_HANDLED; +} + +static int xen_allocate_irq(struct pci_dev *pdev) +{ + return request_irq(pdev->irq, do_hvm_evtchn_intr, + IRQF_NOBALANCING | IRQF_TRIGGER_RISING, + "xen-platform-pci", pdev); +} + +static int platform_pci_resume(struct pci_dev *pdev) +{ + int err; + if (!xen_pv_domain()) + return 0; + err = xen_set_callback_via(callback_via); + if (err) { + dev_err(&pdev->dev, "platform_pci_resume failure!\n"); + return err; + } + return 0; +} + static int platform_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -92,6 +138,28 @@ static int platform_pci_probe(struct pci_dev *pdev, platform_mmio = mmio_addr; platform_mmiolen = mmio_len; + /* + * Xen HVM guests always use the vector callback mechanism. + * L1 Dom0 in a nested Xen environment is a PV guest inside in an + * HVM environment. It needs the platform-pci driver to get + * notifications from L0 Xen, but it cannot use the vector callback + * as it is not exported by L1 Xen. 
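 *
 * Editorial illustration, not part of this change: the "via" value built
 * by get_callback_via() above packs the PCI INTx identity into a single
 * 64-bit word, assuming the conventional HVM_CALLBACK_VIA_TYPE_SHIFT of
 * 56.  For a hypothetical platform device at 0000:00:03.0 raising INTA
 * (pin 1), devfn is (3 << 3) | 0 = 0x18, so:
 *
 *   via = (1ULL << 56)        type 1: PCI INTx
 *       | (0ULL << 32)        PCI domain 0
 *       | (0ULL << 16)        bus 0
 *       | (0x18ULL << 8)      devfn 0x18 (slot 3, function 0)
 *       | ((1 - 1) & 3);      INTA encoded as pin - 1 = 0
 *
 * which evaluates to 0x0100000000001800.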
+ */ + if (xen_pv_domain()) { + ret = xen_allocate_irq(pdev); + if (ret) { + dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret); + goto out; + } + callback_via = get_callback_via(pdev); + ret = xen_set_callback_via(callback_via); + if (ret) { + dev_warn(&pdev->dev, "Unable to set the evtchn callback " + "err=%d\n", ret); + goto out; + } + } + max_nr_gframes = gnttab_max_grant_frames(); grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); ret = gnttab_setup_auto_xlat_frames(grant_frames); @@ -123,6 +191,9 @@ static struct pci_driver platform_driver = { .name = DRV_NAME, .probe = platform_pci_probe, .id_table = platform_pci_tbl, +#ifdef CONFIG_PM + .resume_early = platform_pci_resume, +#endif }; builtin_pci_driver(platform_driver); diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index baea866a6751..94fd76d04683 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2591,8 +2591,13 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, add_wait_queue(&ci->i_cap_wq, &wait); while (!try_get_cap_refs(ci, need, want, endoff, - true, &_got, &err)) + true, &_got, &err)) { + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } remove_wait_queue(&ci->i_cap_wq, &wait); diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index d7a93696663b..8ab1fdf0bd49 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -1230,7 +1230,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) struct ceph_mds_client *mdsc = ceph_sb_to_client(dir->i_sb)->mdsc; struct ceph_mds_request *req; - int op, mask, err; + int op, err; + u32 mask; if (flags & LOOKUP_RCU) return -ECHILD; @@ -1245,7 +1246,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; if (ceph_security_xattr_wanted(dir)) mask |= CEPH_CAP_XATTR_SHARED; - req->r_args.getattr.mask = mask; + req->r_args.getattr.mask = cpu_to_le32(mask); err = ceph_mdsc_do_request(mdsc, NULL, req); switch (err) { diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 398e5328b309..5e659d054b40 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -305,7 +305,8 @@ static int frag_tree_split_cmp(const void *l, const void *r) { struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l; struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r; - return ceph_frag_compare(ls->frag, rs->frag); + return ceph_frag_compare(le32_to_cpu(ls->frag), + le32_to_cpu(rs->frag)); } static bool is_frag_child(u32 f, struct ceph_inode_frag *frag) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index ec6b35e9f966..c9d2e553a6c4 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -288,12 +288,13 @@ static int parse_reply_info_extra(void **p, void *end, struct ceph_mds_reply_info_parsed *info, u64 features) { - if (info->head->op == CEPH_MDS_OP_GETFILELOCK) + u32 op = le32_to_cpu(info->head->op); + + if (op == CEPH_MDS_OP_GETFILELOCK) return parse_reply_info_filelock(p, end, info, features); - else if (info->head->op == CEPH_MDS_OP_READDIR || - info->head->op == CEPH_MDS_OP_LSSNAP) + else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP) return parse_reply_info_dir(p, end, info, features); - else if (info->head->op == CEPH_MDS_OP_CREATE) + else if (op == CEPH_MDS_OP_CREATE) return parse_reply_info_create(p, end, info, features); else return -EIO; diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 70ea57c7b6bb..4e06a27ed7f8 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -2025,7 +2025,6 
@@ static void end_requests(struct fuse_conn *fc, struct list_head *head) struct fuse_req *req; req = list_entry(head->next, struct fuse_req, list); req->out.h.error = -ECONNABORTED; - clear_bit(FR_PENDING, &req->flags); clear_bit(FR_SENT, &req->flags); list_del_init(&req->list); request_end(fc, req); @@ -2103,6 +2102,8 @@ void fuse_abort_conn(struct fuse_conn *fc) spin_lock(&fiq->waitq.lock); fiq->connected = 0; list_splice_init(&fiq->pending, &to_end2); + list_for_each_entry(req, &to_end2, list) + clear_bit(FR_PENDING, &req->flags); while (forget_pending(fiq)) kfree(dequeue_forget(fiq, 1, NULL)); wake_up_all_locked(&fiq->waitq); diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 1f7c732f32b0..811fd8929a18 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -68,7 +68,7 @@ static u64 time_to_jiffies(u64 sec, u32 nsec) if (sec || nsec) { struct timespec64 ts = { sec, - max_t(u32, nsec, NSEC_PER_SEC - 1) + min_t(u32, nsec, NSEC_PER_SEC - 1) }; return get_jiffies_64() + timespec64_to_jiffies(&ts); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 6dcbc5defb7a..ecc151697fd4 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -38,7 +38,6 @@ #include <linux/mm.h> #include <linux/delay.h> #include <linux/errno.h> -#include <linux/file.h> #include <linux/string.h> #include <linux/ratelimit.h> #include <linux/printk.h> @@ -1083,7 +1082,8 @@ int nfs4_call_sync(struct rpc_clnt *clnt, return nfs4_call_sync_sequence(clnt, server, msg, args, res); } -static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo) +static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo, + unsigned long timestamp) { struct nfs_inode *nfsi = NFS_I(dir); @@ -1099,6 +1099,7 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo) NFS_INO_INVALID_ACL; } dir->i_version = cinfo->after; + nfsi->read_cache_jiffies = timestamp; nfsi->attr_gencount = nfs_inc_attr_generation_counter(); nfs_fscache_invalidate(dir); spin_unlock(&dir->i_lock); @@ -2391,11 +2392,13 @@ static int _nfs4_proc_open(struct nfs4_opendata *data) nfs_fattr_map_and_free_names(server, &data->f_attr); if (o_arg->open_flags & O_CREAT) { - update_changeattr(dir, &o_res->cinfo); if (o_arg->open_flags & O_EXCL) data->file_created = 1; else if (o_res->cinfo.before != o_res->cinfo.after) data->file_created = 1; + if (data->file_created || dir->i_version != o_res->cinfo.after) + update_changeattr(dir, &o_res->cinfo, + o_res->f_attr->time_start); } if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) server->caps &= ~NFS_CAP_POSIX_LOCK; @@ -4073,11 +4076,12 @@ static int _nfs4_proc_remove(struct inode *dir, const struct qstr *name) .rpc_argp = &args, .rpc_resp = &res, }; + unsigned long timestamp = jiffies; int status; status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); if (status == 0) - update_changeattr(dir, &res.cinfo); + update_changeattr(dir, &res.cinfo, timestamp); return status; } @@ -4125,7 +4129,8 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) return 0; - update_changeattr(dir, &res->cinfo); + if (task->tk_status == 0) + update_changeattr(dir, &res->cinfo, res->dir_attr->time_start); return 1; } @@ -4159,8 +4164,11 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) return 0; - update_changeattr(old_dir, &res->old_cinfo); - 
update_changeattr(new_dir, &res->new_cinfo); + if (task->tk_status == 0) { + update_changeattr(old_dir, &res->old_cinfo, res->old_fattr->time_start); + if (new_dir != old_dir) + update_changeattr(new_dir, &res->new_cinfo, res->new_fattr->time_start); + } return 1; } @@ -4197,7 +4205,7 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); if (!status) { - update_changeattr(dir, &res.cinfo); + update_changeattr(dir, &res.cinfo, res.fattr->time_start); status = nfs_post_op_update_inode(inode, res.fattr); if (!status) nfs_setsecurity(inode, res.fattr, res.label); @@ -4272,7 +4280,8 @@ static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_ int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, &data->arg.seq_args, &data->res.seq_res, 1); if (status == 0) { - update_changeattr(dir, &data->res.dir_cinfo); + update_changeattr(dir, &data->res.dir_cinfo, + data->res.fattr->time_start); status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label); } return status; @@ -6127,7 +6136,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, p->server = server; atomic_inc(&lsp->ls_count); p->ctx = get_nfs_open_context(ctx); - get_file(fl->fl_file); memcpy(&p->fl, fl, sizeof(p->fl)); return p; out_free_seqid: @@ -6240,7 +6248,6 @@ static void nfs4_lock_release(void *calldata) nfs_free_seqid(data->arg.lock_seqid); nfs4_put_lock_state(data->lsp); put_nfs_open_context(data->ctx); - fput(data->fl.fl_file); kfree(data); dprintk("%s: done!\n", __func__); } diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 1d152f4470cd..90e6193ce6be 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1729,7 +1729,6 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error) break; case -NFS4ERR_STALE_CLIENTID: set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); - nfs4_state_clear_reclaim_reboot(clp); nfs4_state_start_reclaim_reboot(clp); break; case -NFS4ERR_EXPIRED: diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 7ecf16be4a44..8fae53ce21d1 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2440,7 +2440,9 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp, p++; /* to be backfilled later */ if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) { - u32 *supp = nfsd_suppattrs[minorversion]; + u32 supp[3]; + + memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp)); if (!IS_POSIXACL(dentry->d_inode)) supp[0] &= ~FATTR4_WORD0_ACL; diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index 9ad48d9202a9..023bb0b03352 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ -154,29 +154,38 @@ out_err: static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d, struct dentry **ret) { - const char *s = d->name.name; + /* Counting down from the end, since the prefix can change */ + size_t rem = d->name.len - 1; struct dentry *dentry = NULL; int err; - if (*s != '/') + if (d->name.name[0] != '/') return ovl_lookup_single(base, d, d->name.name, d->name.len, 0, "", ret); - while (*s++ == '/' && !IS_ERR_OR_NULL(base) && d_can_lookup(base)) { + while (!IS_ERR_OR_NULL(base) && d_can_lookup(base)) { + const char *s = d->name.name + d->name.len - rem; const char *next = strchrnul(s, '/'); - size_t slen = strlen(s); + size_t thislen = next - s; + bool end = !next[0]; - if (WARN_ON(slen > d->name.len) || - WARN_ON(strcmp(d->name.name + d->name.len - slen, s))) + /* 
Verify we did not go off the rails */ + if (WARN_ON(s[-1] != '/')) return -EIO; - err = ovl_lookup_single(base, d, s, next - s, - d->name.len - slen, next, &base); + err = ovl_lookup_single(base, d, s, thislen, + d->name.len - rem, next, &base); dput(dentry); if (err) return err; dentry = base; - s = next; + if (end) + break; + + rem -= thislen + 1; + + if (WARN_ON(rem >= d->name.len)) + return -EIO; } *ret = dentry; return 0; diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig index 0a908ae7af13..b0d0623c83ed 100644 --- a/fs/ubifs/Kconfig +++ b/fs/ubifs/Kconfig @@ -53,7 +53,7 @@ config UBIFS_ATIME_SUPPORT config UBIFS_FS_ENCRYPTION bool "UBIFS Encryption" - depends on UBIFS_FS + depends on UBIFS_FS && BLOCK select FS_ENCRYPTION default n help diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 1c5331ac9614..528369f3e472 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -390,16 +390,6 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry, dbg_gen("dent '%pd', mode %#hx in dir ino %lu", dentry, mode, dir->i_ino); - if (ubifs_crypt_is_encrypted(dir)) { - err = fscrypt_get_encryption_info(dir); - if (err) - return err; - - if (!fscrypt_has_encryption_key(dir)) { - return -EPERM; - } - } - err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); if (err) return err; @@ -741,17 +731,9 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir, ubifs_assert(inode_is_locked(dir)); ubifs_assert(inode_is_locked(inode)); - if (ubifs_crypt_is_encrypted(dir)) { - if (!fscrypt_has_permitted_context(dir, inode)) - return -EPERM; - - err = fscrypt_get_encryption_info(inode); - if (err) - return err; - - if (!fscrypt_has_encryption_key(inode)) - return -EPERM; - } + if (ubifs_crypt_is_encrypted(dir) && + !fscrypt_has_permitted_context(dir, inode)) + return -EPERM; err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); if (err) @@ -1000,17 +982,6 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) if (err) return err; - if (ubifs_crypt_is_encrypted(dir)) { - err = fscrypt_get_encryption_info(dir); - if (err) - goto out_budg; - - if (!fscrypt_has_encryption_key(dir)) { - err = -EPERM; - goto out_budg; - } - } - err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); if (err) goto out_budg; @@ -1096,17 +1067,6 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry, return err; } - if (ubifs_crypt_is_encrypted(dir)) { - err = fscrypt_get_encryption_info(dir); - if (err) - goto out_budg; - - if (!fscrypt_has_encryption_key(dir)) { - err = -EPERM; - goto out_budg; - } - } - err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); if (err) goto out_budg; @@ -1231,18 +1191,6 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry, goto out_inode; } - err = fscrypt_get_encryption_info(inode); - if (err) { - kfree(sd); - goto out_inode; - } - - if (!fscrypt_has_encryption_key(inode)) { - kfree(sd); - err = -EPERM; - goto out_inode; - } - ostr.name = sd->encrypted_path; ostr.len = disk_link.len; diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c index 78d713644df3..da519ba205f6 100644 --- a/fs/ubifs/ioctl.c +++ b/fs/ubifs/ioctl.c @@ -217,6 +217,9 @@ long ubifs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case FS_IOC32_SETFLAGS: cmd = FS_IOC_SETFLAGS; break; + case FS_IOC_SET_ENCRYPTION_POLICY: + case FS_IOC_GET_ENCRYPTION_POLICY: + break; default: return -ENOIOCTLCMD; } diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index a459211a1c21..294519b98874 100644 --- a/fs/ubifs/journal.c +++ 
b/fs/ubifs/journal.c @@ -744,6 +744,7 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode, } else { data->compr_size = 0; + out_len = compr_len; } dlen = UBIFS_DATA_NODE_SZ + out_len; @@ -1319,6 +1320,7 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in dn->compr_type = cpu_to_le16(compr_type); dn->size = cpu_to_le32(*new_len); *new_len = UBIFS_DATA_NODE_SZ + out_len; + err = 0; out: kfree(buf); return err; diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index 74ae2de949df..709aa098dd46 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -34,6 +34,11 @@ #include <linux/slab.h> #include "ubifs.h" +static int try_read_node(const struct ubifs_info *c, void *buf, int type, + int len, int lnum, int offs); +static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key, + struct ubifs_zbranch *zbr, void *node); + /* * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions. * @NAME_LESS: name corresponding to the first argument is less than second @@ -402,7 +407,19 @@ static int tnc_read_hashed_node(struct ubifs_info *c, struct ubifs_zbranch *zbr, return 0; } - err = ubifs_tnc_read_node(c, zbr, node); + if (c->replaying) { + err = fallible_read_node(c, &zbr->key, zbr, node); + /* + * When the node was not found, return -ENOENT, 0 otherwise. + * Negative return codes stay as-is. + */ + if (err == 0) + err = -ENOENT; + else if (err == 1) + err = 0; + } else { + err = ubifs_tnc_read_node(c, zbr, node); + } if (err) return err; @@ -2857,7 +2874,11 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c, if (fname_len(nm) > 0) { if (err) { /* Handle collisions */ - err = resolve_collision(c, key, &znode, &n, nm); + if (c->replaying) + err = fallible_resolve_collision(c, key, &znode, &n, + nm, 0); + else + err = resolve_collision(c, key, &znode, &n, nm); dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n); if (unlikely(err < 0)) diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c index c58d72c220f5..2f389d366e93 100644 --- a/fs/xfs/libxfs/xfs_dir2.c +++ b/fs/xfs/libxfs/xfs_dir2.c @@ -36,21 +36,29 @@ struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR }; /* - * @mode, if set, indicates that the type field needs to be set up. - * This uses the transformation from file mode to DT_* as defined in linux/fs.h - * for file type specification. This will be propagated into the directory - * structure if appropriate for the given operation and filesystem config. + * Convert inode mode to directory entry filetype */ -const unsigned char xfs_mode_to_ftype[S_IFMT >> S_SHIFT] = { - [0] = XFS_DIR3_FT_UNKNOWN, - [S_IFREG >> S_SHIFT] = XFS_DIR3_FT_REG_FILE, - [S_IFDIR >> S_SHIFT] = XFS_DIR3_FT_DIR, - [S_IFCHR >> S_SHIFT] = XFS_DIR3_FT_CHRDEV, - [S_IFBLK >> S_SHIFT] = XFS_DIR3_FT_BLKDEV, - [S_IFIFO >> S_SHIFT] = XFS_DIR3_FT_FIFO, - [S_IFSOCK >> S_SHIFT] = XFS_DIR3_FT_SOCK, - [S_IFLNK >> S_SHIFT] = XFS_DIR3_FT_SYMLINK, -}; +unsigned char xfs_mode_to_ftype(int mode) +{ + switch (mode & S_IFMT) { + case S_IFREG: + return XFS_DIR3_FT_REG_FILE; + case S_IFDIR: + return XFS_DIR3_FT_DIR; + case S_IFCHR: + return XFS_DIR3_FT_CHRDEV; + case S_IFBLK: + return XFS_DIR3_FT_BLKDEV; + case S_IFIFO: + return XFS_DIR3_FT_FIFO; + case S_IFSOCK: + return XFS_DIR3_FT_SOCK; + case S_IFLNK: + return XFS_DIR3_FT_SYMLINK; + default: + return XFS_DIR3_FT_UNKNOWN; + } +} /* * ASCII case-insensitive (ie. 
A-Z) support for directories that was @@ -631,7 +639,8 @@ xfs_dir2_isblock( if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK))) return rval; rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize; - ASSERT(rval == 0 || args->dp->i_d.di_size == args->geo->blksize); + if (rval != 0 && args->dp->i_d.di_size != args->geo->blksize) + return -EFSCORRUPTED; *vp = rval; return 0; } diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h index 0197590fa7d7..d6e6d9d16f6c 100644 --- a/fs/xfs/libxfs/xfs_dir2.h +++ b/fs/xfs/libxfs/xfs_dir2.h @@ -18,6 +18,9 @@ #ifndef __XFS_DIR2_H__ #define __XFS_DIR2_H__ +#include "xfs_da_format.h" +#include "xfs_da_btree.h" + struct xfs_defer_ops; struct xfs_da_args; struct xfs_inode; @@ -32,10 +35,9 @@ struct xfs_dir2_data_unused; extern struct xfs_name xfs_name_dotdot; /* - * directory filetype conversion tables. + * Convert inode mode to directory entry filetype */ -#define S_SHIFT 12 -extern const unsigned char xfs_mode_to_ftype[]; +extern unsigned char xfs_mode_to_ftype(int mode); /* * directory operations vector for encode/decode routines diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c index dd483e2767f7..d93f9d918cfc 100644 --- a/fs/xfs/libxfs/xfs_inode_buf.c +++ b/fs/xfs/libxfs/xfs_inode_buf.c @@ -29,6 +29,7 @@ #include "xfs_icache.h" #include "xfs_trans.h" #include "xfs_ialloc.h" +#include "xfs_dir2.h" /* * Check that none of the inode's in the buffer have a next @@ -386,6 +387,7 @@ xfs_dinode_verify( xfs_ino_t ino, struct xfs_dinode *dip) { + uint16_t mode; uint16_t flags; uint64_t flags2; @@ -396,8 +398,12 @@ xfs_dinode_verify( if (be64_to_cpu(dip->di_size) & (1ULL << 63)) return false; - /* No zero-length symlinks. */ - if (S_ISLNK(be16_to_cpu(dip->di_mode)) && dip->di_size == 0) + mode = be16_to_cpu(dip->di_mode); + if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN) + return false; + + /* No zero-length symlinks/dirs. */ + if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0) return false; /* only version 3 or greater inodes are extensively verified here */ diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index 7a30b8f11db7..9d06cc30e875 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c @@ -710,6 +710,10 @@ xfs_dq_get_next_id( /* Simple advance */ next_id = *id + 1; + /* If we'd wrap past the max ID, stop */ + if (next_id < *id) + return -ENOENT; + /* If new ID is within the current chunk, advancing it sufficed */ if (next_id % mp->m_quotainfo->qi_dqperchunk) { *id = next_id; diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 308bebb6dfd2..22c16155f1b4 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -98,12 +98,27 @@ xfs_init_security( static void xfs_dentry_to_name( struct xfs_name *namep, + struct dentry *dentry) +{ + namep->name = dentry->d_name.name; + namep->len = dentry->d_name.len; + namep->type = XFS_DIR3_FT_UNKNOWN; +} + +static int +xfs_dentry_mode_to_name( + struct xfs_name *namep, struct dentry *dentry, int mode) { namep->name = dentry->d_name.name; namep->len = dentry->d_name.len; - namep->type = xfs_mode_to_ftype[(mode & S_IFMT) >> S_SHIFT]; + namep->type = xfs_mode_to_ftype(mode); + + if (unlikely(namep->type == XFS_DIR3_FT_UNKNOWN)) + return -EFSCORRUPTED; + + return 0; } STATIC void @@ -119,7 +134,7 @@ xfs_cleanup_inode( * xfs_init_security we must back out. * ENOSPC can hit here, among other things. 
*/ - xfs_dentry_to_name(&teardown, dentry, 0); + xfs_dentry_to_name(&teardown, dentry); xfs_remove(XFS_I(dir), &teardown, XFS_I(inode)); } @@ -154,8 +169,12 @@ xfs_generic_create( if (error) return error; + /* Verify mode is valid also for tmpfile case */ + error = xfs_dentry_mode_to_name(&name, dentry, mode); + if (unlikely(error)) + goto out_free_acl; + if (!tmpfile) { - xfs_dentry_to_name(&name, dentry, mode); error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip); } else { error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip); @@ -248,7 +267,7 @@ xfs_vn_lookup( if (dentry->d_name.len >= MAXNAMELEN) return ERR_PTR(-ENAMETOOLONG); - xfs_dentry_to_name(&name, dentry, 0); + xfs_dentry_to_name(&name, dentry); error = xfs_lookup(XFS_I(dir), &name, &cip, NULL); if (unlikely(error)) { if (unlikely(error != -ENOENT)) @@ -275,7 +294,7 @@ xfs_vn_ci_lookup( if (dentry->d_name.len >= MAXNAMELEN) return ERR_PTR(-ENAMETOOLONG); - xfs_dentry_to_name(&xname, dentry, 0); + xfs_dentry_to_name(&xname, dentry); error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name); if (unlikely(error)) { if (unlikely(error != -ENOENT)) @@ -310,7 +329,9 @@ xfs_vn_link( struct xfs_name name; int error; - xfs_dentry_to_name(&name, dentry, inode->i_mode); + error = xfs_dentry_mode_to_name(&name, dentry, inode->i_mode); + if (unlikely(error)) + return error; error = xfs_link(XFS_I(dir), XFS_I(inode), &name); if (unlikely(error)) @@ -329,7 +350,7 @@ xfs_vn_unlink( struct xfs_name name; int error; - xfs_dentry_to_name(&name, dentry, 0); + xfs_dentry_to_name(&name, dentry); error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry))); if (error) @@ -359,7 +380,9 @@ xfs_vn_symlink( mode = S_IFLNK | (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO); - xfs_dentry_to_name(&name, dentry, mode); + error = xfs_dentry_mode_to_name(&name, dentry, mode); + if (unlikely(error)) + goto out; error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip); if (unlikely(error)) @@ -395,6 +418,7 @@ xfs_vn_rename( { struct inode *new_inode = d_inode(ndentry); int omode = 0; + int error; struct xfs_name oname; struct xfs_name nname; @@ -405,8 +429,14 @@ xfs_vn_rename( if (flags & RENAME_EXCHANGE) omode = d_inode(ndentry)->i_mode; - xfs_dentry_to_name(&oname, odentry, omode); - xfs_dentry_to_name(&nname, ndentry, d_inode(odentry)->i_mode); + error = xfs_dentry_mode_to_name(&oname, odentry, omode); + if (omode && unlikely(error)) + return error; + + error = xfs_dentry_mode_to_name(&nname, ndentry, + d_inode(odentry)->i_mode); + if (unlikely(error)) + return error; return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)), XFS_I(ndir), &nname, diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h index e467218c0098..7a989de224f4 100644 --- a/fs/xfs/xfs_linux.h +++ b/fs/xfs/xfs_linux.h @@ -331,11 +331,11 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y) } #define ASSERT_ALWAYS(expr) \ - (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) + (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) #ifdef DEBUG #define ASSERT(expr) \ - (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) + (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) #ifndef STATIC # define STATIC noinline @@ -346,7 +346,7 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y) #ifdef XFS_WARN #define ASSERT(expr) \ - (unlikely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__)) + (likely(expr) ? 
(void)0 : asswarn(#expr, __FILE__, __LINE__)) #ifndef STATIC # define STATIC static noinline diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index b717ed9d2b75..5c970ce67949 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -76,4 +76,5 @@ void kvm_timer_unschedule(struct kvm_vcpu *vcpu); void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu); +void kvm_timer_init_vhe(void); #endif diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f74ae68086dc..05cf951df3fe 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -216,7 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5); u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); -int bpf_prog_calc_digest(struct bpf_prog *fp); +int bpf_prog_calc_tag(struct bpf_prog *fp); const struct bpf_func_proto *bpf_get_trace_printk_proto(void); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 20bfefbe7594..d936a0021839 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -74,6 +74,8 @@ enum cpuhp_state { CPUHP_ZCOMP_PREPARE, CPUHP_TIMERS_DEAD, CPUHP_MIPS_SOC_PREPARE, + CPUHP_BP_PREPARE_DYN, + CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20, CPUHP_BRINGUP_CPU, CPUHP_AP_IDLE_DEAD, CPUHP_AP_OFFLINE, diff --git a/include/linux/filter.h b/include/linux/filter.h index a0934e6c9bab..e4eb2546339a 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -57,6 +57,8 @@ struct bpf_prog_aux; /* BPF program can access up to 512 bytes of stack space. */ #define MAX_BPF_STACK 512 +#define BPF_TAG_SIZE 8 + /* Helper macros for filter block array initializers. */ /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ @@ -408,7 +410,7 @@ struct bpf_prog { kmemcheck_bitfield_end(meta); enum bpf_prog_type type; /* Type of BPF program */ u32 len; /* Number of filter blocks */ - u32 digest[SHA_DIGEST_WORDS]; /* Program digest */ + u8 tag[BPF_TAG_SIZE]; struct bpf_prog_aux *aux; /* Auxiliary fields */ struct sock_fprog_kern *orig_prog; /* Original BPF program */ unsigned int (*bpf_func)(const void *ctx, @@ -519,7 +521,7 @@ static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog) return prog->len * sizeof(struct bpf_insn); } -static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog) +static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog) { return round_up(bpf_prog_insn_size(prog) + sizeof(__be64) + 1, SHA_MESSAGE_BYTES); diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 56aec84237ad..cb09238f6d32 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -514,8 +514,8 @@ extern enum system_states { #define TAINT_FLAGS_COUNT 16 struct taint_flag { - char true; /* character printed when tainted */ - char false; /* character printed when not tainted */ + char c_true; /* character printed when tainted */ + char c_false; /* character printed when not tainted */ bool module; /* also show as a per-module taint flag */ }; diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 321f9ed552a9..01f71e1d2e94 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -444,6 +444,10 @@ bool __rcu_is_watching(void); #error "Unknown RCU implementation specified to kernel configuration" #endif +#define RCU_SCHEDULER_INACTIVE 0 +#define RCU_SCHEDULER_INIT 1 +#define RCU_SCHEDULER_RUNNING 2 + /* * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed 
for dynamic * initialization and destruction of rcu_head on the stack. rcu_head structures diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index e5d193440374..7440290f64ac 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -66,6 +66,7 @@ struct svc_xprt { #define XPT_LISTENER 10 /* listening endpoint */ #define XPT_CACHE_AUTH 11 /* cache auth info */ #define XPT_LOCAL 12 /* connection from loopback interface */ +#define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */ struct svc_serv *xpt_server; /* service for transport */ atomic_t xpt_reserved; /* space on outq that is rsvd */ diff --git a/include/linux/tcp.h b/include/linux/tcp.h index fc5848dad7a4..c93f4b3a59cb 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -62,8 +62,13 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb) /* TCP Fast Open Cookie as stored in memory */ struct tcp_fastopen_cookie { + union { + u8 val[TCP_FASTOPEN_COOKIE_MAX]; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr addr; +#endif + }; s8 len; - u8 val[TCP_FASTOPEN_COOKIE_MAX]; bool exp; /* In RFC6994 experimental option format */ }; diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 96dd0b3f70d7..da5033dd8cbc 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -809,11 +809,11 @@ static inline void fc_set_wwnn(struct fc_lport *lport, u64 wwnn) /** * fc_set_wwpn() - Set the World Wide Port Name of a local port * @lport: The local port whose WWPN is to be set - * @wwnn: The new WWPN + * @wwpn: The new WWPN */ -static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwnn) +static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwpn) { - lport->wwpn = wwnn; + lport->wwpn = wwpn; } /** diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 6b76e3b0c18e..bea982af9cfb 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1772,7 +1772,9 @@ enum nl80211_commands { * * @NL80211_ATTR_OPMODE_NOTIF: Operating mode field from Operating Mode * Notification Element based on association request when used with - * %NL80211_CMD_NEW_STATION; u8 attribute. + * %NL80211_CMD_NEW_STATION or %NL80211_CMD_SET_STATION (only when + * %NL80211_FEATURE_FULL_AP_CLIENT_STATE is supported, or with TDLS); + * u8 attribute. 
* * @NL80211_ATTR_VENDOR_ID: The vendor ID, either a 24-bit OUI or, if * %NL80211_VENDOR_ID_IS_LINUX is set, a special Linux ID (not used yet) diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index cb4bcdc58543..a4dcd88ec271 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -397,7 +397,7 @@ enum { TCA_BPF_NAME, TCA_BPF_FLAGS, TCA_BPF_FLAGS_GEN, - TCA_BPF_DIGEST, + TCA_BPF_TAG, __TCA_BPF_MAX, }; diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h index a6b88a6f7f71..975b50dc8d1d 100644 --- a/include/uapi/linux/tc_act/tc_bpf.h +++ b/include/uapi/linux/tc_act/tc_bpf.h @@ -27,7 +27,7 @@ enum { TCA_ACT_BPF_FD, TCA_ACT_BPF_NAME, TCA_ACT_BPF_PAD, - TCA_ACT_BPF_DIGEST, + TCA_ACT_BPF_TAG, __TCA_ACT_BPF_MAX, }; #define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 1eb4f1303756..503d4211988a 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -146,10 +146,11 @@ void __bpf_prog_free(struct bpf_prog *fp) vfree(fp); } -int bpf_prog_calc_digest(struct bpf_prog *fp) +int bpf_prog_calc_tag(struct bpf_prog *fp) { const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64); - u32 raw_size = bpf_prog_digest_scratch_size(fp); + u32 raw_size = bpf_prog_tag_scratch_size(fp); + u32 digest[SHA_DIGEST_WORDS]; u32 ws[SHA_WORKSPACE_WORDS]; u32 i, bsize, psize, blocks; struct bpf_insn *dst; @@ -162,7 +163,7 @@ int bpf_prog_calc_digest(struct bpf_prog *fp) if (!raw) return -ENOMEM; - sha_init(fp->digest); + sha_init(digest); memset(ws, 0, sizeof(ws)); /* We need to take out the map fd for the digest calculation @@ -204,13 +205,14 @@ int bpf_prog_calc_digest(struct bpf_prog *fp) *bits = cpu_to_be64((psize - 1) << 3); while (blocks--) { - sha_transform(fp->digest, todo, ws); + sha_transform(digest, todo, ws); todo += SHA_MESSAGE_BYTES; } - result = (__force __be32 *)fp->digest; + result = (__force __be32 *)digest; for (i = 0; i < SHA_DIGEST_WORDS; i++) - result[i] = cpu_to_be32(fp->digest[i]); + result[i] = cpu_to_be32(digest[i]); + memcpy(fp->tag, result, sizeof(fp->tag)); vfree(raw); return 0; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index e89acea22ecf..1d6b29e4e2c3 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -688,17 +688,17 @@ static int bpf_prog_release(struct inode *inode, struct file *filp) static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) { const struct bpf_prog *prog = filp->private_data; - char prog_digest[sizeof(prog->digest) * 2 + 1] = { }; + char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; - bin2hex(prog_digest, prog->digest, sizeof(prog->digest)); + bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); seq_printf(m, "prog_type:\t%u\n" "prog_jited:\t%u\n" - "prog_digest:\t%s\n" + "prog_tag:\t%s\n" "memlock:\t%llu\n", prog->type, prog->jited, - prog_digest, + prog_tag, prog->pages * 1ULL << PAGE_SHIFT); } #endif diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 83ed2f8f6f22..cdc43b899f28 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2936,7 +2936,7 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) int insn_cnt = env->prog->len; int i, j, err; - err = bpf_prog_calc_digest(env->prog); + err = bpf_prog_calc_tag(env->prog); if (err) return err; diff --git a/kernel/cpu.c b/kernel/cpu.c index f75c4d031eeb..0a5f630f5c54 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -764,7 +764,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, { struct cpuhp_cpu_state 
*st = per_cpu_ptr(&cpuhp_state, cpu); int prev_state, ret = 0; - bool hasdied = false; if (num_online_cpus() == 1) return -EBUSY; @@ -809,7 +808,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, cpuhp_kick_ap_work(cpu); } - hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE; out: cpu_hotplug_done(); return ret; @@ -1302,10 +1300,24 @@ static int cpuhp_cb_check(enum cpuhp_state state) */ static int cpuhp_reserve_state(enum cpuhp_state state) { - enum cpuhp_state i; + enum cpuhp_state i, end; + struct cpuhp_step *step; - for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) { - if (!cpuhp_ap_states[i].name) + switch (state) { + case CPUHP_AP_ONLINE_DYN: + step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN; + end = CPUHP_AP_ONLINE_DYN_END; + break; + case CPUHP_BP_PREPARE_DYN: + step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN; + end = CPUHP_BP_PREPARE_DYN_END; + break; + default: + return -EINVAL; + } + + for (i = state; i <= end; i++, step++) { + if (!step->name) return i; } WARN(1, "No more dynamic states available for CPU hotplug\n"); @@ -1323,7 +1335,7 @@ static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name, mutex_lock(&cpuhp_state_mutex); - if (state == CPUHP_AP_ONLINE_DYN) { + if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) { ret = cpuhp_reserve_state(state); if (ret < 0) goto out; diff --git a/kernel/module.c b/kernel/module.c index 5088784c0cf9..38d4270925d4 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1145,7 +1145,7 @@ static size_t module_flags_taint(struct module *mod, char *buf) for (i = 0; i < TAINT_FLAGS_COUNT; i++) { if (taint_flags[i].module && test_bit(i, &mod->taints)) - buf[l++] = taint_flags[i].true; + buf[l++] = taint_flags[i].c_true; } return l; diff --git a/kernel/panic.c b/kernel/panic.c index c51edaa04fce..901c4fb46002 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -355,7 +355,7 @@ const char *print_tainted(void) for (i = 0; i < TAINT_FLAGS_COUNT; i++) { const struct taint_flag *t = &taint_flags[i]; *s++ = test_bit(i, &tainted_mask) ? - t->true : t->false; + t->c_true : t->c_false; } *s = 0; } else diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 80adef7d4c3d..0d6ff3e471be 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -136,6 +136,7 @@ int rcu_jiffies_till_stall_check(void); #define TPS(x) tracepoint_string(x) void rcu_early_boot_tests(void); +void rcu_test_sync_prims(void); /* * This function really isn't for public consumption, but RCU is special in diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index 1898559e6b60..b23a4d076f3d 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c @@ -185,9 +185,6 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused * benefits of doing might_sleep() to reduce latency.) * * Cool, huh? (Due to Josh Triplett.) - * - * But we want to make this a static inline later. The cond_resched() - * currently makes this problematic. */ void synchronize_sched(void) { @@ -195,7 +192,6 @@ void synchronize_sched(void) lock_is_held(&rcu_lock_map) || lock_is_held(&rcu_sched_lock_map), "Illegal synchronize_sched() in RCU read-side critical section"); - cond_resched(); } EXPORT_SYMBOL_GPL(synchronize_sched); diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h index 196f0302e2f4..c64b827ecbca 100644 --- a/kernel/rcu/tiny_plugin.h +++ b/kernel/rcu/tiny_plugin.h @@ -60,12 +60,17 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active); /* * During boot, we forgive RCU lockdep issues. 
After this function is - * invoked, we start taking RCU lockdep issues seriously. + * invoked, we start taking RCU lockdep issues seriously. Note that unlike + * Tree RCU, Tiny RCU transitions directly from RCU_SCHEDULER_INACTIVE + * to RCU_SCHEDULER_RUNNING, skipping the RCU_SCHEDULER_INIT stage. + * The reason for this is that Tiny RCU does not need kthreads, so does + * not have to care about the fact that the scheduler is half-initialized + * at a certain phase of the boot process. */ void __init rcu_scheduler_starting(void) { WARN_ON(nr_context_switches() > 0); - rcu_scheduler_active = 1; + rcu_scheduler_active = RCU_SCHEDULER_RUNNING; } #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 96c52e43f7ca..cb4e2056ccf3 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -127,13 +127,16 @@ int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */ int sysctl_panic_on_rcu_stall __read_mostly; /* - * The rcu_scheduler_active variable transitions from zero to one just - * before the first task is spawned. So when this variable is zero, RCU - * can assume that there is but one task, allowing RCU to (for example) + * The rcu_scheduler_active variable is initialized to the value + * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the + * first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE, + * RCU can assume that there is but one task, allowing RCU to (for example) * optimize synchronize_rcu() to a simple barrier(). When this variable - * is one, RCU must actually do all the hard work required to detect real - * grace periods. This variable is also used to suppress boot-time false - * positives from lockdep-RCU error checking. + * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required + * to detect real grace periods. This variable is also used to suppress + * boot-time false positives from lockdep-RCU error checking. Finally, it + * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU + * is fully initialized, including all of its kthreads having been spawned. */ int rcu_scheduler_active __read_mostly; EXPORT_SYMBOL_GPL(rcu_scheduler_active); @@ -3980,18 +3983,22 @@ static int __init rcu_spawn_gp_kthread(void) early_initcall(rcu_spawn_gp_kthread); /* - * This function is invoked towards the end of the scheduler's initialization - * process. Before this is called, the idle task might contain - * RCU read-side critical sections (during which time, this idle - * task is booting the system). After this function is called, the - * idle tasks are prohibited from containing RCU read-side critical - * sections. This function also enables RCU lockdep checking. + * This function is invoked towards the end of the scheduler's + * initialization process. Before this is called, the idle task might + * contain synchronous grace-period primitives (during which time, this idle + * task is booting the system, and such primitives are no-ops). After this + * function is called, any synchronous grace-period primitives are run as + * expedited, with the requesting task driving the grace period forward. + * A later core_initcall() rcu_exp_runtime_mode() will switch to full + * runtime RCU functionality. 
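 *
 * Editorial sketch, not part of this change: in terms of the
 * RCU_SCHEDULER_* values defined in rcupdate.h earlier in this diff,
 * the progression is INACTIVE (only the boot task exists, so the
 * synchronous primitives can be no-ops) -> INIT (set below; synchronous
 * primitives run expedited, with the requesting task driving the grace
 * period) -> RUNNING (set by the later rcu_exp_runtime_mode()
 * core_initcall()).  The effect on a caller is, roughly, what the
 * preemptible-RCU synchronize_rcu() path touched later in this diff
 * does:
 *
 *   if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
 *           return;                     single task, nothing to wait for
 *   if (rcu_gp_is_expedited())          true throughout RCU_SCHEDULER_INIT
 *           synchronize_rcu_expedited();
 *   else
 *           wait_rcu_gp(call_rcu);      normal grace period at run time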
*/ void rcu_scheduler_starting(void) { WARN_ON(num_online_cpus() != 1); WARN_ON(nr_context_switches() > 0); - rcu_scheduler_active = 1; + rcu_test_sync_prims(); + rcu_scheduler_active = RCU_SCHEDULER_INIT; + rcu_test_sync_prims(); } /* diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index d3053e99fdb6..e59e1849b89a 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -532,18 +532,28 @@ struct rcu_exp_work { }; /* + * Common code to drive an expedited grace period forward, used by + * workqueues and mid-boot-time tasks. + */ +static void rcu_exp_sel_wait_wake(struct rcu_state *rsp, + smp_call_func_t func, unsigned long s) +{ + /* Initialize the rcu_node tree in preparation for the wait. */ + sync_rcu_exp_select_cpus(rsp, func); + + /* Wait and clean up, including waking everyone. */ + rcu_exp_wait_wake(rsp, s); +} + +/* * Work-queue handler to drive an expedited grace period forward. */ static void wait_rcu_exp_gp(struct work_struct *wp) { struct rcu_exp_work *rewp; - /* Initialize the rcu_node tree in preparation for the wait. */ rewp = container_of(wp, struct rcu_exp_work, rew_work); - sync_rcu_exp_select_cpus(rewp->rew_rsp, rewp->rew_func); - - /* Wait and clean up, including waking everyone. */ - rcu_exp_wait_wake(rewp->rew_rsp, rewp->rew_s); + rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s); } /* @@ -569,12 +579,18 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp, if (exp_funnel_lock(rsp, s)) return; /* Someone else did our work for us. */ - /* Marshall arguments and schedule the expedited grace period. */ - rew.rew_func = func; - rew.rew_rsp = rsp; - rew.rew_s = s; - INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp); - schedule_work(&rew.rew_work); + /* Ensure that load happens before action based on it. */ + if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) { + /* Direct call during scheduler init and early_initcalls(). */ + rcu_exp_sel_wait_wake(rsp, func, s); + } else { + /* Marshall arguments & schedule the expedited grace period. */ + rew.rew_func = func; + rew.rew_rsp = rsp; + rew.rew_s = s; + INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp); + schedule_work(&rew.rew_work); + } /* Wait for expedited grace period to complete. */ rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id()); @@ -676,6 +692,8 @@ void synchronize_rcu_expedited(void) { struct rcu_state *rsp = rcu_state_p; + if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) + return; _synchronize_rcu_expedited(rsp, sync_rcu_exp_handler); } EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); @@ -693,3 +711,15 @@ void synchronize_rcu_expedited(void) EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + +/* + * Switch to run-time mode once Tree RCU has fully initialized. 
+ */ +static int __init rcu_exp_runtime_mode(void) +{ + rcu_test_sync_prims(); + rcu_scheduler_active = RCU_SCHEDULER_RUNNING; + rcu_test_sync_prims(); + return 0; +} +core_initcall(rcu_exp_runtime_mode); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 85c5a883c6e3..56583e764ebf 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -670,7 +670,7 @@ void synchronize_rcu(void) lock_is_held(&rcu_lock_map) || lock_is_held(&rcu_sched_lock_map), "Illegal synchronize_rcu() in RCU read-side critical section"); - if (!rcu_scheduler_active) + if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) return; if (rcu_gp_is_expedited()) synchronize_rcu_expedited(); diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index f19271dce0a9..4f6db7e6a117 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -121,11 +121,14 @@ EXPORT_SYMBOL(rcu_read_lock_sched_held); * Should expedited grace-period primitives always fall back to their * non-expedited counterparts? Intended for use within RCU. Note * that if the user specifies both rcu_expedited and rcu_normal, then - * rcu_normal wins. + * rcu_normal wins. (Except during the time period during boot from + * when the first task is spawned until the rcu_exp_runtime_mode() + * core_initcall() is invoked, at which point everything is expedited.) */ bool rcu_gp_is_normal(void) { - return READ_ONCE(rcu_normal); + return READ_ONCE(rcu_normal) && + rcu_scheduler_active != RCU_SCHEDULER_INIT; } EXPORT_SYMBOL_GPL(rcu_gp_is_normal); @@ -135,13 +138,14 @@ static atomic_t rcu_expedited_nesting = /* * Should normal grace-period primitives be expedited? Intended for * use within RCU. Note that this function takes the rcu_expedited - * sysfs/boot variable into account as well as the rcu_expedite_gp() - * nesting. So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited() - * returns false is a -really- bad idea. + * sysfs/boot variable and rcu_scheduler_active into account as well + * as the rcu_expedite_gp() nesting. So looping on rcu_unexpedite_gp() + * until rcu_gp_is_expedited() returns false is a -really- bad idea. */ bool rcu_gp_is_expedited(void) { - return rcu_expedited || atomic_read(&rcu_expedited_nesting); + return rcu_expedited || atomic_read(&rcu_expedited_nesting) || + rcu_scheduler_active == RCU_SCHEDULER_INIT; } EXPORT_SYMBOL_GPL(rcu_gp_is_expedited); @@ -257,7 +261,7 @@ EXPORT_SYMBOL_GPL(rcu_callback_map); int notrace debug_lockdep_rcu_enabled(void) { - return rcu_scheduler_active && debug_locks && + return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks && current->lockdep_recursion == 0; } EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); @@ -591,7 +595,7 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks); void synchronize_rcu_tasks(void) { /* Complain if the scheduler has not started. */ - RCU_LOCKDEP_WARN(!rcu_scheduler_active, + RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE, "synchronize_rcu_tasks called too soon"); /* Wait for the grace period. */ @@ -813,6 +817,23 @@ static void rcu_spawn_tasks_kthread(void) #endif /* #ifdef CONFIG_TASKS_RCU */ +/* + * Test each non-SRCU synchronous grace-period wait API. This is + * useful just after a change in mode for these primitives, and + * during early boot. 
+ */ +void rcu_test_sync_prims(void) +{ + if (!IS_ENABLED(CONFIG_PROVE_RCU)) + return; + synchronize_rcu(); + synchronize_rcu_bh(); + synchronize_sched(); + synchronize_rcu_expedited(); + synchronize_rcu_bh_expedited(); + synchronize_sched_expedited(); +} + #ifdef CONFIG_PROVE_RCU /* @@ -865,6 +886,7 @@ void rcu_early_boot_tests(void) early_boot_test_call_rcu_bh(); if (rcu_self_test_sched) early_boot_test_call_rcu_sched(); + rcu_test_sync_prims(); } static int rcu_verify_early_boot_tests(void) diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 975b8fc4f1e1..a8d74a733a38 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -483,11 +483,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); /* - * For mappings greater than a page, we limit the stride (and - * hence alignment) to a page size. + * For mappings greater than or equal to a page, we limit the stride + * (and hence alignment) to a page size. */ nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; - if (size > PAGE_SIZE) + if (size >= PAGE_SIZE) stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT)); else stride = 1; diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c index 4855d18a8511..038b109b2be7 100644 --- a/net/ax25/ax25_subr.c +++ b/net/ax25/ax25_subr.c @@ -264,7 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason) { ax25_clear_queues(ax25); - if (!sock_flag(ax25->sk, SOCK_DESTROY)) + if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY)) ax25_stop_heartbeat(ax25); ax25_stop_t1timer(ax25); ax25_stop_t2timer(ax25); diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c index 3949ce70be07..292e33bd916e 100644 --- a/net/ceph/crypto.c +++ b/net/ceph/crypto.c @@ -214,7 +214,7 @@ static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt, SKCIPHER_REQUEST_ON_STACK(req, key->tfm); struct sg_table sgt; struct scatterlist prealloc_sg; - char iv[AES_BLOCK_SIZE]; + char iv[AES_BLOCK_SIZE] __aligned(8); int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1)); int crypt_len = encrypt ? in_len + pad_byte : in_len; int ret; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index eba1546b5031..9a375b908d01 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1279,8 +1279,9 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid)) goto nla_put_failure; #endif - if (fi->fib_nh->nh_lwtstate) - lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate); + if (fi->fib_nh->nh_lwtstate && + lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate) < 0) + goto nla_put_failure; } #ifdef CONFIG_IP_ROUTE_MULTIPATH if (fi->fib_nhs > 1) { @@ -1316,8 +1317,10 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid)) goto nla_put_failure; #endif - if (nh->nh_lwtstate) - lwtunnel_fill_encap(skb, nh->nh_lwtstate); + if (nh->nh_lwtstate && + lwtunnel_fill_encap(skb, nh->nh_lwtstate) < 0) + goto nla_put_failure; + /* length of rtnetlink header + attributes */ rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh; } endfor_nexthops(fi); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 0fcac8e7a2b2..709ffe67d1de 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2472,7 +2472,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id, r->rtm_dst_len = 32; r->rtm_src_len = 0; r->rtm_tos = fl4->flowi4_tos; - r->rtm_table = table_id; + r->rtm_table = table_id < 256 ? 
table_id : RT_TABLE_COMPAT; if (nla_put_u32(skb, RTA_TABLE, table_id)) goto nla_put_failure; r->rtm_type = rt->rt_type; diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 4e777a3243f9..f51919535ca7 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -113,7 +113,7 @@ static bool tcp_fastopen_cookie_gen(struct request_sock *req, struct tcp_fastopen_cookie tmp; if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) { - struct in6_addr *buf = (struct in6_addr *) tmp.val; + struct in6_addr *buf = &tmp.addr; int i; for (i = 0; i < 4; i++) diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 36d292180942..753d6d0860fb 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1108,7 +1108,7 @@ route_lookup: t->parms.name); goto tx_err_dst_release; } - mtu = dst_mtu(dst) - psh_hlen; + mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen; if (encap_limit >= 0) { max_headroom += 8; mtu -= 8; @@ -1117,7 +1117,7 @@ route_lookup: mtu = IPV6_MIN_MTU; if (skb_dst(skb) && !t->parms.collect_md) skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); - if (skb->len > mtu && !skb_is_gso(skb)) { + if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) { *pmtu = mtu; err = -EMSGSIZE; goto tx_err_dst_release; diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 14a3903f1c82..7139fffd61b6 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -81,7 +81,7 @@ static void mld_gq_timer_expire(unsigned long data); static void mld_ifc_timer_expire(unsigned long data); static void mld_ifc_event(struct inet6_dev *idev); static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc); -static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr); +static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc); static void mld_clear_delrec(struct inet6_dev *idev); static bool mld_in_v1_mode(const struct inet6_dev *idev); static int sf_setstate(struct ifmcaddr6 *pmc); @@ -692,9 +692,9 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc) dev_mc_del(dev, buf); } - if (mc->mca_flags & MAF_NOREPORT) - goto done; spin_unlock_bh(&mc->mca_lock); + if (mc->mca_flags & MAF_NOREPORT) + return; if (!mc->idev->dead) igmp6_leave_group(mc); @@ -702,8 +702,6 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc) spin_lock_bh(&mc->mca_lock); if (del_timer(&mc->mca_timer)) atomic_dec(&mc->mca_refcnt); -done: - ip6_mc_clear_src(mc); spin_unlock_bh(&mc->mca_lock); } @@ -748,10 +746,11 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im) spin_unlock_bh(&idev->mc_lock); } -static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca) +static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im) { struct ifmcaddr6 *pmc, *pmc_prev; - struct ip6_sf_list *psf, *psf_next; + struct ip6_sf_list *psf; + struct in6_addr *pmca = &im->mca_addr; spin_lock_bh(&idev->mc_lock); pmc_prev = NULL; @@ -768,14 +767,20 @@ static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca) } spin_unlock_bh(&idev->mc_lock); + spin_lock_bh(&im->mca_lock); if (pmc) { - for (psf = pmc->mca_tomb; psf; psf = psf_next) { - psf_next = psf->sf_next; - kfree(psf); + im->idev = pmc->idev; + im->mca_crcount = idev->mc_qrv; + im->mca_sfmode = pmc->mca_sfmode; + if (pmc->mca_sfmode == MCAST_INCLUDE) { + im->mca_tomb = pmc->mca_tomb; + im->mca_sources = pmc->mca_sources; + for (psf = im->mca_sources; psf; psf = psf->sf_next) + psf->sf_crcount = im->mca_crcount; } in6_dev_put(pmc->idev); - kfree(pmc); } + 
spin_unlock_bh(&im->mca_lock); } static void mld_clear_delrec(struct inet6_dev *idev) @@ -904,7 +909,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) mca_get(mc); write_unlock_bh(&idev->lock); - mld_del_delrec(idev, &mc->mca_addr); + mld_del_delrec(idev, mc); igmp6_group_added(mc); ma_put(mc); return 0; @@ -927,6 +932,7 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr) write_unlock_bh(&idev->lock); igmp6_group_dropped(ma); + ip6_mc_clear_src(ma); ma_put(ma); return 0; @@ -2501,15 +2507,17 @@ void ipv6_mc_down(struct inet6_dev *idev) /* Withdraw multicast list */ read_lock_bh(&idev->lock); - mld_ifc_stop_timer(idev); - mld_gq_stop_timer(idev); - mld_dad_stop_timer(idev); for (i = idev->mc_list; i; i = i->next) igmp6_group_dropped(i); - read_unlock_bh(&idev->lock); - mld_clear_delrec(idev); + /* Should stop timer after group drop. or we will + * start timer again in mld_ifc_event() + */ + mld_ifc_stop_timer(idev); + mld_gq_stop_timer(idev); + mld_dad_stop_timer(idev); + read_unlock_bh(&idev->lock); } static void ipv6_mc_reset(struct inet6_dev *idev) @@ -2531,8 +2539,10 @@ void ipv6_mc_up(struct inet6_dev *idev) read_lock_bh(&idev->lock); ipv6_mc_reset(idev); - for (i = idev->mc_list; i; i = i->next) + for (i = idev->mc_list; i; i = i->next) { + mld_del_delrec(idev, i); igmp6_group_added(i); + } read_unlock_bh(&idev->lock); } @@ -2565,6 +2575,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev) /* Deactivate timers */ ipv6_mc_down(idev); + mld_clear_delrec(idev); /* Delete all-nodes address. */ /* We cannot call ipv6_dev_mc_dec() directly, our caller in @@ -2579,11 +2590,9 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev) write_lock_bh(&idev->lock); while ((i = idev->mc_list) != NULL) { idev->mc_list = i->next; - write_unlock_bh(&idev->lock); - igmp6_group_dropped(i); + write_unlock_bh(&idev->lock); ma_put(i); - write_lock_bh(&idev->lock); } write_unlock_bh(&idev->lock); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index ce5aaf448c54..4f6b067c8753 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3317,7 +3317,8 @@ static int rt6_fill_node(struct net *net, if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags))) goto nla_put_failure; - lwtunnel_fill_encap(skb, rt->dst.lwtstate); + if (lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0) + goto nla_put_failure; nlmsg_end(skb, nlh); return 0; diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c index ef1c8a46e7ac..03a064803626 100644 --- a/net/ipv6/seg6_hmac.c +++ b/net/ipv6/seg6_hmac.c @@ -400,7 +400,7 @@ static int seg6_hmac_init_algo(void) *p_tfm = tfm; } - p_tfm = this_cpu_ptr(algo->tfms); + p_tfm = raw_cpu_ptr(algo->tfms); tfm = *p_tfm; shsize = sizeof(*shash) + crypto_shash_descsize(tfm); diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index bbfca22c34ae..1d60cb132835 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -265,7 +265,9 @@ int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb) slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate); #ifdef CONFIG_DST_CACHE + preempt_disable(); dst = dst_cache_get(&slwt->cache); + preempt_enable(); #endif if (unlikely(!dst)) { @@ -286,7 +288,9 @@ int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb) } #ifdef CONFIG_DST_CACHE + preempt_disable(); dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr); + preempt_enable(); #endif } diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index e75cbf6ecc26..a0d901d8992e 100644 --- a/net/mac80211/chan.c +++ 
b/net/mac80211/chan.c @@ -231,9 +231,6 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata) !(sta->sdata->bss && sta->sdata->bss == sdata->bss)) continue; - if (!sta->uploaded || !test_sta_flag(sta, WLAN_STA_ASSOC)) - continue; - max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta)); } rcu_read_unlock(); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 41497b670e2b..d37ae7dc114b 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -6,6 +6,7 @@ * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright (c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -1295,6 +1296,26 @@ static void ieee80211_iface_work(struct work_struct *work) } else if (ieee80211_is_action(mgmt->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_VHT) { switch (mgmt->u.action.u.vht_group_notif.action_code) { + case WLAN_VHT_ACTION_OPMODE_NOTIF: { + struct ieee80211_rx_status *status; + enum nl80211_band band; + u8 opmode; + + status = IEEE80211_SKB_RXCB(skb); + band = status->band; + opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode; + + mutex_lock(&local->sta_mtx); + sta = sta_info_get_bss(sdata, mgmt->sa); + + if (sta) + ieee80211_vht_handle_opmode(sdata, sta, + opmode, + band); + + mutex_unlock(&local->sta_mtx); + break; + } case WLAN_VHT_ACTION_GROUPID_MGMT: ieee80211_process_mu_groups(sdata, mgmt); break; diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 1822c77f2b1c..56fb47953b72 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -913,12 +913,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) supp_ht = supp_ht || sband->ht_cap.ht_supported; supp_vht = supp_vht || sband->vht_cap.vht_supported; - if (sband->ht_cap.ht_supported) - local->rx_chains = - max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs), - local->rx_chains); + if (!sband->ht_cap.ht_supported) + continue; /* TODO: consider VHT for RX chains, hopefully it's the same */ + local->rx_chains = + max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs), + local->rx_chains); + + /* no need to mask, SM_PS_DISABLED has all bits set */ + sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED << + IEEE80211_HT_CAP_SM_PS_SHIFT; } /* if low-level driver supports AP, we also support VLAN */ diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 206698bc93f4..9e2641d45587 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -40,6 +40,8 @@ void rate_control_rate_init(struct sta_info *sta) ieee80211_sta_set_rx_nss(sta); + ieee80211_recalc_min_chandef(sta->sdata); + if (!ref) return; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 3e289a64ed43..3090dd4342f6 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2472,7 +2472,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) if (!ifmsh->mshcfg.dot11MeshForwarding) goto out; - fwd_skb = skb_copy_expand(skb, local->tx_headroom, 0, GFP_ATOMIC); + fwd_skb = skb_copy_expand(skb, local->tx_headroom + + sdata->encrypt_headroom, 0, GFP_ATOMIC); if (!fwd_skb) { net_info_ratelimited("%s: failed to clone mesh frame\n", sdata->name); @@ -2880,17 +2881,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) switch (mgmt->u.action.u.vht_opmode_notif.action_code) { case WLAN_VHT_ACTION_OPMODE_NOTIF: { - u8 opmode; - /* verify opmode is present */ if (len < IEEE80211_MIN_ACTION_SIZE + 2) goto 
invalid; - - opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode; - - ieee80211_vht_handle_opmode(rx->sdata, rx->sta, - opmode, status->band); - goto handled; + goto queue; } case WLAN_VHT_ACTION_GROUPID_MGMT: { if (len < IEEE80211_MIN_ACTION_SIZE + 25) @@ -3942,21 +3936,31 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, u64_stats_update_end(&stats->syncp); if (fast_rx->internal_forward) { - struct sta_info *dsta = sta_info_get(rx->sdata, skb->data); + struct sk_buff *xmit_skb = NULL; + bool multicast = is_multicast_ether_addr(skb->data); + + if (multicast) { + xmit_skb = skb_copy(skb, GFP_ATOMIC); + } else if (sta_info_get(rx->sdata, skb->data)) { + xmit_skb = skb; + skb = NULL; + } - if (dsta) { + if (xmit_skb) { /* * Send to wireless media and increase priority by 256 * to keep the received priority instead of * reclassifying the frame (see cfg80211_classify8021d). */ - skb->priority += 256; - skb->protocol = htons(ETH_P_802_3); - skb_reset_network_header(skb); - skb_reset_mac_header(skb); - dev_queue_xmit(skb); - return true; + xmit_skb->priority += 256; + xmit_skb->protocol = htons(ETH_P_802_3); + skb_reset_network_header(xmit_skb); + skb_reset_mac_header(xmit_skb); + dev_queue_xmit(xmit_skb); } + + if (!skb) + return true; } /* deliver to local stack */ diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index b6cfcf038c11..50c309094c37 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1501,8 +1501,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta, /* This will evaluate to 1, 3, 5 or 7. */ for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++) - if (ignored_acs & BIT(ac)) - continue; + if (!(ignored_acs & ieee80211_ac_to_qos_mask[ac])) + break; tid = 7 - 2 * ac; ieee80211_send_null_response(sta, tid, reason, true, false); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 0d8b716e509e..797e847cbc49 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1243,7 +1243,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local, struct ieee80211_vif *vif, - struct ieee80211_sta *pubsta, + struct sta_info *sta, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; @@ -1257,10 +1257,13 @@ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local, if (!ieee80211_is_data(hdr->frame_control)) return NULL; - if (pubsta) { + if (sta) { u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; - txq = pubsta->txq[tid]; + if (!sta->uploaded) + return NULL; + + txq = sta->sta.txq[tid]; } else if (vif) { txq = vif->txq; } @@ -1503,23 +1506,17 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local, struct fq *fq = &local->fq; struct ieee80211_vif *vif; struct txq_info *txqi; - struct ieee80211_sta *pubsta; if (!local->ops->wake_tx_queue || sdata->vif.type == NL80211_IFTYPE_MONITOR) return false; - if (sta && sta->uploaded) - pubsta = &sta->sta; - else - pubsta = NULL; - if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); vif = &sdata->vif; - txqi = ieee80211_get_txq(local, vif, pubsta, skb); + txqi = ieee80211_get_txq(local, vif, sta, skb); if (!txqi) return false; diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index 6832bf6ab69f..43e45bb660bc 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ -527,8 +527,10 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, u32 changed = 
__ieee80211_vht_handle_opmode(sdata, sta, opmode, band); - if (changed > 0) + if (changed > 0) { + ieee80211_recalc_min_chandef(sdata); rate_control_rate_update(local, sband, sta, changed); + } } void ieee80211_get_vht_mask_from_cap(__le16 vht_cap, diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 6b78bab27755..54253ea5976e 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -514,7 +514,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, int hooknum, nh_off, err = NF_ACCEPT; nh_off = skb_network_offset(skb); - skb_pull(skb, nh_off); + skb_pull_rcsum(skb, nh_off); /* See HOOK2MANIP(). */ if (maniptype == NF_NAT_MANIP_SRC) @@ -579,6 +579,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, err = nf_nat_packet(ct, ctinfo, hooknum, skb); push: skb_push(skb, nh_off); + skb_postpush_rcsum(skb, skb->data, nh_off); return err; } @@ -886,7 +887,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb, /* The conntrack module expects to be working at L3. */ nh_ofs = skb_network_offset(skb); - skb_pull(skb, nh_ofs); + skb_pull_rcsum(skb, nh_ofs); if (key->ip.frag != OVS_FRAG_TYPE_NONE) { err = handle_fragments(net, key, info->zone.id, skb); @@ -900,6 +901,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb, err = ovs_ct_lookup(net, key, info, skb); skb_push(skb, nh_ofs); + skb_postpush_rcsum(skb, skb->data, nh_ofs); if (err) kfree_skb(skb); return err; diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 2095c83ce773..e10456ef6f7a 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -900,8 +900,6 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, goto err; } act->order = i; - if (event == RTM_GETACTION) - act->tcfa_refcnt++; list_add_tail(&act->list, &actions); } @@ -914,7 +912,8 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, return ret; } err: - tcf_action_destroy(&actions, 0); + if (event != RTM_GETACTION) + tcf_action_destroy(&actions, 0); return ret; } diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 1c60317f0121..520baa41cba3 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -123,12 +123,11 @@ static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog, nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name)) return -EMSGSIZE; - nla = nla_reserve(skb, TCA_ACT_BPF_DIGEST, - sizeof(prog->filter->digest)); + nla = nla_reserve(skb, TCA_ACT_BPF_TAG, sizeof(prog->filter->tag)); if (nla == NULL) return -EMSGSIZE; - memcpy(nla_data(nla), prog->filter->digest, nla_len(nla)); + memcpy(nla_data(nla), prog->filter->tag, nla_len(nla)); return 0; } diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index adc776048d1a..d9c97018317d 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -555,11 +555,11 @@ static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog, nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name)) return -EMSGSIZE; - nla = nla_reserve(skb, TCA_BPF_DIGEST, sizeof(prog->filter->digest)); + nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag)); if (nla == NULL) return -EMSGSIZE; - memcpy(nla_data(nla), prog->filter->digest, nla_len(nla)); + memcpy(nla_data(nla), prog->filter->tag, nla_len(nla)); return 0; } diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 886e9d381771..153082598522 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -1489,7 +1489,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, 
__be32 *authp) case RPC_GSS_PROC_DESTROY: if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq)) goto auth_err; - rsci->h.expiry_time = get_seconds(); + rsci->h.expiry_time = seconds_since_boot(); set_bit(CACHE_NEGATIVE, &rsci->h.flags); if (resv->iov_len + 4 > PAGE_SIZE) goto drop; diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 3bc1d61694cb..9c9db55a0c1e 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -799,6 +799,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt) if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { dprintk("svc_recv: found XPT_CLOSE\n"); + if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags)) + xprt->xpt_ops->xpo_kill_temp_xprt(xprt); svc_delete_xprt(xprt); /* Leave XPT_BUSY set on the dead xprt: */ goto out; @@ -1020,9 +1022,11 @@ void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr) le = to_be_closed.next; list_del_init(le); xprt = list_entry(le, struct svc_xprt, xpt_list); - dprintk("svc_age_temp_xprts_now: closing %p\n", xprt); - xprt->xpt_ops->xpo_kill_temp_xprt(xprt); - svc_close_xprt(xprt); + set_bit(XPT_CLOSE, &xprt->xpt_flags); + set_bit(XPT_KILL_TEMP, &xprt->xpt_flags); + dprintk("svc_age_temp_xprts_now: queuing xprt %p for closing\n", + xprt); + svc_xprt_enqueue(xprt); } } EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now); diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 57d35fbb1c28..172b537f8cfc 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -347,8 +347,6 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt, atomic_inc(&rdma_stat_read); return ret; err: - ib_dma_unmap_sg(xprt->sc_cm_id->device, - frmr->sg, frmr->sg_nents, frmr->direction); svc_rdma_put_context(ctxt, 0); svc_rdma_put_frmr(xprt, frmr); return ret; diff --git a/net/tipc/discover.c b/net/tipc/discover.c index 6b109a808d4c..02462d67d191 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c @@ -169,7 +169,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb, /* Send response, if necessary */ if (respond && (mtyp == DSC_REQ_MSG)) { - rskb = tipc_buf_acquire(MAX_H_SIZE); + rskb = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC); if (!rskb) return; tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer); @@ -278,7 +278,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b, req = kmalloc(sizeof(*req), GFP_ATOMIC); if (!req) return -ENOMEM; - req->buf = tipc_buf_acquire(MAX_H_SIZE); + req->buf = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC); if (!req->buf) { kfree(req); return -ENOMEM; diff --git a/net/tipc/link.c b/net/tipc/link.c index bda89bf9f4ff..4e8647aef01c 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1395,7 +1395,7 @@ tnl: msg_set_seqno(hdr, seqno++); pktlen = msg_size(hdr); msg_set_size(&tnlhdr, pktlen + INT_H_SIZE); - tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE); + tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC); if (!tnlskb) { pr_warn("%sunable to send packet\n", link_co_err); return; diff --git a/net/tipc/msg.c b/net/tipc/msg.c index a22be502f1bd..ab02d0742476 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -58,12 +58,12 @@ static unsigned int align(unsigned int i) * NOTE: Headroom is reserved to allow prepending of a data link header. * There may also be unrequested tailroom present at the buffer's end. 
*/ -struct sk_buff *tipc_buf_acquire(u32 size) +struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp) { struct sk_buff *skb; unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u; - skb = alloc_skb_fclone(buf_size, GFP_ATOMIC); + skb = alloc_skb_fclone(buf_size, gfp); if (skb) { skb_reserve(skb, BUF_HEADROOM); skb_put(skb, size); @@ -95,7 +95,7 @@ struct sk_buff *tipc_msg_create(uint user, uint type, struct tipc_msg *msg; struct sk_buff *buf; - buf = tipc_buf_acquire(hdr_sz + data_sz); + buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC); if (unlikely(!buf)) return NULL; @@ -261,7 +261,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, /* No fragmentation needed? */ if (likely(msz <= pktmax)) { - skb = tipc_buf_acquire(msz); + skb = tipc_buf_acquire(msz, GFP_KERNEL); if (unlikely(!skb)) return -ENOMEM; skb_orphan(skb); @@ -282,7 +282,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, msg_set_importance(&pkthdr, msg_importance(mhdr)); /* Prepare first fragment */ - skb = tipc_buf_acquire(pktmax); + skb = tipc_buf_acquire(pktmax, GFP_KERNEL); if (!skb) return -ENOMEM; skb_orphan(skb); @@ -313,7 +313,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, pktsz = drem + INT_H_SIZE; else pktsz = pktmax; - skb = tipc_buf_acquire(pktsz); + skb = tipc_buf_acquire(pktsz, GFP_KERNEL); if (!skb) { rc = -ENOMEM; goto error; @@ -448,7 +448,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg, if (msz > (max / 2)) return false; - _skb = tipc_buf_acquire(max); + _skb = tipc_buf_acquire(max, GFP_ATOMIC); if (!_skb) return false; @@ -496,7 +496,7 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) /* Never return SHORT header; expand by replacing buffer if necessary */ if (msg_short(hdr)) { - *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen); + *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC); if (!*skb) goto exit; memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen); diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 8d408612ffa4..2c3dc38abf9c 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -820,7 +820,7 @@ static inline bool msg_is_reset(struct tipc_msg *hdr) return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG); } -struct sk_buff *tipc_buf_acquire(u32 size); +struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp); bool tipc_msg_validate(struct sk_buff *skb); bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err); void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type, diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index c1cfd92de17a..23f8899e0f8c 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -69,7 +69,7 @@ static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size, u32 dest) { struct tipc_net *tn = net_generic(net, tipc_net_id); - struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size); + struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC); struct tipc_msg *msg; if (buf != NULL) { diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index ef5eff93a8b8..5c1b267e22be 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -4615,6 +4615,15 @@ int cfg80211_check_station_change(struct wiphy *wiphy, break; } + /* + * Older kernel versions ignored this attribute entirely, so don't + * reject attempts to update it but mark it as unused instead so the + * driver won't look at the data. 
+ */ + if (statype != CFG80211_STA_AP_CLIENT_UNASSOC && + statype != CFG80211_STA_TDLS_PEER_SETUP) + params->opmode_notif_used = false; + return 0; } EXPORT_SYMBOL(cfg80211_check_station_change); @@ -4854,6 +4863,12 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) params.local_pm = pm; } + if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) { + params.opmode_notif_used = true; + params.opmode_notif = + nla_get_u8(info->attrs[NL80211_ATTR_OPMODE_NOTIF]); + } + /* Include parameters for TDLS peer (will check later) */ err = nl80211_set_station_tdls(info, ¶ms); if (err) diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 4a57c8a60bd9..6a6f44dd594b 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -610,6 +610,33 @@ error: return ret ? : -ENOENT; } +/* Adjust symbol name and address */ +static int post_process_probe_trace_point(struct probe_trace_point *tp, + struct map *map, unsigned long offs) +{ + struct symbol *sym; + u64 addr = tp->address + tp->offset - offs; + + sym = map__find_symbol(map, addr); + if (!sym) + return -ENOENT; + + if (strcmp(sym->name, tp->symbol)) { + /* If we have no realname, use symbol for it */ + if (!tp->realname) + tp->realname = tp->symbol; + else + free(tp->symbol); + tp->symbol = strdup(sym->name); + if (!tp->symbol) + return -ENOMEM; + } + tp->offset = addr - sym->start; + tp->address -= offs; + + return 0; +} + /* * Rename DWARF symbols to ELF symbols -- gcc sometimes optimizes functions * and generate new symbols with suffixes such as .constprop.N or .isra.N @@ -622,11 +649,9 @@ static int post_process_offline_probe_trace_events(struct probe_trace_event *tevs, int ntevs, const char *pathname) { - struct symbol *sym; struct map *map; unsigned long stext = 0; - u64 addr; - int i; + int i, ret = 0; /* Prepare a map for offline binary */ map = dso__new_map(pathname); @@ -636,23 +661,14 @@ post_process_offline_probe_trace_events(struct probe_trace_event *tevs, } for (i = 0; i < ntevs; i++) { - addr = tevs[i].point.address + tevs[i].point.offset - stext; - sym = map__find_symbol(map, addr); - if (!sym) - continue; - if (!strcmp(sym->name, tevs[i].point.symbol)) - continue; - /* If we have no realname, use symbol for it */ - if (!tevs[i].point.realname) - tevs[i].point.realname = tevs[i].point.symbol; - else - free(tevs[i].point.symbol); - tevs[i].point.symbol = strdup(sym->name); - tevs[i].point.offset = addr - sym->start; + ret = post_process_probe_trace_point(&tevs[i].point, + map, stext); + if (ret < 0) + break; } map__put(map); - return 0; + return ret; } static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs, @@ -682,18 +698,31 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs, return ret; } -static int add_module_to_probe_trace_events(struct probe_trace_event *tevs, - int ntevs, const char *module) +static int +post_process_module_probe_trace_events(struct probe_trace_event *tevs, + int ntevs, const char *module, + struct debuginfo *dinfo) { + Dwarf_Addr text_offs = 0; int i, ret = 0; char *mod_name = NULL; + struct map *map; if (!module) return 0; - mod_name = find_module_name(module); + map = get_target_map(module, false); + if (!map || debuginfo__get_text_offset(dinfo, &text_offs, true) < 0) { + pr_warning("Failed to get ELF symbols for %s\n", module); + return -EINVAL; + } + mod_name = find_module_name(module); for (i = 0; i < ntevs; i++) { + ret = post_process_probe_trace_point(&tevs[i].point, + map, (unsigned long)text_offs); + 
if (ret < 0) + break; tevs[i].point.module = strdup(mod_name ? mod_name : module); if (!tevs[i].point.module) { @@ -703,6 +732,8 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs, } free(mod_name); + map__put(map); + return ret; } @@ -760,7 +791,7 @@ arch__post_process_probe_trace_events(struct perf_probe_event *pev __maybe_unuse static int post_process_probe_trace_events(struct perf_probe_event *pev, struct probe_trace_event *tevs, int ntevs, const char *module, - bool uprobe) + bool uprobe, struct debuginfo *dinfo) { int ret; @@ -768,7 +799,8 @@ static int post_process_probe_trace_events(struct perf_probe_event *pev, ret = add_exec_to_probe_trace_events(tevs, ntevs, module); else if (module) /* Currently ref_reloc_sym based probe is not for drivers */ - ret = add_module_to_probe_trace_events(tevs, ntevs, module); + ret = post_process_module_probe_trace_events(tevs, ntevs, + module, dinfo); else ret = post_process_kernel_probe_trace_events(tevs, ntevs); @@ -812,30 +844,27 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, } } - debuginfo__delete(dinfo); - if (ntevs > 0) { /* Succeeded to find trace events */ pr_debug("Found %d probe_trace_events.\n", ntevs); ret = post_process_probe_trace_events(pev, *tevs, ntevs, - pev->target, pev->uprobes); + pev->target, pev->uprobes, dinfo); if (ret < 0 || ret == ntevs) { + pr_debug("Post processing failed or all events are skipped. (%d)\n", ret); clear_probe_trace_events(*tevs, ntevs); zfree(tevs); + ntevs = 0; } - if (ret != ntevs) - return ret < 0 ? ret : ntevs; - ntevs = 0; - /* Fall through */ } + debuginfo__delete(dinfo); + if (ntevs == 0) { /* No error but failed to find probe point. */ pr_warning("Probe point '%s' not found.\n", synthesize_perf_probe_point(&pev->point)); return -ENOENT; - } - /* Error path : ntevs < 0 */ - pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs); - if (ntevs < 0) { + } else if (ntevs < 0) { + /* Error path : ntevs < 0 */ + pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs); if (ntevs == -EBADF) pr_warning("Warning: No dwarf info found in the vmlinux - " "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n"); diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index df4debe564da..0d9d6e0803b8 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -1501,7 +1501,8 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg, } /* For the kernel module, we need a special code to get a DIE */ -static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs) +int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs, + bool adjust_offset) { int n, i; Elf32_Word shndx; @@ -1530,6 +1531,8 @@ static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs) if (!shdr) return -ENOENT; *offs = shdr->sh_addr; + if (adjust_offset) + *offs -= shdr->sh_offset; } } return 0; @@ -1543,16 +1546,12 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, Dwarf_Addr _addr = 0, baseaddr = 0; const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp; int baseline = 0, lineno = 0, ret = 0; - bool reloc = false; -retry: + /* We always need to relocate the address for aranges */ + if (debuginfo__get_text_offset(dbg, &baseaddr, false) == 0) + addr += baseaddr; /* Find cu die */ if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) { - if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) { - addr += baseaddr; - reloc = true; - 
goto retry; - } pr_warning("Failed to find debug information for address %lx\n", addr); ret = -EINVAL; diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index f1d8558f498e..2956c5198652 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -46,6 +46,9 @@ int debuginfo__find_trace_events(struct debuginfo *dbg, int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, struct perf_probe_point *ppt); +int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs, + bool adjust_offset); + /* Find a line range */ int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c index c22860ab9733..30e1ac62e8cb 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c @@ -66,7 +66,7 @@ int pmc56_overflow(void) FAIL_IF(ebb_event_enable(&event)); - mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); + mtspr(SPRN_PMC2, pmc_sample_period(sample_period)); mtspr(SPRN_PMC5, 0); mtspr(SPRN_PMC6, 0); diff --git a/tools/usb/usbip/README b/tools/usb/usbip/README index 831f49fea3ce..5eb2b6c7722b 100644 --- a/tools/usb/usbip/README +++ b/tools/usb/usbip/README @@ -4,10 +4,33 @@ # Copyright (C) 2011 matt mooney <mfm@muteddisk.com> # 2005-2008 Takahiro Hirofuchi +[Overview] +USB/IP protocol allows to pass USB device from server to client over the +network. Server is a machine which provides (shares) a USB device. Client is +a machine which uses USB device provided by server over the network. +The USB device may be either physical device connected to a server or +software entity created on a server using USB gadget subsystem. +Whole project consists of four parts: + + - usbip-vhci + A client side kernel module which provides a virtual USB Host Controller + and allows to import a USB device from a remote machine. + + - usbip-host (stub driver) + A server side module which provides a USB device driver which can be + bound to a physical USB device to make it exportable. + + - usbip-vudc + A server side module which provides a virtual USB Device Controller and allows + to export a USB device created using USB Gadget Subsystem. + + - usbip-utils + A set of userspace tools used to handle connection and management. + Used on both sides. [Requirements] - USB/IP device drivers - Found in the staging directory of the Linux kernel. + Found in the drivers/usb/usbip/ directory of the Linux kernel tree. - libudev >= 2.0 libudev library @@ -36,6 +59,10 @@ [Usage] +On a server side there are two entities which can be shared. +First of them is physical usb device connected to the machine. +To make it available below steps should be executed: + server:# (Physically attach your USB device.) server:# insmod usbip-core.ko @@ -52,6 +79,30 @@ - The USB device 1-2 is now exportable to other hosts! - Use `usbip unbind --busid 1-2' to stop exporting the device. +Second of shareable entities is USB Gadget created using USB Gadget Subsystem +on a server machine. To make it available below steps should be executed: + + server:# (Create your USB gadget) + - Currently the most preferable way of creating a new USB gadget + is ConfigFS Composite Gadget. Please refer to its documentation + for details. 
+ - See vudc_server_example.sh for a short example of USB gadget creation + + server:# insmod usbip-core.ko + server:# insmod usbip-vudc.ko + - To create more than one instance of vudc use num module param + + server:# (Bind gadget to one of available vudc) + - Assign your new gadget to USB/IP UDC + - Using ConfigFS interface you may do this simply by: + server:# cd /sys/kernel/config/usb_gadget/<gadget_name> + server:# echo "usbip-vudc.0" > UDC + + server:# usbipd -D --device + - Start usbip daemon. + +To attach new device to client machine below commands should be used: + client:# insmod usbip-core.ko client:# insmod vhci-hcd.ko @@ -60,6 +111,8 @@ client:# usbip attach --remote <host> --busid 1-2 - Connect the remote USB device. + - When using vudc on a server side busid is really vudc instance name. + For example: usbip-vudc.0 client:# usbip port - Show virtual port status. @@ -192,6 +245,8 @@ Detach the imported device: - http://usbip.wiki.sourceforge.net/how-to-debug-usbip - usbip-host.ko must be bound to the target device. - See /proc/bus/usb/devices and find "Driver=..." lines of the device. + - Target USB gadget must be bound to vudc + (using USB gadget susbsys, not usbip bind command) - Shutdown firewall. - usbip now uses TCP port 3240. - Disable SELinux. diff --git a/tools/usb/usbip/vudc/vudc_server_example.sh b/tools/usb/usbip/vudc/vudc_server_example.sh new file mode 100755 index 000000000000..2736be64f203 --- /dev/null +++ b/tools/usb/usbip/vudc/vudc_server_example.sh @@ -0,0 +1,107 @@ +#!/bin/bash + +################################################################################ +# This is free and unencumbered software released into the public domain. +# +# Anyone is free to copy, modify, publish, use, compile, sell, or +# distribute this software, either in source code form or as a compiled +# binary, for any purpose, commercial or non-commercial, and by any +# means. +# +# In jurisdictions that recognize copyright laws, the author or authors +# of this software dedicate any and all copyright interest in the +# software to the public domain. We make this dedication for the benefit +# of the public at large and to the detriment of our heirs and +# successors. We intend this dedication to be an overt act of +# relinquishment in perpetuity of all present and future rights to this +# software under copyright law. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# For more information, please refer to <http://unlicense.org/> +################################################################################ + +################################################################################ +# This is a sample script which shows how to use vUDC with ConfigFS gadgets +################################################################################ + +# Stop script on error +set -e + +################################################################################ +# Create your USB gadget +# You may use bare ConfigFS interface (as below) +# or libusbgx or gt toool +# Instead of ConfigFS gadgets you may use any of legacy gadgets. 
+################################################################################ +CONFIGFS_MOUNT_POINT="/sys/kernel/config" +GADGET_NAME="g1" +ID_VENDOR="0x1d6b" +ID_PRODUCT="0x0104" + +cd ${CONFIGFS_MOUNT_POINT}/usb_gadget +# Create a new USB gadget +mkdir ${GADGET_NAME} +cd ${GADGET_NAME} + +# This gadget contains one function - ACM (serial port over USB) +FUNC_DIR="functions/acm.ser0" +mkdir ${FUNC_DIR} + +# Just one configuration +mkdir configs/c.1 +ln -s ${FUNC_DIR} configs/c.1 + +# Set our gadget identity +echo ${ID_VENDOR} > idVendor +echo ${ID_PRODUCT} > idProduct + +################################################################################ +# Load vudc-module if vudc is not available +# You may change value of num param to get more than one vUDC instance +################################################################################ +[[ -d /sys/class/udc/usbip-vudc.0 ]] || modprobe usbip-vudc num=1 + +################################################################################ +# Bind gadget to our vUDC +# By default we bind to first one but you may change this if you would like +# to use more than one instance +################################################################################ +echo "usbip-vudc.0" > UDC + +################################################################################ +# Let's now run our usbip daemon in a USB device mode +################################################################################ +usbipd --device & + +################################################################################ +# Now your USB gadget is available using USB/IP protocol. +# To prepare your client, you should ensure that usbip-vhci module is inside +# your kernel. If it's not then you can load it: +# +# $ modprobe usbip-vhci +# +# To check availability of your gadget you may try to list devices exported +# on a remote server: +# +# $ modprobe usbip-vhci +# $ usbip list -r $SERVER_IP +# Exportable USB devices +# ====================== +# usbipd: info: request 0x8005(6): complete +# - 127.0.0.1 +# usbip-vudc.0: Linux Foundation : unknown product (1d6b:0104) +# : /sys/devices/platform/usbip-vudc.0 +# : (Defined at Interface level) (00/00/00) +# +# To attach this device to your client you may use: +# +# $ usbip attach -r $SERVER_IP -d usbip-vudc.0 +# +################################################################################ diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h index 34e63cc4c572..14142faf040b 100644 --- a/tools/virtio/ringtest/main.h +++ b/tools/virtio/ringtest/main.h @@ -26,6 +26,16 @@ static inline void wait_cycles(unsigned long long cycles) #define VMEXIT_CYCLES 500 #define VMENTRY_CYCLES 500 +#elif defined(__s390x__) +static inline void wait_cycles(unsigned long long cycles) +{ + asm volatile("0: brctg %0,0b" : : "d" (cycles)); +} + +/* tweak me */ +#define VMEXIT_CYCLES 200 +#define VMENTRY_CYCLES 200 + #else static inline void wait_cycles(unsigned long long cycles) { @@ -81,6 +91,8 @@ extern unsigned ring_size; /* Is there a portable way to do this? 
*/ #if defined(__x86_64__) || defined(__i386__) #define cpu_relax() asm ("rep; nop" ::: "memory") +#elif defined(__s390x__) +#define cpu_relax() barrier() #else #define cpu_relax() assert(0) #endif diff --git a/tools/virtio/ringtest/run-on-all.sh b/tools/virtio/ringtest/run-on-all.sh index 2e69ca812b4c..29b0d3920bfc 100755 --- a/tools/virtio/ringtest/run-on-all.sh +++ b/tools/virtio/ringtest/run-on-all.sh @@ -1,12 +1,13 @@ #!/bin/sh +CPUS_ONLINE=$(lscpu --online -p=cpu|grep -v -e '#') #use last CPU for host. Why not the first? #many devices tend to use cpu0 by default so #it tends to be busier -HOST_AFFINITY=$(lscpu -p=cpu | tail -1) +HOST_AFFINITY=$(echo "${CPUS_ONLINE}"|tail -n 1) #run command on all cpus -for cpu in $(seq 0 $HOST_AFFINITY) +for cpu in $CPUS_ONLINE do #Don't run guest and host on same CPU #It actually works ok if using signalling diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index a2dbbccbb6a3..6a084cd57b88 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -24,6 +24,7 @@ #include <clocksource/arm_arch_timer.h> #include <asm/arch_timer.h> +#include <asm/kvm_hyp.h> #include <kvm/arm_vgic.h> #include <kvm/arm_arch_timer.h> @@ -89,9 +90,6 @@ static void kvm_timer_inject_irq_work(struct work_struct *work) struct kvm_vcpu *vcpu; vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired); - vcpu->arch.timer_cpu.armed = false; - - WARN_ON(!kvm_timer_should_fire(vcpu)); /* * If the vcpu is blocked we want to wake it up so that it will see @@ -512,3 +510,25 @@ void kvm_timer_init(struct kvm *kvm) { kvm->arch.timer.cntvoff = kvm_phys_timer_read(); } + +/* + * On VHE system, we only need to configure trap on physical timer and counter + * accesses in EL0 and EL1 once, not for every world switch. + * The host kernel runs at EL2 with HCR_EL2.TGE == 1, + * and this makes those bits have no effect for the host kernel execution. + */ +void kvm_timer_init_vhe(void) +{ + /* When HCR_EL2.E2H ==1, EL1PCEN and EL1PCTEN are shifted by 10 */ + u32 cnthctl_shift = 10; + u64 val; + + /* + * Disallow physical timer access for the guest. + * Physical counter access is allowed. + */ + val = read_sysreg(cnthctl_el2); + val &= ~(CNTHCTL_EL1PCEN << cnthctl_shift); + val |= (CNTHCTL_EL1PCTEN << cnthctl_shift); + write_sysreg(val, cnthctl_el2); +} diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c index 798866a8d875..63e28dd18bb0 100644 --- a/virt/kvm/arm/hyp/timer-sr.c +++ b/virt/kvm/arm/hyp/timer-sr.c @@ -35,10 +35,16 @@ void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) /* Disable the virtual timer */ write_sysreg_el0(0, cntv_ctl); - /* Allow physical timer/counter access for the host */ - val = read_sysreg(cnthctl_el2); - val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; - write_sysreg(val, cnthctl_el2); + /* + * We don't need to do this for VHE since the host kernel runs in EL2 + * with HCR_EL2.TGE ==1, which makes those bits have no impact. 
+ */ + if (!has_vhe()) { + /* Allow physical timer/counter access for the host */ + val = read_sysreg(cnthctl_el2); + val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; + write_sysreg(val, cnthctl_el2); + } /* Clear cntvoff for the host */ write_sysreg(0, cntvoff_el2); @@ -50,14 +56,17 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu) struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; u64 val; - /* - * Disallow physical timer access for the guest - * Physical counter access is allowed - */ - val = read_sysreg(cnthctl_el2); - val &= ~CNTHCTL_EL1PCEN; - val |= CNTHCTL_EL1PCTEN; - write_sysreg(val, cnthctl_el2); + /* Those bits are already configured at boot on VHE-system */ + if (!has_vhe()) { + /* + * Disallow physical timer access for the guest + * Physical counter access is allowed + */ + val = read_sysreg(cnthctl_el2); + val &= ~CNTHCTL_EL1PCEN; + val |= CNTHCTL_EL1PCTEN; + write_sysreg(val, cnthctl_el2); + } if (timer->enabled) { write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2); diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index 5114391b7e5a..c737ea0a310a 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -268,15 +268,11 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm) { struct vgic_dist *dist = &kvm->arch.vgic; - mutex_lock(&kvm->lock); - dist->ready = false; dist->initialized = false; kfree(dist->spis); dist->nr_spis = 0; - - mutex_unlock(&kvm->lock); } void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) @@ -286,7 +282,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) INIT_LIST_HEAD(&vgic_cpu->ap_list_head); } -void kvm_vgic_destroy(struct kvm *kvm) +/* To be called with kvm->lock held */ +static void __kvm_vgic_destroy(struct kvm *kvm) { struct kvm_vcpu *vcpu; int i; @@ -297,6 +294,13 @@ void kvm_vgic_destroy(struct kvm *kvm) kvm_vgic_vcpu_destroy(vcpu); } +void kvm_vgic_destroy(struct kvm *kvm) +{ + mutex_lock(&kvm->lock); + __kvm_vgic_destroy(kvm); + mutex_unlock(&kvm->lock); +} + /** * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest * is a GICv2. A GICv3 must be explicitly initialized by the guest using the @@ -348,6 +352,10 @@ int kvm_vgic_map_resources(struct kvm *kvm) ret = vgic_v2_map_resources(kvm); else ret = vgic_v3_map_resources(kvm); + + if (ret) + __kvm_vgic_destroy(kvm); + out: mutex_unlock(&kvm->lock); return ret; diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index 9bab86757fa4..834137e7b83f 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c @@ -293,8 +293,6 @@ int vgic_v2_map_resources(struct kvm *kvm) dist->ready = true; out: - if (ret) - kvm_vgic_destroy(kvm); return ret; } diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 5c9f9745e6ca..e6b03fd8c374 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -302,8 +302,6 @@ int vgic_v3_map_resources(struct kvm *kvm) dist->ready = true; out: - if (ret) - kvm_vgic_destroy(kvm); return ret; } |
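The closing vgic hunks above (vgic-init.c, vgic-v2.c, vgic-v3.c) all implement one refactoring: the per-variant map_resources() functions stop calling kvm_vgic_destroy() on their error paths, and the caller, which already holds kvm->lock, instead invokes a new __kvm_vgic_destroy() helper that assumes the lock is held, while the public kvm_vgic_destroy() becomes a thin lock-taking wrapper. Below is a minimal standalone C sketch of that "locked wrapper plus lock-assumed __helper" pattern, not kernel code: pthread mutexes stand in for kvm->lock, and every name (struct dev_state, __dev_destroy, dev_map_resources, the calloc-based resource) is hypothetical and chosen only to mirror the shape of the patch.

/*
 * Standalone illustration (assumption-laden, not the kernel implementation):
 * the public destroy takes the lock; the "__" variant expects it held, so an
 * error path that already owns the lock can reuse the same teardown without
 * re-acquiring it.
 */
#include <pthread.h>
#include <stdlib.h>

struct dev_state {
	pthread_mutex_t lock;
	int *spis;
	int ready;
};

/* To be called with state->lock held. */
static void __dev_destroy(struct dev_state *s)
{
	s->ready = 0;
	free(s->spis);
	s->spis = NULL;
}

void dev_destroy(struct dev_state *s)
{
	pthread_mutex_lock(&s->lock);
	__dev_destroy(s);
	pthread_mutex_unlock(&s->lock);
}

/*
 * Analogue of kvm_vgic_map_resources(): cleanup on failure happens here,
 * under the lock, instead of deep inside the per-variant helper.
 */
int dev_map_resources(struct dev_state *s)
{
	int ret = 0;

	pthread_mutex_lock(&s->lock);
	s->spis = calloc(32, sizeof(*s->spis));
	if (!s->spis)
		ret = -1;
	else
		s->ready = 1;

	if (ret)
		__dev_destroy(s);	/* lock already held: use the helper */
	pthread_mutex_unlock(&s->lock);
	return ret;
}

The design point the sketch is meant to make explicit: once cleanup can be reached both from a public entry point and from an error path that already owns the lock, splitting it into a locked wrapper and an unlocked core avoids either recursive locking or dropping and re-taking the lock mid-failure, which is exactly what removing the kvm_vgic_destroy() calls from vgic_v2/v3_map_resources() accomplishes in the hunks above.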