| author | David S. Miller <davem@davemloft.net> | 2016-05-09 22:59:24 +0300 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2016-05-09 22:59:24 +0300 |
| commit | e800072c18f0d7b89a80fa46dceb3d080c80e09c (patch) | |
| tree | 8da6cb7944762a60ec37594720c1ad2757631c2f | |
| parent | e8ed77dfa90dd79c5343415a4bbbfdab9787b35a (diff) | |
| parent | b507146bb6b9ac0c0197100ba3e299825a21fed3 (diff) | |
| download | linux-e800072c18f0d7b89a80fa46dceb3d080c80e09c.tar.xz | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
In netdevice.h we removed the structure in net-next that is being
changed in 'net'. In macsec.c and rtnetlink.c we have overlaps
between fixes in 'net' and the u64 attribute changes in 'net-next'.
The mlx5 conflicts have to do with vxlan support dependencies.
Signed-off-by: David S. Miller <davem@davemloft.net>
147 files changed, 1271 insertions, 577 deletions
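For readers unfamiliar with the "u64 attribute changes in 'net-next'" mentioned in the merge message: the rtnetlink/macsec conflicts concern netlink attributes that carry 64-bit values with an explicit pad attribute so the payload stays 8-byte aligned. The sketch below only illustrates that pattern; MYATTR_STAT64 and MYATTR_PAD are made-up attribute IDs, and it assumes the nla_put_u64_64bit() helper that net-next was converting callers to at the time.

```c
/* Sketch of the 64-bit-aligned netlink attribute pattern referred to above.
 * MYATTR_STAT64 and MYATTR_PAD are hypothetical attribute IDs used for
 * illustration only; nla_put_u64_64bit() is the net-next era helper. */
#include <linux/errno.h>
#include <net/netlink.h>

enum {
	MYATTR_UNSPEC,
	MYATTR_STAT64,
	MYATTR_PAD,		/* pad attribute so the u64 lands 8-byte aligned */
	__MYATTR_MAX,
};

static int my_fill_stats(struct sk_buff *skb, u64 value)
{
	/* Plain nla_put_u64() could leave the payload only 4-byte aligned;
	 * the _64bit variant inserts a MYATTR_PAD attribute when needed. */
	if (nla_put_u64_64bit(skb, MYATTR_STAT64, value, MYATTR_PAD))
		return -EMSGSIZE;
	return 0;
}
```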
@@ -69,6 +69,7 @@ Jean Tourrilhes <jt@hpl.hp.com>
 Jeff Garzik <jgarzik@pretzel.yyz.us>
 Jens Axboe <axboe@suse.de>
 Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
+John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
 John Stultz <johnstul@us.ibm.com>
 <josh@joshtriplett.org> <josh@freedesktop.org>
 <josh@joshtriplett.org> <josh@kernel.org>
diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
index 30df832a6f2f..87adfb227ca9 100644
--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
@@ -32,6 +32,10 @@ Optional properties:
 - target-supply     : regulator for SATA target power
 - phys              : reference to the SATA PHY node
 - phy-names         : must be "sata-phy"
+- ports-implemented : Mask that indicates which ports that the HBA supports
+                      are available for software to use. Useful if PORTS_IMPL
+                      is not programmed by the BIOS, which is true with
+                      some embedded SOC's.
 
 Required properties when using sub-nodes:
 - #address-cells    : number of cells to encode an address
diff --git a/Documentation/networking/checksum-offloads.txt b/Documentation/networking/checksum-offloads.txt
index de2a327766a7..56e36861245f 100644
--- a/Documentation/networking/checksum-offloads.txt
+++ b/Documentation/networking/checksum-offloads.txt
@@ -69,18 +69,18 @@ LCO: Local Checksum Offload
 LCO is a technique for efficiently computing the outer checksum of an
  encapsulated datagram when the inner checksum is due to be offloaded.
 The ones-complement sum of a correctly checksummed TCP or UDP packet is
- equal to the sum of the pseudo header, because everything else gets
- 'cancelled out' by the checksum field. This is because the sum was
+ equal to the complement of the sum of the pseudo header, because everything
+ else gets 'cancelled out' by the checksum field. This is because the sum was
  complemented before being written to the checksum field.
 More generally, this holds in any case where the 'IP-style' ones complement
  checksum is used, and thus any checksum that TX Checksum Offload supports.
 That is, if we have set up TX Checksum Offload with a start/offset pair, we
- know that _after the device has filled in that checksum_, the ones
+ know that after the device has filled in that checksum, the ones
  complement sum from csum_start to the end of the packet will be equal to
- _whatever value we put in the checksum field beforehand_. This allows us
- to compute the outer checksum without looking at the payload: we simply
- stop summing when we get to csum_start, then add the 16-bit word at
- (csum_start + csum_offset).
+ the complement of whatever value we put in the checksum field beforehand.
+ This allows us to compute the outer checksum without looking at the payload:
+ we simply stop summing when we get to csum_start, then add the complement of
+ the 16-bit word at (csum_start + csum_offset).
 Then, when the true inner checksum is filled in (either by hardware or by
  skb_checksum_help()), the outer checksum will become correct by virtue of
  the arithmetic.
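To make the corrected LCO arithmetic above concrete, here is a minimal user-space sketch. The helper names and toy word values are assumptions for the demo, not kernel code; it checks that once the inner checksum field is filled in, the ones-complement sum of the inner packet equals the complement of its pseudo-header sum, which is exactly the quantity the outer checksum needs once it stops summing at csum_start.

```c
/*
 * Minimal user-space illustration of the LCO identity described above.
 * csum16_add()/csum16_sum() and the word values are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

/* 16-bit ones-complement add with end-around carry */
static uint16_t csum16_add(uint16_t a, uint16_t b)
{
	uint32_t s = (uint32_t)a + b;
	return (uint16_t)((s & 0xffff) + (s >> 16));
}

static uint16_t csum16_sum(const uint16_t *w, int n)
{
	uint16_t s = 0;
	while (n--)
		s = csum16_add(s, *w++);
	return s;
}

int main(void)
{
	uint16_t pseudo[2]  = { 0x1234, 0xabcd };          /* toy pseudo header   */
	uint16_t payload[3] = { 0x0001, 0x0002, 0x0003 };  /* inner data, field=0 */

	/* What the stack writes into the inner checksum field before offload:
	 * the complement of (pseudo header sum + payload sum). */
	uint16_t field = (uint16_t)~csum16_add(csum16_sum(pseudo, 2),
					       csum16_sum(payload, 3));

	/* Ones-complement sum of the checksummed inner packet (payload + field)
	 * equals the complement of the pseudo-header sum, so the outer checksum
	 * can stop at csum_start and just add the complement of that field. */
	uint16_t inner_sum = csum16_add(csum16_sum(payload, 3), field);
	uint16_t expect    = (uint16_t)~csum16_sum(pseudo, 2);

	printf("inner sum = 0x%04x, ~pseudo = 0x%04x\n", inner_sum, expect);
	return inner_sum == expect ? 0 : 1;
}
```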
diff --git a/MAINTAINERS b/MAINTAINERS index e425912ff933..b57df66532d2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -872,9 +872,9 @@ F: drivers/perf/arm_pmu.c F: include/linux/perf/arm_pmu.h ARM PORT -M: Russell King <linux@arm.linux.org.uk> +M: Russell King <linux@armlinux.org.uk> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -W: http://www.arm.linux.org.uk/ +W: http://www.armlinux.org.uk/ S: Maintained F: arch/arm/ @@ -886,35 +886,35 @@ F: arch/arm/plat-*/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git ARM PRIMECELL AACI PL041 DRIVER -M: Russell King <linux@arm.linux.org.uk> +M: Russell King <linux@armlinux.org.uk> S: Maintained F: sound/arm/aaci.* ARM PRIMECELL CLCD PL110 DRIVER -M: Russell King <linux@arm.linux.org.uk> +M: Russell King <linux@armlinux.org.uk> S: Maintained F: drivers/video/fbdev/amba-clcd.* ARM PRIMECELL KMI PL050 DRIVER -M: Russell King <linux@arm.linux.org.uk> +M: Russell King <linux@armlinux.org.uk> S: Maintained F: drivers/input/serio/ambakmi.* F: include/linux/amba/kmi.h ARM PRIMECELL MMCI PL180/1 DRIVER -M: Russell King <linux@arm.linux.org.uk> +M: Russell King <linux@armlinux.org.uk> S: Maintained F: drivers/mmc/host/mmci.* F: include/linux/amba/mmci.h ARM PRIMECELL UART PL010 AND PL011 DRIVERS -M: Russell King <linux@arm.linux.org.uk> +M: Russell King <linux@armlinux.org.uk> S: Maintained F: drivers/tty/serial/amba-pl01*.c F: include/linux/amba/serial.h ARM PRIMECELL BUS SUPPORT -M: Russell King <linux@arm.linux.org.uk> +M: Russell King <linux@armlinux.org.uk> S: Maintained F: drivers/amba/ F: include/linux/amba/bus.h @@ -1036,7 +1036,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/CLKDEV SUPPORT -M: Russell King <linux@arm.linux.org.uk> +M: Russell King <linux@armlinux.org.uk> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/include/asm/clkdev.h @@ -1093,9 +1093,9 @@ F: arch/arm/boot/dts/cx92755* N: digicolor ARM/EBSA110 MACHINE SUPPORT -M: Russell King <linux@arm.linux.org.uk> +M: Russell King <linux@armlinux.org.uk> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -W: http://www.arm.linux.org.uk/ +W: http://www.armlinux.org.uk/ S: Maintained F: arch/arm/mach-ebsa110/ F: drivers/net/ethernet/amd/am79c961a.* @@ -1124,9 +1124,9 @@ T: git git://git.berlios.de/gemini-board F: arch/arm/mm/*-fa* ARM/FOOTBRIDGE ARCHITECTURE -M: Russell King <linux@arm.linux.org.uk> +M: Russell King <linux@armlinux.org.uk> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -W: http://www.arm.linux.org.uk/ +W: http://www.armlinux.org.uk/ S: Maintained F: arch/arm/include/asm/hardware/dec21285.h F: arch/arm/mach-footbridge/ @@ -1457,7 +1457,7 @@ S: Maintained ARM/PT DIGITAL BOARD PORT M: Stefan Eletzhofer <stefan.eletzhofer@eletztrick.de> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -W: http://www.arm.linux.org.uk/ +W: http://www.armlinux.org.uk/ S: Maintained ARM/QUALCOMM SUPPORT @@ -1493,9 +1493,9 @@ S: Supported F: arch/arm64/boot/dts/renesas/ ARM/RISCPC ARCHITECTURE -M: Russell King <linux@arm.linux.org.uk> +M: Russell King <linux@armlinux.org.uk> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -W: http://www.arm.linux.org.uk/ +W: http://www.armlinux.org.uk/ S: Maintained F: arch/arm/include/asm/hardware/entry-macro-iomd.S F: arch/arm/include/asm/hardware/ioc.h @@ -1773,9 +1773,9 @@ F: drivers/clk/versatile/clk-vexpress-osc.c F: 
drivers/clocksource/versatile.c ARM/VFP SUPPORT -M: Russell King <linux@arm.linux.org.uk> +M: Russell King <linux@armlinux.org.uk> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -W: http://www.arm.linux.org.uk/ +W: http://www.armlinux.org.uk/ S: Maintained F: arch/arm/vfp/ @@ -2924,7 +2924,7 @@ F: mm/cleancache.c F: include/linux/cleancache.h CLK API -M: Russell King <linux@arm.linux.org.uk> +M: Russell King <linux@armlinux.org.uk> L: linux-clk@vger.kernel.org S: Maintained F: include/linux/clk.h @@ -3358,9 +3358,9 @@ S: Supported F: drivers/net/ethernet/stmicro/stmmac/ CYBERPRO FB DRIVER -M: Russell King <linux@arm.linux.org.uk> +M: Russell King <linux@armlinux.org.uk> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -W: http://www.arm.linux.org.uk/ +W: http://www.armlinux.org.uk/ S: Maintained F: drivers/video/fbdev/cyber2000fb.* @@ -3885,7 +3885,7 @@ F: Documentation/devicetree/bindings/display/st,stih4xx.txt DRM DRIVERS FOR VIVANTE GPU IP M: Lucas Stach <l.stach@pengutronix.de> -R: Russell King <linux+etnaviv@arm.linux.org.uk> +R: Russell King <linux+etnaviv@armlinux.org.uk> R: Christian Gmeiner <christian.gmeiner@gmail.com> L: dri-devel@lists.freedesktop.org S: Maintained @@ -4227,8 +4227,8 @@ F: Documentation/efi-stub.txt F: arch/ia64/kernel/efi.c F: arch/x86/boot/compressed/eboot.[ch] F: arch/x86/include/asm/efi.h -F: arch/x86/platform/efi/* -F: drivers/firmware/efi/* +F: arch/x86/platform/efi/ +F: drivers/firmware/efi/ F: include/linux/efi*.h EFI VARIABLE FILESYSTEM @@ -6902,7 +6902,7 @@ L: linux-man@vger.kernel.org S: Maintained MARVELL ARMADA DRM SUPPORT -M: Russell King <rmk+kernel@arm.linux.org.uk> +M: Russell King <rmk+kernel@armlinux.org.uk> S: Maintained F: drivers/gpu/drm/armada/ @@ -7902,7 +7902,7 @@ S: Supported F: drivers/nfc/nxp-nci NXP TDA998X DRM DRIVER -M: Russell King <rmk+kernel@arm.linux.org.uk> +M: Russell King <rmk+kernel@armlinux.org.uk> S: Supported F: drivers/gpu/drm/i2c/tda998x_drv.c F: include/drm/i2c/tda998x.h @@ -7975,7 +7975,7 @@ F: arch/arm/*omap*/*pm* F: drivers/cpufreq/omap-cpufreq.c OMAP POWERDOMAIN SOC ADAPTATION LAYER SUPPORT -M: Rajendra Nayak <rnayak@ti.com> +M: Rajendra Nayak <rnayak@codeaurora.org> M: Paul Walmsley <paul@pwsan.com> L: linux-omap@vger.kernel.org S: Maintained @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 6 SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = -rc7 NAME = Charred Weasel # *DOCUMENTATION* diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index ec4791ea6911..a8767430df7d 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -58,6 +58,9 @@ config GENERIC_CSUM config RWSEM_GENERIC_SPINLOCK def_bool y +config ARCH_DISCONTIGMEM_ENABLE + def_bool y + config ARCH_FLATMEM_ENABLE def_bool y @@ -347,6 +350,15 @@ config ARC_HUGEPAGE_16M endchoice +config NODES_SHIFT + int "Maximum NUMA Nodes (as a power of 2)" + default "1" if !DISCONTIGMEM + default "2" if DISCONTIGMEM + depends on NEED_MULTIPLE_NODES + ---help--- + Accessing memory beyond 1GB (with or w/o PAE) requires 2 memory + zones. + if ISA_ARCOMPACT config ARC_COMPACT_IRQ_LEVELS @@ -455,6 +467,7 @@ config LINUX_LINK_BASE config HIGHMEM bool "High Memory Support" + select DISCONTIGMEM help With ARC 2G:2G address split, only upper 2G is directly addressable by kernel. 
Enable this to potentially allow access to rest of 2G and PAE diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h index 17f85c9c73cf..c22b181e8206 100644 --- a/arch/arc/include/asm/io.h +++ b/arch/arc/include/asm/io.h @@ -13,6 +13,15 @@ #include <asm/byteorder.h> #include <asm/page.h> +#ifdef CONFIG_ISA_ARCV2 +#include <asm/barrier.h> +#define __iormb() rmb() +#define __iowmb() wmb() +#else +#define __iormb() do { } while (0) +#define __iowmb() do { } while (0) +#endif + extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size); extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size, unsigned long flags); @@ -31,6 +40,15 @@ extern void iounmap(const void __iomem *addr); #define ioremap_wc(phy, sz) ioremap(phy, sz) #define ioremap_wt(phy, sz) ioremap(phy, sz) +/* + * io{read,write}{16,32}be() macros + */ +#define ioread16be(p) ({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; }) +#define ioread32be(p) ({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; }) + +#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); }) +#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); }) + /* Change struct page to physical address */ #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) @@ -108,15 +126,6 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr) } -#ifdef CONFIG_ISA_ARCV2 -#include <asm/barrier.h> -#define __iormb() rmb() -#define __iowmb() wmb() -#else -#define __iormb() do { } while (0) -#define __iowmb() do { } while (0) -#endif - /* * MMIO can also get buffered/optimized in micro-arch, so barriers needed * Based on ARM model for the typical use case diff --git a/arch/arc/include/asm/mmzone.h b/arch/arc/include/asm/mmzone.h new file mode 100644 index 000000000000..8e97136413d9 --- /dev/null +++ b/arch/arc/include/asm/mmzone.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _ASM_ARC_MMZONE_H +#define _ASM_ARC_MMZONE_H + +#ifdef CONFIG_DISCONTIGMEM + +extern struct pglist_data node_data[]; +#define NODE_DATA(nid) (&node_data[nid]) + +static inline int pfn_to_nid(unsigned long pfn) +{ + int is_end_low = 1; + + if (IS_ENABLED(CONFIG_ARC_HAS_PAE40)) + is_end_low = pfn <= virt_to_pfn(0xFFFFFFFFUL); + + /* + * node 0: lowmem: 0x8000_0000 to 0xFFFF_FFFF + * node 1: HIGHMEM w/o PAE40: 0x0 to 0x7FFF_FFFF + * HIGHMEM with PAE40: 0x1_0000_0000 to ... + */ + if (pfn >= ARCH_PFN_OFFSET && is_end_low) + return 0; + + return 1; +} + +static inline int pfn_valid(unsigned long pfn) +{ + int nid = pfn_to_nid(pfn); + + return (pfn <= node_end_pfn(nid)); +} +#endif /* CONFIG_DISCONTIGMEM */ + +#endif diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h index 36da89e2c853..0d53854884d0 100644 --- a/arch/arc/include/asm/page.h +++ b/arch/arc/include/asm/page.h @@ -72,11 +72,20 @@ typedef unsigned long pgprot_t; typedef pte_t * pgtable_t; +/* + * Use virt_to_pfn with caution: + * If used in pte or paddr related macros, it could cause truncation + * in PAE40 builds + * As a rule of thumb, only use it in helpers starting with virt_ + * You have been warned ! 
+ */ #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) #define ARCH_PFN_OFFSET virt_to_pfn(CONFIG_LINUX_LINK_BASE) +#ifdef CONFIG_FLATMEM #define pfn_valid(pfn) (((pfn) - ARCH_PFN_OFFSET) < max_mapnr) +#endif /* * __pa, __va, virt_to_page (ALERT: deprecated, don't use them) @@ -85,12 +94,10 @@ typedef pte_t * pgtable_t; * virt here means link-address/program-address as embedded in object code. * And for ARC, link-addr = physical address */ -#define __pa(vaddr) ((unsigned long)vaddr) +#define __pa(vaddr) ((unsigned long)(vaddr)) #define __va(paddr) ((void *)((unsigned long)(paddr))) -#define virt_to_page(kaddr) \ - (mem_map + virt_to_pfn((kaddr) - CONFIG_LINUX_LINK_BASE)) - +#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) /* Default Permissions for stack/heaps pages (Non Executable) */ diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index 7d6c93e63adf..10d4b8b8e545 100644 --- a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h @@ -278,14 +278,13 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep) #define pmd_present(x) (pmd_val(x)) #define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0) -#define pte_page(pte) \ - (mem_map + virt_to_pfn(pte_val(pte) - CONFIG_LINUX_LINK_BASE)) - +#define pte_page(pte) pfn_to_page(pte_pfn(pte)) #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) -#define pte_pfn(pte) virt_to_pfn(pte_val(pte)) -#define pfn_pte(pfn, prot) (__pte(((pte_t)(pfn) << PAGE_SHIFT) | \ - pgprot_val(prot))) -#define __pte_index(addr) (virt_to_pfn(addr) & (PTRS_PER_PTE - 1)) +#define pfn_pte(pfn, prot) (__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) + +/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/ +#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) +#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) /* * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system) diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index 5487d0b97400..8be930394750 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -30,11 +30,16 @@ static const unsigned long low_mem_start = CONFIG_LINUX_LINK_BASE; static unsigned long low_mem_sz; #ifdef CONFIG_HIGHMEM -static unsigned long min_high_pfn; +static unsigned long min_high_pfn, max_high_pfn; static u64 high_mem_start; static u64 high_mem_sz; #endif +#ifdef CONFIG_DISCONTIGMEM +struct pglist_data node_data[MAX_NUMNODES] __read_mostly; +EXPORT_SYMBOL(node_data); +#endif + /* User can over-ride above with "mem=nnn[KkMm]" in cmdline */ static int __init setup_mem_sz(char *str) { @@ -109,13 +114,11 @@ void __init setup_arch_memory(void) /* Last usable page of low mem */ max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz); -#ifdef CONFIG_HIGHMEM - min_high_pfn = PFN_DOWN(high_mem_start); - max_pfn = PFN_DOWN(high_mem_start + high_mem_sz); +#ifdef CONFIG_FLATMEM + /* pfn_valid() uses this */ + max_mapnr = max_low_pfn - min_low_pfn; #endif - max_mapnr = max_pfn - min_low_pfn; - /*------------- bootmem allocator setup -----------------------*/ /* @@ -129,7 +132,7 @@ void __init setup_arch_memory(void) * the crash */ - memblock_add(low_mem_start, low_mem_sz); + memblock_add_node(low_mem_start, low_mem_sz, 0); memblock_reserve(low_mem_start, __pa(_end) - low_mem_start); #ifdef CONFIG_BLK_DEV_INITRD @@ -149,13 +152,6 @@ void __init setup_arch_memory(void) zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn; zones_holes[ZONE_NORMAL] = 0; -#ifdef 
CONFIG_HIGHMEM - zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn; - - /* This handles the peripheral address space hole */ - zones_holes[ZONE_HIGHMEM] = min_high_pfn - max_low_pfn; -#endif - /* * We can't use the helper free_area_init(zones[]) because it uses * PAGE_OFFSET to compute the @min_low_pfn which would be wrong @@ -168,6 +164,34 @@ void __init setup_arch_memory(void) zones_holes); /* holes */ #ifdef CONFIG_HIGHMEM + /* + * Populate a new node with highmem + * + * On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based) + * than addresses in normal ala low memory (0x8000_0000 based). + * Even with PAE, the huge peripheral space hole would waste a lot of + * mem with single mem_map[]. This warrants a mem_map per region design. + * Thus HIGHMEM on ARC is imlemented with DISCONTIGMEM. + * + * DISCONTIGMEM in turns requires multiple nodes. node 0 above is + * populated with normal memory zone while node 1 only has highmem + */ + node_set_online(1); + + min_high_pfn = PFN_DOWN(high_mem_start); + max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz); + + zones_size[ZONE_NORMAL] = 0; + zones_holes[ZONE_NORMAL] = 0; + + zones_size[ZONE_HIGHMEM] = max_high_pfn - min_high_pfn; + zones_holes[ZONE_HIGHMEM] = 0; + + free_area_init_node(1, /* node-id */ + zones_size, /* num pages per zone */ + min_high_pfn, /* first pfn of node */ + zones_holes); /* holes */ + high_memory = (void *)(min_high_pfn << PAGE_SHIFT); kmap_init(); #endif @@ -185,7 +209,7 @@ void __init mem_init(void) unsigned long tmp; reset_all_zones_managed_pages(); - for (tmp = min_high_pfn; tmp < max_pfn; tmp++) + for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++) free_highmem_page(pfn_to_page(tmp)); #endif diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index b3c26a96a726..d9e2d9c6e999 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -329,6 +329,7 @@ regulator-name = "V28"; regulator-min-microvolt = <2800000>; regulator-max-microvolt = <2800000>; + regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ regulator-always-on; /* due to battery cover sensor */ }; @@ -336,30 +337,35 @@ regulator-name = "VCSI"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ }; &vaux3 { regulator-name = "VMMC2_30"; regulator-min-microvolt = <2800000>; regulator-max-microvolt = <3000000>; + regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ }; &vaux4 { regulator-name = "VCAM_ANA_28"; regulator-min-microvolt = <2800000>; regulator-max-microvolt = <2800000>; + regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ }; &vmmc1 { regulator-name = "VMMC1"; regulator-min-microvolt = <1850000>; regulator-max-microvolt = <3150000>; + regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ }; &vmmc2 { regulator-name = "V28_A"; regulator-min-microvolt = <2800000>; regulator-max-microvolt = <3000000>; + regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ regulator-always-on; /* due VIO leak to AIC34 VDDs */ }; @@ -367,6 +373,7 @@ regulator-name = "VPLL"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ regulator-always-on; }; @@ -374,6 +381,7 @@ regulator-name = "VSDI_CSI"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ regulator-always-on; }; @@ -381,6 +389,7 @@ regulator-name = "VMMC2_IO_18"; regulator-min-microvolt = 
<1800000>; regulator-max-microvolt = <1800000>; + regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ }; &vio { diff --git a/arch/arm/boot/dts/omap34xx.dtsi b/arch/arm/boot/dts/omap34xx.dtsi index 387dc31822fe..96f8ce7bd2af 100644 --- a/arch/arm/boot/dts/omap34xx.dtsi +++ b/arch/arm/boot/dts/omap34xx.dtsi @@ -46,7 +46,7 @@ 0x480bd800 0x017c>; interrupts = <24>; iommus = <&mmu_isp>; - syscon = <&scm_conf 0xdc>; + syscon = <&scm_conf 0x6c>; ti,phy-type = <OMAP3ISP_PHY_TYPE_COMPLEX_IO>; #clock-cells = <1>; ports { diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi index 902657d6713b..914bf4c47404 100644 --- a/arch/arm/boot/dts/omap5-board-common.dtsi +++ b/arch/arm/boot/dts/omap5-board-common.dtsi @@ -472,7 +472,7 @@ ldo1_reg: ldo1 { /* VDDAPHY_CAM: vdda_csiport */ regulator-name = "ldo1"; - regulator-min-microvolt = <1500000>; + regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; }; @@ -498,7 +498,7 @@ ldo4_reg: ldo4 { /* VDDAPHY_DISP: vdda_dsiport/hdmi */ regulator-name = "ldo4"; - regulator-min-microvolt = <1500000>; + regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; }; diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts index ecc591dc0778..4d87d9c6c86d 100644 --- a/arch/arm/boot/dts/omap5-cm-t54.dts +++ b/arch/arm/boot/dts/omap5-cm-t54.dts @@ -513,7 +513,7 @@ ldo1_reg: ldo1 { /* VDDAPHY_CAM: vdda_csiport */ regulator-name = "ldo1"; - regulator-min-microvolt = <1500000>; + regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; }; @@ -537,7 +537,7 @@ ldo4_reg: ldo4 { /* VDDAPHY_DISP: vdda_dsiport/hdmi */ regulator-name = "ldo4"; - regulator-min-microvolt = <1500000>; + regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; }; diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index 38805ebbe2ba..120b6b80cd39 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi @@ -269,7 +269,7 @@ omap5_pmx_wkup: pinmux@c840 { compatible = "ti,omap5-padconf", "pinctrl-single"; - reg = <0xc840 0x0038>; + reg = <0xc840 0x003c>; #address-cells = <1>; #size-cells = <0>; #interrupt-cells = <1>; diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi index 65d0e8d98259..04f541bffbdd 100644 --- a/arch/arm/boot/dts/qcom-apq8064.dtsi +++ b/arch/arm/boot/dts/qcom-apq8064.dtsi @@ -666,7 +666,7 @@ }; sata0: sata@29000000 { - compatible = "generic-ahci"; + compatible = "qcom,apq8064-ahci", "generic-ahci"; status = "disabled"; reg = <0x29000000 0x180>; interrupts = <GIC_SPI 209 IRQ_TYPE_NONE>; @@ -688,6 +688,7 @@ phys = <&sata_phy0>; phy-names = "sata-phy"; + ports-implemented = <0x1>; }; /* Temporary fixed regulator */ diff --git a/arch/arm/boot/dts/sun8i-q8-common.dtsi b/arch/arm/boot/dts/sun8i-q8-common.dtsi index 9d2b7e2f5975..346a49d805a7 100644 --- a/arch/arm/boot/dts/sun8i-q8-common.dtsi +++ b/arch/arm/boot/dts/sun8i-q8-common.dtsi @@ -125,8 +125,6 @@ }; ®_dc1sw { - regulator-min-microvolt = <3000000>; - regulator-max-microvolt = <3000000>; regulator-name = "vcc-lcd"; }; diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h index fc8ba1663601..99d9f630d6b6 100644 --- a/arch/arm/include/asm/domain.h +++ b/arch/arm/include/asm/domain.h @@ -84,6 +84,7 @@ #ifndef __ASSEMBLY__ +#ifdef CONFIG_CPU_CP15_MMU static inline unsigned int get_domain(void) { unsigned int domain; @@ -103,6 +104,16 @@ static inline void set_domain(unsigned val) : : "r" (val) : 
"memory"); isb(); } +#else +static inline unsigned int get_domain(void) +{ + return 0; +} + +static inline void set_domain(unsigned val) +{ +} +#endif #ifdef CONFIG_CPU_USE_DOMAINS #define modify_domain(dom,type) \ diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 9b8c5a113434..fb1a69eb49c1 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -236,7 +236,7 @@ ENTRY(__setup_mpu) mov r0, #CONFIG_VECTORS_BASE @ Cover from VECTORS_BASE ldr r5,=(MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL) /* Writing N to bits 5:1 (RSR_SZ) --> region size 2^N+1 */ - mov r6, #(((PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN) + mov r6, #(((2 * PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN) setup_region r0, r5, r6, MPU_DATA_SIDE @ VECTORS_BASE, PL0 NA, enabled beq 3f @ Memory-map not unified diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 58dbd5c439df..d6d4191e68f2 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -1004,7 +1004,7 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap) kvm_pfn_t pfn = *pfnp; gfn_t gfn = *ipap >> PAGE_SHIFT; - if (PageTransCompound(pfn_to_page(pfn))) { + if (PageTransCompoundMap(pfn_to_page(pfn))) { unsigned long mask; /* * The address we faulted on is backed by a transparent huge diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c index d97c588550ad..bc4e63fa9808 100644 --- a/arch/arm/mach-davinci/board-mityomapl138.c +++ b/arch/arm/mach-davinci/board-mityomapl138.c @@ -121,6 +121,11 @@ static void read_factory_config(struct nvmem_device *nvmem, void *context) const char *partnum = NULL; struct davinci_soc_info *soc_info = &davinci_soc_info; + if (!IS_BUILTIN(CONFIG_NVMEM)) { + pr_warn("Factory Config not available without CONFIG_NVMEM\n"); + goto bad_config; + } + ret = nvmem_device_read(nvmem, 0, sizeof(factory_config), &factory_config); if (ret != sizeof(struct factory_config)) { diff --git a/arch/arm/mach-davinci/common.c b/arch/arm/mach-davinci/common.c index f55ef2ef2f92..742133b7266a 100644 --- a/arch/arm/mach-davinci/common.c +++ b/arch/arm/mach-davinci/common.c @@ -33,6 +33,11 @@ void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context) char *mac_addr = davinci_soc_info.emac_pdata->mac_addr; off_t offset = (off_t)context; + if (!IS_BUILTIN(CONFIG_NVMEM)) { + pr_warn("Cannot read MAC addr from EEPROM without CONFIG_NVMEM\n"); + return; + } + /* Read MAC addr from EEPROM */ if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN) pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr); diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c index 7c21760f590f..875a2bab64f6 100644 --- a/arch/arm/mach-exynos/pm_domains.c +++ b/arch/arm/mach-exynos/pm_domains.c @@ -92,7 +92,7 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on) if (IS_ERR(pd->clk[i])) break; - if (IS_ERR(pd->clk[i])) + if (IS_ERR(pd->pclk[i])) continue; /* Skip on first power up */ if (clk_set_parent(pd->clk[i], pd->pclk[i])) pr_err("%s: error setting parent to clock%d\n", diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S index 5d94b7a2fb10..c160fa3007e9 100644 --- a/arch/arm/mach-socfpga/headsmp.S +++ b/arch/arm/mach-socfpga/headsmp.S @@ -13,6 +13,7 @@ #include <asm/assembler.h> .arch armv7-a + .arm ENTRY(secondary_trampoline) /* CPU1 will always fetch from 0x0 when it is brought out of reset. 
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 1dd10936d68d..d5805e4bf2fc 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -87,7 +87,6 @@ static unsigned long irbar_read(void) /* MPU initialisation functions */ void __init sanity_check_meminfo_mpu(void) { - int i; phys_addr_t phys_offset = PHYS_OFFSET; phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size; struct memblock_region *reg; @@ -110,11 +109,13 @@ void __init sanity_check_meminfo_mpu(void) } else { /* * memblock auto merges contiguous blocks, remove - * all blocks afterwards + * all blocks afterwards in one go (we can't remove + * blocks separately while iterating) */ pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n", - &mem_start, ®->base); - memblock_remove(reg->base, reg->size); + &mem_end, ®->base); + memblock_remove(reg->base, 0 - reg->base); + break; } } @@ -144,7 +145,7 @@ void __init sanity_check_meminfo_mpu(void) pr_warn("Truncating memory from %pa to %pa (MPU region constraints)", &specified_mem_size, &aligned_region_size); memblock_remove(mem_start + aligned_region_size, - specified_mem_size - aligned_round_size); + specified_mem_size - aligned_region_size); mem_end = mem_start + aligned_region_size; } @@ -261,7 +262,7 @@ void __init mpu_setup(void) return; region_err = mpu_setup_region(MPU_RAM_REGION, PHYS_OFFSET, - ilog2(meminfo.bank[0].size), + ilog2(memblock.memory.regions[0].size), MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL); if (region_err) { panic("MPU region initialization failure! %d", region_err); @@ -285,7 +286,7 @@ void __init arm_mm_memblock_reserve(void) * some architectures which the DRAM is the exception vector to trap, * alloc_page breaks with error, although it is not NULL, but "0." */ - memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE); + memblock_reserve(CONFIG_VECTORS_BASE, 2 * PAGE_SIZE); #else /* ifndef CONFIG_CPU_V7M */ /* * There is no dedicated vector page on V7-M. 
So nothing needs to be diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi index a7315ebe3883..706d2426024f 100644 --- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi @@ -120,7 +120,6 @@ compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <0>; - status = "disabled"; }; soc { diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index c976ebfe2269..57b4836b7ecd 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -344,7 +344,7 @@ tracesys_next: #endif cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */ - comiclr,>>= __NR_Linux_syscalls, %r20, %r0 + comiclr,>> __NR_Linux_syscalls, %r20, %r0 b,n .Ltracesys_nosys LDREGX %r20(%r19), %r19 diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h index e4396a7d0f7c..4afe66aa1400 100644 --- a/arch/powerpc/include/asm/word-at-a-time.h +++ b/arch/powerpc/include/asm/word-at-a-time.h @@ -82,7 +82,7 @@ static inline unsigned long create_zero_mask(unsigned long bits) "andc %1,%1,%2\n\t" "popcntd %0,%1" : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask) - : "r" (bits)); + : "b" (bits)); return leading_zero_bits; } diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c index 40625ca7a190..6011a573dd64 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c @@ -474,6 +474,7 @@ static __init int _init_perf_amd_iommu( static struct perf_amd_iommu __perf_iommu = { .pmu = { + .task_ctx_nr = perf_invalid_context, .event_init = perf_iommu_event_init, .add = perf_iommu_add, .del = perf_iommu_del, diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index aff79884e17d..a6fd4dbcf820 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3637,6 +3637,8 @@ __init int intel_pmu_init(void) pr_cont("Knights Landing events, "); break; + case 142: /* 14nm Kabylake Mobile */ + case 158: /* 14nm Kabylake Desktop */ case 78: /* 14nm Skylake Mobile */ case 94: /* 14nm Skylake Desktop */ case 85: /* 14nm Skylake Server */ diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 8f4942e2bcbb..d7ce96a7daca 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -891,9 +891,7 @@ void __init uv_system_init(void) } pr_info("UV: Found %s hub\n", hub); - /* We now only need to map the MMRs on UV1 */ - if (is_uv1_hub()) - map_low_mmrs(); + map_low_mmrs(); m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR ); m_val = m_n_config.s.m_skt; diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c index b285d4e8c68e..5da924bbf0a0 100644 --- a/arch/x86/kernel/sysfb_efi.c +++ b/arch/x86/kernel/sysfb_efi.c @@ -106,14 +106,24 @@ static int __init efifb_set_system(const struct dmi_system_id *id) continue; for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { resource_size_t start, end; + unsigned long flags; + + flags = pci_resource_flags(dev, i); + if (!(flags & IORESOURCE_MEM)) + continue; + + if (flags & IORESOURCE_UNSET) + continue; + + if (pci_resource_len(dev, i) == 0) + continue; start = pci_resource_start(dev, i); - if (start == 0) - break; end = pci_resource_end(dev, i); if (screen_info.lfb_base >= start && screen_info.lfb_base < end) { found_bar = 1; + break; } } } diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c index 92ae6acac8a7..6aa0f4d9eea6 100644 --- a/arch/x86/kernel/tsc_msr.c +++ 
b/arch/x86/kernel/tsc_msr.c @@ -92,7 +92,7 @@ unsigned long try_msr_calibrate_tsc(void) if (freq_desc_tables[cpu_index].msr_plat) { rdmsr(MSR_PLATFORM_INFO, lo, hi); - ratio = (lo >> 8) & 0x1f; + ratio = (lo >> 8) & 0xff; } else { rdmsr(MSR_IA32_PERF_STATUS, lo, hi); ratio = (hi >> 8) & 0x1f; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 1ff4dbb73fb7..b6f50e8b0a39 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2823,7 +2823,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, */ if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL && - PageTransCompound(pfn_to_page(pfn)) && + PageTransCompoundMap(pfn_to_page(pfn)) && !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) { unsigned long mask; /* @@ -4785,7 +4785,7 @@ restart: */ if (sp->role.direct && !kvm_is_reserved_pfn(pfn) && - PageTransCompound(pfn_to_page(pfn))) { + PageTransCompoundMap(pfn_to_page(pfn))) { drop_spte(kvm, sptep); need_tlb_flush = 1; goto restart; diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c index a2433817c987..6a2f5691b1ab 100644 --- a/arch/x86/platform/efi/efi-bgrt.c +++ b/arch/x86/platform/efi/efi-bgrt.c @@ -43,40 +43,40 @@ void __init efi_bgrt_init(void) return; if (bgrt_tab->header.length < sizeof(*bgrt_tab)) { - pr_err("Ignoring BGRT: invalid length %u (expected %zu)\n", + pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n", bgrt_tab->header.length, sizeof(*bgrt_tab)); return; } if (bgrt_tab->version != 1) { - pr_err("Ignoring BGRT: invalid version %u (expected 1)\n", + pr_notice("Ignoring BGRT: invalid version %u (expected 1)\n", bgrt_tab->version); return; } if (bgrt_tab->status & 0xfe) { - pr_err("Ignoring BGRT: reserved status bits are non-zero %u\n", + pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n", bgrt_tab->status); return; } if (bgrt_tab->image_type != 0) { - pr_err("Ignoring BGRT: invalid image type %u (expected 0)\n", + pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n", bgrt_tab->image_type); return; } if (!bgrt_tab->image_address) { - pr_err("Ignoring BGRT: null image address\n"); + pr_notice("Ignoring BGRT: null image address\n"); return; } image = memremap(bgrt_tab->image_address, sizeof(bmp_header), MEMREMAP_WB); if (!image) { - pr_err("Ignoring BGRT: failed to map image header memory\n"); + pr_notice("Ignoring BGRT: failed to map image header memory\n"); return; } memcpy(&bmp_header, image, sizeof(bmp_header)); memunmap(image); if (bmp_header.id != 0x4d42) { - pr_err("Ignoring BGRT: Incorrect BMP magic number 0x%x (expected 0x4d42)\n", + pr_notice("Ignoring BGRT: Incorrect BMP magic number 0x%x (expected 0x4d42)\n", bmp_header.id); return; } @@ -84,14 +84,14 @@ void __init efi_bgrt_init(void) bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN); if (!bgrt_image) { - pr_err("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n", + pr_notice("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n", bgrt_image_size); return; } image = memremap(bgrt_tab->image_address, bmp_header.size, MEMREMAP_WB); if (!image) { - pr_err("Ignoring BGRT: failed to map image memory\n"); + pr_notice("Ignoring BGRT: failed to map image memory\n"); kfree(bgrt_image); bgrt_image = NULL; return; diff --git a/crypto/Kconfig b/crypto/Kconfig index 93a1fdc1feee..1d33beb6a1ae 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -96,6 +96,7 @@ config CRYPTO_AKCIPHER config CRYPTO_RSA tristate "RSA algorithm" select 
CRYPTO_AKCIPHER + select CRYPTO_MANAGER select MPILIB select ASN1 help diff --git a/crypto/ahash.c b/crypto/ahash.c index 5fc1f172963d..3887a98abcc3 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -69,8 +69,9 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk) struct scatterlist *sg; sg = walk->sg; - walk->pg = sg_page(sg); walk->offset = sg->offset; + walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); + walk->offset = offset_in_page(walk->offset); walk->entrylen = sg->length; if (walk->entrylen > walk->total) diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index 1982310e6d83..da198b864107 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -428,6 +428,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node, obj_desc->method.mutex->mutex. original_sync_level = obj_desc->method.mutex->mutex.sync_level; + + obj_desc->method.mutex->mutex.thread_id = + acpi_os_get_thread_id(); } } diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index d0f35e63640b..63cc9dbe4f3b 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -287,8 +287,11 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, offset); rc = -ENXIO; } - } else + } else { rc = 0; + if (cmd_rc) + *cmd_rc = xlat_status(buf, cmd); + } out: ACPI_FREE(out_obj); diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 5083f85efea7..cfa936a32513 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -202,6 +202,14 @@ config SATA_FSL If unsure, say N. +config SATA_AHCI_SEATTLE + tristate "AMD Seattle 6.0Gbps AHCI SATA host controller support" + depends on ARCH_SEATTLE + help + This option enables support for AMD Seattle SATA host controller. + + If unsure, say N + config SATA_INIC162X tristate "Initio 162x SATA support (Very Experimental)" depends on PCI diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index 18579521464e..0b2afb7e5f35 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_ATA) += libata.o # non-SFF interface obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o obj-$(CONFIG_SATA_ACARD_AHCI) += acard-ahci.o libahci.o +obj-$(CONFIG_SATA_AHCI_SEATTLE) += ahci_seattle.o libahci.o libahci_platform.o obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o libahci_platform.o obj-$(CONFIG_SATA_FSL) += sata_fsl.o obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c index 40442332bfa7..62a04c8fb5c9 100644 --- a/drivers/ata/ahci_platform.c +++ b/drivers/ata/ahci_platform.c @@ -51,6 +51,9 @@ static int ahci_probe(struct platform_device *pdev) if (rc) return rc; + of_property_read_u32(dev->of_node, + "ports-implemented", &hpriv->force_port_map); + if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci")) hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ; diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c new file mode 100644 index 000000000000..6e702ab57220 --- /dev/null +++ b/drivers/ata/ahci_seattle.c @@ -0,0 +1,210 @@ +/* + * AMD Seattle AHCI SATA driver + * + * Copyright (c) 2015, Advanced Micro Devices + * Author: Brijesh Singh <brijesh.singh@amd.com> + * + * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pm.h> +#include <linux/device.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/libata.h> +#include <linux/ahci_platform.h> +#include <linux/acpi.h> +#include <linux/pci_ids.h> +#include "ahci.h" + +/* SGPIO Control Register definition + * + * Bit Type Description + * 31 RW OD7.2 (activity) + * 30 RW OD7.1 (locate) + * 29 RW OD7.0 (fault) + * 28...8 RW OD6.2...OD0.0 (3bits per port, 1 bit per LED) + * 7 RO SGPIO feature flag + * 6:4 RO Reserved + * 3:0 RO Number of ports (0 means no port supported) + */ +#define ACTIVITY_BIT_POS(x) (8 + (3 * x)) +#define LOCATE_BIT_POS(x) (ACTIVITY_BIT_POS(x) + 1) +#define FAULT_BIT_POS(x) (LOCATE_BIT_POS(x) + 1) + +#define ACTIVITY_MASK 0x00010000 +#define LOCATE_MASK 0x00080000 +#define FAULT_MASK 0x00400000 + +#define DRV_NAME "ahci-seattle" + +static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state, + ssize_t size); + +struct seattle_plat_data { + void __iomem *sgpio_ctrl; +}; + +static struct ata_port_operations ahci_port_ops = { + .inherits = &ahci_ops, +}; + +static const struct ata_port_info ahci_port_info = { + .flags = AHCI_FLAG_COMMON, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_port_ops, +}; + +static struct ata_port_operations ahci_seattle_ops = { + .inherits = &ahci_ops, + .transmit_led_message = seattle_transmit_led_message, +}; + +static const struct ata_port_info ahci_port_seattle_info = { + .flags = AHCI_FLAG_COMMON | ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY, + .link_flags = ATA_LFLAG_SW_ACTIVITY, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_seattle_ops, +}; + +static struct scsi_host_template ahci_platform_sht = { + AHCI_SHT(DRV_NAME), +}; + +static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state, + ssize_t size) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + struct ahci_port_priv *pp = ap->private_data; + struct seattle_plat_data *plat_data = hpriv->plat_data; + unsigned long flags; + int pmp; + struct ahci_em_priv *emp; + u32 val; + + /* get the slot number from the message */ + pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8; + if (pmp >= EM_MAX_SLOTS) + return -EINVAL; + emp = &pp->em_priv[pmp]; + + val = ioread32(plat_data->sgpio_ctrl); + if (state & ACTIVITY_MASK) + val |= 1 << ACTIVITY_BIT_POS((ap->port_no)); + else + val &= ~(1 << ACTIVITY_BIT_POS((ap->port_no))); + + if (state & LOCATE_MASK) + val |= 1 << LOCATE_BIT_POS((ap->port_no)); + else + val &= ~(1 << LOCATE_BIT_POS((ap->port_no))); + + if (state & FAULT_MASK) + val |= 1 << FAULT_BIT_POS((ap->port_no)); + else + val &= ~(1 << FAULT_BIT_POS((ap->port_no))); + + iowrite32(val, plat_data->sgpio_ctrl); + + spin_lock_irqsave(ap->lock, flags); + + /* save off new led state for port/slot */ + emp->led_state = state; + + spin_unlock_irqrestore(ap->lock, flags); + + return size; +} + +static const struct ata_port_info *ahci_seattle_get_port_info( + struct platform_device *pdev, struct ahci_host_priv *hpriv) +{ + struct device *dev = &pdev->dev; + struct seattle_plat_data *plat_data; + u32 val; + + plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL); + if (IS_ERR(plat_data)) + return &ahci_port_info; + + 
plat_data->sgpio_ctrl = devm_ioremap_resource(dev, + platform_get_resource(pdev, IORESOURCE_MEM, 1)); + if (IS_ERR(plat_data->sgpio_ctrl)) + return &ahci_port_info; + + val = ioread32(plat_data->sgpio_ctrl); + + if (!(val & 0xf)) + return &ahci_port_info; + + hpriv->em_loc = 0; + hpriv->em_buf_sz = 4; + hpriv->em_msg_type = EM_MSG_TYPE_LED; + hpriv->plat_data = plat_data; + + dev_info(dev, "SGPIO LED control is enabled.\n"); + return &ahci_port_seattle_info; +} + +static int ahci_seattle_probe(struct platform_device *pdev) +{ + int rc; + struct ahci_host_priv *hpriv; + + hpriv = ahci_platform_get_resources(pdev); + if (IS_ERR(hpriv)) + return PTR_ERR(hpriv); + + rc = ahci_platform_enable_resources(hpriv); + if (rc) + return rc; + + rc = ahci_platform_init_host(pdev, hpriv, + ahci_seattle_get_port_info(pdev, hpriv), + &ahci_platform_sht); + if (rc) + goto disable_resources; + + return 0; +disable_resources: + ahci_platform_disable_resources(hpriv); + return rc; +} + +static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend, + ahci_platform_resume); + +static const struct acpi_device_id ahci_acpi_match[] = { + { "AMDI0600", 0 }, + {} +}; +MODULE_DEVICE_TABLE(acpi, ahci_acpi_match); + +static struct platform_driver ahci_seattle_driver = { + .probe = ahci_seattle_probe, + .remove = ata_platform_remove_one, + .driver = { + .name = DRV_NAME, + .acpi_match_table = ahci_acpi_match, + .pm = &ahci_pm_ops, + }, +}; +module_platform_driver(ahci_seattle_driver); + +MODULE_DESCRIPTION("Seattle AHCI SATA platform driver"); +MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 3982054060b8..a5d7c1c2a05e 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -507,6 +507,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv) dev_info(dev, "forcing port_map 0x%x -> 0x%x\n", port_map, hpriv->force_port_map); port_map = hpriv->force_port_map; + hpriv->saved_port_map = port_map; } if (hpriv->mask_port_map) { diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index 433b60092972..d8f4cc22856c 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -259,9 +259,6 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) reg = opp_table->regulator; if (IS_ERR(reg)) { /* Regulator may not be required for device */ - if (reg) - dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__, - PTR_ERR(reg)); rcu_read_unlock(); return 0; } diff --git a/drivers/base/property.c b/drivers/base/property.c index 9b1a65debd49..7f692accdc90 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -21,7 +21,7 @@ static inline bool is_pset_node(struct fwnode_handle *fwnode) { - return fwnode && fwnode->type == FWNODE_PDATA; + return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_PDATA; } static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index e93405f0eac4..c4acfc5273b3 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1557,21 +1557,25 @@ void cpufreq_suspend(void) if (!cpufreq_driver) return; - if (!has_target()) + if (!has_target() && !cpufreq_driver->suspend) goto suspend; pr_debug("%s: Suspending Governors\n", __func__); for_each_active_policy(policy) { - down_write(&policy->rwsem); - ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP); - up_write(&policy->rwsem); + if 
(has_target()) { + down_write(&policy->rwsem); + ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP); + up_write(&policy->rwsem); - if (ret) - pr_err("%s: Failed to stop governor for policy: %p\n", - __func__, policy); - else if (cpufreq_driver->suspend - && cpufreq_driver->suspend(policy)) + if (ret) { + pr_err("%s: Failed to stop governor for policy: %p\n", + __func__, policy); + continue; + } + } + + if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy)) pr_err("%s: Failed to suspend driver: %p\n", __func__, policy); } @@ -1596,7 +1600,7 @@ void cpufreq_resume(void) cpufreq_suspended = false; - if (!has_target()) + if (!has_target() && !cpufreq_driver->resume) return; pr_debug("%s: Resuming Governors\n", __func__); @@ -1605,7 +1609,7 @@ void cpufreq_resume(void) if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) { pr_err("%s: Failed to resume driver: %p\n", __func__, policy); - } else { + } else if (has_target()) { down_write(&policy->rwsem); ret = cpufreq_start_governor(policy); up_write(&policy->rwsem); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index f502d5b90c25..b230ebaae66c 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -453,6 +453,14 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask) } } +static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy) +{ + if (hwp_active) + intel_pstate_hwp_set(policy->cpus); + + return 0; +} + static void intel_pstate_hwp_set_online_cpus(void) { get_online_cpus(); @@ -1062,8 +1070,9 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) static inline int32_t get_avg_frequency(struct cpudata *cpu) { - return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf * - cpu->pstate.scaling, cpu->sample.mperf); + return fp_toint(mul_fp(cpu->sample.core_pct_busy, + int_tofp(cpu->pstate.max_pstate_physical * + cpu->pstate.scaling / 100))); } static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) @@ -1106,8 +1115,6 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) int32_t core_busy, max_pstate, current_pstate, sample_ratio; u64 duration_ns; - intel_pstate_calc_busy(cpu); - /* * core_busy is the ratio of actual performance to max * max_pstate is the max non turbo pstate available @@ -1191,8 +1198,11 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time, if ((s64)delta_ns >= pid_params.sample_rate_ns) { bool sample_taken = intel_pstate_sample(cpu, time); - if (sample_taken && !hwp_active) - intel_pstate_adjust_busy_pstate(cpu); + if (sample_taken) { + intel_pstate_calc_busy(cpu); + if (!hwp_active) + intel_pstate_adjust_busy_pstate(cpu); + } } } @@ -1346,8 +1356,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) out: intel_pstate_set_update_util_hook(policy->cpu); - if (hwp_active) - intel_pstate_hwp_set(policy->cpus); + intel_pstate_hwp_set_policy(policy); return 0; } @@ -1411,6 +1420,7 @@ static struct cpufreq_driver intel_pstate_driver = { .flags = CPUFREQ_CONST_LOOPS, .verify = intel_pstate_verify_policy, .setpolicy = intel_pstate_set_policy, + .resume = intel_pstate_hwp_set_policy, .get = intel_pstate_get, .init = intel_pstate_cpu_init, .stop_cpu = intel_pstate_stop_cpu, diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c index a9c659f58974..04042038ec4b 100644 --- a/drivers/cpufreq/sti-cpufreq.c +++ b/drivers/cpufreq/sti-cpufreq.c @@ -259,6 +259,10 @@ static int sti_cpufreq_init(void) { int ret; + if 
((!of_machine_is_compatible("st,stih407")) && + (!of_machine_is_compatible("st,stih410"))) + return -ENODEV; + ddata.cpu = get_cpu_device(0); if (!ddata.cpu) { dev_err(ddata.cpu, "Failed to get device for CPU0\n"); diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c index 545069d5fdfb..e342565e8715 100644 --- a/drivers/cpuidle/cpuidle-arm.c +++ b/drivers/cpuidle/cpuidle-arm.c @@ -50,7 +50,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev, * call the CPU ops suspend protocol with idle index as a * parameter. */ - arm_cpuidle_suspend(idx); + ret = arm_cpuidle_suspend(idx); cpu_pm_exit(); } diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index 0e82ce3c383e..976b01e58afb 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -236,6 +236,8 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, uint32_t vf_mask); void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); +int adf_init_pf_wq(void); +void adf_exit_pf_wq(void); #else static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs) { @@ -253,5 +255,14 @@ static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) { } + +static inline int adf_init_pf_wq(void) +{ + return 0; +} + +static inline void adf_exit_pf_wq(void) +{ +} #endif #endif diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index 5c897e6e7994..3c3f948290ca 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c @@ -462,12 +462,17 @@ static int __init adf_register_ctl_device_driver(void) if (adf_init_aer()) goto err_aer; + if (adf_init_pf_wq()) + goto err_pf_wq; + if (qat_crypto_register()) goto err_crypto_register; return 0; err_crypto_register: + adf_exit_pf_wq(); +err_pf_wq: adf_exit_aer(); err_aer: adf_chr_drv_destroy(); @@ -480,6 +485,7 @@ static void __exit adf_unregister_ctl_device_driver(void) { adf_chr_drv_destroy(); adf_exit_aer(); + adf_exit_pf_wq(); qat_crypto_unregister(); adf_clean_vf_map(false); mutex_destroy(&adf_ctl_lock); diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c index 1117a8b58280..38a0415e767d 100644 --- a/drivers/crypto/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/qat/qat_common/adf_sriov.c @@ -119,11 +119,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev) int i; u32 reg; - /* Workqueue for PF2VF responses */ - pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq"); - if (!pf2vf_resp_wq) - return -ENOMEM; - for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs; i++, vf_info++) { /* This ptr will be populated when VFs will be created */ @@ -216,11 +211,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev) kfree(accel_dev->pf.vf_info); accel_dev->pf.vf_info = NULL; - - if (pf2vf_resp_wq) { - destroy_workqueue(pf2vf_resp_wq); - pf2vf_resp_wq = NULL; - } } EXPORT_SYMBOL_GPL(adf_disable_sriov); @@ -304,3 +294,19 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs) return numvfs; } EXPORT_SYMBOL_GPL(adf_sriov_configure); + +int __init adf_init_pf_wq(void) +{ + /* Workqueue for PF2VF responses */ + pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq"); + + return !pf2vf_resp_wq ? 
-ENOMEM : 0; +} + +void adf_exit_pf_wq(void) +{ + if (pf2vf_resp_wq) { + destroy_workqueue(pf2vf_resp_wq); + pf2vf_resp_wq = NULL; + } +} diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index 815c4a5cae54..1b95475b6aef 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -77,7 +77,7 @@ static inline u16 fw_cfg_sel_endianness(u16 key) static inline void fw_cfg_read_blob(u16 key, void *buf, loff_t pos, size_t count) { - u32 glk; + u32 glk = -1U; acpi_status status; /* If we have ACPI, ensure mutual exclusion against any potential diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index e557fc1f17c8..7ecea83ce453 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -541,6 +541,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata, if (!metadata_size) { if (bo->metadata_size) { kfree(bo->metadata); + bo->metadata = NULL; bo->metadata_size = 0; } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index 1e0bba29e167..1cd6de575305 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -298,6 +298,10 @@ bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder, && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; + /* vertical FP must be at least 1 */ + if (mode->crtc_vsync_start == mode->crtc_vdisplay) + adjusted_mode->crtc_vsync_start++; + /* get the native mode for scaling */ if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) amdgpu_panel_mode_fixup(encoder, adjusted_mode); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 30798cbc6fc0..6d2fb3f4ac62 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -792,7 +792,7 @@ static int i915_drm_resume(struct drm_device *dev) static int i915_drm_resume_early(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int ret = 0; + int ret; /* * We have a resume ordering issue with the snd-hda driver also @@ -803,6 +803,36 @@ static int i915_drm_resume_early(struct drm_device *dev) * FIXME: This should be solved with a special hdmi sink device or * similar so that power domains can be employed. */ + + /* + * Note that we need to set the power state explicitly, since we + * powered off the device during freeze and the PCI core won't power + * it back up for us during thaw. Powering off the device during + * freeze is not a hard requirement though, and during the + * suspend/resume phases the PCI core makes sure we get here with the + * device powered on. So in case we change our freeze logic and keep + * the device powered we can also remove the following set power state + * call. + */ + ret = pci_set_power_state(dev->pdev, PCI_D0); + if (ret) { + DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret); + goto out; + } + + /* + * Note that pci_enable_device() first enables any parent bridge + * device and only then sets the power state for this device. The + * bridge enabling is a nop though, since bridge devices are resumed + * first. The order of enabling power and enabling the device is + * imposed by the PCI core as described above, so here we preserve the + * same order for the freeze/thaw phases. 
+ * + * TODO: eventually we should remove pci_disable_device() / + * pci_enable_enable_device() from suspend/resume. Due to how they + * depend on the device enable refcount we can't anyway depend on them + * disabling/enabling the device. + */ if (pci_enable_device(dev->pdev)) { ret = -EIO; goto out; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f76cbf3e5d1e..fffdac801d3b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2907,7 +2907,14 @@ enum skl_disp_power_wells { #define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998) #define BXT_RP_STATE_CAP _MMIO(0x138170) -#define INTERVAL_1_28_US(us) (((us) * 100) >> 7) +/* + * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS + * 8300) freezing up around GPU hangs. Looks as if even + * scheduling/timer interrupts start misbehaving if the RPS + * EI/thresholds are "bad", leading to a very sluggish or even + * frozen machine. + */ +#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25) #define INTERVAL_1_33_US(us) (((us) * 3) >> 2) #define INTERVAL_0_833_US(us) (((us) * 6) / 5) #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \ diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 62de9f4bce09..3b57bf06abe8 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -443,9 +443,17 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) } else if (IS_BROADWELL(dev_priv)) { ddi_translations_fdi = bdw_ddi_translations_fdi; ddi_translations_dp = bdw_ddi_translations_dp; - ddi_translations_edp = bdw_ddi_translations_edp; + + if (dev_priv->edp_low_vswing) { + ddi_translations_edp = bdw_ddi_translations_edp; + n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); + } else { + ddi_translations_edp = bdw_ddi_translations_dp; + n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); + } + ddi_translations_hdmi = bdw_ddi_translations_hdmi; - n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); + n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); hdmi_default_entry = 7; @@ -3201,12 +3209,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder, intel_ddi_clock_get(encoder, pipe_config); } -static void intel_ddi_destroy(struct drm_encoder *encoder) -{ - /* HDMI has nothing special to destroy, so we can go with this. 
*/ - intel_dp_encoder_destroy(encoder); -} - static bool intel_ddi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { @@ -3225,7 +3227,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder, } static const struct drm_encoder_funcs intel_ddi_funcs = { - .destroy = intel_ddi_destroy, + .reset = intel_dp_encoder_reset, + .destroy = intel_dp_encoder_destroy, }; static struct intel_connector * @@ -3324,6 +3327,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) intel_encoder->post_disable = intel_ddi_post_disable; intel_encoder->get_hw_state = intel_ddi_get_hw_state; intel_encoder->get_config = intel_ddi_get_config; + intel_encoder->suspend = intel_dp_encoder_suspend; intel_dig_port->port = port; intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 6e0d8283daa6..182f84937345 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13351,6 +13351,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, } for_each_crtc_in_state(state, crtc, crtc_state, i) { + if (state->legacy_cursor_update) + continue; + ret = intel_crtc_wait_for_pending_flips(crtc); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f069a82deb57..412a34c39522 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -4898,7 +4898,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) kfree(intel_dig_port); } -static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) +void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); @@ -4940,7 +4940,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) edp_panel_vdd_schedule_off(intel_dp); } -static void intel_dp_encoder_reset(struct drm_encoder *encoder) +void intel_dp_encoder_reset(struct drm_encoder *encoder) { struct intel_dp *intel_dp; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 4c027d69fac9..7d3af3a72abe 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1238,6 +1238,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp, void intel_dp_start_link_train(struct intel_dp *intel_dp); void intel_dp_stop_link_train(struct intel_dp *intel_dp); void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); +void intel_dp_encoder_reset(struct drm_encoder *encoder); +void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder); void intel_dp_encoder_destroy(struct drm_encoder *encoder); int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc); bool intel_dp_compute_config(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index a0d8daed2470..1ab6f687f640 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1415,8 +1415,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) hdmi_to_dig_port(intel_hdmi)); } - if (!live_status) - DRM_DEBUG_KMS("Live status not up!"); + if (!live_status) { + DRM_DEBUG_KMS("HDMI live status down\n"); + /* + * Live status register is not reliable on all intel platforms. + * So consider live_status only for certain platforms, for + * others, read EDID to determine presence of sink. 
+ */ + if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv)) + live_status = true; + } intel_hdmi_unset_edid(connector); diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index edd05cdb0cd8..587cae4e73c9 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -310,6 +310,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; + /* vertical FP must be at least 1 */ + if (mode->crtc_vsync_start == mode->crtc_vdisplay) + adjusted_mode->crtc_vsync_start++; + /* get the native mode for scaling */ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { radeon_panel_mode_fixup(encoder, adjusted_mode); diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index e00db3f510dd..abb98c77bad2 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -1068,7 +1068,6 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) goto err_register; } - pdev->dev.of_node = of_node; pdev->dev.parent = dev; ret = platform_device_add_data(pdev, ®->pdata, @@ -1079,6 +1078,12 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) platform_device_put(pdev); goto err_register; } + + /* + * Set of_node only after calling platform_device_add. Otherwise + * the platform:imx-ipuv3-crtc modalias won't be used. + */ + pdev->dev.of_node = of_node; } return 0; diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 5613e2b5cff7..a40a73a7b71d 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -103,15 +103,29 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi) * there is room for the producer to send the pending packet. */ -static bool hv_need_to_signal_on_read(u32 prev_write_sz, - struct hv_ring_buffer_info *rbi) +static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi) { u32 cur_write_sz; u32 r_size; - u32 write_loc = rbi->ring_buffer->write_index; + u32 write_loc; u32 read_loc = rbi->ring_buffer->read_index; - u32 pending_sz = rbi->ring_buffer->pending_send_sz; + u32 pending_sz; + /* + * Issue a full memory barrier before making the signaling decision. + * Here is the reason for having this barrier: + * If the reading of the pend_sz (in this function) + * were to be reordered and read before we commit the new read + * index (in the calling function) we could + * have a problem. If the host were to set the pending_sz after we + * have sampled pending_sz and go to sleep before we commit the + * read index, we could miss sending the interrupt. Issue a full + * memory barrier to address this. + */ + mb(); + + pending_sz = rbi->ring_buffer->pending_send_sz; + write_loc = rbi->ring_buffer->write_index; /* If the other end is not blocked on write don't bother. */ if (pending_sz == 0) return false; @@ -120,7 +134,7 @@ static bool hv_need_to_signal_on_read(u32 prev_write_sz, cur_write_sz = write_loc >= read_loc ? 
r_size - (write_loc - read_loc) : read_loc - write_loc; - if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz)) + if (cur_write_sz >= pending_sz) return true; return false; @@ -455,7 +469,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, /* Update the read index */ hv_set_next_read_location(inring_info, next_read_location); - *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info); + *signal = hv_need_to_signal_on_read(inring_info); return ret; } diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c index dbee13ad33a3..2e154cb51685 100644 --- a/drivers/iio/adc/at91-sama5d2_adc.c +++ b/drivers/iio/adc/at91-sama5d2_adc.c @@ -451,6 +451,8 @@ static int at91_adc_probe(struct platform_device *pdev) if (ret) goto vref_disable; + platform_set_drvdata(pdev, indio_dev); + ret = iio_device_register(indio_dev); if (ret < 0) goto per_clk_disable_unprepare; diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c index f581256d9d4c..5ee4e0dc093e 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c @@ -104,6 +104,19 @@ static int inv_mpu6050_deselect_bypass(struct i2c_adapter *adap, return 0; } +static const char *inv_mpu_match_acpi_device(struct device *dev, int *chip_id) +{ + const struct acpi_device_id *id; + + id = acpi_match_device(dev->driver->acpi_match_table, dev); + if (!id) + return NULL; + + *chip_id = (int)id->driver_data; + + return dev_name(dev); +} + /** * inv_mpu_probe() - probe function. * @client: i2c client. @@ -115,14 +128,25 @@ static int inv_mpu_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct inv_mpu6050_state *st; - int result; - const char *name = id ? id->name : NULL; + int result, chip_type; struct regmap *regmap; + const char *name; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) return -EOPNOTSUPP; + if (id) { + chip_type = (int)id->driver_data; + name = id->name; + } else if (ACPI_HANDLE(&client->dev)) { + name = inv_mpu_match_acpi_device(&client->dev, &chip_type); + if (!name) + return -ENODEV; + } else { + return -ENOSYS; + } + regmap = devm_regmap_init_i2c(client, &inv_mpu_regmap_config); if (IS_ERR(regmap)) { dev_err(&client->dev, "Failed to register i2c regmap %d\n", @@ -131,7 +155,7 @@ static int inv_mpu_probe(struct i2c_client *client, } result = inv_mpu_core_probe(regmap, client->irq, name, - NULL, id->driver_data); + NULL, chip_type); if (result < 0) return result; diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c index dea6c4361de0..7bcb8d839f05 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c @@ -46,6 +46,7 @@ static int inv_mpu_probe(struct spi_device *spi) struct regmap *regmap; const struct spi_device_id *id = spi_get_device_id(spi); const char *name = id ? id->name : NULL; + const int chip_type = id ? 
id->driver_data : 0; regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config); if (IS_ERR(regmap)) { @@ -55,7 +56,7 @@ static int inv_mpu_probe(struct spi_device *spi) } return inv_mpu_core_probe(regmap, spi->irq, name, - inv_mpu_i2c_disable, id->driver_data); + inv_mpu_i2c_disable, chip_type); } static int inv_mpu_remove(struct spi_device *spi) diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c index 9c5c9ef3f1da..0e931a9a1669 100644 --- a/drivers/iio/magnetometer/ak8975.c +++ b/drivers/iio/magnetometer/ak8975.c @@ -462,6 +462,8 @@ static int ak8975_setup_irq(struct ak8975_data *data) int rc; int irq; + init_waitqueue_head(&data->data_ready_queue); + clear_bit(0, &data->flags); if (client->irq) irq = client->irq; else @@ -477,8 +479,6 @@ static int ak8975_setup_irq(struct ak8975_data *data) return rc; } - init_waitqueue_head(&data->data_ready_queue); - clear_bit(0, &data->flags); data->eoc_irq = irq; return rc; @@ -732,7 +732,7 @@ static int ak8975_probe(struct i2c_client *client, int eoc_gpio; int err; const char *name = NULL; - enum asahi_compass_chipset chipset; + enum asahi_compass_chipset chipset = AK_MAX_TYPE; /* Grab and set up the supplied GPIO. */ if (client->dev.platform_data) diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 80b6bedc172f..64b3d11dcf1e 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -612,6 +612,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, struct Scsi_Host *shost; struct iser_conn *iser_conn = NULL; struct ib_conn *ib_conn; + u32 max_fr_sectors; u16 max_cmds; shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0); @@ -632,7 +633,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, iser_conn = ep->dd_data; max_cmds = iser_conn->max_cmds; shost->sg_tablesize = iser_conn->scsi_sg_tablesize; - shost->max_sectors = iser_conn->scsi_max_sectors; mutex_lock(&iser_conn->state_mutex); if (iser_conn->state != ISER_CONN_UP) { @@ -657,8 +657,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, */ shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize, ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len); - shost->max_sectors = min_t(unsigned int, - 1024, (shost->sg_tablesize * PAGE_SIZE) >> 9); if (iscsi_host_add(shost, ib_conn->device->ib_device->dma_device)) { @@ -672,6 +670,15 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, goto free_host; } + /* + * FRs or FMRs can only map up to a (device) page per entry, but if the + * first entry is misaligned we'll end up using using two entries + * (head and tail) for a single page worth data, so we have to drop + * one segment from the calculation. 
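As a quick sanity check of the calculation that follows, with illustrative numbers (4 KiB pages, a fast-reg page-list length of 128 entries, 512-byte sectors): max_fr_sectors = ((128 - 1) * 4096) >> 9 = 1016 sectors, roughly 508 KiB per command, whereas the removed min_t() above computed min(1024, (128 * 4096) >> 9) = 1024 and so overstated what a misaligned first entry can actually cover.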
+ */ + max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9; + shost->max_sectors = min(iser_max_sectors, max_fr_sectors); + if (cmds_max > max_cmds) { iser_info("cmds_max changed from %u to %u\n", cmds_max, max_cmds); @@ -989,7 +996,6 @@ static struct scsi_host_template iscsi_iser_sht = { .queuecommand = iscsi_queuecommand, .change_queue_depth = scsi_change_queue_depth, .sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE, - .max_sectors = ISER_DEF_MAX_SECTORS, .cmd_per_lun = ISER_DEF_CMD_PER_LUN, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler= iscsi_eh_device_reset, diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c index 53e33fab3f7a..df3581f60628 100644 --- a/drivers/input/misc/twl6040-vibra.c +++ b/drivers/input/misc/twl6040-vibra.c @@ -181,6 +181,14 @@ static void vibra_play_work(struct work_struct *work) { struct vibra_info *info = container_of(work, struct vibra_info, play_work); + int ret; + + /* Do not allow effect, while the routing is set to use audio */ + ret = twl6040_get_vibralr_status(info->twl6040); + if (ret & TWL6040_VIBSEL) { + dev_info(info->dev, "Vibra is configured for audio\n"); + return; + } mutex_lock(&info->mutex); @@ -199,14 +207,6 @@ static int vibra_play(struct input_dev *input, void *data, struct ff_effect *effect) { struct vibra_info *info = input_get_drvdata(input); - int ret; - - /* Do not allow effect, while the routing is set to use audio */ - ret = twl6040_get_vibralr_status(info->twl6040); - if (ret & TWL6040_VIBSEL) { - dev_info(&input->dev, "Vibra is configured for audio\n"); - return -EBUSY; - } info->weak_speed = effect->u.rumble.weak_magnitude; info->strong_speed = effect->u.rumble.strong_magnitude; diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index 2160512e861a..5af7907d0af4 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -1093,6 +1093,19 @@ static int mxt_t6_command(struct mxt_data *data, u16 cmd_offset, return 0; } +static int mxt_acquire_irq(struct mxt_data *data) +{ + int error; + + enable_irq(data->irq); + + error = mxt_process_messages_until_invalid(data); + if (error) + return error; + + return 0; +} + static int mxt_soft_reset(struct mxt_data *data) { struct device *dev = &data->client->dev; @@ -1111,7 +1124,7 @@ static int mxt_soft_reset(struct mxt_data *data) /* Ignore CHG line for 100ms after reset */ msleep(100); - enable_irq(data->irq); + mxt_acquire_irq(data); ret = mxt_wait_for_completion(data, &data->reset_completion, MXT_RESET_TIMEOUT); @@ -1466,19 +1479,6 @@ release_mem: return ret; } -static int mxt_acquire_irq(struct mxt_data *data) -{ - int error; - - enable_irq(data->irq); - - error = mxt_process_messages_until_invalid(data); - if (error) - return error; - - return 0; -} - static int mxt_get_info(struct mxt_data *data) { struct i2c_client *client = data->client; diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c index 9bbadaaf6bc3..7b3845aa5983 100644 --- a/drivers/input/touchscreen/zforce_ts.c +++ b/drivers/input/touchscreen/zforce_ts.c @@ -370,8 +370,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload) point.coord_x = point.coord_y = 0; } - point.state = payload[9 * i + 5] & 0x03; - point.id = (payload[9 * i + 5] & 0xfc) >> 2; + point.state = payload[9 * i + 5] & 0x0f; + point.id = (payload[9 * i + 5] & 0xf0) >> 4; /* determine touch major, minor and orientation */ point.area_major = max(payload[9 * i + 6], diff 
--git a/drivers/media/media-device.c b/drivers/media/media-device.c index 6e43c95629ea..3cfd7af8c5ca 100644 --- a/drivers/media/media-device.c +++ b/drivers/media/media-device.c @@ -846,11 +846,11 @@ struct media_device *media_device_find_devres(struct device *dev) } EXPORT_SYMBOL_GPL(media_device_find_devres); +#if IS_ENABLED(CONFIG_PCI) void media_device_pci_init(struct media_device *mdev, struct pci_dev *pci_dev, const char *name) { -#ifdef CONFIG_PCI mdev->dev = &pci_dev->dev; if (name) @@ -866,16 +866,16 @@ void media_device_pci_init(struct media_device *mdev, mdev->driver_version = LINUX_VERSION_CODE; media_device_init(mdev); -#endif } EXPORT_SYMBOL_GPL(media_device_pci_init); +#endif +#if IS_ENABLED(CONFIG_USB) void __media_device_usb_init(struct media_device *mdev, struct usb_device *udev, const char *board_name, const char *driver_name) { -#ifdef CONFIG_USB mdev->dev = &udev->dev; if (driver_name) @@ -895,9 +895,9 @@ void __media_device_usb_init(struct media_device *mdev, mdev->driver_version = LINUX_VERSION_CODE; media_device_init(mdev); -#endif } EXPORT_SYMBOL_GPL(__media_device_usb_init); +#endif #endif /* CONFIG_MEDIA_CONTROLLER */ diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index feb521f28e14..4f494acd8150 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -1446,22 +1446,13 @@ static int fimc_md_probe(struct platform_device *pdev) platform_set_drvdata(pdev, fmd); - /* Protect the media graph while we're registering entities */ - mutex_lock(&fmd->media_dev.graph_mutex); - ret = fimc_md_register_platform_entities(fmd, dev->of_node); - if (ret) { - mutex_unlock(&fmd->media_dev.graph_mutex); + if (ret) goto err_clk; - } ret = fimc_md_register_sensor_entities(fmd); - if (ret) { - mutex_unlock(&fmd->media_dev.graph_mutex); + if (ret) goto err_m_ent; - } - - mutex_unlock(&fmd->media_dev.graph_mutex); ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode); if (ret) diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c index 0b44b9accf50..af237af204e2 100644 --- a/drivers/media/platform/s3c-camif/camif-core.c +++ b/drivers/media/platform/s3c-camif/camif-core.c @@ -493,21 +493,17 @@ static int s3c_camif_probe(struct platform_device *pdev) if (ret < 0) goto err_sens; - mutex_lock(&camif->media_dev.graph_mutex); - ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev); if (ret < 0) - goto err_unlock; + goto err_sens; ret = camif_register_video_nodes(camif); if (ret < 0) - goto err_unlock; + goto err_sens; ret = camif_create_media_links(camif); if (ret < 0) - goto err_unlock; - - mutex_unlock(&camif->media_dev.graph_mutex); + goto err_sens; ret = media_device_register(&camif->media_dev); if (ret < 0) @@ -516,8 +512,6 @@ static int s3c_camif_probe(struct platform_device *pdev) pm_runtime_put(dev); return 0; -err_unlock: - mutex_unlock(&camif->media_dev.graph_mutex); err_sens: v4l2_device_unregister(&camif->v4l2_dev); media_device_unregister(&camif->media_dev); diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c index e94c7fb6712a..88e45234d527 100644 --- a/drivers/misc/mic/vop/vop_vringh.c +++ b/drivers/misc/mic/vop/vop_vringh.c @@ -945,6 +945,11 @@ static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg) ret = -EFAULT; goto free_ret; } + /* Ensure desc has not changed between the two reads */ + if (memcmp(&dd, dd_config, sizeof(dd))) { + ret = -EINVAL; 
+ goto free_ret; + } mutex_lock(&vdev->vdev_mutex); mutex_lock(&vi->vop_mutex); ret = vop_virtio_add_device(vdev, dd_config); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 409152b21191..aa87049c353d 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1608,21 +1608,22 @@ static int xgene_enet_probe(struct platform_device *pdev) ret = xgene_enet_init_hw(pdata); if (ret) - goto err; + goto err_netdev; mac_ops = pdata->mac_ops; if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) { ret = xgene_enet_mdio_config(pdata); if (ret) - goto err; + goto err_netdev; } else { INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state); } xgene_enet_napi_add(pdata); return 0; -err: +err_netdev: unregister_netdev(ndev); +err: free_netdev(ndev); return ret; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index fd85b6dd4a6e..6a5a71710fa9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1439,6 +1439,10 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) if (!TX_CMP_VALID(txcmp, raw_cons)) break; + /* The valid test of the entry must be done first before + * reading any further. + */ + rmb(); if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { tx_pkts++; /* return full budget so NAPI will complete. */ @@ -4096,9 +4100,11 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp) } static int bnxt_cfg_rx_mode(struct bnxt *); +static bool bnxt_mc_list_updated(struct bnxt *, u32 *); static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; int rc = 0; if (irq_re_init) { @@ -4154,13 +4160,22 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); goto err_out; } - bp->vnic_info[0].uc_filter_count = 1; + vnic->uc_filter_count = 1; - bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; + vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp)) - bp->vnic_info[0].rx_mask |= - CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; + + if (bp->dev->flags & IFF_ALLMULTI) { + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + vnic->mc_list_count = 0; + } else { + u32 mask = 0; + + bnxt_mc_list_updated(bp, &mask); + vnic->rx_mask |= mask; + } rc = bnxt_cfg_rx_mode(bp); if (rc) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index bfa10c3da35f..c9f77c324535 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1521,9 +1521,15 @@ fec_enet_rx(struct net_device *ndev, int budget) struct fec_enet_private *fep = netdev_priv(ndev); for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) { - clear_bit(queue_id, &fep->work_rx); - pkt_received += fec_enet_rx_queue(ndev, + int ret; + + ret = fec_enet_rx_queue(ndev, budget - pkt_received, queue_id); + + if (ret < budget - pkt_received) + clear_bit(queue_id, &fep->work_rx); + + pkt_received += ret; } return pkt_received; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 8ef6875b6cf9..c1b3a9c8cf3b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -698,7 +698,7 @@ static 
int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS) return -1; - hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8)); + hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr)); csum_pseudo_hdr = csum_partial(&ipv6h->saddr, sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 559d11a443bc..f5c3b9465d8d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -14,7 +14,6 @@ config MLX5_CORE_EN bool "Mellanox Technologies ConnectX-4 Ethernet support" depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE select PTP_1588_CLOCK - select VXLAN if MLX5_CORE=y default n ---help--- Ethernet support in Mellanox Technologies ConnectX-4 NIC. @@ -32,3 +31,10 @@ config MLX5_CORE_EN_DCB This flag is depended on the kernel's DCB support. If unsure, set to Y + +config MLX5_CORE_EN_VXLAN + bool "VXLAN offloads Support" + default y + depends on MLX5_CORE_EN && VXLAN && !(MLX5_CORE=y && VXLAN=m) + ---help--- + Say Y here if you want to use VXLAN offloads in the driver. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index b531d4f3c00b..e4a5b37b90ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -6,6 +6,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \ en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \ - en_txrx.o en_clock.o vxlan.o en_tc.o en_arfs.o + en_txrx.o en_clock.o en_tc.o en_arfs.o +mlx5_core-$(CONFIG_MLX5_CORE_EN_VXLAN) += vxlan.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index bfa5daaaf5aa..7aea32e085b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -522,7 +522,12 @@ struct mlx5e_priv { struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; struct mlx5e_flow_steering fs; + struct mlx5e_flow_tables fts; + struct mlx5e_eth_addr_db eth_addr; + struct mlx5e_vlan_db vlan; +#ifdef CONFIG_MLX5_CORE_EN_VXLAN struct mlx5e_vxlan_db vxlan; +#endif struct mlx5e_params params; struct workqueue_struct *wq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 1c70e518b5c5..b60a1bc6f457 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2509,6 +2509,7 @@ static int mlx5e_get_vf_stats(struct net_device *dev, vf_stats); } +#if IS_ENABLED(CONFIG_MLX5_CORE_EN_VXLAN) static void mlx5e_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { @@ -2580,6 +2581,7 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb, return features; } +#endif static const struct net_device_ops mlx5e_netdev_ops_basic = { .ndo_open = mlx5e_open, @@ -2614,6 +2616,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, +#ifdef CONFIG_MLX5_CORE_EN_VXLAN .ndo_add_vxlan_port = mlx5e_add_vxlan_port, .ndo_del_vxlan_port = mlx5e_del_vxlan_port, .ndo_features_check = 
mlx5e_features_check, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h index 129f3527aa14..217ac530a514 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h @@ -48,14 +48,21 @@ struct mlx5e_vxlan_work { static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev) { - return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) && + return IS_ENABLED(CONFIG_MLX5_CORE_EN_VXLAN) && + (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) && mlx5_core_is_pf(mdev)); } +#ifdef CONFIG_MLX5_CORE_EN_VXLAN void mlx5e_vxlan_init(struct mlx5e_priv *priv); +void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv); +#else +static inline void mlx5e_vxlan_init(struct mlx5e_priv *priv) {} +static inline void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) {} +#endif + void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family, u16 port, int add); struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port); -void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv); #endif /* __MLX5_VXLAN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 79cdd81d55ab..4a7273771028 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2843,11 +2843,11 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, lag->ref_count++; return 0; +err_col_port_enable: + mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); err_col_port_add: if (!lag->ref_count) mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); -err_col_port_enable: - mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index fb9efb84f13b..3710f19ed6bb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -214,7 +214,15 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin, table_type, range, local_port, set); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); + if (err) + goto err_flood_bm_set; + else + goto buffer_out; +err_flood_bm_set: + mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin, + table_type, range, local_port, !set); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); buffer_out: kfree(sftr_pl); return err; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c index db80eb1c6d4f..2b10f1bcd151 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c @@ -1015,20 +1015,24 @@ static int netxen_get_flash_block(struct netxen_adapter *adapter, int base, { int i, v, addr; __le32 *ptr32; + int ret; addr = base; ptr32 = buf; for (i = 0; i < size / sizeof(u32); i++) { - if (netxen_rom_fast_read(adapter, addr, &v) == -1) - return -1; + ret = netxen_rom_fast_read(adapter, addr, &v); + if (ret) + return ret; + *ptr32 = cpu_to_le32(v); ptr32++; addr += sizeof(u32); } if ((char *)buf + size > (char *)ptr32) { __le32 local; - if (netxen_rom_fast_read(adapter, addr, &v) == -1) - return -1; + ret = netxen_rom_fast_read(adapter, addr, &v); + if (ret) + return ret; local = cpu_to_le32(v); memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32); } 
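The netxen_get_flash_block() hunk above stops collapsing every failure to -1 and instead returns whatever code netxen_rom_fast_read() reports. A minimal sketch of the same pattern, with copy_words() and read_word() as hypothetical stand-ins rather than real netxen symbols:

/* read_word() is assumed to return 0 on success or a negative errno. */
static int copy_words(void *dev, u32 addr, u32 *dst, int nwords)
{
        int i, ret;
        u32 v;

        for (i = 0; i < nwords; i++) {
                ret = read_word(dev, addr + i * 4, &v);
                if (ret)
                        return ret;     /* propagate the callee's code, not a bare -1 */
                dst[i] = v;
        }
        return 0;
}

Callers can then tell, say, -EIO apart from -EAGAIN instead of seeing an undifferentiated -1.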
@@ -1940,7 +1944,7 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter) if (adapter->phy_read && adapter->phy_read(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, - &autoneg) != 0) + &autoneg) == 0) adapter->link_autoneg = autoneg; } else goto link_down; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index cad37af1517d..7a0281a36c28 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -852,7 +852,8 @@ netxen_check_options(struct netxen_adapter *adapter) ptr32 = (__le32 *)&serial_num; offset = NX_FW_SERIAL_NUM_OFFSET; for (i = 0; i < 8; i++) { - if (netxen_rom_fast_read(adapter, offset, &val) == -1) { + err = netxen_rom_fast_read(adapter, offset, &val); + if (err) { dev_err(&pdev->dev, "error reading board info\n"); adapter->driver_mismatch = 1; return; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 82d85ccc9ed1..075faa52eb48 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -429,7 +429,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, u8 xmit_type; u16 idx; u16 hlen; - bool data_split; + bool data_split = false; /* Get tx-queue context and netdev index */ txq_index = skb_get_queue_mapping(skb); @@ -2094,8 +2094,6 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, edev->q_num_rx_buffers = NUM_RX_BDS_DEF; edev->q_num_tx_buffers = NUM_TX_BDS_DEF; - DP_INFO(edev, "Allocated netdev with 64 tx queues and 64 rx queues\n"); - SET_NETDEV_DEV(ndev, &pdev->dev); memset(&edev->stats, 0, sizeof(edev->stats)); @@ -2274,9 +2272,9 @@ static void qede_update_pf_params(struct qed_dev *cdev) { struct qed_pf_params pf_params; - /* 16 rx + 16 tx */ + /* 64 rx + 64 tx */ memset(&pf_params, 0, sizeof(struct qed_pf_params)); - pf_params.eth_pf_params.num_cons = 32; + pf_params.eth_pf_params.num_cons = 128; qed_ops->common->update_pf_params(cdev, &pf_params); } diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 9c40b88fabd5..a6dc11ce497f 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -495,8 +495,6 @@ static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb, int gh_len; int err = -ENOSYS; - udp_tunnel_gro_complete(skb, nhoff); - gh = (struct genevehdr *)(skb->data + nhoff); gh_len = geneve_hlen(gh); type = gh->proto_type; @@ -507,6 +505,9 @@ static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb, err = ptype->callbacks.gro_complete(skb, nhoff + gh_len); rcu_read_unlock(); + + skb_set_inner_mac_header(skb, nhoff + gh_len); + return err; } diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 3add2c4aac21..460740ccc238 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -85,7 +85,7 @@ struct gcm_iv { * @tfm: crypto struct, key storage */ struct macsec_key { - u64 id; + u8 id[MACSEC_KEYID_LEN]; struct crypto_aead *tfm; }; @@ -1530,7 +1530,8 @@ static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = { [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 }, [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 }, [MACSEC_SA_ATTR_PN] = { .type = NLA_U32 }, - [MACSEC_SA_ATTR_KEYID] = { .type = NLA_U64 }, + [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY, + .len = MACSEC_KEYID_LEN, }, [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY, .len = MACSEC_MAX_KEY_LEN, }, }; @@ -1577,6 +1578,9 @@ static bool validate_add_rxsa(struct nlattr **attrs) return false; } 
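The macsec change above carries the key identifier as a fixed-size NLA_BINARY attribute rather than a u64. Since an NLA_BINARY policy .len only bounds the maximum length, the exact size still has to be checked explicitly, which is what the validation added below does. A minimal sketch of that pattern with made-up names (EXAMPLE_ATTR_ID, EXAMPLE_ID_LEN, NUM_EXAMPLE_ATTR):

static const struct nla_policy example_policy[NUM_EXAMPLE_ATTR] = {
        [EXAMPLE_ATTR_ID] = { .type = NLA_BINARY, .len = EXAMPLE_ID_LEN },
};

static int example_copy_id(u8 id[EXAMPLE_ID_LEN], struct nlattr *attr)
{
        /* The policy only rejects over-long attributes; insist on the exact size. */
        if (nla_len(attr) != EXAMPLE_ID_LEN)
                return -EINVAL;

        nla_memcpy(id, attr, EXAMPLE_ID_LEN);
        return 0;
}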
+ if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN) + return false; + return true; } @@ -1642,7 +1646,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); - rx_sa->key.id = nla_get_u64(tb_sa[MACSEC_SA_ATTR_KEYID]); + nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEY], MACSEC_KEYID_LEN); rx_sa->sc = rx_sc; rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa); @@ -1723,6 +1727,9 @@ static bool validate_add_txsa(struct nlattr **attrs) return false; } + if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN) + return false; + return true; } @@ -1778,7 +1785,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) return -ENOMEM; } - tx_sa->key.id = nla_get_u64(tb_sa[MACSEC_SA_ATTR_KEYID]); + nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEY], MACSEC_KEYID_LEN); spin_lock_bh(&tx_sa->lock); tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); @@ -2365,9 +2372,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) || - nla_put_u64_64bit(skb, MACSEC_SA_ATTR_KEYID, - tx_sa->key.id, - MACSEC_SA_ATTR_PAD) || + nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) || nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) { nla_nest_cancel(skb, txsa_nest); nla_nest_cancel(skb, txsa_list); @@ -2469,9 +2474,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) || - nla_put_u64_64bit(skb, MACSEC_SA_ATTR_KEYID, - rx_sa->key.id, - MACSEC_SA_ATTR_PAD) || + nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) || nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) { nla_nest_cancel(skb, rxsa_nest); nla_nest_cancel(skb, rxsc_nest); diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 22b85b097cbc..bd6720962b1f 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -384,7 +384,7 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb) goto wake_up; } - kfree_skb(skb); + consume_skb(skb); while (segs) { struct sk_buff *nskb = segs->next; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 2668e528dee4..2f29d20aa08f 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -613,8 +613,9 @@ out: static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) { - udp_tunnel_gro_complete(skb, nhoff); - + /* Sets 'skb->inner_mac_header' since we are always called with + * 'skb->encapsulation' set. + */ return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); } diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index f798899338ed..5101f3ab4f29 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -397,10 +397,17 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) */ start += start_pad; npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K; - if (nd_pfn->mode == PFN_MODE_PMEM) - offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align) + if (nd_pfn->mode == PFN_MODE_PMEM) { + unsigned long memmap_size; + + /* + * vmemmap_populate_hugepages() allocates the memmap array in + * HPAGE_SIZE chunks. 
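With illustrative numbers for the sizing that follows (4 KiB data pages, 2 MiB huge pages, and the 64 bytes of memmap per page used in the expression below): a namespace with npfns = 25,600 needs 64 * 25,600 = 1,638,400 bytes of memmap, which ALIGN() rounds up to a full 2 MiB, so the reserved region covers the whole huge-page-sized chunks that vmemmap_populate_hugepages() will actually populate and the data offset stays clear of them.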
+ */ + memmap_size = ALIGN(64 * npfns, HPAGE_SIZE); + offset = ALIGN(start + SZ_8K + memmap_size, nd_pfn->align) - start; - else if (nd_pfn->mode == PFN_MODE_RAM) + } else if (nd_pfn->mode == PFN_MODE_RAM) offset = ALIGN(start + SZ_8K, nd_pfn->align) - start; else goto err; diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c index 8ba19bba3156..2bb3c5799ac4 100644 --- a/drivers/nvmem/mxs-ocotp.c +++ b/drivers/nvmem/mxs-ocotp.c @@ -94,7 +94,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size, if (ret) goto close_banks; - while (val_size) { + while (val_size >= reg_size) { if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) { /* fill up non-data register */ *buf = 0; @@ -103,7 +103,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size, } buf++; - val_size--; + val_size -= reg_size; offset += reg_size; } diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 96168b819044..e165b7ce29d7 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -126,7 +126,7 @@ struct rio_mport_mapping { struct list_head node; struct mport_dev *md; enum rio_mport_map_dir dir; - u32 rioid; + u16 rioid; u64 rio_addr; dma_addr_t phys_addr; /* for mmap */ void *virt_addr; /* kernel address, for dma_free_coherent */ @@ -137,7 +137,7 @@ struct rio_mport_mapping { struct rio_mport_dma_map { int valid; - uint64_t length; + u64 length; void *vaddr; dma_addr_t paddr; }; @@ -208,7 +208,7 @@ struct mport_cdev_priv { struct kfifo event_fifo; wait_queue_head_t event_rx_wait; spinlock_t fifo_lock; - unsigned int event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */ + u32 event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */ #ifdef CONFIG_RAPIDIO_DMA_ENGINE struct dma_chan *dmach; struct list_head async_list; @@ -276,7 +276,8 @@ static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg, return -EFAULT; if ((maint_io.offset % 4) || - (maint_io.length == 0) || (maint_io.length % 4)) + (maint_io.length == 0) || (maint_io.length % 4) || + (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ) return -EINVAL; buffer = vmalloc(maint_io.length); @@ -298,7 +299,8 @@ static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg, offset += 4; } - if (unlikely(copy_to_user(maint_io.buffer, buffer, maint_io.length))) + if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer, + buffer, maint_io.length))) ret = -EFAULT; out: vfree(buffer); @@ -319,7 +321,8 @@ static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg, return -EFAULT; if ((maint_io.offset % 4) || - (maint_io.length == 0) || (maint_io.length % 4)) + (maint_io.length == 0) || (maint_io.length % 4) || + (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ) return -EINVAL; buffer = vmalloc(maint_io.length); @@ -327,7 +330,8 @@ static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg, return -ENOMEM; length = maint_io.length; - if (unlikely(copy_from_user(buffer, maint_io.buffer, length))) { + if (unlikely(copy_from_user(buffer, + (void __user *)(uintptr_t)maint_io.buffer, length))) { ret = -EFAULT; goto out; } @@ -360,7 +364,7 @@ out: */ static int rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp, - u32 rioid, u64 raddr, u32 size, + u16 rioid, u64 raddr, u32 size, dma_addr_t *paddr) { struct rio_mport *mport = md->mport; @@ -369,7 +373,7 @@ rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp, 
rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size); - map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); + map = kzalloc(sizeof(*map), GFP_KERNEL); if (map == NULL) return -ENOMEM; @@ -394,7 +398,7 @@ err_map_outb: static int rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp, - u32 rioid, u64 raddr, u32 size, + u16 rioid, u64 raddr, u32 size, dma_addr_t *paddr) { struct rio_mport_mapping *map; @@ -433,7 +437,7 @@ static int rio_mport_obw_map(struct file *filp, void __user *arg) dma_addr_t paddr; int ret; - if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap)))) + if (unlikely(copy_from_user(&map, arg, sizeof(map)))) return -EFAULT; rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx", @@ -448,7 +452,7 @@ static int rio_mport_obw_map(struct file *filp, void __user *arg) map.handle = paddr; - if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) + if (unlikely(copy_to_user(arg, &map, sizeof(map)))) return -EFAULT; return 0; } @@ -469,7 +473,7 @@ static int rio_mport_obw_free(struct file *filp, void __user *arg) if (!md->mport->ops->unmap_outb) return -EPROTONOSUPPORT; - if (copy_from_user(&handle, arg, sizeof(u64))) + if (copy_from_user(&handle, arg, sizeof(handle))) return -EFAULT; rmcd_debug(OBW, "h=0x%llx", handle); @@ -498,9 +502,9 @@ static int rio_mport_obw_free(struct file *filp, void __user *arg) static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg) { struct mport_dev *md = priv->md; - uint16_t hdid; + u16 hdid; - if (copy_from_user(&hdid, arg, sizeof(uint16_t))) + if (copy_from_user(&hdid, arg, sizeof(hdid))) return -EFAULT; md->mport->host_deviceid = hdid; @@ -520,9 +524,9 @@ static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg) static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg) { struct mport_dev *md = priv->md; - uint32_t comptag; + u32 comptag; - if (copy_from_user(&comptag, arg, sizeof(uint32_t))) + if (copy_from_user(&comptag, arg, sizeof(comptag))) return -EFAULT; rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag); @@ -837,7 +841,7 @@ err_out: * @xfer: data transfer descriptor structure */ static int -rio_dma_transfer(struct file *filp, uint32_t transfer_mode, +rio_dma_transfer(struct file *filp, u32 transfer_mode, enum rio_transfer_sync sync, enum dma_data_direction dir, struct rio_transfer_io *xfer) { @@ -875,7 +879,7 @@ rio_dma_transfer(struct file *filp, uint32_t transfer_mode, unsigned long offset; long pinned; - offset = (unsigned long)xfer->loc_addr & ~PAGE_MASK; + offset = (unsigned long)(uintptr_t)xfer->loc_addr & ~PAGE_MASK; nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT; page_list = kmalloc_array(nr_pages, @@ -1015,19 +1019,20 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg) if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction)))) return -EFAULT; - if (transaction.count != 1) + if (transaction.count != 1) /* only single transfer for now */ return -EINVAL; if ((transaction.transfer_mode & priv->md->properties.transfer_mode) == 0) return -ENODEV; - transfer = vmalloc(transaction.count * sizeof(struct rio_transfer_io)); + transfer = vmalloc(transaction.count * sizeof(*transfer)); if (!transfer) return -ENOMEM; - if (unlikely(copy_from_user(transfer, transaction.block, - transaction.count * sizeof(struct rio_transfer_io)))) { + if (unlikely(copy_from_user(transfer, + (void __user *)(uintptr_t)transaction.block, + transaction.count * sizeof(*transfer)))) { ret = -EFAULT; goto 
out_free; } @@ -1038,8 +1043,9 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg) ret = rio_dma_transfer(filp, transaction.transfer_mode, transaction.sync, dir, &transfer[i]); - if (unlikely(copy_to_user(transaction.block, transfer, - transaction.count * sizeof(struct rio_transfer_io)))) + if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block, + transfer, + transaction.count * sizeof(*transfer)))) ret = -EFAULT; out_free: @@ -1129,11 +1135,11 @@ err_tmo: } static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp, - uint64_t size, struct rio_mport_mapping **mapping) + u64 size, struct rio_mport_mapping **mapping) { struct rio_mport_mapping *map; - map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); + map = kzalloc(sizeof(*map), GFP_KERNEL); if (map == NULL) return -ENOMEM; @@ -1165,7 +1171,7 @@ static int rio_mport_alloc_dma(struct file *filp, void __user *arg) struct rio_mport_mapping *mapping = NULL; int ret; - if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_dma_mem)))) + if (unlikely(copy_from_user(&map, arg, sizeof(map)))) return -EFAULT; ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping); @@ -1174,7 +1180,7 @@ static int rio_mport_alloc_dma(struct file *filp, void __user *arg) map.dma_handle = mapping->phys_addr; - if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_dma_mem)))) { + if (unlikely(copy_to_user(arg, &map, sizeof(map)))) { mutex_lock(&md->buf_mutex); kref_put(&mapping->ref, mport_release_mapping); mutex_unlock(&md->buf_mutex); @@ -1192,7 +1198,7 @@ static int rio_mport_free_dma(struct file *filp, void __user *arg) int ret = -EFAULT; struct rio_mport_mapping *map, *_map; - if (copy_from_user(&handle, arg, sizeof(u64))) + if (copy_from_user(&handle, arg, sizeof(handle))) return -EFAULT; rmcd_debug(EXIT, "filp=%p", filp); @@ -1242,14 +1248,18 @@ static int rio_mport_free_dma(struct file *filp, void __user *arg) static int rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp, - u64 raddr, u32 size, + u64 raddr, u64 size, struct rio_mport_mapping **mapping) { struct rio_mport *mport = md->mport; struct rio_mport_mapping *map; int ret; - map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); + /* rio_map_inb_region() accepts u32 size */ + if (size > 0xffffffff) + return -EINVAL; + + map = kzalloc(sizeof(*map), GFP_KERNEL); if (map == NULL) return -ENOMEM; @@ -1262,7 +1272,7 @@ rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp, if (raddr == RIO_MAP_ANY_ADDR) raddr = map->phys_addr; - ret = rio_map_inb_region(mport, map->phys_addr, raddr, size, 0); + ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0); if (ret < 0) goto err_map_inb; @@ -1288,7 +1298,7 @@ err_dma_alloc: static int rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp, - u64 raddr, u32 size, + u64 raddr, u64 size, struct rio_mport_mapping **mapping) { struct rio_mport_mapping *map; @@ -1331,7 +1341,7 @@ static int rio_mport_map_inbound(struct file *filp, void __user *arg) if (!md->mport->ops->map_inb) return -EPROTONOSUPPORT; - if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap)))) + if (unlikely(copy_from_user(&map, arg, sizeof(map)))) return -EFAULT; rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp); @@ -1344,7 +1354,7 @@ static int rio_mport_map_inbound(struct file *filp, void __user *arg) map.handle = mapping->phys_addr; map.rio_addr = mapping->rio_addr; - if (unlikely(copy_to_user(arg, &map, sizeof(struct 
rio_mmap)))) { + if (unlikely(copy_to_user(arg, &map, sizeof(map)))) { /* Delete mapping if it was created by this request */ if (ret == 0 && mapping->filp == filp) { mutex_lock(&md->buf_mutex); @@ -1375,7 +1385,7 @@ static int rio_mport_inbound_free(struct file *filp, void __user *arg) if (!md->mport->ops->unmap_inb) return -EPROTONOSUPPORT; - if (copy_from_user(&handle, arg, sizeof(u64))) + if (copy_from_user(&handle, arg, sizeof(handle))) return -EFAULT; mutex_lock(&md->buf_mutex); @@ -1401,7 +1411,7 @@ static int rio_mport_inbound_free(struct file *filp, void __user *arg) static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg) { struct mport_dev *md = priv->md; - uint32_t port_idx = md->mport->index; + u32 port_idx = md->mport->index; rmcd_debug(MPORT, "port_index=%d", port_idx); @@ -1451,7 +1461,7 @@ static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id, handled = 0; spin_lock(&data->db_lock); list_for_each_entry(db_filter, &data->doorbells, data_node) { - if (((db_filter->filter.rioid == 0xffffffff || + if (((db_filter->filter.rioid == RIO_INVALID_DESTID || db_filter->filter.rioid == src)) && info >= db_filter->filter.low && info <= db_filter->filter.high) { @@ -1525,6 +1535,9 @@ static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv, if (copy_from_user(&filter, arg, sizeof(filter))) return -EFAULT; + if (filter.low > filter.high) + return -EINVAL; + spin_lock_irqsave(&priv->md->db_lock, flags); list_for_each_entry(db_filter, &priv->db_filters, priv_node) { if (db_filter->filter.rioid == filter.rioid && @@ -1737,10 +1750,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, return -EEXIST; } - size = sizeof(struct rio_dev); + size = sizeof(*rdev); mport = md->mport; - destid = (u16)dev_info.destid; - hopcount = (u8)dev_info.hopcount; + destid = dev_info.destid; + hopcount = dev_info.hopcount; if (rio_mport_read_config_32(mport, destid, hopcount, RIO_PEF_CAR, &rval)) @@ -1872,8 +1885,8 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg) do { rdev = rio_get_comptag(dev_info.comptag, rdev); if (rdev && rdev->dev.parent == &mport->net->dev && - rdev->destid == (u16)dev_info.destid && - rdev->hopcount == (u8)dev_info.hopcount) + rdev->destid == dev_info.destid && + rdev->hopcount == dev_info.hopcount) break; } while (rdev); } @@ -2146,8 +2159,8 @@ static long mport_cdev_ioctl(struct file *filp, return maint_port_idx_get(data, (void __user *)arg); case RIO_MPORT_GET_PROPERTIES: md->properties.hdid = md->mport->host_deviceid; - if (copy_to_user((void __user *)arg, &(data->md->properties), - sizeof(data->md->properties))) + if (copy_to_user((void __user *)arg, &(md->properties), + sizeof(md->properties))) return -EFAULT; return 0; case RIO_ENABLE_DOORBELL_RANGE: @@ -2159,11 +2172,11 @@ static long mport_cdev_ioctl(struct file *filp, case RIO_DISABLE_PORTWRITE_RANGE: return rio_mport_remove_pw_filter(data, (void __user *)arg); case RIO_SET_EVENT_MASK: - data->event_mask = arg; + data->event_mask = (u32)arg; return 0; case RIO_GET_EVENT_MASK: if (copy_to_user((void __user *)arg, &data->event_mask, - sizeof(data->event_mask))) + sizeof(u32))) return -EFAULT; return 0; case RIO_MAP_OUTBOUND: @@ -2374,7 +2387,7 @@ static ssize_t mport_write(struct file *filp, const char __user *buf, return -EINVAL; ret = rio_mport_send_doorbell(mport, - (u16)event.u.doorbell.rioid, + event.u.doorbell.rioid, event.u.doorbell.payload); if (ret < 0) return ret; @@ -2421,7 +2434,7 @@ static struct mport_dev 
*mport_cdev_add(struct rio_mport *mport) struct mport_dev *md; struct rio_mport_attr attr; - md = kzalloc(sizeof(struct mport_dev), GFP_KERNEL); + md = kzalloc(sizeof(*md), GFP_KERNEL); if (!md) { rmcd_error("Unable allocate a device object"); return NULL; @@ -2470,7 +2483,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport) /* The transfer_mode property will be returned through mport query * interface */ -#ifdef CONFIG_PPC /* for now: only on Freescale's SoCs */ +#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */ md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED; #else md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER; diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c index 14718a9ffcfb..460c855be0d0 100644 --- a/drivers/usb/core/port.c +++ b/drivers/usb/core/port.c @@ -249,18 +249,12 @@ static int usb_port_runtime_suspend(struct device *dev) return retval; } - -static int usb_port_prepare(struct device *dev) -{ - return 1; -} #endif static const struct dev_pm_ops usb_port_pm_ops = { #ifdef CONFIG_PM .runtime_suspend = usb_port_runtime_suspend, .runtime_resume = usb_port_runtime_resume, - .prepare = usb_port_prepare, #endif }; diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index dcb85e3cd5a7..479187c32571 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -312,13 +312,7 @@ static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env) static int usb_dev_prepare(struct device *dev) { - struct usb_device *udev = to_usb_device(dev); - - /* Return 0 if the current wakeup setting is wrong, otherwise 1 */ - if (udev->do_remote_wakeup != device_may_wakeup(dev)) - return 0; - - return 1; + return 0; /* Implement eventually? */ } static void usb_dev_complete(struct device *dev) diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c index 5e5a8fa005f8..bc8889956d17 100644 --- a/drivers/usb/musb/jz4740.c +++ b/drivers/usb/musb/jz4740.c @@ -83,9 +83,9 @@ static int jz4740_musb_init(struct musb *musb) { usb_phy_generic_register(); musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); - if (!musb->xceiv) { + if (IS_ERR(musb->xceiv)) { pr_err("HS UDC: no transceiver configured\n"); - return -ENODEV; + return PTR_ERR(musb->xceiv); } /* Silicon does not implement ConfigData register. 
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 87bd578799a8..152865b36522 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -1164,12 +1164,12 @@ static int musb_gadget_disable(struct usb_ep *ep) musb_writew(epio, MUSB_RXMAXP, 0); } - musb_ep->desc = NULL; - musb_ep->end_point.desc = NULL; - /* abort all pending DMA and requests */ nuke(musb_ep, -ESHUTDOWN); + musb_ep->desc = NULL; + musb_ep->end_point.desc = NULL; + schedule_work(&musb->irq_work); spin_unlock_irqrestore(&(musb->lock), flags); diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 58487a473521..2f8ad7f1f482 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -2735,7 +2735,7 @@ static const struct hc_driver musb_hc_driver = { .description = "musb-hcd", .product_desc = "MUSB HDRC host driver", .hcd_priv_size = sizeof(struct musb *), - .flags = HCD_USB2 | HCD_MEMORY | HCD_BH, + .flags = HCD_USB2 | HCD_MEMORY, /* not using irq handler or reset hooks from usbcore, since * those must be shared with peripheral code for OTG configs diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index dd47823bb014..7c9f25e9c422 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -109,6 +109,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */ { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */ { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */ + { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */ { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. 
*/ @@ -118,6 +119,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */ { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ + { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */ { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ @@ -141,6 +143,8 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */ { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */ { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */ + { USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */ + { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */ { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */ { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */ diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 5c802d47892c..ca6bfddaacad 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -1006,7 +1006,7 @@ struct virtqueue *vring_create_virtqueue( const char *name) { struct virtqueue *vq; - void *queue; + void *queue = NULL; dma_addr_t dma_addr; size_t queue_size_in_bytes; struct vring vring; diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 9781e0dd59d6..d46839f51e73 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -151,6 +151,8 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq); static void balloon_process(struct work_struct *work); static DECLARE_DELAYED_WORK(balloon_worker, balloon_process); +static void release_memory_resource(struct resource *resource); + /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. */ #define GFP_BALLOON \ @@ -267,6 +269,20 @@ static struct resource *additional_memory_resource(phys_addr_t size) return NULL; } +#ifdef CONFIG_SPARSEMEM + { + unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT); + unsigned long pfn = res->start >> PAGE_SHIFT; + + if (pfn > limit) { + pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n", + pfn, limit); + release_memory_resource(res); + return NULL; + } + } +#endif + return res; } diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c index 38272ad24551..f4edd6df3df2 100644 --- a/drivers/xen/evtchn.c +++ b/drivers/xen/evtchn.c @@ -316,7 +316,6 @@ static int evtchn_resize_ring(struct per_user_data *u) { unsigned int new_size; evtchn_port_t *new_ring, *old_ring; - unsigned int p, c; /* * Ensure the ring is large enough to capture all possible @@ -346,20 +345,17 @@ static int evtchn_resize_ring(struct per_user_data *u) /* * Copy the old ring contents to the new ring. * - * If the ring contents crosses the end of the current ring, - * it needs to be copied in two chunks. + * To take care of wrapping, a full ring, and the new index + * pointing into the second half, simply copy the old contents + * twice. 
* * +---------+ +------------------+ - * |34567 12| -> | 1234567 | - * +-----p-c-+ +------------------+ + * |34567 12| -> |34567 1234567 12| + * +-----p-c-+ +-------c------p---+ */ - p = evtchn_ring_offset(u, u->ring_prod); - c = evtchn_ring_offset(u, u->ring_cons); - if (p < c) { - memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring)); - memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring)); - } else - memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring)); + memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring)); + memcpy(new_ring + u->ring_size, old_ring, + u->ring_size * sizeof(*u->ring)); u->ring = new_ring; u->ring_size = new_size; diff --git a/fs/pnode.c b/fs/pnode.c index c524fdddc7fb..99899705b105 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -198,7 +198,7 @@ static struct mount *next_group(struct mount *m, struct mount *origin) /* all accesses are serialized by namespace_sem */ static struct user_namespace *user_ns; -static struct mount *last_dest, *last_source, *dest_master; +static struct mount *last_dest, *first_source, *last_source, *dest_master; static struct mountpoint *mp; static struct hlist_head *list; @@ -221,20 +221,22 @@ static int propagate_one(struct mount *m) type = CL_MAKE_SHARED; } else { struct mount *n, *p; + bool done; for (n = m; ; n = p) { p = n->mnt_master; - if (p == dest_master || IS_MNT_MARKED(p)) { - while (last_dest->mnt_master != p) { - last_source = last_source->mnt_master; - last_dest = last_source->mnt_parent; - } - if (!peers(n, last_dest)) { - last_source = last_source->mnt_master; - last_dest = last_source->mnt_parent; - } + if (p == dest_master || IS_MNT_MARKED(p)) break; - } } + do { + struct mount *parent = last_source->mnt_parent; + if (last_source == first_source) + break; + done = parent->mnt_master == p; + if (done && peers(n, parent)) + break; + last_source = last_source->mnt_master; + } while (!done); + type = CL_SLAVE; /* beginning of peer group among the slaves? */ if (IS_MNT_SHARED(m)) @@ -286,6 +288,7 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp, */ user_ns = current->nsproxy->mnt_ns->user_ns; last_dest = dest_mnt; + first_source = source_mnt; last_source = source_mnt; mp = dest_mp; list = tree_list; diff --git a/fs/proc/base.c b/fs/proc/base.c index b1755b23893e..92e37e224cd2 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -955,7 +955,8 @@ static ssize_t environ_read(struct file *file, char __user *buf, struct mm_struct *mm = file->private_data; unsigned long env_start, env_end; - if (!mm) + /* Ensure the process spawned far enough to have an environment. 
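The evtchn_resize_ring() hunk above drops the two-chunk copy: because ring_cons and ring_prod are free-running counters that are only masked down to an offset at access time, copying the old contents twice into a ring of twice the size leaves every in-flight entry at exactly the offset those unchanged counters will compute. A standalone sketch of the same trick for a power-of-two ring of integers, with invented names and no locking:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ring {
	unsigned int size;		/* power of two */
	unsigned int cons, prod;	/* free-running, masked on access */
	int *slots;
};

static unsigned int ring_off(const struct ring *r, unsigned int idx)
{
	return idx & (r->size - 1);
}

/* Double the ring: copy the old contents twice, leave cons/prod untouched. */
static void ring_grow(struct ring *r)
{
	unsigned int new_size = r->size * 2;
	int *new_slots = malloc(new_size * sizeof(*new_slots));

	assert(new_slots);
	memcpy(new_slots, r->slots, r->size * sizeof(*r->slots));
	memcpy(new_slots + r->size, r->slots, r->size * sizeof(*r->slots));
	free(r->slots);
	r->slots = new_slots;
	r->size = new_size;
}

int main(void)
{
	struct ring r = { .size = 4, .cons = 6, .prod = 9 };
	unsigned int i;

	r.slots = calloc(r.size, sizeof(*r.slots));
	assert(r.slots);
	for (i = r.cons; i != r.prod; i++)	/* entries 6, 7, 8 wrap around */
		r.slots[ring_off(&r, i)] = (int)i;

	ring_grow(&r);
	for (i = r.cons; i != r.prod; i++)	/* same indices still read 6, 7, 8 */
		printf("index %u -> %d\n", i, r.slots[ring_off(&r, i)]);
	free(r.slots);
	return 0;
}

With the old size of 4 and indices 6..9, the entries sit at offsets 2, 3 and 0; after doubling, the same indices masked by 7 land on offsets 6, 7 and 0, which the second copy has already populated.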
*/ + if (!mm || !mm->env_end) return 0; page = (char *)__get_free_page(GFP_TEMPORARY); diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 14362a84c78e..3a932501d690 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -394,13 +394,13 @@ struct acpi_data_node { static inline bool is_acpi_node(struct fwnode_handle *fwnode) { - return fwnode && (fwnode->type == FWNODE_ACPI + return !IS_ERR_OR_NULL(fwnode) && (fwnode->type == FWNODE_ACPI || fwnode->type == FWNODE_ACPI_DATA); } static inline bool is_acpi_device_node(struct fwnode_handle *fwnode) { - return fwnode && fwnode->type == FWNODE_ACPI; + return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_ACPI; } static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index eeae401a2412..3d5202eda22f 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -246,7 +246,7 @@ #define __HAVE_BUILTIN_BSWAP32__ #define __HAVE_BUILTIN_BSWAP64__ #endif -#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600) +#if GCC_VERSION >= 40800 #define __HAVE_BUILTIN_BSWAP16__ #endif #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ diff --git a/include/linux/of.h b/include/linux/of.h index 7fcb681baadf..31758036787c 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -133,7 +133,7 @@ void of_core_init(void); static inline bool is_of_node(struct fwnode_handle *fwnode) { - return fwnode && fwnode->type == FWNODE_OF; + return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_OF; } static inline struct device_node *to_of_node(struct fwnode_handle *fwnode) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index f4ed4f1b0c77..6b052aa7b5b7 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -517,6 +517,27 @@ static inline int PageTransCompound(struct page *page) } /* + * PageTransCompoundMap is the same as PageTransCompound, but it also + * guarantees the primary MMU has the entire compound page mapped + * through pmd_trans_huge, which in turn guarantees the secondary MMUs + * can also map the entire compound page. This allows the secondary + * MMUs to call get_user_pages() only once for each compound page and + * to immediately map the entire compound page with a single secondary + * MMU fault. If there will be a pmd split later, the secondary MMUs + * will get an update through the MMU notifier invalidation through + * split_huge_pmd(). + * + * Unlike PageTransCompound, this is safe to be called only while + * split_huge_pmd() cannot run from under us, like if protected by the + * MMU notifier, otherwise it may result in page->_mapcount < 0 false + * positives. + */ +static inline int PageTransCompoundMap(struct page *page) +{ + return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0; +} + +/* * PageTransTail returns true for both transparent huge pages * and hugetlbfs pages, so it should only be called when it's known * that hugetlbfs pages aren't involved. 
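The compiler-gcc.h hunk above keeps __HAVE_BUILTIN_BSWAP16__ for GCC 4.8+ only, dropping the powerpc special case; the swab.h hunk further down then moves the builtins out of the __fswab*() helpers and into the __swab*() macros themselves so constant arguments can still fold at compile time. A small sketch of the underlying dispatch pattern; the version cutoff and macro names below are illustrative, not the kernel's exact configuration logic:

#include <stdint.h>
#include <stdio.h>

/* Open-coded fallback for compilers without the builtin. */
static inline uint16_t swab16_fallback(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))
#define my_swab16(x) __builtin_bswap16(x)	/* a constant x folds at compile time */
#else
#define my_swab16(x) swab16_fallback(x)
#endif

int main(void)
{
	printf("0x%04x -> 0x%04x\n", 0x1234, my_swab16(0x1234));	/* 0x3412 */
	return 0;
}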
@@ -559,6 +580,7 @@ static inline int TestClearPageDoubleMap(struct page *page) #else TESTPAGEFLAG_FALSE(TransHuge) TESTPAGEFLAG_FALSE(TransCompound) +TESTPAGEFLAG_FALSE(TransCompoundMap) TESTPAGEFLAG_FALSE(TransTail) TESTPAGEFLAG_FALSE(DoubleMap) TESTSETFLAG_FALSE(DoubleMap) diff --git a/include/linux/swap.h b/include/linux/swap.h index 2b83359c19ca..0a4cd4703f40 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -533,6 +533,10 @@ static inline swp_entry_t get_swap_page(void) #ifdef CONFIG_MEMCG static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) { + /* Cgroup2 doesn't have per-cgroup swappiness */ + if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) + return vm_swappiness; + /* root ? */ if (mem_cgroup_disabled() || !memcg->css.parent) return vm_swappiness; diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index 730d82ad6ee5..24cd3949a9a4 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -80,6 +80,7 @@ struct netns_xfrm { struct flow_cache flow_cache_global; atomic_t flow_cache_genid; struct list_head flow_cache_gc_list; + atomic_t flow_cache_gc_count; spinlock_t flow_cache_gc_lock; struct work_struct flow_cache_gc_work; struct work_struct flow_cache_flush_work; diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index 4f543262dd81..9d14f707e534 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -112,15 +112,6 @@ static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum) return iptunnel_handle_offloads(skb, type); } -static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff) -{ - struct udphdr *uh; - - uh = (struct udphdr *)(skb->data + nhoff - sizeof(struct udphdr)); - skb_shinfo(skb)->gso_type |= uh->check ? - SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; -} - static inline void udp_tunnel_encap_enable(struct socket *sock) { #if IS_ENABLED(CONFIG_IPV6) diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 6e0f5f01734c..c51afb71bfab 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -718,9 +718,9 @@ __SYSCALL(__NR_mlock2, sys_mlock2) #define __NR_copy_file_range 285 __SYSCALL(__NR_copy_file_range, sys_copy_file_range) #define __NR_preadv2 286 -__SYSCALL(__NR_preadv2, sys_preadv2) +__SC_COMP(__NR_preadv2, sys_preadv2, compat_sys_preadv2) #define __NR_pwritev2 287 -__SYSCALL(__NR_pwritev2, sys_pwritev2) +__SC_COMP(__NR_pwritev2, sys_pwritev2, compat_sys_pwritev2) #undef __NR_syscalls #define __NR_syscalls 288 diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h index 897a94942245..f7d4831a2cc7 100644 --- a/include/uapi/linux/if_macsec.h +++ b/include/uapi/linux/if_macsec.h @@ -19,6 +19,8 @@ #define MACSEC_MAX_KEY_LEN 128 +#define MACSEC_KEYID_LEN 16 + #define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL #define MACSEC_DEFAULT_CIPHER_ALT 0x0080C20001000001ULL @@ -79,7 +81,7 @@ enum macsec_sa_attrs { MACSEC_SA_ATTR_ACTIVE, /* config/dump, u8 0..1 */ MACSEC_SA_ATTR_PN, /* config/dump, u32 */ MACSEC_SA_ATTR_KEY, /* config, data */ - MACSEC_SA_ATTR_KEYID, /* config/dump, u64 */ + MACSEC_SA_ATTR_KEYID, /* config/dump, 128-bit */ MACSEC_SA_ATTR_STATS, /* dump, nested, macsec_sa_stats_attr */ MACSEC_SA_ATTR_PAD, __MACSEC_SA_ATTR_END, diff --git a/include/linux/rio_mport_cdev.h b/include/uapi/linux/rio_mport_cdev.h index b65d19df76d2..5796bf1d06ad 100644 --- a/include/linux/rio_mport_cdev.h +++ b/include/uapi/linux/rio_mport_cdev.h @@ -39,16 +39,16 @@ #ifndef 
_RIO_MPORT_CDEV_H_ #define _RIO_MPORT_CDEV_H_ -#ifndef __user -#define __user -#endif +#include <linux/ioctl.h> +#include <linux/types.h> struct rio_mport_maint_io { - uint32_t rioid; /* destID of remote device */ - uint32_t hopcount; /* hopcount to remote device */ - uint32_t offset; /* offset in register space */ - size_t length; /* length in bytes */ - void __user *buffer; /* data buffer */ + __u16 rioid; /* destID of remote device */ + __u8 hopcount; /* hopcount to remote device */ + __u8 pad0[5]; + __u32 offset; /* offset in register space */ + __u32 length; /* length in bytes */ + __u64 buffer; /* pointer to data buffer */ }; /* @@ -66,22 +66,23 @@ struct rio_mport_maint_io { #define RIO_CAP_MAP_INB (1 << 7) struct rio_mport_properties { - uint16_t hdid; - uint8_t id; /* Physical port ID */ - uint8_t index; - uint32_t flags; - uint32_t sys_size; /* Default addressing size */ - uint8_t port_ok; - uint8_t link_speed; - uint8_t link_width; - uint32_t dma_max_sge; - uint32_t dma_max_size; - uint32_t dma_align; - uint32_t transfer_mode; /* Default transfer mode */ - uint32_t cap_sys_size; /* Capable system sizes */ - uint32_t cap_addr_size; /* Capable addressing sizes */ - uint32_t cap_transfer_mode; /* Capable transfer modes */ - uint32_t cap_mport; /* Mport capabilities */ + __u16 hdid; + __u8 id; /* Physical port ID */ + __u8 index; + __u32 flags; + __u32 sys_size; /* Default addressing size */ + __u8 port_ok; + __u8 link_speed; + __u8 link_width; + __u8 pad0; + __u32 dma_max_sge; + __u32 dma_max_size; + __u32 dma_align; + __u32 transfer_mode; /* Default transfer mode */ + __u32 cap_sys_size; /* Capable system sizes */ + __u32 cap_addr_size; /* Capable addressing sizes */ + __u32 cap_transfer_mode; /* Capable transfer modes */ + __u32 cap_mport; /* Mport capabilities */ }; /* @@ -93,54 +94,57 @@ struct rio_mport_properties { #define RIO_PORTWRITE (1 << 1) struct rio_doorbell { - uint32_t rioid; - uint16_t payload; + __u16 rioid; + __u16 payload; }; struct rio_doorbell_filter { - uint32_t rioid; /* 0xffffffff to match all ids */ - uint16_t low; - uint16_t high; + __u16 rioid; /* Use RIO_INVALID_DESTID to match all ids */ + __u16 low; + __u16 high; + __u16 pad0; }; struct rio_portwrite { - uint32_t payload[16]; + __u32 payload[16]; }; struct rio_pw_filter { - uint32_t mask; - uint32_t low; - uint32_t high; + __u32 mask; + __u32 low; + __u32 high; + __u32 pad0; }; /* RapidIO base address for inbound requests set to value defined below * indicates that no specific RIO-to-local address translation is requested * and driver should use direct (one-to-one) address mapping. 
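The include/uapi/linux/rio_mport_cdev.h conversion above (continued in the structures that follow) applies the usual rules for ioctl structures shared with user space: fixed-width __u8/__u16/__u32/__u64 instead of uint*_t, size_t and enums, explicit pad fields so 32-bit and 64-bit builds agree on every offset, and __u64 in place of raw user pointers. A minimal userspace sketch of why that matters, using <stdint.h> names in place of the kernel's __u types and an invented structure:

#include <stdint.h>
#include <stdio.h>

/* Implicit layout: 2 padding bytes appear after 'rioid', and the pointer's
 * size differs between 32-bit and 64-bit builds, so the two ends of an
 * ioctl can disagree about where fields live and how big the struct is. */
struct maint_io_bad {
	uint16_t rioid;
	uint32_t offset;
	void *buffer;
};

/* Explicit layout: every byte accounted for, user pointer carried as a u64. */
struct maint_io_good {
	uint16_t rioid;
	uint16_t pad0;
	uint32_t offset;
	uint64_t buffer;	/* filled as (uint64_t)(uintptr_t)ptr by the caller */
};

_Static_assert(sizeof(struct maint_io_good) == 16, "layout is fixed everywhere");

int main(void)
{
	printf("implicit: %zu bytes, explicit: %zu bytes\n",
	       sizeof(struct maint_io_bad), sizeof(struct maint_io_good));
	return 0;
}

User space passes its buffer as a 64-bit integer, and the kernel side sees one layout regardless of the caller's word size, which also avoids the need for a compat ioctl path.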
*/ -#define RIO_MAP_ANY_ADDR (uint64_t)(~((uint64_t) 0)) +#define RIO_MAP_ANY_ADDR (__u64)(~((__u64) 0)) struct rio_mmap { - uint32_t rioid; - uint64_t rio_addr; - uint64_t length; - uint64_t handle; - void *address; + __u16 rioid; + __u16 pad0[3]; + __u64 rio_addr; + __u64 length; + __u64 handle; + __u64 address; }; struct rio_dma_mem { - uint64_t length; /* length of DMA memory */ - uint64_t dma_handle; /* handle associated with this memory */ - void *buffer; /* pointer to this memory */ + __u64 length; /* length of DMA memory */ + __u64 dma_handle; /* handle associated with this memory */ + __u64 address; }; - struct rio_event { - unsigned int header; /* event type RIO_DOORBELL or RIO_PORTWRITE */ + __u32 header; /* event type RIO_DOORBELL or RIO_PORTWRITE */ union { struct rio_doorbell doorbell; /* header for RIO_DOORBELL */ struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */ } u; + __u32 pad0; }; enum rio_transfer_sync { @@ -184,35 +188,37 @@ enum rio_exchange { }; struct rio_transfer_io { - uint32_t rioid; /* Target destID */ - uint64_t rio_addr; /* Address in target's RIO mem space */ - enum rio_exchange method; /* Data exchange method */ - void __user *loc_addr; - uint64_t handle; - uint64_t offset; /* Offset in buffer */ - uint64_t length; /* Length in bytes */ - uint32_t completion_code; /* Completion code for this transfer */ + __u64 rio_addr; /* Address in target's RIO mem space */ + __u64 loc_addr; + __u64 handle; + __u64 offset; /* Offset in buffer */ + __u64 length; /* Length in bytes */ + __u16 rioid; /* Target destID */ + __u16 method; /* Data exchange method, one of rio_exchange enum */ + __u32 completion_code; /* Completion code for this transfer */ }; struct rio_transaction { - uint32_t transfer_mode; /* Data transfer mode */ - enum rio_transfer_sync sync; /* Synchronization method */ - enum rio_transfer_dir dir; /* Transfer direction */ - size_t count; /* Number of transfers */ - struct rio_transfer_io __user *block; /* Array of <count> transfers */ + __u64 block; /* Pointer to array of <count> transfers */ + __u32 count; /* Number of transfers */ + __u32 transfer_mode; /* Data transfer mode */ + __u16 sync; /* Synch method, one of rio_transfer_sync enum */ + __u16 dir; /* Transfer direction, one of rio_transfer_dir enum */ + __u32 pad0; }; struct rio_async_tx_wait { - uint32_t token; /* DMA transaction ID token */ - uint32_t timeout; /* Wait timeout in msec, if 0 use default TO */ + __u32 token; /* DMA transaction ID token */ + __u32 timeout; /* Wait timeout in msec, if 0 use default TO */ }; #define RIO_MAX_DEVNAME_SZ 20 struct rio_rdev_info { - uint32_t destid; - uint8_t hopcount; - uint32_t comptag; + __u16 destid; + __u8 hopcount; + __u8 pad0; + __u32 comptag; char name[RIO_MAX_DEVNAME_SZ + 1]; }; @@ -220,11 +226,11 @@ struct rio_rdev_info { #define RIO_MPORT_DRV_MAGIC 'm' #define RIO_MPORT_MAINT_HDID_SET \ - _IOW(RIO_MPORT_DRV_MAGIC, 1, uint16_t) + _IOW(RIO_MPORT_DRV_MAGIC, 1, __u16) #define RIO_MPORT_MAINT_COMPTAG_SET \ - _IOW(RIO_MPORT_DRV_MAGIC, 2, uint32_t) + _IOW(RIO_MPORT_DRV_MAGIC, 2, __u32) #define RIO_MPORT_MAINT_PORT_IDX_GET \ - _IOR(RIO_MPORT_DRV_MAGIC, 3, uint32_t) + _IOR(RIO_MPORT_DRV_MAGIC, 3, __u32) #define RIO_MPORT_GET_PROPERTIES \ _IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties) #define RIO_MPORT_MAINT_READ_LOCAL \ @@ -244,9 +250,9 @@ struct rio_rdev_info { #define RIO_DISABLE_PORTWRITE_RANGE \ _IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter) #define RIO_SET_EVENT_MASK \ - _IOW(RIO_MPORT_DRV_MAGIC, 13, unsigned int) + 
_IOW(RIO_MPORT_DRV_MAGIC, 13, __u32) #define RIO_GET_EVENT_MASK \ - _IOR(RIO_MPORT_DRV_MAGIC, 14, unsigned int) + _IOR(RIO_MPORT_DRV_MAGIC, 14, __u32) #define RIO_MAP_OUTBOUND \ _IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap) #define RIO_UNMAP_OUTBOUND \ @@ -254,11 +260,11 @@ struct rio_rdev_info { #define RIO_MAP_INBOUND \ _IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap) #define RIO_UNMAP_INBOUND \ - _IOW(RIO_MPORT_DRV_MAGIC, 18, uint64_t) + _IOW(RIO_MPORT_DRV_MAGIC, 18, __u64) #define RIO_ALLOC_DMA \ _IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem) #define RIO_FREE_DMA \ - _IOW(RIO_MPORT_DRV_MAGIC, 20, uint64_t) + _IOW(RIO_MPORT_DRV_MAGIC, 20, __u64) #define RIO_TRANSFER \ _IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction) #define RIO_WAIT_FOR_ASYNC \ diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h index 3f10e5317b46..8f3a8f606fd9 100644 --- a/include/uapi/linux/swab.h +++ b/include/uapi/linux/swab.h @@ -45,9 +45,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val) { -#ifdef __HAVE_BUILTIN_BSWAP16__ - return __builtin_bswap16(val); -#elif defined (__arch_swab16) +#if defined (__arch_swab16) return __arch_swab16(val); #else return ___constant_swab16(val); @@ -56,9 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val) static inline __attribute_const__ __u32 __fswab32(__u32 val) { -#ifdef __HAVE_BUILTIN_BSWAP32__ - return __builtin_bswap32(val); -#elif defined(__arch_swab32) +#if defined(__arch_swab32) return __arch_swab32(val); #else return ___constant_swab32(val); @@ -67,9 +63,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val) static inline __attribute_const__ __u64 __fswab64(__u64 val) { -#ifdef __HAVE_BUILTIN_BSWAP64__ - return __builtin_bswap64(val); -#elif defined (__arch_swab64) +#if defined (__arch_swab64) return __arch_swab64(val); #elif defined(__SWAB_64_THRU_32__) __u32 h = val >> 32; @@ -102,28 +96,40 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val) * __swab16 - return a byteswapped 16-bit value * @x: value to byteswap */ +#ifdef __HAVE_BUILTIN_BSWAP16__ +#define __swab16(x) (__u16)__builtin_bswap16((__u16)(x)) +#else #define __swab16(x) \ (__builtin_constant_p((__u16)(x)) ? \ ___constant_swab16(x) : \ __fswab16(x)) +#endif /** * __swab32 - return a byteswapped 32-bit value * @x: value to byteswap */ +#ifdef __HAVE_BUILTIN_BSWAP32__ +#define __swab32(x) (__u32)__builtin_bswap32((__u32)(x)) +#else #define __swab32(x) \ (__builtin_constant_p((__u32)(x)) ? \ ___constant_swab32(x) : \ __fswab32(x)) +#endif /** * __swab64 - return a byteswapped 64-bit value * @x: value to byteswap */ +#ifdef __HAVE_BUILTIN_BSWAP64__ +#define __swab64(x) (__u64)__builtin_bswap64((__u64)(x)) +#else #define __swab64(x) \ (__builtin_constant_p((__u64)(x)) ? 
\ ___constant_swab64(x) : \ __fswab64(x)) +#endif /** * __swahw32 - return a word-swapped 32-bit value diff --git a/include/xen/page.h b/include/xen/page.h index 96294ac93755..9dc46cb8a0fd 100644 --- a/include/xen/page.h +++ b/include/xen/page.h @@ -15,9 +15,9 @@ */ #define xen_pfn_to_page(xen_pfn) \ - ((pfn_to_page(((unsigned long)(xen_pfn) << XEN_PAGE_SHIFT) >> PAGE_SHIFT))) + (pfn_to_page((unsigned long)(xen_pfn) >> (PAGE_SHIFT - XEN_PAGE_SHIFT))) #define page_to_xen_pfn(page) \ - (((page_to_pfn(page)) << PAGE_SHIFT) >> XEN_PAGE_SHIFT) + ((page_to_pfn(page)) << (PAGE_SHIFT - XEN_PAGE_SHIFT)) #define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8b489fcac37b..d1f7149f8704 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -596,17 +596,8 @@ bool sched_can_stop_tick(struct rq *rq) return false; /* - * FIFO realtime policy runs the highest priority task (after DEADLINE). - * Other runnable tasks are of a lower priority. The scheduler tick - * isn't needed. - */ - fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; - if (fifo_nr_running) - return true; - - /* - * Round-robin realtime tasks time slice with other tasks at the same - * realtime priority. + * If there are more than one RR tasks, we need the tick to effect the + * actual RR behaviour. */ if (rq->rt.rr_nr_running) { if (rq->rt.rr_nr_running == 1) @@ -615,8 +606,20 @@ bool sched_can_stop_tick(struct rq *rq) return false; } - /* Normal multitasking need periodic preemption checks */ - if (rq->cfs.nr_running > 1) + /* + * If there's no RR tasks, but FIFO tasks, we can skip the tick, no + * forced preemption between FIFO tasks. + */ + fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; + if (fifo_nr_running) + return true; + + /* + * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left; + * if there's more than one we need the tick for involuntary + * preemption. + */ + if (rq->nr_running > 1) return false; return true; diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index ced963049e0a..b7b0760ba6ee 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -2113,8 +2113,13 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file) trace_create_file("filter", 0644, file->dir, file, &ftrace_event_filter_fops); - trace_create_file("trigger", 0644, file->dir, file, - &event_trigger_fops); + /* + * Only event directories that can be enabled should have + * triggers. 
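The kernel/sched/core.c hunk above restructures sched_can_stop_tick(): with two or more round-robin tasks the tick must keep running to rotate their time slices, with only FIFO realtime tasks it can stop (there is no forced preemption between them), and once no realtime tasks remain it must keep running as soon as more than one task is runnable at all. A tiny standalone sketch of just that decision table; the structure and counters are invented stand-ins, and the deadline-class check of the real function is omitted:

#include <assert.h>
#include <stdbool.h>

struct rq_counts {
	unsigned int rt_nr_running;	/* all realtime tasks (FIFO + RR) */
	unsigned int rr_nr_running;	/* round-robin subset of the above */
	unsigned int nr_running;	/* everything runnable on this CPU */
};

static bool can_stop_tick(const struct rq_counts *rq)
{
	unsigned int fifo_nr_running;

	/* More than one RR task needs the tick to rotate the time slice. */
	if (rq->rr_nr_running)
		return rq->rr_nr_running == 1;

	/* Only FIFO realtime tasks left: no forced preemption between them. */
	fifo_nr_running = rq->rt_nr_running - rq->rr_nr_running;
	if (fifo_nr_running)
		return true;

	/* Only fair-class tasks remain: more than one needs the tick. */
	return rq->nr_running <= 1;
}

int main(void)
{
	assert(can_stop_tick(&(struct rq_counts){ .rt_nr_running = 3, .nr_running = 3 }));
	assert(!can_stop_tick(&(struct rq_counts){ .rt_nr_running = 2, .rr_nr_running = 2, .nr_running = 2 }));
	assert(can_stop_tick(&(struct rq_counts){ .rt_nr_running = 1, .rr_nr_running = 1, .nr_running = 1 }));
	assert(!can_stop_tick(&(struct rq_counts){ .nr_running = 2 }));
	assert(can_stop_tick(&(struct rq_counts){ .nr_running = 1 }));
	return 0;
}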
+ */ + if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) + trace_create_file("trigger", 0644, file->dir, file, + &event_trigger_fops); trace_create_file("format", 0444, file->dir, call, &ftrace_event_format_fops); diff --git a/lib/stackdepot.c b/lib/stackdepot.c index 9e0b0315a724..53ad6c0831ae 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c @@ -42,12 +42,14 @@ #define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8) +#define STACK_ALLOC_NULL_PROTECTION_BITS 1 #define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */ #define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER)) #define STACK_ALLOC_ALIGN 4 #define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \ STACK_ALLOC_ALIGN) -#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - STACK_ALLOC_OFFSET_BITS) +#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \ + STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS) #define STACK_ALLOC_SLABS_CAP 1024 #define STACK_ALLOC_MAX_SLABS \ (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \ @@ -59,6 +61,7 @@ union handle_parts { struct { u32 slabindex : STACK_ALLOC_INDEX_BITS; u32 offset : STACK_ALLOC_OFFSET_BITS; + u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS; }; }; @@ -136,6 +139,7 @@ static struct stack_record *depot_alloc_stack(unsigned long *entries, int size, stack->size = size; stack->handle.slabindex = depot_index; stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN; + stack->handle.valid = 1; memcpy(stack->entries, entries, size * sizeof(unsigned long)); depot_offset += required_size; diff --git a/mm/compaction.c b/mm/compaction.c index ccf97b02b85f..8fa254043801 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -852,16 +852,8 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, pfn = isolate_migratepages_block(cc, pfn, block_end_pfn, ISOLATE_UNEVICTABLE); - /* - * In case of fatal failure, release everything that might - * have been isolated in the previous iteration, and signal - * the failure back to caller. 
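The lib/stackdepot.c hunk above reserves one bit of the 32-bit handle as a 'valid' flag that is set on every allocated record, so the all-zero handle can never collide with a real stack and 0 can safely mean "nothing recorded". A compact sketch of packing a handle that way; the field widths roughly follow the patch's constants for 4 KiB pages, but the exact split is illustrative:

#include <stdint.h>
#include <stdio.h>

#define OFFSET_BITS	10	/* ~ 4-page slabs, 4 KiB pages, 16-byte alignment */
#define VALID_BITS	1
#define INDEX_BITS	(32 - OFFSET_BITS - VALID_BITS)

union handle_parts {
	uint32_t handle;
	struct {
		uint32_t slabindex : INDEX_BITS;
		uint32_t offset    : OFFSET_BITS;
		uint32_t valid     : VALID_BITS;
	};
};

static uint32_t make_handle(uint32_t slab, uint32_t off)
{
	union handle_parts h = { .handle = 0 };

	h.slabindex = slab;
	h.offset = off;
	h.valid = 1;	/* even slab 0, offset 0 now yields a nonzero handle */
	return h.handle;
}

int main(void)
{
	printf("handle(0, 0) = 0x%08x\n", make_handle(0, 0));
	return 0;
}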
- */ - if (!pfn) { - putback_movable_pages(&cc->migratepages); - cc->nr_migratepages = 0; + if (!pfn) break; - } if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) break; @@ -1741,7 +1733,7 @@ void compaction_unregister_node(struct node *node) static inline bool kcompactd_work_requested(pg_data_t *pgdat) { - return pgdat->kcompactd_max_order > 0; + return pgdat->kcompactd_max_order > 0 || kthread_should_stop(); } static bool kcompactd_node_suitable(pg_data_t *pgdat) @@ -1805,6 +1797,8 @@ static void kcompactd_do_work(pg_data_t *pgdat) INIT_LIST_HEAD(&cc.freepages); INIT_LIST_HEAD(&cc.migratepages); + if (kthread_should_stop()) + return; status = compact_zone(zone, &cc); if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone), diff --git a/mm/huge_memory.c b/mm/huge_memory.c index df67b53ae3c5..f7daa7de8f48 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3452,7 +3452,7 @@ next: } } - pr_info("%lu of %lu THP split", split, total); + pr_info("%lu of %lu THP split\n", split, total); return 0; } @@ -3463,7 +3463,7 @@ static int __init split_huge_pages_debugfs(void) { void *ret; - ret = debugfs_create_file("split_huge_pages", 0644, NULL, NULL, + ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL, &split_huge_pages_fops); if (!ret) pr_warn("Failed to create split_huge_pages in debugfs"); diff --git a/mm/memory.c b/mm/memory.c index 305537fc8640..52c218e2b724 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1222,15 +1222,8 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, next = pmd_addr_end(addr, end); if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { if (next - addr != HPAGE_PMD_SIZE) { -#ifdef CONFIG_DEBUG_VM - if (!rwsem_is_locked(&tlb->mm->mmap_sem)) { - pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n", - __func__, addr, end, - vma->vm_start, - vma->vm_end); - BUG(); - } -#endif + VM_BUG_ON_VMA(vma_is_anonymous(vma) && + !rwsem_is_locked(&tlb->mm->mmap_sem), vma); split_huge_pmd(vma, pmd, addr); } else if (zap_huge_pmd(tlb, vma, pmd, addr)) goto next; diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 999792d35ccc..bc5149d5ec38 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -1910,7 +1910,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb) if (gdtc->dirty > gdtc->bg_thresh) return true; - if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc)) + if (wb_stat(wb, WB_RECLAIMABLE) > + wb_calc_thresh(gdtc->wb, gdtc->bg_thresh)) return true; if (mdtc) { @@ -1924,7 +1925,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb) if (mdtc->dirty > mdtc->bg_thresh) return true; - if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc)) + if (wb_stat(wb, WB_RECLAIMABLE) > + wb_calc_thresh(mdtc->wb, mdtc->bg_thresh)) return true; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 59de90d5d3a3..c1069efcc4d7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6485,7 +6485,7 @@ int __meminit init_per_zone_wmark_min(void) setup_per_zone_inactive_ratio(); return 0; } -module_init(init_per_zone_wmark_min) +core_initcall(init_per_zone_wmark_min) /* * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so diff --git a/mm/zswap.c b/mm/zswap.c index 91dad80d068b..de0f119b1780 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -170,6 +170,8 @@ static struct zswap_tree *zswap_trees[MAX_SWAPFILES]; static LIST_HEAD(zswap_pools); /* protects zswap_pools list modification */ static DEFINE_SPINLOCK(zswap_pools_lock); +/* pool counter to provide unique names to zpool */ +static atomic_t 
zswap_pools_count = ATOMIC_INIT(0); /* used by param callback function */ static bool zswap_init_started; @@ -565,6 +567,7 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) static struct zswap_pool *zswap_pool_create(char *type, char *compressor) { struct zswap_pool *pool; + char name[38]; /* 'zswap' + 32 char (max) num + \0 */ gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; pool = kzalloc(sizeof(*pool), GFP_KERNEL); @@ -573,7 +576,10 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor) return NULL; } - pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops); + /* unique name for each pool specifically required by zsmalloc */ + snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count)); + + pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops); if (!pool->zpool) { pr_err("%s zpool not available\n", type); goto error; diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index f8fc6241469a..d99b2009771a 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c @@ -21,18 +21,19 @@ #include <asm/uaccess.h> #include "br_private.h" -/* called with RTNL */ static int get_bridge_ifindices(struct net *net, int *indices, int num) { struct net_device *dev; int i = 0; - for_each_netdev(net, dev) { + rcu_read_lock(); + for_each_netdev_rcu(net, dev) { if (i >= num) break; if (dev->priv_flags & IFF_EBRIDGE) indices[i++] = dev->ifindex; } + rcu_read_unlock(); return i; } diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 191ea66e4d92..6852f3c7009c 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1279,6 +1279,7 @@ static int br_ip4_multicast_query(struct net_bridge *br, struct br_ip saddr; unsigned long max_delay; unsigned long now = jiffies; + unsigned int offset = skb_transport_offset(skb); __be32 group; int err = 0; @@ -1289,14 +1290,14 @@ static int br_ip4_multicast_query(struct net_bridge *br, group = ih->group; - if (skb->len == sizeof(*ih)) { + if (skb->len == offset + sizeof(*ih)) { max_delay = ih->code * (HZ / IGMP_TIMER_SCALE); if (!max_delay) { max_delay = 10 * HZ; group = 0; } - } else if (skb->len >= sizeof(*ih3)) { + } else if (skb->len >= offset + sizeof(*ih3)) { ih3 = igmpv3_query_hdr(skb); if (ih3->nsrcs) goto out; @@ -1357,6 +1358,7 @@ static int br_ip6_multicast_query(struct net_bridge *br, struct br_ip saddr; unsigned long max_delay; unsigned long now = jiffies; + unsigned int offset = skb_transport_offset(skb); const struct in6_addr *group = NULL; bool is_general_query; int err = 0; @@ -1366,8 +1368,8 @@ static int br_ip6_multicast_query(struct net_bridge *br, (port && port->state == BR_STATE_DISABLED)) goto out; - if (skb->len == sizeof(*mld)) { - if (!pskb_may_pull(skb, sizeof(*mld))) { + if (skb->len == offset + sizeof(*mld)) { + if (!pskb_may_pull(skb, offset + sizeof(*mld))) { err = -EINVAL; goto out; } @@ -1376,7 +1378,7 @@ static int br_ip6_multicast_query(struct net_bridge *br, if (max_delay) group = &mld->mld_mca; } else { - if (!pskb_may_pull(skb, sizeof(*mld2q))) { + if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) { err = -EINVAL; goto out; } diff --git a/net/core/flow.c b/net/core/flow.c index 1033725be40b..3937b1b68d5b 100644 --- a/net/core/flow.c +++ b/net/core/flow.c @@ -92,8 +92,11 @@ static void flow_cache_gc_task(struct work_struct *work) list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list); spin_unlock_bh(&xfrm->flow_cache_gc_lock); - list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) + 
list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) { flow_entry_kill(fce, xfrm); + atomic_dec(&xfrm->flow_cache_gc_count); + WARN_ON(atomic_read(&xfrm->flow_cache_gc_count) < 0); + } } static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp, @@ -101,6 +104,7 @@ static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp, struct netns_xfrm *xfrm) { if (deleted) { + atomic_add(deleted, &xfrm->flow_cache_gc_count); fcp->hash_count -= deleted; spin_lock_bh(&xfrm->flow_cache_gc_lock); list_splice_tail(gc_list, &xfrm->flow_cache_gc_list); @@ -232,6 +236,13 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir, if (fcp->hash_count > fc->high_watermark) flow_cache_shrink(fc, fcp); + if (fcp->hash_count > 2 * fc->high_watermark || + atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) { + atomic_inc(&net->xfrm.flow_cache_genid); + flo = ERR_PTR(-ENOBUFS); + goto ret_object; + } + fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC); if (fle) { fle->net = net; @@ -446,6 +457,7 @@ int flow_cache_init(struct net *net) INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task); INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task); mutex_init(&net->xfrm.flow_flush_sem); + atomic_set(&net->xfrm.flow_cache_gc_count, 0); fc->hash_shift = 10; fc->low_watermark = 2 * flow_cache_hash_size(fc); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d471f097c739..d69c4644f8f2 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1173,14 +1173,16 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev) { - struct rtnl_link_ifmap map = { - .mem_start = dev->mem_start, - .mem_end = dev->mem_end, - .base_addr = dev->base_addr, - .irq = dev->irq, - .dma = dev->dma, - .port = dev->if_port, - }; + struct rtnl_link_ifmap map; + + memset(&map, 0, sizeof(map)); + map.mem_start = dev->mem_start; + map.mem_end = dev->mem_end; + map.base_addr = dev->base_addr; + map.irq = dev->irq; + map.dma = dev->dma; + map.port = dev->if_port; + if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD)) return -EMSGSIZE; diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 7ac5ec87b004..eeec7d60e5fd 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -227,8 +227,6 @@ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb, int err = -ENOSYS; const struct net_offload **offloads; - udp_tunnel_gro_complete(skb, nhoff); - rcu_read_lock(); offloads = NAPI_GRO_CB(skb)->is_ipv6 ? 
inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[proto]); @@ -237,6 +235,8 @@ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb, err = ops->callbacks.gro_complete(skb, nhoff); + skb_set_inner_mac_header(skb, nhoff); + out_unlock: rcu_read_unlock(); @@ -412,6 +412,8 @@ static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) err = ops->callbacks.gro_complete(skb, nhoff + guehlen); + skb_set_inner_mac_header(skb, nhoff + guehlen); + out_unlock: rcu_read_unlock(); return err; diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 5cf10b777b7e..a917903d5e97 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -156,6 +156,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, struct dst_entry *dst = skb_dst(skb); struct net_device *tdev; /* Device to other host */ int err; + int mtu; if (!dst) { dev->stats.tx_carrier_errors++; @@ -192,6 +193,23 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, tunnel->err_count = 0; } + mtu = dst_mtu(dst); + if (skb->len > mtu) { + skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); + if (skb->protocol == htons(ETH_P_IP)) { + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, + htonl(mtu)); + } else { + if (mtu < IPV6_MIN_MTU) + mtu = IPV6_MIN_MTU; + + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); + } + + dst_release(dst); + goto tx_error; + } + skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev))); skb_dst_set(skb, dst); skb->dev = skb_dst(skb)->dev; diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 097060def7f0..6b7459c92bb2 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -350,6 +350,11 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff, uh->len = newlen; + /* Set encapsulation before calling into inner gro_complete() functions + * to make them set up the inner offsets. 
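The rtnetlink.c hunk above (and the af_llc.c one further down) replaces a designated initializer with an explicit memset() followed by field assignments before the structure is copied out to user space; the usual motivation for this pattern is that an initializer guarantees only the named members, so compiler-inserted padding may still hold stale stack bytes. A minimal userspace illustration; the structure is invented for the example:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ifmap_like {
	uint64_t mem_start;
	uint8_t  port;		/* followed by 7 bytes of trailing padding */
};

static void fill_with_initializer(struct ifmap_like *m)
{
	/* Named members are set, but padding bytes are not guaranteed to be. */
	*m = (struct ifmap_like){ .mem_start = 1, .port = 2 };
}

static void fill_with_memset(struct ifmap_like *m)
{
	memset(m, 0, sizeof(*m));	/* zeroes the padding explicitly */
	m->mem_start = 1;
	m->port = 2;
}

int main(void)
{
	struct ifmap_like m;
	const unsigned char *p = (const unsigned char *)&m;
	size_t i;

	fill_with_initializer(&m);	/* padding may still hold old stack data */
	fill_with_memset(&m);		/* now every byte of the object is defined */
	for (i = 0; i < sizeof(m); i++)
		printf("%02x ", p[i]);
	printf("\n");
	return 0;
}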
+ */ + skb->encapsulation = 1; + rcu_read_lock(); sk = (*lookup)(skb, uh->source, uh->dest); if (sk && udp_sk(sk)->gro_complete) @@ -360,9 +365,6 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff, if (skb->remcsum_offload) skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM; - skb->encapsulation = 1; - skb_set_inner_mac_header(skb, nhoff + sizeof(struct udphdr)); - return err; } EXPORT_SYMBOL(udp_gro_complete); diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 9554b99a8508..4527285fcaa2 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -446,6 +446,8 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) if (__ipv6_addr_needs_scope_id(addr_type)) iif = skb->dev->ifindex; + else + iif = l3mdev_master_ifindex(skb->dev); /* * Must not send error if the source does not uniquely @@ -500,9 +502,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) else if (!fl6.flowi6_oif) fl6.flowi6_oif = np->ucast_oif; - if (!fl6.flowi6_oif) - fl6.flowi6_oif = l3mdev_master_ifindex(skb->dev); - dst = icmpv6_route_lookup(net, skb, sk, &fl6); if (IS_ERR(dst)) goto out; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 7bdc9c9c231b..c4efaa97280c 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -810,8 +810,13 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 fl6.flowi6_proto = IPPROTO_TCP; if (rt6_need_strict(&fl6.daddr) && !oif) fl6.flowi6_oif = tcp_v6_iif(skb); - else + else { + if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) + oif = skb->skb_iif; + fl6.flowi6_oif = oif; + } + fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark); fl6.fl6_dport = t1->dest; fl6.fl6_sport = t1->source; diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index b3c52e3f689a..8ae3ed97d95c 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -626,6 +626,7 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb) if (llc->cmsg_flags & LLC_CMSG_PKTINFO) { struct llc_pktinfo info; + memset(&info, 0, sizeof(info)); info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex; llc_pdu_decode_dsap(skb, &info.lpi_sap); llc_pdu_decode_da(skb, info.lpi_mac); diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 3dce53ebea92..b5f1221f48d4 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -1808,27 +1808,8 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, else if (sk->sk_shutdown & RCV_SHUTDOWN) err = 0; - if (copied > 0) { - /* We only do these additional bookkeeping/notification steps - * if we actually copied something out of the queue pair - * instead of just peeking ahead. - */ - - if (!(flags & MSG_PEEK)) { - /* If the other side has shutdown for sending and there - * is nothing more to read, then modify the socket - * state. - */ - if (vsk->peer_shutdown & SEND_SHUTDOWN) { - if (vsock_stream_has_data(vsk) <= 0) { - sk->sk_state = SS_UNCONNECTED; - sock_set_flag(sk, SOCK_DONE); - sk->sk_state_change(sk); - } - } - } + if (copied > 0) err = copied; - } out: release_sock(sk); diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index ff4a91fcab9f..637387bbaaea 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -99,6 +99,9 @@ static int xfrm_output_one(struct sk_buff *skb, int err) skb_dst_force(skb); + /* Inner headers are invalid now. 
*/ + skb->encapsulation = 0; + err = x->type->output(x, skb); if (err == -EINPROGRESS) goto out; diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 161dd0d67da8..a9155077feef 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -371,6 +371,49 @@ static void do_usb_table(void *symval, unsigned long size, do_usb_entry_multi(symval + i, mod); } +static void do_of_entry_multi(void *symval, struct module *mod) +{ + char alias[500]; + int len; + char *tmp; + + DEF_FIELD_ADDR(symval, of_device_id, name); + DEF_FIELD_ADDR(symval, of_device_id, type); + DEF_FIELD_ADDR(symval, of_device_id, compatible); + + len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*", + (*type)[0] ? *type : "*"); + + if (compatible[0]) + sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "", + *compatible); + + /* Replace all whitespace with underscores */ + for (tmp = alias; tmp && *tmp; tmp++) + if (isspace(*tmp)) + *tmp = '_'; + + buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias); + strcat(alias, "C"); + add_wildcard(alias); + buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias); +} + +static void do_of_table(void *symval, unsigned long size, + struct module *mod) +{ + unsigned int i; + const unsigned long id_size = SIZE_of_device_id; + + device_id_check(mod->name, "of", size, id_size, symval); + + /* Leave last one: it's the terminator. */ + size -= id_size; + + for (i = 0; i < size; i += id_size) + do_of_entry_multi(symval + i, mod); +} + /* Looks like: hid:bNvNpN */ static int do_hid_entry(const char *filename, void *symval, char *alias) @@ -684,30 +727,6 @@ static int do_pcmcia_entry(const char *filename, } ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry); -static int do_of_entry (const char *filename, void *symval, char *alias) -{ - int len; - char *tmp; - DEF_FIELD_ADDR(symval, of_device_id, name); - DEF_FIELD_ADDR(symval, of_device_id, type); - DEF_FIELD_ADDR(symval, of_device_id, compatible); - - len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*", - (*type)[0] ? *type : "*"); - - if (compatible[0]) - sprintf(&alias[len], "%sC%s", (*type)[0] ? 
"*" : "", - *compatible); - - /* Replace all whitespace with underscores */ - for (tmp = alias; tmp && *tmp; tmp++) - if (isspace (*tmp)) - *tmp = '_'; - - return 1; -} -ADD_TO_DEVTABLE("of", of_device_id, do_of_entry); - static int do_vio_entry(const char *filename, void *symval, char *alias) { @@ -1348,6 +1367,8 @@ void handle_moddevtable(struct module *mod, struct elf_info *info, /* First handle the "special" cases */ if (sym_is(name, namelen, "usb")) do_usb_table(symval, sym->st_size, mod); + if (sym_is(name, namelen, "of")) + do_of_table(symval, sym->st_size, mod); else if (sym_is(name, namelen, "pnp")) do_pnp_device_entry(symval, sym->st_size, mod); else if (sym_is(name, namelen, "pnp_card")) diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index be09e2cacf82..3cd0a58672dd 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -884,10 +884,10 @@ static char *func_tokens[] = { "BPRM_CHECK", "MODULE_CHECK", "FIRMWARE_CHECK", + "POST_SETATTR", "KEXEC_KERNEL_CHECK", "KEXEC_INITRAMFS_CHECK", - "POLICY_CHECK", - "POST_SETATTR" + "POLICY_CHECK" }; void *ima_policy_start(struct seq_file *m, loff_t *pos) diff --git a/tools/net/bpf_jit_disasm.c b/tools/net/bpf_jit_disasm.c index 5b3241340945..544b05a53b70 100644 --- a/tools/net/bpf_jit_disasm.c +++ b/tools/net/bpf_jit_disasm.c @@ -98,6 +98,9 @@ static char *get_klog_buff(unsigned int *klen) char *buff; len = klogctl(CMD_ACTION_SIZE_BUFFER, NULL, 0); + if (len < 0) + return NULL; + buff = malloc(len); if (!buff) return NULL; |