198 files changed, 1770 insertions, 1254 deletions
@@ -73,6 +73,7 @@ Juha Yrjola <juha.yrjola@nokia.com> Juha Yrjola <juha.yrjola@solidboot.com> Kay Sievers <kay.sievers@vrfy.org> Kenneth W Chen <kenneth.w.chen@intel.com> +Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com> Koushik <raghavendra.koushik@neterion.com> Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> Leonid I Ananiev <leonid.i.ananiev@intel.com> diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx index 4223c2d3b508..cfd31d94c872 100644 --- a/Documentation/hwmon/ina2xx +++ b/Documentation/hwmon/ina2xx @@ -26,6 +26,12 @@ Supported chips: Datasheet: Publicly available at the Texas Instruments website http://www.ti.com/ + * Texas Instruments INA231 + Prefix: 'ina231' + Addresses: I2C 0x40 - 0x4f + Datasheet: Publicly available at the Texas Instruments website + http://www.ti.com/ + Author: Lothar Felten <l-felten@ti.com> Description @@ -41,9 +47,18 @@ interface. The INA220 monitors both shunt drop and supply voltage. The INA226 is a current shunt and power monitor with an I2C interface. The INA226 monitors both a shunt voltage drop and bus supply voltage. -The INA230 is a high or low side current shunt and power monitor with an I2C -interface. The INA230 monitors both a shunt voltage drop and bus supply voltage. +INA230 and INA231 are high or low side current shunt and power monitors +with an I2C interface. The chips monitor both a shunt voltage drop and +bus supply voltage. -The shunt value in micro-ohms can be set via platform data or device tree. -Please refer to the Documentation/devicetree/bindings/i2c/ina2xx.txt for bindings +The shunt value in micro-ohms can be set via platform data or device tree at +compile-time or via the shunt_resistor attribute in sysfs at run-time. Please +refer to the Documentation/devicetree/bindings/i2c/ina2xx.txt for bindings if the device tree is used. + +Additionally ina226 supports update_interval attribute as described in +Documentation/hwmon/sysfs-interface. Internally the interval is the sum of +bus and shunt voltage conversion times multiplied by the averaging rate. We +don't touch the conversion times and only modify the number of averages. The +lower limit of the update_interval is 2 ms, the upper limit is 2253 ms. +The actual programmed interval may vary from the desired value. diff --git a/Documentation/networking/netlink_mmap.txt b/Documentation/networking/netlink_mmap.txt index c6af4bac5aa8..54f10478e8e3 100644 --- a/Documentation/networking/netlink_mmap.txt +++ b/Documentation/networking/netlink_mmap.txt @@ -199,16 +199,9 @@ frame header. TX limitations -------------- -Kernel processing usually involves validation of the message received by -user-space, then processing its contents. The kernel must assure that -userspace is not able to modify the message contents after they have been -validated. In order to do so, the message is copied from the ring frame -to an allocated buffer if either of these conditions is false: - -- only a single mapping of the ring exists -- the file descriptor is not shared between processes - -This means that for threaded programs, the kernel will fall back to copying. +As of Jan 2015 the message is always copied from the ring frame to an +allocated buffer due to unresolved security concerns. +See commit 4682a0358639b29cf ("netlink: Always copy on mmap TX."). 
Example ------- diff --git a/MAINTAINERS b/MAINTAINERS index aaa039dee999..d66a97dd3a12 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4953,6 +4953,16 @@ F: Documentation/input/multi-touch-protocol.txt F: drivers/input/input-mt.c K: \b(ABS|SYN)_MT_ +INTEL ASoC BDW/HSW DRIVERS +M: Jie Yang <yang.jie@linux.intel.com> +L: alsa-devel@alsa-project.org +S: Supported +F: sound/soc/intel/sst-haswell* +F: sound/soc/intel/sst-dsp* +F: sound/soc/intel/sst-firmware.c +F: sound/soc/intel/broadwell.c +F: sound/soc/intel/haswell.c + INTEL C600 SERIES SAS CONTROLLER DRIVER M: Intel SCU Linux support <intel-linux-scu@intel.com> M: Artur Paszkiewicz <artur.paszkiewicz@intel.com> @@ -9241,7 +9251,6 @@ F: drivers/net/ethernet/dlink/sundance.c SUPERH L: linux-sh@vger.kernel.org -W: http://www.linux-sh.org Q: http://patchwork.kernel.org/project/linux-sh/list/ S: Orphan F: Documentation/sh/ @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 19 SUBLEVEL = 0 -EXTRAVERSION = -rc7 +EXTRAVERSION = NAME = Diseased Newt # *DOCUMENTATION* diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 68be9017593d..132c70e2d2f1 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -263,16 +263,37 @@ restart: adr r0, LC0 * OK... Let's do some funky business here. * If we do have a DTB appended to zImage, and we do have * an ATAG list around, we want the later to be translated - * and folded into the former here. To be on the safe side, - * let's temporarily move the stack away into the malloc - * area. No GOT fixup has occurred yet, but none of the - * code we're about to call uses any global variable. + * and folded into the former here. No GOT fixup has occurred + * yet, but none of the code we're about to call uses any + * global variable. 
*/ - add sp, sp, #0x10000 + + /* Get the initial DTB size */ + ldr r5, [r6, #4] +#ifndef __ARMEB__ + /* convert to little endian */ + eor r1, r5, r5, ror #16 + bic r1, r1, #0x00ff0000 + mov r5, r5, ror #8 + eor r5, r5, r1, lsr #8 +#endif + /* 50% DTB growth should be good enough */ + add r5, r5, r5, lsr #1 + /* preserve 64-bit alignment */ + add r5, r5, #7 + bic r5, r5, #7 + /* clamp to 32KB min and 1MB max */ + cmp r5, #(1 << 15) + movlo r5, #(1 << 15) + cmp r5, #(1 << 20) + movhi r5, #(1 << 20) + /* temporarily relocate the stack past the DTB work space */ + add sp, sp, r5 + stmfd sp!, {r0-r3, ip, lr} mov r0, r8 mov r1, r6 - sub r2, sp, r6 + mov r2, r5 bl atags_to_fdt /* @@ -285,11 +306,11 @@ restart: adr r0, LC0 bic r0, r0, #1 add r0, r0, #0x100 mov r1, r6 - sub r2, sp, r6 + mov r2, r5 bleq atags_to_fdt ldmfd sp!, {r0-r3, ip, lr} - sub sp, sp, #0x10000 + sub sp, sp, r5 #endif mov r8, r6 @ use the appended device tree @@ -306,7 +327,7 @@ restart: adr r0, LC0 subs r1, r5, r1 addhi r9, r9, r1 - /* Get the dtb's size */ + /* Get the current DTB size */ ldr r5, [r6, #4] #ifndef __ARMEB__ /* convert r5 (dtb size) to little endian */ diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi index b8168f1f8139..24ff27049ce0 100644 --- a/arch/arm/boot/dts/exynos4.dtsi +++ b/arch/arm/boot/dts/exynos4.dtsi @@ -368,7 +368,7 @@ }; i2s1: i2s@13960000 { - compatible = "samsung,s5pv210-i2s"; + compatible = "samsung,s3c6410-i2s"; reg = <0x13960000 0x100>; clocks = <&clock CLK_I2S1>; clock-names = "iis"; @@ -378,7 +378,7 @@ }; i2s2: i2s@13970000 { - compatible = "samsung,s5pv210-i2s"; + compatible = "samsung,s3c6410-i2s"; reg = <0x13970000 0x100>; clocks = <&clock CLK_I2S2>; clock-names = "iis"; diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S index 2260f1855820..8944f4991c3c 100644 --- a/arch/arm/kernel/entry-v7m.S +++ b/arch/arm/kernel/entry-v7m.S @@ -22,10 +22,12 @@ __invalid_entry: v7m_exception_entry +#ifdef CONFIG_PRINTK adr r0, strerr mrs r1, ipsr mov r2, lr bl printk +#endif mov r0, sp bl show_regs 1: b 1b diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 03823e784f63..c43c71455566 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -1012,6 +1012,7 @@ config ARCH_SUPPORTS_BIG_ENDIAN config ARM_KERNMEM_PERMS bool "Restrict kernel memory permissions" + depends on MMU help If this is set, kernel memory other than kernel text (and rodata) will be made non-executable. The tradeoff is that each region is diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index 91892569710f..845769e41332 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -144,21 +144,17 @@ static void flush_context(unsigned int cpu) /* Update the list of reserved ASIDs and the ASID bitmap. */ bitmap_clear(asid_map, 0, NUM_USER_ASIDS); for_each_possible_cpu(i) { - if (i == cpu) { - asid = 0; - } else { - asid = atomic64_xchg(&per_cpu(active_asids, i), 0); - /* - * If this CPU has already been through a - * rollover, but hasn't run another task in - * the meantime, we must preserve its reserved - * ASID, as this is the only trace we have of - * the process it is still running. - */ - if (asid == 0) - asid = per_cpu(reserved_asids, i); - __set_bit(asid & ~ASID_MASK, asid_map); - } + asid = atomic64_xchg(&per_cpu(active_asids, i), 0); + /* + * If this CPU has already been through a + * rollover, but hasn't run another task in + * the meantime, we must preserve its reserved + * ASID, as this is the only trace we have of + * the process it is still running. 
+ */ + if (asid == 0) + asid = per_cpu(reserved_asids, i); + __set_bit(asid & ~ASID_MASK, asid_map); per_cpu(reserved_asids, i) = asid; } diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index a673c7f7e208..903dba064a03 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -2048,6 +2048,9 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { struct dma_iommu_mapping *mapping = dev->archdata.mapping; + if (!mapping) + return; + __arm_iommu_detach_device(dev); arm_iommu_release_mapping(mapping); } diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 3289969ee423..843713c05b79 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2656,27 +2656,21 @@ config TRAD_SIGNALS bool config MIPS32_COMPAT - bool "Kernel support for Linux/MIPS 32-bit binary compatibility" - depends on 64BIT - help - Select this option if you want Linux/MIPS 32-bit binary - compatibility. Since all software available for Linux/MIPS is - currently 32-bit you should say Y here. + bool config COMPAT bool - depends on MIPS32_COMPAT - select ARCH_WANT_OLD_COMPAT_IPC - default y config SYSVIPC_COMPAT bool - depends on COMPAT && SYSVIPC - default y config MIPS32_O32 bool "Kernel support for o32 binaries" - depends on MIPS32_COMPAT + depends on 64BIT + select ARCH_WANT_OLD_COMPAT_IPC + select COMPAT + select MIPS32_COMPAT + select SYSVIPC_COMPAT if SYSVIPC help Select this option if you want to run o32 binaries. These are pure 32-bit binaries as used by the 32-bit Linux/MIPS port. Most of @@ -2686,7 +2680,10 @@ config MIPS32_O32 config MIPS32_N32 bool "Kernel support for n32 binaries" - depends on MIPS32_COMPAT + depends on 64BIT + select COMPAT + select MIPS32_COMPAT + select SYSVIPC_COMPAT if SYSVIPC help Select this option if you want to run n32 binaries. These are 64-bit binaries using 32-bit quantities for addressing and certain diff --git a/arch/mips/boot/elf2ecoff.c b/arch/mips/boot/elf2ecoff.c index 8585078ae50e..2a4c52e27f41 100644 --- a/arch/mips/boot/elf2ecoff.c +++ b/arch/mips/boot/elf2ecoff.c @@ -49,7 +49,8 @@ /* * Some extra ELF definitions */ -#define PT_MIPS_REGINFO 0x70000000 /* Register usage information */ +#define PT_MIPS_REGINFO 0x70000000 /* Register usage information */ +#define PT_MIPS_ABIFLAGS 0x70000003 /* Records ABI related flags */ /* -------------------------------------------------------------------- */ @@ -349,39 +350,46 @@ int main(int argc, char *argv[]) for (i = 0; i < ex.e_phnum; i++) { /* Section types we can ignore... */ - if (ph[i].p_type == PT_NULL || ph[i].p_type == PT_NOTE || - ph[i].p_type == PT_PHDR - || ph[i].p_type == PT_MIPS_REGINFO) + switch (ph[i].p_type) { + case PT_NULL: + case PT_NOTE: + case PT_PHDR: + case PT_MIPS_REGINFO: + case PT_MIPS_ABIFLAGS: continue; - /* Section types we can't handle... */ - else if (ph[i].p_type != PT_LOAD) { - fprintf(stderr, - "Program header %d type %d can't be converted.\n", - ex.e_phnum, ph[i].p_type); - exit(1); - } - /* Writable (data) segment? */ - if (ph[i].p_flags & PF_W) { - struct sect ndata, nbss; - ndata.vaddr = ph[i].p_vaddr; - ndata.len = ph[i].p_filesz; - nbss.vaddr = ph[i].p_vaddr + ph[i].p_filesz; - nbss.len = ph[i].p_memsz - ph[i].p_filesz; + case PT_LOAD: + /* Writable (data) segment? 
*/ + if (ph[i].p_flags & PF_W) { + struct sect ndata, nbss; + + ndata.vaddr = ph[i].p_vaddr; + ndata.len = ph[i].p_filesz; + nbss.vaddr = ph[i].p_vaddr + ph[i].p_filesz; + nbss.len = ph[i].p_memsz - ph[i].p_filesz; - combine(&data, &ndata, 0); - combine(&bss, &nbss, 1); - } else { - struct sect ntxt; + combine(&data, &ndata, 0); + combine(&bss, &nbss, 1); + } else { + struct sect ntxt; - ntxt.vaddr = ph[i].p_vaddr; - ntxt.len = ph[i].p_filesz; + ntxt.vaddr = ph[i].p_vaddr; + ntxt.len = ph[i].p_filesz; - combine(&text, &ntxt, 0); + combine(&text, &ntxt, 0); + } + /* Remember the lowest segment start address. */ + if (ph[i].p_vaddr < cur_vma) + cur_vma = ph[i].p_vaddr; + break; + + default: + /* Section types we can't handle... */ + fprintf(stderr, + "Program header %d type %d can't be converted.\n", + ex.e_phnum, ph[i].p_type); + exit(1); } - /* Remember the lowest segment start address. */ - if (ph[i].p_vaddr < cur_vma) - cur_vma = ph[i].p_vaddr; } /* Sections must be in order to be converted... */ diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index ecd903dd1c45..8b1eeffa12ed 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -240,9 +240,7 @@ static int octeon_cpu_disable(void) set_cpu_online(cpu, false); cpu_clear(cpu, cpu_callin_map); - local_irq_disable(); octeon_fixup_irqs(); - local_irq_enable(); flush_cache_all(); local_flush_tlb_all(); diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig index f57b96dcf7df..61a4460d67d3 100644 --- a/arch/mips/configs/malta_defconfig +++ b/arch/mips/configs/malta_defconfig @@ -132,7 +132,6 @@ CONFIG_IP_NF_MATCH_ECN=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_ULOG=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -175,7 +174,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m CONFIG_BRIDGE_EBT_REDIRECT=m CONFIG_BRIDGE_EBT_SNAT=m CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_ULOG=m CONFIG_BRIDGE_EBT_NFLOG=m CONFIG_IP_SCTP=m CONFIG_BRIDGE=m @@ -220,8 +218,6 @@ CONFIG_NET_ACT_SKBEDIT=m CONFIG_NET_CLS_IND=y CONFIG_CFG80211=m CONFIG_MAC80211=m -CONFIG_MAC80211_RC_PID=y -CONFIG_MAC80211_RC_DEFAULT_PID=y CONFIG_MAC80211_MESH=y CONFIG_RFKILL=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" @@ -248,19 +244,13 @@ CONFIG_ATA_OVER_ETH=m CONFIG_IDE=y CONFIG_BLK_DEV_IDECD=y CONFIG_IDE_GENERIC=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_PIIX=y -CONFIG_BLK_DEV_IT8213=m -CONFIG_BLK_DEV_TC86C001=m CONFIG_RAID_ATTRS=m -CONFIG_SCSI=m -CONFIG_BLK_DEV_SD=m +CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m CONFIG_BLK_DEV_SR=m CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m -CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SCAN_ASYNC=y @@ -273,6 +263,8 @@ CONFIG_SCSI_AACRAID=m CONFIG_SCSI_AIC7XXX=m CONFIG_AIC7XXX_RESET_DELAY_MS=15000 # CONFIG_AIC7XXX_DEBUG_ENABLE is not set +CONFIG_ATA=y +CONFIG_ATA_PIIX=y CONFIG_MD=y CONFIG_BLK_DEV_MD=m CONFIG_MD_LINEAR=m @@ -340,6 +332,7 @@ CONFIG_UIO=m CONFIG_UIO_CIF=m CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_REISERFS_PROC_INFO=y CONFIG_REISERFS_FS_XATTR=y @@ -441,4 +434,3 @@ CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRC16=m diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index 994d21939676..affebb78f5d6 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -64,7 +64,7 @@ static inline int 
__enable_fpu(enum fpu_mode mode) return SIGFPE; /* set FRE */ - write_c0_config5(read_c0_config5() | MIPS_CONF5_FRE); + set_c0_config5(MIPS_CONF5_FRE); goto fr_common; case FPU_64BIT: @@ -74,8 +74,10 @@ static inline int __enable_fpu(enum fpu_mode mode) #endif /* fall through */ case FPU_32BIT: - /* clear FRE */ - write_c0_config5(read_c0_config5() & ~MIPS_CONF5_FRE); + if (cpu_has_fre) { + /* clear FRE */ + clear_c0_config5(MIPS_CONF5_FRE); + } fr_common: /* set CU1 & change FR appropriately */ fr = (int)mode & FPU_FR_MASK; @@ -182,25 +184,32 @@ static inline int init_fpu(void) int ret = 0; if (cpu_has_fpu) { + unsigned int config5; + ret = __own_fpu(); - if (!ret) { - unsigned int config5 = read_c0_config5(); - - /* - * Ensure FRE is clear whilst running _init_fpu, since - * single precision FP instructions are used. If FRE - * was set then we'll just end up initialising all 32 - * 64b registers. - */ - write_c0_config5(config5 & ~MIPS_CONF5_FRE); - enable_fpu_hazard(); + if (ret) + return ret; + if (!cpu_has_fre) { _init_fpu(); - /* Restore FRE */ - write_c0_config5(config5); - enable_fpu_hazard(); + return 0; } + + /* + * Ensure FRE is clear whilst running _init_fpu, since + * single precision FP instructions are used. If FRE + * was set then we'll just end up initialising all 32 + * 64b registers. + */ + config5 = clear_c0_config5(MIPS_CONF5_FRE); + enable_fpu_hazard(); + + _init_fpu(); + + /* Restore FRE */ + write_c0_config5(config5); + enable_fpu_hazard(); } else fpu_emulator_init_fpu(); diff --git a/arch/mips/include/asm/fw/arc/hinv.h b/arch/mips/include/asm/fw/arc/hinv.h index f8d37d1df5de..9fac64a26353 100644 --- a/arch/mips/include/asm/fw/arc/hinv.h +++ b/arch/mips/include/asm/fw/arc/hinv.h @@ -119,7 +119,7 @@ union key_u { #define SGI_ARCS_REV 10 /* rev .10, 3/04/92 */ #endif -typedef struct component { +typedef struct { CONFIGCLASS Class; CONFIGTYPE Type; IDENTIFIERFLAG Flags; @@ -140,7 +140,7 @@ struct cfgdata { }; /* System ID */ -typedef struct systemid { +typedef struct { CHAR VendorId[8]; CHAR ProductId[8]; } SYSTEMID; @@ -166,7 +166,7 @@ typedef enum memorytype { #endif /* _NT_PROM */ } MEMORYTYPE; -typedef struct memorydescriptor { +typedef struct { MEMORYTYPE Type; LONG BasePage; LONG PageCount; diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h index b95a827d763e..59c0901bdd84 100644 --- a/arch/mips/include/asm/mips-cm.h +++ b/arch/mips/include/asm/mips-cm.h @@ -89,9 +89,9 @@ static inline bool mips_cm_has_l2sync(void) /* Macros to ease the creation of register access functions */ #define BUILD_CM_R_(name, off) \ -static inline u32 *addr_gcr_##name(void) \ +static inline u32 __iomem *addr_gcr_##name(void) \ { \ - return (u32 *)(mips_cm_base + (off)); \ + return (u32 __iomem *)(mips_cm_base + (off)); \ } \ \ static inline u32 read_gcr_##name(void) \ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 5e4aef304b02..5b720d8c2745 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -1386,12 +1386,27 @@ do { \ __res; \ }) +#define _write_32bit_cp1_register(dest, val, gas_hardfloat) \ +do { \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set reorder \n" \ + " "STR(gas_hardfloat)" \n" \ + " ctc1 %0,"STR(dest)" \n" \ + " .set pop \n" \ + : : "r" (val)); \ +} while (0) + #ifdef GAS_HAS_SET_HARDFLOAT #define read_32bit_cp1_register(source) \ _read_32bit_cp1_register(source, .set hardfloat) +#define write_32bit_cp1_register(dest, val) \ + _write_32bit_cp1_register(dest, val, 
.set hardfloat) #else #define read_32bit_cp1_register(source) \ _read_32bit_cp1_register(source, ) +#define write_32bit_cp1_register(dest, val) \ + _write_32bit_cp1_register(dest, val, ) #endif #ifdef HAVE_AS_DSP diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h index bb7963753730..6499d93ae68d 100644 --- a/arch/mips/include/asm/syscall.h +++ b/arch/mips/include/asm/syscall.h @@ -29,13 +29,7 @@ static inline long syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { - /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */ - if ((config_enabled(CONFIG_32BIT) || - test_tsk_thread_flag(task, TIF_32BIT_REGS)) && - (regs->regs[2] == __NR_syscall)) - return regs->regs[4]; - else - return regs->regs[2]; + return current_thread_info()->syscall; } static inline unsigned long mips_get_syscall_arg(unsigned long *arg, diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 99eea59604e9..e4440f92b366 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -36,6 +36,7 @@ struct thread_info { */ struct restart_block restart_block; struct pt_regs *regs; + long syscall; /* syscall number */ }; /* diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index d001bb1ad177..c03088f9f514 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -376,16 +376,17 @@ #define __NR_getrandom (__NR_Linux + 353) #define __NR_memfd_create (__NR_Linux + 354) #define __NR_bpf (__NR_Linux + 355) +#define __NR_execveat (__NR_Linux + 356) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 355 +#define __NR_Linux_syscalls 356 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 355 +#define __NR_O32_Linux_syscalls 356 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -709,16 +710,17 @@ #define __NR_getrandom (__NR_Linux + 313) #define __NR_memfd_create (__NR_Linux + 314) #define __NR_bpf (__NR_Linux + 315) +#define __NR_execveat (__NR_Linux + 316) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 315 +#define __NR_Linux_syscalls 316 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 315 +#define __NR_64_Linux_syscalls 316 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1046,15 +1048,16 @@ #define __NR_getrandom (__NR_Linux + 317) #define __NR_memfd_create (__NR_Linux + 318) #define __NR_bpf (__NR_Linux + 319) +#define __NR_execveat (__NR_Linux + 320) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 319 +#define __NR_Linux_syscalls 320 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 319 +#define __NR_N32_Linux_syscalls 320 #endif /* _UAPI_ASM_UNISTD_H */ diff --git a/arch/mips/jz4740/irq.c b/arch/mips/jz4740/irq.c index 2531da1d3add..97206b3deb97 100644 --- a/arch/mips/jz4740/irq.c +++ b/arch/mips/jz4740/irq.c @@ -30,6 +30,9 @@ #include <asm/irq_cpu.h> #include <asm/mach-jz4740/base.h> +#include <asm/mach-jz4740/irq.h> + +#include "irq.h" static void __iomem *jz_intc_base; diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c index c92b15df6893..a5b5b56485c1 100644 --- a/arch/mips/kernel/elf.c +++ b/arch/mips/kernel/elf.c @@ -19,8 +19,8 @@ enum { int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, bool is_interp, struct arch_elf_state *state) { - struct elfhdr *ehdr = _ehdr; - struct elf_phdr 
*phdr = _phdr; + struct elf32_hdr *ehdr = _ehdr; + struct elf32_phdr *phdr = _phdr; struct mips_elf_abiflags_v0 abiflags; int ret; @@ -48,7 +48,7 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, return 0; } -static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi) +static inline unsigned get_fp_abi(struct elf32_hdr *ehdr, int in_abi) { /* If the ABI requirement is provided, simply return that */ if (in_abi != -1) @@ -65,7 +65,7 @@ static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi) int arch_check_elf(void *_ehdr, bool has_interpreter, struct arch_elf_state *state) { - struct elfhdr *ehdr = _ehdr; + struct elf32_hdr *ehdr = _ehdr; unsigned fp_abi, interp_fp_abi, abi0, abi1; /* Ignore non-O32 binaries */ diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c index 590c2c980fd3..6eb7a3f515fc 100644 --- a/arch/mips/kernel/irq_cpu.c +++ b/arch/mips/kernel/irq_cpu.c @@ -57,6 +57,8 @@ static struct irq_chip mips_cpu_irq_controller = { .irq_mask_ack = mask_mips_irq, .irq_unmask = unmask_mips_irq, .irq_eoi = unmask_mips_irq, + .irq_disable = mask_mips_irq, + .irq_enable = unmask_mips_irq, }; /* @@ -93,6 +95,8 @@ static struct irq_chip mips_mt_cpu_irq_controller = { .irq_mask_ack = mips_mt_cpu_irq_ack, .irq_unmask = unmask_mips_irq, .irq_eoi = unmask_mips_irq, + .irq_disable = mask_mips_irq, + .irq_enable = unmask_mips_irq, }; asmlinkage void __weak plat_irq_dispatch(void) diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index eb76434828e8..85bff5d513e5 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -82,6 +82,30 @@ void flush_thread(void) { } +int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) +{ + /* + * Save any process state which is live in hardware registers to the + * parent context prior to duplication. This prevents the new child + * state becoming stale if the parent is preempted before copy_thread() + * gets a chance to save the parent's live hardware registers to the + * child context. + */ + preempt_disable(); + + if (is_msa_enabled()) + save_msa(current); + else if (is_fpu_owner()) + _save_fp(current); + + save_dsp(current); + + preempt_enable(); + + *dst = *src; + return 0; +} + int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, struct task_struct *p) { @@ -92,18 +116,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32; - preempt_disable(); - - if (is_msa_enabled()) - save_msa(p); - else if (is_fpu_owner()) - save_fp(p); - - if (cpu_has_dsp) - save_dsp(p); - - preempt_enable(); - /* set up new TSS. */ childregs = (struct pt_regs *) childksp - 1; /* Put the stack after the struct pt_regs. 
*/ diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 9d1487d83293..510452812594 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -770,6 +770,8 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) long ret = 0; user_exit(); + current_thread_info()->syscall = syscall; + if (secure_computing() == -1) return -1; diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 00cad1005a16..6e8de80bb446 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -181,6 +181,7 @@ illegal_syscall: sll t1, t0, 2 beqz v0, einval lw t2, sys_call_table(t1) # syscall routine + sw a0, PT_R2(sp) # call routine directly on restart /* Some syscalls like execve get their arguments from struct pt_regs and claim zero arguments in the syscall table. Thus we have to @@ -580,3 +581,4 @@ EXPORT(sys_call_table) PTR sys_getrandom PTR sys_memfd_create PTR sys_bpf /* 4355 */ + PTR sys_execveat diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 5251565e344b..ad4d44635c76 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -435,4 +435,5 @@ EXPORT(sys_call_table) PTR sys_getrandom PTR sys_memfd_create PTR sys_bpf /* 5315 */ + PTR sys_execveat .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 77e74398b828..446cc654da56 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -428,4 +428,5 @@ EXPORT(sysn32_call_table) PTR sys_getrandom PTR sys_memfd_create PTR sys_bpf + PTR compat_sys_execveat /* 6320 */ .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 6f8db9f728e8..d07b210fbeff 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -186,6 +186,7 @@ LEAF(sys32_syscall) dsll t1, t0, 3 beqz v0, einval ld t2, sys32_call_table(t1) # syscall routine + sd a0, PT_R2(sp) # call routine directly on restart move a0, a1 # shift argument registers move a1, a2 @@ -565,4 +566,5 @@ EXPORT(sys32_call_table) PTR sys_getrandom PTR sys_memfd_create PTR sys_bpf /* 4355 */ + PTR compat_sys_execveat .size sys32_call_table,.-sys32_call_table diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c index 1e0a93c5a3e7..e36a859af666 100644 --- a/arch/mips/kernel/smp-cmp.c +++ b/arch/mips/kernel/smp-cmp.c @@ -44,8 +44,8 @@ static void cmp_init_secondary(void) struct cpuinfo_mips *c __maybe_unused = ¤t_cpu_data; /* Assume GIC is present */ - change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 | - STATUSF_IP7); + change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 | + STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7); /* Enable per-cpu interrupts: platform specific */ diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index ad86951b73bd..17ea705f6c40 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -161,7 +161,8 @@ static void vsmp_init_secondary(void) #ifdef CONFIG_MIPS_GIC /* This is Malta specific: IPI,performance and timer interrupts */ if (gic_present) - change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | + change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | + STATUSF_IP4 | STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7); else #endif diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index c94c4e92e17d..1c0d8c50b7e1 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -123,10 +123,10 @@ 
asmlinkage void start_secondary(void) unsigned int cpu; cpu_probe(); - cpu_report(); per_cpu_trap_init(false); mips_clockevent_init(); mp_ops->init_secondary(); + cpu_report(); /* * XXX parity protection should be folded in here when it's converted diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index ad3d2031c327..c3b41e24c05a 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1231,7 +1231,8 @@ static int enable_restore_fp_context(int msa) /* Restore the scalar FP control & status register */ if (!was_fpu_owner) - asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31)); + write_32bit_cp1_register(CP1_STATUS, + current->thread.fpu.fcr31); } out: diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index e90b2e899291..30639a6e9b8c 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -489,6 +489,8 @@ static void r4k_tlb_configure(void) #ifdef CONFIG_64BIT pg |= PG_ELPA; #endif + if (cpu_has_rixiex) + pg |= PG_IEC; write_c0_pagegrain(pg); } diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h index faed90240ded..6d6df839948f 100644 --- a/arch/mn10300/include/asm/cacheflush.h +++ b/arch/mn10300/include/asm/cacheflush.h @@ -159,13 +159,6 @@ extern void flush_icache_range(unsigned long start, unsigned long end); #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ memcpy(dst, src, len) -/* - * Internal debugging function - */ -#ifdef CONFIG_DEBUG_PAGEALLOC -extern void kernel_map_pages(struct page *page, int numpages, int enable); -#endif - #endif /* __ASSEMBLY__ */ #endif /* _ASM_CACHEFLUSH_H */ diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c index 34429d5a0ccd..d194c0427b26 100644 --- a/arch/nios2/mm/fault.c +++ b/arch/nios2/mm/fault.c @@ -159,9 +159,11 @@ bad_area: bad_area_nosemaphore: /* User mode accesses just cause a SIGSEGV */ if (user_mode(regs)) { - pr_alert("%s: unhandled page fault (%d) at 0x%08lx, " - "cause %ld\n", current->comm, SIGSEGV, address, cause); - show_regs(regs); + if (unhandled_signal(current, SIGSEGV) && printk_ratelimit()) { + pr_info("%s: unhandled page fault (%d) at 0x%08lx, " + "cause %ld\n", current->comm, SIGSEGV, address, cause); + show_regs(regs); + } _exception(SIGSEGV, regs, code, address); return; } diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h index 5b9312220e84..30b35fff2dea 100644 --- a/arch/powerpc/include/asm/cacheflush.h +++ b/arch/powerpc/include/asm/cacheflush.h @@ -60,13 +60,6 @@ extern void flush_dcache_phys_range(unsigned long start, unsigned long stop); #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ memcpy(dst, src, len) - - -#ifdef CONFIG_DEBUG_PAGEALLOC -/* internal debugging function */ -void kernel_map_pages(struct page *page, int numpages, int enable); -#endif - #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_CACHEFLUSH_H */ diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h index 3e20383d0921..58fae7d098cf 100644 --- a/arch/s390/include/asm/cacheflush.h +++ b/arch/s390/include/asm/cacheflush.h @@ -4,10 +4,6 @@ /* Caches aren't brain-dead on the s390. 
*/ #include <asm-generic/cacheflush.h> -#ifdef CONFIG_DEBUG_PAGEALLOC -void kernel_map_pages(struct page *page, int numpages, int enable); -#endif - int set_memory_ro(unsigned long addr, int numpages); int set_memory_rw(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); diff --git a/arch/sparc/include/asm/cacheflush_64.h b/arch/sparc/include/asm/cacheflush_64.h index 38965379e350..68513c41e10d 100644 --- a/arch/sparc/include/asm/cacheflush_64.h +++ b/arch/sparc/include/asm/cacheflush_64.h @@ -74,11 +74,6 @@ void flush_ptrace_access(struct vm_area_struct *, struct page *, #define flush_cache_vmap(start, end) do { } while (0) #define flush_cache_vunmap(start, end) do { } while (0) -#ifdef CONFIG_DEBUG_PAGEALLOC -/* internal debugging function */ -void kernel_map_pages(struct page *page, int numpages, int enable); -#endif - #endif /* !__ASSEMBLY__ */ #endif /* _SPARC64_CACHEFLUSH_H */ diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 15c29096136b..36a83617eb21 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -552,7 +552,7 @@ static int __init microcode_init(void) int error; if (paravirt_enabled() || dis_ucode_ldr) - return 0; + return -EINVAL; if (c->x86_vendor == X86_VENDOR_INTEL) microcode_ops = init_intel_microcode(); diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 7b20bccf3648..2fb384724ebb 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -448,6 +448,22 @@ static const struct dmi_system_id pciprobe_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"), }, }, + { + .callback = set_scan_all, + .ident = "Stratus/NEC ftServer", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "NEC"), + DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R32"), + }, + }, + { + .callback = set_scan_all, + .ident = "Stratus/NEC ftServer", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "NEC"), + DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R31"), + }, + }, {} }; diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 6774a0e69867..1630a20d5dcf 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -15,26 +15,6 @@ static void blk_mq_sysfs_release(struct kobject *kobj) { - struct request_queue *q; - - q = container_of(kobj, struct request_queue, mq_kobj); - free_percpu(q->queue_ctx); -} - -static void blk_mq_ctx_release(struct kobject *kobj) -{ - struct blk_mq_ctx *ctx; - - ctx = container_of(kobj, struct blk_mq_ctx, kobj); - kobject_put(&ctx->queue->mq_kobj); -} - -static void blk_mq_hctx_release(struct kobject *kobj) -{ - struct blk_mq_hw_ctx *hctx; - - hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); - kfree(hctx); } struct blk_mq_ctx_sysfs_entry { @@ -338,13 +318,13 @@ static struct kobj_type blk_mq_ktype = { static struct kobj_type blk_mq_ctx_ktype = { .sysfs_ops = &blk_mq_sysfs_ops, .default_attrs = default_ctx_attrs, - .release = blk_mq_ctx_release, + .release = blk_mq_sysfs_release, }; static struct kobj_type blk_mq_hw_ktype = { .sysfs_ops = &blk_mq_hw_sysfs_ops, .default_attrs = default_hw_ctx_attrs, - .release = blk_mq_hctx_release, + .release = blk_mq_sysfs_release, }; static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) @@ -375,7 +355,6 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) return ret; hctx_for_each_ctx(hctx, ctx, i) { - kobject_get(&q->mq_kobj); ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); if (ret) break; diff --git a/block/blk-mq.c b/block/blk-mq.c index 
9ee3b87c4498..2390c5541e71 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1867,6 +1867,27 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, mutex_unlock(&set->tag_list_lock); } +/* + * It is the actual release handler for mq, but we do it from + * request queue's release handler for avoiding use-after-free + * and headache because q->mq_kobj shouldn't have been introduced, + * but we can't group ctx/kctx kobj without it. + */ +void blk_mq_release(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + unsigned int i; + + /* hctx kobj stays in hctx */ + queue_for_each_hw_ctx(q, hctx, i) + kfree(hctx); + + kfree(q->queue_hw_ctx); + + /* ctx kobj stays in queue_ctx */ + free_percpu(q->queue_ctx); +} + struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) { struct blk_mq_hw_ctx **hctxs; @@ -2000,10 +2021,8 @@ void blk_mq_free_queue(struct request_queue *q) percpu_ref_exit(&q->mq_usage_counter); - kfree(q->queue_hw_ctx); kfree(q->mq_map); - q->queue_hw_ctx = NULL; q->mq_map = NULL; mutex_lock(&all_q_mutex); diff --git a/block/blk-mq.h b/block/blk-mq.h index 4f4f943c22c3..6a48c4c0d8a2 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -62,6 +62,8 @@ extern void blk_mq_sysfs_unregister(struct request_queue *q); extern void blk_mq_rq_timed_out(struct request *req, bool reserved); +void blk_mq_release(struct request_queue *q); + /* * Basic implementation of sparser bitmap, allowing the user to spread * the bits over more cachelines. diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 935ea2aa0730..faaf36ade7eb 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -517,6 +517,8 @@ static void blk_release_queue(struct kobject *kobj) if (!q->mq_ops) blk_free_flush_queue(q->fq); + else + blk_mq_release(q); blk_trace_shutdown(q); diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 4f3febf8a589..e75737fd7eef 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -1,7 +1,7 @@ /* * ACPI support for Intel Lynxpoint LPSS. * - * Copyright (C) 2013, 2014, Intel Corporation + * Copyright (C) 2013, Intel Corporation * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> * Rafael J. 
Wysocki <rafael.j.wysocki@intel.com> * @@ -60,8 +60,6 @@ ACPI_MODULE_NAME("acpi_lpss"); #define LPSS_CLK_DIVIDER BIT(2) #define LPSS_LTR BIT(3) #define LPSS_SAVE_CTX BIT(4) -#define LPSS_DEV_PROXY BIT(5) -#define LPSS_PROXY_REQ BIT(6) struct lpss_private_data; @@ -72,10 +70,8 @@ struct lpss_device_desc { void (*setup)(struct lpss_private_data *pdata); }; -static struct device *proxy_device; - static struct lpss_device_desc lpss_dma_desc = { - .flags = LPSS_CLK | LPSS_PROXY_REQ, + .flags = LPSS_CLK, }; struct lpss_private_data { @@ -150,24 +146,22 @@ static struct lpss_device_desc byt_pwm_dev_desc = { }; static struct lpss_device_desc byt_uart_dev_desc = { - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX | - LPSS_DEV_PROXY, + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, .prv_offset = 0x800, .setup = lpss_uart_setup, }; static struct lpss_device_desc byt_spi_dev_desc = { - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX | - LPSS_DEV_PROXY, + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, .prv_offset = 0x400, }; static struct lpss_device_desc byt_sdio_dev_desc = { - .flags = LPSS_CLK | LPSS_DEV_PROXY, + .flags = LPSS_CLK, }; static struct lpss_device_desc byt_i2c_dev_desc = { - .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_DEV_PROXY, + .flags = LPSS_CLK | LPSS_SAVE_CTX, .prv_offset = 0x800, .setup = byt_i2c_setup, }; @@ -374,8 +368,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev, adev->driver_data = pdata; pdev = acpi_create_platform_device(adev); if (!IS_ERR_OR_NULL(pdev)) { - if (!proxy_device && dev_desc->flags & LPSS_DEV_PROXY) - proxy_device = &pdev->dev; return 1; } @@ -600,14 +592,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev) if (pdata->dev_desc->flags & LPSS_SAVE_CTX) acpi_lpss_save_ctx(dev, pdata); - ret = acpi_dev_runtime_suspend(dev); - if (ret) - return ret; - - if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device) - return pm_runtime_put_sync_suspend(proxy_device); - - return 0; + return acpi_dev_runtime_suspend(dev); } static int acpi_lpss_runtime_resume(struct device *dev) @@ -615,12 +600,6 @@ static int acpi_lpss_runtime_resume(struct device *dev) struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); int ret; - if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device) { - ret = pm_runtime_get_sync(proxy_device); - if (ret) - return ret; - } - ret = acpi_dev_runtime_resume(dev); if (ret) return ret; diff --git a/drivers/char/random.c b/drivers/char/random.c index 04645c09fe5e..9cd6968e2f92 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -569,19 +569,19 @@ static void fast_mix(struct fast_pool *f) __u32 c = f->pool[2], d = f->pool[3]; a += b; c += d; - b = rol32(a, 6); d = rol32(c, 27); + b = rol32(b, 6); d = rol32(d, 27); d ^= a; b ^= c; a += b; c += d; - b = rol32(a, 16); d = rol32(c, 14); + b = rol32(b, 16); d = rol32(d, 14); d ^= a; b ^= c; a += b; c += d; - b = rol32(a, 6); d = rol32(c, 27); + b = rol32(b, 6); d = rol32(d, 27); d ^= a; b ^= c; a += b; c += d; - b = rol32(a, 16); d = rol32(c, 14); + b = rol32(b, 16); d = rol32(d, 14); d ^= a; b ^= c; f->pool[0] = a; f->pool[1] = b; diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c index da9c316059bc..eea5d7e578c9 100644 --- a/drivers/gpio/gpio-mcp23s08.c +++ b/drivers/gpio/gpio-mcp23s08.c @@ -801,9 +801,11 @@ static int mcp230xx_probe(struct i2c_client *client, client->irq = irq_of_parse_and_map(client->dev.of_node, 0); } else { pdata = 
dev_get_platdata(&client->dev); - if (!pdata || !gpio_is_valid(pdata->base)) { - dev_dbg(&client->dev, "invalid platform data\n"); - return -EINVAL; + if (!pdata) { + pdata = devm_kzalloc(&client->dev, + sizeof(struct mcp23s08_platform_data), + GFP_KERNEL); + pdata->base = -1; } } @@ -924,10 +926,11 @@ static int mcp23s08_probe(struct spi_device *spi) } else { type = spi_get_device_id(spi)->driver_data; pdata = dev_get_platdata(&spi->dev); - if (!pdata || !gpio_is_valid(pdata->base)) { - dev_dbg(&spi->dev, - "invalid or missing platform data\n"); - return -EINVAL; + if (!pdata) { + pdata = devm_kzalloc(&spi->dev, + sizeof(struct mcp23s08_platform_data), + GFP_KERNEL); + pdata->base = -1; } for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) { diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 30646cfe0efa..f476ae2eb0b3 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -88,6 +88,8 @@ struct gpio_bank { #define BANK_USED(bank) (bank->mod_usage || bank->irq_usage) #define LINE_USED(line, offset) (line & (BIT(offset))) +static void omap_gpio_unmask_irq(struct irq_data *d); + static int omap_irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq) { return bank->chip.base + gpio_irq; @@ -477,6 +479,16 @@ static int omap_gpio_is_input(struct gpio_bank *bank, int mask) return readl_relaxed(reg) & mask; } +static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned gpio, + unsigned offset) +{ + if (!LINE_USED(bank->mod_usage, offset)) { + omap_enable_gpio_module(bank, offset); + omap_set_gpio_direction(bank, offset, 1); + } + bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio)); +} + static int omap_gpio_irq_type(struct irq_data *d, unsigned type) { struct gpio_bank *bank = omap_irq_data_get_bank(d); @@ -506,15 +518,11 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type) spin_lock_irqsave(&bank->lock, flags); offset = GPIO_INDEX(bank, gpio); retval = omap_set_gpio_triggering(bank, offset, type); - if (!LINE_USED(bank->mod_usage, offset)) { - omap_enable_gpio_module(bank, offset); - omap_set_gpio_direction(bank, offset, 1); - } else if (!omap_gpio_is_input(bank, BIT(offset))) { + omap_gpio_init_irq(bank, gpio, offset); + if (!omap_gpio_is_input(bank, BIT(offset))) { spin_unlock_irqrestore(&bank->lock, flags); return -EINVAL; } - - bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio)); spin_unlock_irqrestore(&bank->lock, flags); if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) @@ -792,6 +800,24 @@ exit: pm_runtime_put(bank->dev); } +static unsigned int omap_gpio_irq_startup(struct irq_data *d) +{ + struct gpio_bank *bank = omap_irq_data_get_bank(d); + unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq); + unsigned long flags; + unsigned offset = GPIO_INDEX(bank, gpio); + + if (!BANK_USED(bank)) + pm_runtime_get_sync(bank->dev); + + spin_lock_irqsave(&bank->lock, flags); + omap_gpio_init_irq(bank, gpio, offset); + spin_unlock_irqrestore(&bank->lock, flags); + omap_gpio_unmask_irq(d); + + return 0; +} + static void omap_gpio_irq_shutdown(struct irq_data *d) { struct gpio_bank *bank = omap_irq_data_get_bank(d); @@ -1181,6 +1207,7 @@ static int omap_gpio_probe(struct platform_device *pdev) if (!irqc) return -ENOMEM; + irqc->irq_startup = omap_gpio_irq_startup, irqc->irq_shutdown = omap_gpio_irq_shutdown, irqc->irq_ack = omap_gpio_ack_irq, irqc->irq_mask = omap_gpio_mask_irq, diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index f62aa115d79a..7722ed53bd65 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ 
b/drivers/gpio/gpiolib-sysfs.c @@ -648,6 +648,7 @@ int gpiod_export_link(struct device *dev, const char *name, if (tdev != NULL) { status = sysfs_create_link(&dev->kobj, &tdev->kobj, name); + put_device(tdev); } else { status = -ENODEV; } @@ -695,7 +696,7 @@ int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value) } status = sysfs_set_active_low(desc, dev, value); - + put_device(dev); unlock: mutex_unlock(&sysfs_lock); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 0d8694f015c1..0fd592799d58 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -822,7 +822,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, * Unconditionally decrement this counter, regardless of the queue's * type. */ - dqm->total_queue_count++; + dqm->total_queue_count--; pr_debug("Total of %d queues are accountable so far\n", dqm->total_queue_count); mutex_unlock(&dqm->lock); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index a8be6df85347..1c385c23dd0b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -95,10 +95,10 @@ static int __init kfd_module_init(void) } /* Verify module parameters */ - if ((max_num_of_queues_per_device < 0) || + if ((max_num_of_queues_per_device < 1) || (max_num_of_queues_per_device > KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) { - pr_err("kfd: max_num_of_queues_per_device must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n"); + pr_err("kfd: max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n"); return -1; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index f37cf5efe642..2fda1927bff7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -315,7 +315,11 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid, BUG_ON(!pqm); pqn = get_queue_by_qid(pqm, qid); - BUG_ON(!pqn); + if (!pqn) { + pr_debug("amdkfd: No queue %d exists for update operation\n", + qid); + return -EFAULT; + } pqn->q->properties.queue_address = p->queue_address; pqn->q->properties.queue_size = p->queue_size; diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index c2a1cba1e984..b9140032962d 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -16,9 +16,12 @@ #include "cirrus_drv.h" int cirrus_modeset = -1; +int cirrus_bpp = 24; MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); module_param_named(modeset, cirrus_modeset, int, 0400); +MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:24)"); +module_param_named(bpp, cirrus_bpp, int, 0400); /* * This is the generic driver code. 
This binds the driver to the drm core, diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h index 693a4565c4ff..705061537a27 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.h +++ b/drivers/gpu/drm/cirrus/cirrus_drv.h @@ -262,4 +262,7 @@ static inline void cirrus_bo_unreserve(struct cirrus_bo *bo) int cirrus_bo_push_sysram(struct cirrus_bo *bo); int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr); + +extern int cirrus_bpp; + #endif /* __CIRRUS_DRV_H__ */ diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c index 4c2d68e9102d..e4b976658087 100644 --- a/drivers/gpu/drm/cirrus/cirrus_main.c +++ b/drivers/gpu/drm/cirrus/cirrus_main.c @@ -320,6 +320,8 @@ bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height, const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */ const int max_size = cdev->mc.vram_size; + if (bpp > cirrus_bpp) + return false; if (bpp > 32) return false; diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index 99d4a74ffeaf..61385f2298bf 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c @@ -501,8 +501,13 @@ static int cirrus_vga_get_modes(struct drm_connector *connector) int count; /* Just add a static list of modes */ - count = drm_add_modes_noedid(connector, 1280, 1024); - drm_set_preferred_mode(connector, 1024, 768); + if (cirrus_bpp <= 24) { + count = drm_add_modes_noedid(connector, 1280, 1024); + drm_set_preferred_mode(connector, 1024, 768); + } else { + count = drm_add_modes_noedid(connector, 800, 600); + drm_set_preferred_mode(connector, 800, 600); + } return count; } diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 9e7f23dd14bd..87d5fb21cb61 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -34,7 +34,8 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, uint64_t saddr, uint64_t daddr, - int flag, int n) + int flag, int n, + struct reservation_object *resv) { unsigned long start_jiffies; unsigned long end_jiffies; @@ -47,12 +48,12 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, case RADEON_BENCHMARK_COPY_DMA: fence = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, - NULL); + resv); break; case RADEON_BENCHMARK_COPY_BLIT: fence = radeon_copy_blit(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, - NULL); + resv); break; default: DRM_ERROR("Unknown copy method\n"); @@ -120,7 +121,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, if (rdev->asic->copy.dma) { time = radeon_benchmark_do_move(rdev, size, saddr, daddr, - RADEON_BENCHMARK_COPY_DMA, n); + RADEON_BENCHMARK_COPY_DMA, n, + dobj->tbo.resv); if (time < 0) goto out_cleanup; if (time > 0) @@ -130,7 +132,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, if (rdev->asic->copy.blit) { time = radeon_benchmark_do_move(rdev, size, saddr, daddr, - RADEON_BENCHMARK_COPY_BLIT, n); + RADEON_BENCHMARK_COPY_BLIT, n, + dobj->tbo.resv); if (time < 0) goto out_cleanup; if (time > 0) diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 102116902a07..913fafa597ad 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -960,6 +960,9 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll, if 
(pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && pll->flags & RADEON_PLL_USE_REF_DIV) ref_div_max = pll->reference_div; + else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) + /* fix for problems on RS880 */ + ref_div_max = min(pll->max_ref_div, 7u); else ref_div_max = pll->max_ref_div; diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index d0b4f7d1140d..ac3c1310b953 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -146,7 +146,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri struct radeon_bo_va *bo_va; int r; - if (rdev->family < CHIP_CAYMAN) { + if ((rdev->family < CHIP_CAYMAN) || + (!rdev->accel_working)) { return 0; } @@ -176,7 +177,8 @@ void radeon_gem_object_close(struct drm_gem_object *obj, struct radeon_bo_va *bo_va; int r; - if (rdev->family < CHIP_CAYMAN) { + if ((rdev->family < CHIP_CAYMAN) || + (!rdev->accel_working)) { return; } diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 3cf9c1fa6475..686411e4e4f6 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -605,14 +605,14 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) return -ENOMEM; } - vm = &fpriv->vm; - r = radeon_vm_init(rdev, vm); - if (r) { - kfree(fpriv); - return r; - } - if (rdev->accel_working) { + vm = &fpriv->vm; + r = radeon_vm_init(rdev, vm); + if (r) { + kfree(fpriv); + return r; + } + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); if (r) { radeon_vm_fini(rdev, vm); @@ -668,9 +668,9 @@ void radeon_driver_postclose_kms(struct drm_device *dev, radeon_vm_bo_rmv(rdev, vm->ib_bo_va); radeon_bo_unreserve(rdev->ring_tmp_bo.bo); } + radeon_vm_fini(rdev, vm); } - radeon_vm_fini(rdev, vm); kfree(fpriv); file_priv->driver_priv = NULL; } diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 07b506b41008..791818165c76 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -119,11 +119,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) if (ring == R600_RING_TYPE_DMA_INDEX) fence = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, - NULL); + vram_obj->tbo.resv); else fence = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, - NULL); + vram_obj->tbo.resv); if (IS_ERR(fence)) { DRM_ERROR("Failed GTT->VRAM copy %d\n", i); r = PTR_ERR(fence); @@ -170,11 +170,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) if (ring == R600_RING_TYPE_DMA_INDEX) fence = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, - NULL); + vram_obj->tbo.resv); else fence = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, - NULL); + vram_obj->tbo.resv); if (IS_ERR(fence)) { DRM_ERROR("Failed VRAM->GTT copy %d\n", i); r = PTR_ERR(fence); diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 06d2246d07f1..2a5a4a9e772d 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -743,9 +743,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev, */ /* NI is optimized for 256KB fragments, SI and newer for 64KB */ - uint64_t frag_flags = rdev->family == CHIP_CAYMAN ? + uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) || + (rdev->family == CHIP_ARUBA)) ? 
R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB; - uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80; + uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) || + (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80; uint64_t frag_start = ALIGN(pe_start, frag_align); uint64_t frag_end = pe_end & ~(frag_align - 1); diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index a7de26d1ac80..d931cbbed240 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -1389,6 +1389,7 @@ config SENSORS_ADS1015 config SENSORS_ADS7828 tristate "Texas Instruments ADS7828 and compatibles" depends on I2C + select REGMAP_I2C help If you say yes here you get support for Texas Instruments ADS7828 and ADS7830 8-channel A/D converters. ADS7828 resolution is 12-bit, while @@ -1430,8 +1431,8 @@ config SENSORS_INA2XX tristate "Texas Instruments INA219 and compatibles" depends on I2C help - If you say yes here you get support for INA219, INA220, INA226, and - INA230 power monitor chips. + If you say yes here you get support for INA219, INA220, INA226, + INA230, and INA231 power monitor chips. The INA2xx driver is configured for the default configuration of the part as described in the datasheet. diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c index 13875968c844..6cb89c0ebab6 100644 --- a/drivers/hwmon/abx500.c +++ b/drivers/hwmon/abx500.c @@ -221,7 +221,7 @@ static ssize_t show_min(struct device *dev, struct abx500_temp *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - return sprintf(buf, "%ld\n", data->min[attr->index]); + return sprintf(buf, "%lu\n", data->min[attr->index]); } static ssize_t show_max(struct device *dev, @@ -230,7 +230,7 @@ static ssize_t show_max(struct device *dev, struct abx500_temp *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - return sprintf(buf, "%ld\n", data->max[attr->index]); + return sprintf(buf, "%lu\n", data->max[attr->index]); } static ssize_t show_max_hyst(struct device *dev, @@ -239,7 +239,7 @@ static ssize_t show_max_hyst(struct device *dev, struct abx500_temp *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - return sprintf(buf, "%ld\n", data->max_hyst[attr->index]); + return sprintf(buf, "%lu\n", data->max_hyst[attr->index]); } static ssize_t show_min_alarm(struct device *dev, diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c index f4f9b219bf16..11955467fc0f 100644 --- a/drivers/hwmon/ad7314.c +++ b/drivers/hwmon/ad7314.c @@ -16,6 +16,7 @@ #include <linux/err.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> +#include <linux/bitops.h> /* * AD7314 temperature masks @@ -67,7 +68,7 @@ static ssize_t ad7314_show_temperature(struct device *dev, switch (spi_get_device_id(chip->spi_dev)->driver_data) { case ad7314: data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT; - data = (data << 6) >> 6; + data = sign_extend32(data, 9); return sprintf(buf, "%d\n", 250 * data); case adt7301: @@ -78,7 +79,7 @@ static ssize_t ad7314_show_temperature(struct device *dev, * register. 
1lsb - 31.25 milli degrees centigrade */ data = ret & ADT7301_TEMP_MASK; - data = (data << 2) >> 2; + data = sign_extend32(data, 13); return sprintf(buf, "%d\n", DIV_ROUND_CLOSEST(data * 3125, 100)); diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c index 0625e50d7a6e..ad2b47e40345 100644 --- a/drivers/hwmon/adc128d818.c +++ b/drivers/hwmon/adc128d818.c @@ -27,6 +27,7 @@ #include <linux/err.h> #include <linux/regulator/consumer.h> #include <linux/mutex.h> +#include <linux/bitops.h> /* Addresses to scan * The chip also supports addresses 0x35..0x37. Don't scan those addresses @@ -189,7 +190,7 @@ static ssize_t adc128_show_temp(struct device *dev, if (IS_ERR(data)) return PTR_ERR(data); - temp = (data->temp[index] << 7) >> 7; /* sign extend */ + temp = sign_extend32(data->temp[index], 8); return sprintf(buf, "%d\n", temp * 500);/* 0.5 degrees C resolution */ } diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c index a622d40eec17..bce4e9ff21bf 100644 --- a/drivers/hwmon/ads7828.c +++ b/drivers/hwmon/ads7828.c @@ -30,14 +30,12 @@ #include <linux/hwmon-sysfs.h> #include <linux/i2c.h> #include <linux/init.h> -#include <linux/jiffies.h> #include <linux/module.h> -#include <linux/mutex.h> #include <linux/platform_data/ads7828.h> +#include <linux/regmap.h> #include <linux/slab.h> /* The ADS7828 registers */ -#define ADS7828_NCH 8 /* 8 channels supported */ #define ADS7828_CMD_SD_SE 0x80 /* Single ended inputs */ #define ADS7828_CMD_PD1 0x04 /* Internal vref OFF && A/D ON */ #define ADS7828_CMD_PD3 0x0C /* Internal vref ON && A/D ON */ @@ -50,17 +48,9 @@ enum ads7828_chips { ads7828, ads7830 }; /* Client specific data */ struct ads7828_data { - struct i2c_client *client; - struct mutex update_lock; /* Mutex protecting updates */ - unsigned long last_updated; /* Last updated time (in jiffies) */ - u16 adc_input[ADS7828_NCH]; /* ADS7828_NCH samples */ - bool valid; /* Validity flag */ - bool diff_input; /* Differential input */ - bool ext_vref; /* External voltage reference */ - unsigned int vref_mv; /* voltage reference value */ + struct regmap *regmap; u8 cmd_byte; /* Command byte without channel bits */ unsigned int lsb_resol; /* Resolution of the ADC sample LSB */ - s32 (*read_channel)(const struct i2c_client *client, u8 command); }; /* Command byte C2,C1,C0 - see datasheet */ @@ -69,42 +59,22 @@ static inline u8 ads7828_cmd_byte(u8 cmd, int ch) return cmd | (((ch >> 1) | (ch & 0x01) << 2) << 4); } -/* Update data for the device (all 8 channels) */ -static struct ads7828_data *ads7828_update_device(struct device *dev) -{ - struct ads7828_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - - mutex_lock(&data->update_lock); - - if (time_after(jiffies, data->last_updated + HZ + HZ / 2) - || !data->valid) { - unsigned int ch; - dev_dbg(&client->dev, "Starting ads7828 update\n"); - - for (ch = 0; ch < ADS7828_NCH; ch++) { - u8 cmd = ads7828_cmd_byte(data->cmd_byte, ch); - data->adc_input[ch] = data->read_channel(client, cmd); - } - data->last_updated = jiffies; - data->valid = true; - } - - mutex_unlock(&data->update_lock); - - return data; -} - /* sysfs callback function */ static ssize_t ads7828_show_in(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); - struct ads7828_data *data = ads7828_update_device(dev); - unsigned int value = DIV_ROUND_CLOSEST(data->adc_input[attr->index] * - data->lsb_resol, 1000); + struct ads7828_data *data = dev_get_drvdata(dev); + u8 cmd = 
ads7828_cmd_byte(data->cmd_byte, attr->index); + unsigned int regval; + int err; - return sprintf(buf, "%d\n", value); + err = regmap_read(data->regmap, cmd, ®val); + if (err < 0) + return err; + + return sprintf(buf, "%d\n", + DIV_ROUND_CLOSEST(regval * data->lsb_resol, 1000)); } static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ads7828_show_in, NULL, 0); @@ -130,6 +100,16 @@ static struct attribute *ads7828_attrs[] = { ATTRIBUTE_GROUPS(ads7828); +static const struct regmap_config ads2828_regmap_config = { + .reg_bits = 8, + .val_bits = 16, +}; + +static const struct regmap_config ads2830_regmap_config = { + .reg_bits = 8, + .val_bits = 8, +}; + static int ads7828_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -137,42 +117,40 @@ static int ads7828_probe(struct i2c_client *client, struct ads7828_platform_data *pdata = dev_get_platdata(dev); struct ads7828_data *data; struct device *hwmon_dev; + unsigned int vref_mv = ADS7828_INT_VREF_MV; + bool diff_input = false; + bool ext_vref = false; data = devm_kzalloc(dev, sizeof(struct ads7828_data), GFP_KERNEL); if (!data) return -ENOMEM; if (pdata) { - data->diff_input = pdata->diff_input; - data->ext_vref = pdata->ext_vref; - if (data->ext_vref) - data->vref_mv = pdata->vref_mv; + diff_input = pdata->diff_input; + ext_vref = pdata->ext_vref; + if (ext_vref && pdata->vref_mv) + vref_mv = pdata->vref_mv; } - /* Bound Vref with min/max values if it was provided */ - if (data->vref_mv) - data->vref_mv = clamp_val(data->vref_mv, - ADS7828_EXT_VREF_MV_MIN, - ADS7828_EXT_VREF_MV_MAX); - else - data->vref_mv = ADS7828_INT_VREF_MV; + /* Bound Vref with min/max values */ + vref_mv = clamp_val(vref_mv, ADS7828_EXT_VREF_MV_MIN, + ADS7828_EXT_VREF_MV_MAX); /* ADS7828 uses 12-bit samples, while ADS7830 is 8-bit */ if (id->driver_data == ads7828) { - data->lsb_resol = DIV_ROUND_CLOSEST(data->vref_mv * 1000, 4096); - data->read_channel = i2c_smbus_read_word_swapped; + data->lsb_resol = DIV_ROUND_CLOSEST(vref_mv * 1000, 4096); + data->regmap = devm_regmap_init_i2c(client, + &ads2828_regmap_config); } else { - data->lsb_resol = DIV_ROUND_CLOSEST(data->vref_mv * 1000, 256); - data->read_channel = i2c_smbus_read_byte_data; + data->lsb_resol = DIV_ROUND_CLOSEST(vref_mv * 1000, 256); + data->regmap = devm_regmap_init_i2c(client, + &ads2830_regmap_config); } - data->cmd_byte = data->ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3; - if (!data->diff_input) + data->cmd_byte = ext_vref ? 
ADS7828_CMD_PD1 : ADS7828_CMD_PD3; + if (!diff_input) data->cmd_byte |= ADS7828_CMD_SD_SE; - data->client = client; - mutex_init(&data->update_lock); - hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, data, ads7828_groups); diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c index e01feba909c3..d1542b7d4bc3 100644 --- a/drivers/hwmon/ina2xx.c +++ b/drivers/hwmon/ina2xx.c @@ -35,6 +35,7 @@ #include <linux/hwmon-sysfs.h> #include <linux/jiffies.h> #include <linux/of.h> +#include <linux/delay.h> #include <linux/platform_data/ina2xx.h> @@ -51,7 +52,6 @@ #define INA226_ALERT_LIMIT 0x07 #define INA226_DIE_ID 0xFF - /* register count */ #define INA219_REGISTERS 6 #define INA226_REGISTERS 8 @@ -64,6 +64,24 @@ /* worst case is 68.10 ms (~14.6Hz, ina219) */ #define INA2XX_CONVERSION_RATE 15 +#define INA2XX_MAX_DELAY 69 /* worst case delay in ms */ + +#define INA2XX_RSHUNT_DEFAULT 10000 + +/* bit mask for reading the averaging setting in the configuration register */ +#define INA226_AVG_RD_MASK 0x0E00 + +#define INA226_READ_AVG(reg) (((reg) & INA226_AVG_RD_MASK) >> 9) +#define INA226_SHIFT_AVG(val) ((val) << 9) + +/* common attrs, ina226 attrs and NULL */ +#define INA2XX_MAX_ATTRIBUTE_GROUPS 3 + +/* + * Both bus voltage and shunt voltage conversion times for ina226 are set + * to 0b0100 on POR, which translates to 2200 microseconds in total. + */ +#define INA226_TOTAL_CONV_TIME_DEFAULT 2200 enum ina2xx_ids { ina219, ina226 }; @@ -81,11 +99,16 @@ struct ina2xx_data { struct i2c_client *client; const struct ina2xx_config *config; + long rshunt; + u16 curr_config; + struct mutex update_lock; bool valid; unsigned long last_updated; + int update_interval; /* in jiffies */ int kind; + const struct attribute_group *groups[INA2XX_MAX_ATTRIBUTE_GROUPS]; u16 regs[INA2XX_MAX_REGISTERS]; }; @@ -110,34 +133,156 @@ static const struct ina2xx_config ina2xx_config[] = { }, }; -static struct ina2xx_data *ina2xx_update_device(struct device *dev) +/* + * Available averaging rates for ina226. The indices correspond with + * the bit values expected by the chip (according to the ina226 datasheet, + * table 3 AVG bit settings, found at + * http://www.ti.com/lit/ds/symlink/ina226.pdf. + */ +static const int ina226_avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 }; + +static int ina226_avg_bits(int avg) +{ + int i; + + /* Get the closest average from the tab. */ + for (i = 0; i < ARRAY_SIZE(ina226_avg_tab) - 1; i++) { + if (avg <= (ina226_avg_tab[i] + ina226_avg_tab[i + 1]) / 2) + break; + } + + return i; /* Return 0b0111 for values greater than 1024. */ +} + +static int ina226_reg_to_interval(u16 config) +{ + int avg = ina226_avg_tab[INA226_READ_AVG(config)]; + + /* + * Multiply the total conversion time by the number of averages. + * Return the result in milliseconds. 
+ */ + return DIV_ROUND_CLOSEST(avg * INA226_TOTAL_CONV_TIME_DEFAULT, 1000); +} + +static u16 ina226_interval_to_reg(int interval, u16 config) +{ + int avg, avg_bits; + + avg = DIV_ROUND_CLOSEST(interval * 1000, + INA226_TOTAL_CONV_TIME_DEFAULT); + avg_bits = ina226_avg_bits(avg); + + return (config & ~INA226_AVG_RD_MASK) | INA226_SHIFT_AVG(avg_bits); +} + +static void ina226_set_update_interval(struct ina2xx_data *data) +{ + int ms; + + ms = ina226_reg_to_interval(data->curr_config); + data->update_interval = msecs_to_jiffies(ms); +} + +static int ina2xx_calibrate(struct ina2xx_data *data) +{ + u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor, + data->rshunt); + + return i2c_smbus_write_word_swapped(data->client, + INA2XX_CALIBRATION, val); +} + +/* + * Initialize the configuration and calibration registers. + */ +static int ina2xx_init(struct ina2xx_data *data) { - struct ina2xx_data *data = dev_get_drvdata(dev); struct i2c_client *client = data->client; - struct ina2xx_data *ret = data; + int ret; - mutex_lock(&data->update_lock); + /* device configuration */ + ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG, + data->curr_config); + if (ret < 0) + return ret; - if (time_after(jiffies, data->last_updated + - HZ / INA2XX_CONVERSION_RATE) || !data->valid) { + /* + * Set current LSB to 1mA, shunt is in uOhms + * (equation 13 in datasheet). + */ + return ina2xx_calibrate(data); +} - int i; +static int ina2xx_do_update(struct device *dev) +{ + struct ina2xx_data *data = dev_get_drvdata(dev); + struct i2c_client *client = data->client; + int i, rv, retry; - dev_dbg(&client->dev, "Starting ina2xx update\n"); + dev_dbg(&client->dev, "Starting ina2xx update\n"); + for (retry = 5; retry; retry--) { /* Read all registers */ for (i = 0; i < data->config->registers; i++) { - int rv = i2c_smbus_read_word_swapped(client, i); - if (rv < 0) { - ret = ERR_PTR(rv); - goto abort; - } + rv = i2c_smbus_read_word_swapped(client, i); + if (rv < 0) + return rv; data->regs[i] = rv; } + + /* + * If the current value in the calibration register is 0, the + * power and current registers will also remain at 0. In case + * the chip has been reset let's check the calibration + * register and reinitialize if needed. + */ + if (data->regs[INA2XX_CALIBRATION] == 0) { + dev_warn(dev, "chip not calibrated, reinitializing\n"); + + rv = ina2xx_init(data); + if (rv < 0) + return rv; + + /* + * Let's make sure the power and current registers + * have been updated before trying again. + */ + msleep(INA2XX_MAX_DELAY); + continue; + } + data->last_updated = jiffies; data->valid = 1; + + return 0; } -abort: + + /* + * If we're here then although all write operations succeeded, the + * chip still returns 0 in the calibration register. Nothing more we + * can do here. 
+ */ + dev_err(dev, "unable to reinitialize the chip\n"); + return -ENODEV; +} + +static struct ina2xx_data *ina2xx_update_device(struct device *dev) +{ + struct ina2xx_data *data = dev_get_drvdata(dev); + struct ina2xx_data *ret = data; + unsigned long after; + int rv; + + mutex_lock(&data->update_lock); + + after = data->last_updated + data->update_interval; + if (time_after(jiffies, after) || !data->valid) { + rv = ina2xx_do_update(dev); + if (rv < 0) + ret = ERR_PTR(rv); + } + mutex_unlock(&data->update_lock); return ret; } @@ -164,6 +309,10 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg) /* signed register, LSB=1mA (selected), in mA */ val = (s16)data->regs[reg]; break; + case INA2XX_CALIBRATION: + val = DIV_ROUND_CLOSEST(data->config->calibration_factor, + data->regs[reg]); + break; default: /* programmer goofed */ WARN_ON_ONCE(1); @@ -187,6 +336,85 @@ static ssize_t ina2xx_show_value(struct device *dev, ina2xx_get_value(data, attr->index)); } +static ssize_t ina2xx_set_shunt(struct device *dev, + struct device_attribute *da, + const char *buf, size_t count) +{ + struct ina2xx_data *data = ina2xx_update_device(dev); + unsigned long val; + int status; + + if (IS_ERR(data)) + return PTR_ERR(data); + + status = kstrtoul(buf, 10, &val); + if (status < 0) + return status; + + if (val == 0 || + /* Values greater than the calibration factor make no sense. */ + val > data->config->calibration_factor) + return -EINVAL; + + mutex_lock(&data->update_lock); + data->rshunt = val; + status = ina2xx_calibrate(data); + mutex_unlock(&data->update_lock); + if (status < 0) + return status; + + return count; +} + +static ssize_t ina226_set_interval(struct device *dev, + struct device_attribute *da, + const char *buf, size_t count) +{ + struct ina2xx_data *data = dev_get_drvdata(dev); + unsigned long val; + int status; + + status = kstrtoul(buf, 10, &val); + if (status < 0) + return status; + + if (val > INT_MAX || val == 0) + return -EINVAL; + + mutex_lock(&data->update_lock); + data->curr_config = ina226_interval_to_reg(val, + data->regs[INA2XX_CONFIG]); + status = i2c_smbus_write_word_swapped(data->client, + INA2XX_CONFIG, + data->curr_config); + + ina226_set_update_interval(data); + /* Make sure the next access re-reads all registers. */ + data->valid = 0; + mutex_unlock(&data->update_lock); + if (status < 0) + return status; + + return count; +} + +static ssize_t ina226_show_interval(struct device *dev, + struct device_attribute *da, char *buf) +{ + struct ina2xx_data *data = ina2xx_update_device(dev); + + if (IS_ERR(data)) + return PTR_ERR(data); + + /* + * We don't use data->update_interval here as we want to display + * the actual interval used by the chip and jiffies_to_msecs() + * doesn't seem to be accurate enough. 
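 * For example, at HZ=100 a 141 ms interval round-trips through
 * msecs_to_jiffies()/jiffies_to_msecs() as 150 ms.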
+ */ + return snprintf(buf, PAGE_SIZE, "%d\n", + ina226_reg_to_interval(data->regs[INA2XX_CONFIG])); +} + /* shunt voltage */ static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ina2xx_show_value, NULL, INA2XX_SHUNT_VOLTAGE); @@ -203,15 +431,37 @@ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ina2xx_show_value, NULL, static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL, INA2XX_POWER); +/* shunt resistance */ +static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, + ina2xx_show_value, ina2xx_set_shunt, + INA2XX_CALIBRATION); + +/* update interval (ina226 only) */ +static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, + ina226_show_interval, ina226_set_interval, 0); + /* pointers to created device attributes */ static struct attribute *ina2xx_attrs[] = { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_curr1_input.dev_attr.attr, &sensor_dev_attr_power1_input.dev_attr.attr, + &sensor_dev_attr_shunt_resistor.dev_attr.attr, NULL, }; -ATTRIBUTE_GROUPS(ina2xx); + +static const struct attribute_group ina2xx_group = { + .attrs = ina2xx_attrs, +}; + +static struct attribute *ina226_attrs[] = { + &sensor_dev_attr_update_interval.dev_attr.attr, + NULL, +}; + +static const struct attribute_group ina226_group = { + .attrs = ina226_attrs, +}; static int ina2xx_probe(struct i2c_client *client, const struct i2c_device_id *id) @@ -221,9 +471,8 @@ static int ina2xx_probe(struct i2c_client *client, struct device *dev = &client->dev; struct ina2xx_data *data; struct device *hwmon_dev; - long shunt = 10000; /* default shunt value 10mOhms */ u32 val; - int ret; + int ret, group = 0; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) return -ENODEV; @@ -234,50 +483,52 @@ static int ina2xx_probe(struct i2c_client *client, if (dev_get_platdata(dev)) { pdata = dev_get_platdata(dev); - shunt = pdata->shunt_uohms; + data->rshunt = pdata->shunt_uohms; } else if (!of_property_read_u32(dev->of_node, "shunt-resistor", &val)) { - shunt = val; + data->rshunt = val; + } else { + data->rshunt = INA2XX_RSHUNT_DEFAULT; } - if (shunt <= 0) - return -ENODEV; - /* set the device type */ data->kind = id->driver_data; data->config = &ina2xx_config[data->kind]; - - /* device configuration */ - ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG, - data->config->config_default); - if (ret < 0) { - dev_err(dev, - "error writing to the config register: %d", ret); - return -ENODEV; - } + data->curr_config = data->config->config_default; + data->client = client; /* - * Set current LSB to 1mA, shunt is in uOhms - * (equation 13 in datasheet). + * Ina226 has a variable update_interval. For ina219 we + * use a constant value. 
*/ - ret = i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION, - data->config->calibration_factor / shunt); + if (data->kind == ina226) + ina226_set_update_interval(data); + else + data->update_interval = HZ / INA2XX_CONVERSION_RATE; + + if (data->rshunt <= 0 || + data->rshunt > data->config->calibration_factor) + return -ENODEV; + + ret = ina2xx_init(data); if (ret < 0) { - dev_err(dev, - "error writing to the calibration register: %d", ret); + dev_err(dev, "error configuring the device: %d\n", ret); return -ENODEV; } - data->client = client; mutex_init(&data->update_lock); + data->groups[group++] = &ina2xx_group; + if (data->kind == ina226) + data->groups[group++] = &ina226_group; + hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, - data, ina2xx_groups); + data, data->groups); if (IS_ERR(hwmon_dev)) return PTR_ERR(hwmon_dev); dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n", - id->name, shunt); + id->name, data->rshunt); return 0; } @@ -287,6 +538,7 @@ static const struct i2c_device_id ina2xx_id[] = { { "ina220", ina219 }, { "ina226", ina226 }, { "ina230", ina226 }, + { "ina231", ina226 }, { } }; MODULE_DEVICE_TABLE(i2c, ina2xx_id); diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index 388f8bcd898e..996bdfd5cf25 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c @@ -201,7 +201,7 @@ struct jc42_data { #define JC42_TEMP_MIN 0 #define JC42_TEMP_MAX 125000 -static u16 jc42_temp_to_reg(int temp, bool extended) +static u16 jc42_temp_to_reg(long temp, bool extended) { int ntemp = clamp_val(temp, extended ? JC42_TEMP_MIN_EXTENDED : @@ -213,11 +213,7 @@ static u16 jc42_temp_to_reg(int temp, bool extended) static int jc42_temp_from_reg(s16 reg) { - reg &= 0x1fff; - - /* sign extend register */ - if (reg & 0x1000) - reg |= 0xf000; + reg = sign_extend32(reg, 12); /* convert from 0.0625 to 0.001 resolution */ return reg * 125 / 2; @@ -308,15 +304,18 @@ static ssize_t set_temp_crit_hyst(struct device *dev, const char *buf, size_t count) { struct jc42_data *data = dev_get_drvdata(dev); - unsigned long val; + long val; int diff, hyst; int err; int ret = count; - if (kstrtoul(buf, 10, &val) < 0) + if (kstrtol(buf, 10, &val) < 0) return -EINVAL; + val = clamp_val(val, (data->extended ? 
JC42_TEMP_MIN_EXTENDED : + JC42_TEMP_MIN) - 6000, JC42_TEMP_MAX); diff = jc42_temp_from_reg(data->temp[t_crit]) - val; + hyst = 0; if (diff > 0) { if (diff < 2250) diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c index ec5678289e4a..55765790907b 100644 --- a/drivers/hwmon/nct7802.c +++ b/drivers/hwmon/nct7802.c @@ -779,7 +779,7 @@ static bool nct7802_regmap_is_volatile(struct device *dev, unsigned int reg) return reg != REG_BANK && reg <= 0x20; } -static struct regmap_config nct7802_regmap_config = { +static const struct regmap_config nct7802_regmap_config = { .reg_bits = 8, .val_bits = 8, .cache_type = REGCACHE_RBTREE, diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c index ba9f478f64ee..9da2735f1424 100644 --- a/drivers/hwmon/tmp102.c +++ b/drivers/hwmon/tmp102.c @@ -253,7 +253,7 @@ static int tmp102_remove(struct i2c_client *client) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int tmp102_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); @@ -279,17 +279,10 @@ static int tmp102_resume(struct device *dev) config &= ~TMP102_CONF_SD; return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config); } - -static const struct dev_pm_ops tmp102_dev_pm_ops = { - .suspend = tmp102_suspend, - .resume = tmp102_resume, -}; - -#define TMP102_DEV_PM_OPS (&tmp102_dev_pm_ops) -#else -#define TMP102_DEV_PM_OPS NULL #endif /* CONFIG_PM */ +static SIMPLE_DEV_PM_OPS(tmp102_dev_pm_ops, tmp102_suspend, tmp102_resume); + static const struct i2c_device_id tmp102_id[] = { { "tmp102", 0 }, { } @@ -298,7 +291,7 @@ MODULE_DEVICE_TABLE(i2c, tmp102_id); static struct i2c_driver tmp102_driver = { .driver.name = DRIVER_NAME, - .driver.pm = TMP102_DEV_PM_OPS, + .driver.pm = &tmp102_dev_pm_ops, .probe = tmp102_probe, .remove = tmp102_remove, .id_table = tmp102_id, diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index b716b0815644..643c08a025a5 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -258,6 +258,5 @@ IB_UVERBS_DECLARE_CMD(close_xrcd); IB_UVERBS_DECLARE_EX_CMD(create_flow); IB_UVERBS_DECLARE_EX_CMD(destroy_flow); -IB_UVERBS_DECLARE_EX_CMD(query_device); #endif /* UVERBS_H */ diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 532d8eba8b02..b7943ff16ed3 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -400,52 +400,6 @@ err: return ret; } -static void copy_query_dev_fields(struct ib_uverbs_file *file, - struct ib_uverbs_query_device_resp *resp, - struct ib_device_attr *attr) -{ - resp->fw_ver = attr->fw_ver; - resp->node_guid = file->device->ib_dev->node_guid; - resp->sys_image_guid = attr->sys_image_guid; - resp->max_mr_size = attr->max_mr_size; - resp->page_size_cap = attr->page_size_cap; - resp->vendor_id = attr->vendor_id; - resp->vendor_part_id = attr->vendor_part_id; - resp->hw_ver = attr->hw_ver; - resp->max_qp = attr->max_qp; - resp->max_qp_wr = attr->max_qp_wr; - resp->device_cap_flags = attr->device_cap_flags; - resp->max_sge = attr->max_sge; - resp->max_sge_rd = attr->max_sge_rd; - resp->max_cq = attr->max_cq; - resp->max_cqe = attr->max_cqe; - resp->max_mr = attr->max_mr; - resp->max_pd = attr->max_pd; - resp->max_qp_rd_atom = attr->max_qp_rd_atom; - resp->max_ee_rd_atom = attr->max_ee_rd_atom; - resp->max_res_rd_atom = attr->max_res_rd_atom; - resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom; - resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom; - resp->atomic_cap = 
attr->atomic_cap; - resp->max_ee = attr->max_ee; - resp->max_rdd = attr->max_rdd; - resp->max_mw = attr->max_mw; - resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp; - resp->max_raw_ethy_qp = attr->max_raw_ethy_qp; - resp->max_mcast_grp = attr->max_mcast_grp; - resp->max_mcast_qp_attach = attr->max_mcast_qp_attach; - resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach; - resp->max_ah = attr->max_ah; - resp->max_fmr = attr->max_fmr; - resp->max_map_per_fmr = attr->max_map_per_fmr; - resp->max_srq = attr->max_srq; - resp->max_srq_wr = attr->max_srq_wr; - resp->max_srq_sge = attr->max_srq_sge; - resp->max_pkeys = attr->max_pkeys; - resp->local_ca_ack_delay = attr->local_ca_ack_delay; - resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt; -} - ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, const char __user *buf, int in_len, int out_len) @@ -466,7 +420,47 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, return ret; memset(&resp, 0, sizeof resp); - copy_query_dev_fields(file, &resp, &attr); + + resp.fw_ver = attr.fw_ver; + resp.node_guid = file->device->ib_dev->node_guid; + resp.sys_image_guid = attr.sys_image_guid; + resp.max_mr_size = attr.max_mr_size; + resp.page_size_cap = attr.page_size_cap; + resp.vendor_id = attr.vendor_id; + resp.vendor_part_id = attr.vendor_part_id; + resp.hw_ver = attr.hw_ver; + resp.max_qp = attr.max_qp; + resp.max_qp_wr = attr.max_qp_wr; + resp.device_cap_flags = attr.device_cap_flags; + resp.max_sge = attr.max_sge; + resp.max_sge_rd = attr.max_sge_rd; + resp.max_cq = attr.max_cq; + resp.max_cqe = attr.max_cqe; + resp.max_mr = attr.max_mr; + resp.max_pd = attr.max_pd; + resp.max_qp_rd_atom = attr.max_qp_rd_atom; + resp.max_ee_rd_atom = attr.max_ee_rd_atom; + resp.max_res_rd_atom = attr.max_res_rd_atom; + resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom; + resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom; + resp.atomic_cap = attr.atomic_cap; + resp.max_ee = attr.max_ee; + resp.max_rdd = attr.max_rdd; + resp.max_mw = attr.max_mw; + resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp; + resp.max_raw_ethy_qp = attr.max_raw_ethy_qp; + resp.max_mcast_grp = attr.max_mcast_grp; + resp.max_mcast_qp_attach = attr.max_mcast_qp_attach; + resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach; + resp.max_ah = attr.max_ah; + resp.max_fmr = attr.max_fmr; + resp.max_map_per_fmr = attr.max_map_per_fmr; + resp.max_srq = attr.max_srq; + resp.max_srq_wr = attr.max_srq_wr; + resp.max_srq_sge = attr.max_srq_sge; + resp.max_pkeys = attr.max_pkeys; + resp.local_ca_ack_delay = attr.local_ca_ack_delay; + resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt; if (copy_to_user((void __user *) (unsigned long) cmd.response, &resp, sizeof resp)) @@ -3293,52 +3287,3 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, return ret ? 
ret : in_len; } - -int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, - struct ib_udata *ucore, - struct ib_udata *uhw) -{ - struct ib_uverbs_ex_query_device_resp resp; - struct ib_uverbs_ex_query_device cmd; - struct ib_device_attr attr; - struct ib_device *device; - int err; - - device = file->device->ib_dev; - if (ucore->inlen < sizeof(cmd)) - return -EINVAL; - - err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); - if (err) - return err; - - if (cmd.reserved) - return -EINVAL; - - err = device->query_device(device, &attr); - if (err) - return err; - - memset(&resp, 0, sizeof(resp)); - copy_query_dev_fields(file, &resp.base, &attr); - resp.comp_mask = 0; - -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - if (cmd.comp_mask & IB_USER_VERBS_EX_QUERY_DEVICE_ODP) { - resp.odp_caps.general_caps = attr.odp_caps.general_caps; - resp.odp_caps.per_transport_caps.rc_odp_caps = - attr.odp_caps.per_transport_caps.rc_odp_caps; - resp.odp_caps.per_transport_caps.uc_odp_caps = - attr.odp_caps.per_transport_caps.uc_odp_caps; - resp.odp_caps.per_transport_caps.ud_odp_caps = - attr.odp_caps.per_transport_caps.ud_odp_caps; - resp.comp_mask |= IB_USER_VERBS_EX_QUERY_DEVICE_ODP; - } -#endif - - err = ib_copy_to_udata(ucore, &resp, sizeof(resp)); - if (err) - return err; - - return 0; -} diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index e6c23b9eab33..5db1a8cc388d 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -123,7 +123,6 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file, struct ib_udata *uhw) = { [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow, [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow, - [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device }; static void ib_uverbs_add_one(struct ib_device *device); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 8a87404e9c76..03bf81211a54 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1331,8 +1331,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | (1ull << IB_USER_VERBS_CMD_OPEN_QP); - dev->ib_dev.uverbs_ex_cmd_mask = - (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE); dev->ib_dev.query_device = mlx5_ib_query_device; dev->ib_dev.query_port = mlx5_ib_query_port; diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 8ba80a6d3a46..d7562beb5423 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -98,15 +98,9 @@ enum { IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */ IPOIB_MCAST_FLAG_SENDONLY = 1, - /* - * For IPOIB_MCAST_FLAG_BUSY - * When set, in flight join and mcast->mc is unreliable - * When clear and mcast->mc IS_ERR_OR_NULL, need to restart or - * haven't started yet - * When clear and mcast->mc is valid pointer, join was successful - */ - IPOIB_MCAST_FLAG_BUSY = 2, + IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */ IPOIB_MCAST_FLAG_ATTACHED = 3, + IPOIB_MCAST_JOIN_STARTED = 4, MAX_SEND_CQE = 16, IPOIB_CM_COPYBREAK = 256, @@ -323,7 +317,6 @@ struct ipoib_dev_priv { struct list_head multicast_list; struct rb_root multicast_tree; - struct workqueue_struct *wq; struct delayed_work mcast_task; struct work_struct carrier_on_task; struct work_struct flush_light; @@ -484,10 +477,10 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work); 
void ipoib_pkey_event(struct work_struct *work); void ipoib_ib_dev_cleanup(struct net_device *dev); -int ipoib_ib_dev_open(struct net_device *dev); +int ipoib_ib_dev_open(struct net_device *dev, int flush); int ipoib_ib_dev_up(struct net_device *dev); -int ipoib_ib_dev_down(struct net_device *dev); -int ipoib_ib_dev_stop(struct net_device *dev); +int ipoib_ib_dev_down(struct net_device *dev, int flush); +int ipoib_ib_dev_stop(struct net_device *dev, int flush); void ipoib_pkey_dev_check_presence(struct net_device *dev); int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); @@ -499,7 +492,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb); void ipoib_mcast_restart_task(struct work_struct *work); int ipoib_mcast_start_thread(struct net_device *dev); -int ipoib_mcast_stop_thread(struct net_device *dev); +int ipoib_mcast_stop_thread(struct net_device *dev, int flush); void ipoib_mcast_dev_down(struct net_device *dev); void ipoib_mcast_dev_flush(struct net_device *dev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 56959adb6c7d..933efcea0d03 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -474,7 +474,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even } spin_lock_irq(&priv->lock); - queue_delayed_work(priv->wq, + queue_delayed_work(ipoib_workqueue, &priv->cm.stale_task, IPOIB_CM_RX_DELAY); /* Add this entry to passive ids list head, but do not re-add it * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */ @@ -576,7 +576,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) spin_lock_irqsave(&priv->lock, flags); list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list); ipoib_cm_start_rx_drain(priv); - queue_work(priv->wq, &priv->cm.rx_reap_task); + queue_work(ipoib_workqueue, &priv->cm.rx_reap_task); spin_unlock_irqrestore(&priv->lock, flags); } else ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n", @@ -603,7 +603,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) spin_lock_irqsave(&priv->lock, flags); list_move(&p->list, &priv->cm.rx_reap_list); spin_unlock_irqrestore(&priv->lock, flags); - queue_work(priv->wq, &priv->cm.rx_reap_task); + queue_work(ipoib_workqueue, &priv->cm.rx_reap_task); } return; } @@ -827,7 +827,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { list_move(&tx->list, &priv->cm.reap_list); - queue_work(priv->wq, &priv->cm.reap_task); + queue_work(ipoib_workqueue, &priv->cm.reap_task); } clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags); @@ -1255,7 +1255,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id, if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { list_move(&tx->list, &priv->cm.reap_list); - queue_work(priv->wq, &priv->cm.reap_task); + queue_work(ipoib_workqueue, &priv->cm.reap_task); } spin_unlock_irqrestore(&priv->lock, flags); @@ -1284,7 +1284,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path tx->dev = dev; list_add(&tx->list, &priv->cm.start_list); set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags); - queue_work(priv->wq, &priv->cm.start_task); + queue_work(ipoib_workqueue, &priv->cm.start_task); return tx; } @@ -1295,7 +1295,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx) if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { spin_lock_irqsave(&priv->lock, 
flags); list_move(&tx->list, &priv->cm.reap_list); - queue_work(priv->wq, &priv->cm.reap_task); + queue_work(ipoib_workqueue, &priv->cm.reap_task); ipoib_dbg(priv, "Reap connection for gid %pI6\n", tx->neigh->daddr + 4); tx->neigh = NULL; @@ -1417,7 +1417,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb, skb_queue_tail(&priv->cm.skb_queue, skb); if (e) - queue_work(priv->wq, &priv->cm.skb_task); + queue_work(ipoib_workqueue, &priv->cm.skb_task); } static void ipoib_cm_rx_reap(struct work_struct *work) @@ -1450,7 +1450,7 @@ static void ipoib_cm_stale_task(struct work_struct *work) } if (!list_empty(&priv->cm.passive_ids)) - queue_delayed_work(priv->wq, + queue_delayed_work(ipoib_workqueue, &priv->cm.stale_task, IPOIB_CM_RX_DELAY); spin_unlock_irq(&priv->lock); } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index fe65abb5150c..72626c348174 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -655,7 +655,7 @@ void ipoib_reap_ah(struct work_struct *work) __ipoib_reap_ah(dev); if (!test_bit(IPOIB_STOP_REAPER, &priv->flags)) - queue_delayed_work(priv->wq, &priv->ah_reap_task, + queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, round_jiffies_relative(HZ)); } @@ -664,7 +664,7 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx) drain_tx_cq((struct net_device *)ctx); } -int ipoib_ib_dev_open(struct net_device *dev) +int ipoib_ib_dev_open(struct net_device *dev, int flush) { struct ipoib_dev_priv *priv = netdev_priv(dev); int ret; @@ -696,7 +696,7 @@ int ipoib_ib_dev_open(struct net_device *dev) } clear_bit(IPOIB_STOP_REAPER, &priv->flags); - queue_delayed_work(priv->wq, &priv->ah_reap_task, + queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, round_jiffies_relative(HZ)); if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) @@ -706,7 +706,7 @@ int ipoib_ib_dev_open(struct net_device *dev) dev_stop: if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) napi_enable(&priv->napi); - ipoib_ib_dev_stop(dev); + ipoib_ib_dev_stop(dev, flush); return -1; } @@ -738,7 +738,7 @@ int ipoib_ib_dev_up(struct net_device *dev) return ipoib_mcast_start_thread(dev); } -int ipoib_ib_dev_down(struct net_device *dev) +int ipoib_ib_dev_down(struct net_device *dev, int flush) { struct ipoib_dev_priv *priv = netdev_priv(dev); @@ -747,7 +747,7 @@ int ipoib_ib_dev_down(struct net_device *dev) clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); netif_carrier_off(dev); - ipoib_mcast_stop_thread(dev); + ipoib_mcast_stop_thread(dev, flush); ipoib_mcast_dev_flush(dev); ipoib_flush_paths(dev); @@ -807,7 +807,7 @@ void ipoib_drain_cq(struct net_device *dev) local_bh_enable(); } -int ipoib_ib_dev_stop(struct net_device *dev) +int ipoib_ib_dev_stop(struct net_device *dev, int flush) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_qp_attr qp_attr; @@ -880,7 +880,8 @@ timeout: /* Wait for all AHs to be reaped */ set_bit(IPOIB_STOP_REAPER, &priv->flags); cancel_delayed_work(&priv->ah_reap_task); - flush_workqueue(priv->wq); + if (flush) + flush_workqueue(ipoib_workqueue); begin = jiffies; @@ -917,7 +918,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port) (unsigned long) dev); if (dev->flags & IFF_UP) { - if (ipoib_ib_dev_open(dev)) { + if (ipoib_ib_dev_open(dev, 1)) { ipoib_transport_dev_cleanup(dev); return -ENODEV; } @@ -1039,12 +1040,12 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, } if (level >= IPOIB_FLUSH_NORMAL) - 
ipoib_ib_dev_down(dev); + ipoib_ib_dev_down(dev, 0); if (level == IPOIB_FLUSH_HEAVY) { if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) - ipoib_ib_dev_stop(dev); - if (ipoib_ib_dev_open(dev) != 0) + ipoib_ib_dev_stop(dev, 0); + if (ipoib_ib_dev_open(dev, 0) != 0) return; if (netif_queue_stopped(dev)) netif_start_queue(dev); @@ -1096,7 +1097,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev) */ ipoib_flush_paths(dev); - ipoib_mcast_stop_thread(dev); + ipoib_mcast_stop_thread(dev, 1); ipoib_mcast_dev_flush(dev); ipoib_transport_dev_cleanup(dev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 6bad17d4d588..58b5aa3b6f2d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -108,7 +108,7 @@ int ipoib_open(struct net_device *dev) set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); - if (ipoib_ib_dev_open(dev)) { + if (ipoib_ib_dev_open(dev, 1)) { if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) return 0; goto err_disable; @@ -139,7 +139,7 @@ int ipoib_open(struct net_device *dev) return 0; err_stop: - ipoib_ib_dev_stop(dev); + ipoib_ib_dev_stop(dev, 1); err_disable: clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); @@ -157,8 +157,8 @@ static int ipoib_stop(struct net_device *dev) netif_stop_queue(dev); - ipoib_ib_dev_down(dev); - ipoib_ib_dev_stop(dev); + ipoib_ib_dev_down(dev, 1); + ipoib_ib_dev_stop(dev, 0); if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { struct ipoib_dev_priv *cpriv; @@ -839,7 +839,7 @@ static void ipoib_set_mcast_list(struct net_device *dev) return; } - queue_work(priv->wq, &priv->restart_task); + queue_work(ipoib_workqueue, &priv->restart_task); } static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr) @@ -954,7 +954,7 @@ static void ipoib_reap_neigh(struct work_struct *work) __ipoib_reap_neigh(priv); if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) - queue_delayed_work(priv->wq, &priv->neigh_reap_task, + queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task, arp_tbl.gc_interval); } @@ -1133,7 +1133,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv) /* start garbage collection */ clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); - queue_delayed_work(priv->wq, &priv->neigh_reap_task, + queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task, arp_tbl.gc_interval); return 0; @@ -1262,13 +1262,15 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port) { struct ipoib_dev_priv *priv = netdev_priv(dev); + if (ipoib_neigh_hash_init(priv) < 0) + goto out; /* Allocate RX/TX "rings" to hold queued skbs */ priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring, GFP_KERNEL); if (!priv->rx_ring) { printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n", ca->name, ipoib_recvq_size); - goto out; + goto out_neigh_hash_cleanup; } priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring); @@ -1283,24 +1285,16 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port) if (ipoib_ib_dev_init(dev, ca, port)) goto out_tx_ring_cleanup; - /* - * Must be after ipoib_ib_dev_init so we can allocate a per - * device wq there and use it here - */ - if (ipoib_neigh_hash_init(priv) < 0) - goto out_dev_uninit; - return 0; -out_dev_uninit: - ipoib_ib_dev_cleanup(dev); - out_tx_ring_cleanup: vfree(priv->tx_ring); out_rx_ring_cleanup: kfree(priv->rx_ring); +out_neigh_hash_cleanup: + ipoib_neigh_hash_uninit(dev); out: return -ENOMEM; } @@ -1323,12 +1317,6 @@ void ipoib_dev_cleanup(struct 
net_device *dev) } unregister_netdevice_many(&head); - /* - * Must be before ipoib_ib_dev_cleanup or we delete an in use - * work queue - */ - ipoib_neigh_hash_uninit(dev); - ipoib_ib_dev_cleanup(dev); kfree(priv->rx_ring); @@ -1336,6 +1324,8 @@ void ipoib_dev_cleanup(struct net_device *dev) priv->rx_ring = NULL; priv->tx_ring = NULL; + + ipoib_neigh_hash_uninit(dev); } static const struct header_ops ipoib_header_ops = { @@ -1646,7 +1636,7 @@ register_failed: /* Stop GC if started before flush */ set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); cancel_delayed_work(&priv->neigh_reap_task); - flush_workqueue(priv->wq); + flush_workqueue(ipoib_workqueue); event_failed: ipoib_dev_cleanup(priv->dev); @@ -1717,7 +1707,7 @@ static void ipoib_remove_one(struct ib_device *device) /* Stop GC */ set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); cancel_delayed_work(&priv->neigh_reap_task); - flush_workqueue(priv->wq); + flush_workqueue(ipoib_workqueue); unregister_netdev(priv->dev); free_netdev(priv->dev); @@ -1758,13 +1748,8 @@ static int __init ipoib_init_module(void) * unregister_netdev() and linkwatch_event take the rtnl lock, * so flush_scheduled_work() can deadlock during device * removal. - * - * In addition, bringing one device up and another down at the - * same time can deadlock a single workqueue, so we have this - * global fallback workqueue, but we also attempt to open a - * per device workqueue each time we bring an interface up */ - ipoib_workqueue = create_singlethread_workqueue("ipoib_flush"); + ipoib_workqueue = create_singlethread_workqueue("ipoib"); if (!ipoib_workqueue) { ret = -ENOMEM; goto err_fs; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index bc50dd0d0e4d..ffb83b5f7e80 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -190,6 +190,12 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, spin_unlock_irq(&priv->lock); priv->tx_wr.wr.ud.remote_qkey = priv->qkey; set_qkey = 1; + + if (!ipoib_cm_admin_enabled(dev)) { + rtnl_lock(); + dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu)); + rtnl_unlock(); + } } if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { @@ -271,27 +277,16 @@ ipoib_mcast_sendonly_join_complete(int status, struct ipoib_mcast *mcast = multicast->context; struct net_device *dev = mcast->dev; - /* - * We have to take the mutex to force mcast_sendonly_join to - * return from ib_sa_multicast_join and set mcast->mc to a - * valid value. Otherwise we were racing with ourselves in - * that we might fail here, but get a valid return from - * ib_sa_multicast_join after we had cleared mcast->mc here, - * resulting in mis-matched joins and leaves and a deadlock - */ - mutex_lock(&mcast_mutex); - /* We trap for port events ourselves. 
*/ if (status == -ENETRESET) - goto out; + return 0; if (!status) status = ipoib_mcast_join_finish(mcast, &multicast->rec); if (status) { if (mcast->logcount++ < 20) - ipoib_dbg_mcast(netdev_priv(dev), "sendonly multicast " - "join failed for %pI6, status %d\n", + ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n", mcast->mcmember.mgid.raw, status); /* Flush out any queued packets */ @@ -301,15 +296,11 @@ ipoib_mcast_sendonly_join_complete(int status, dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); } netif_tx_unlock_bh(dev); + + /* Clear the busy flag so we try again */ + status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, + &mcast->flags); } -out: - clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); - if (status) - mcast->mc = NULL; - complete(&mcast->done); - if (status == -ENETRESET) - status = 0; - mutex_unlock(&mcast_mutex); return status; } @@ -327,14 +318,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast) int ret = 0; if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) { - ipoib_dbg_mcast(priv, "device shutting down, no sendonly " - "multicast joins\n"); + ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n"); return -ENODEV; } - if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) { - ipoib_dbg_mcast(priv, "multicast entry busy, skipping " - "sendonly join\n"); + if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) { + ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n"); return -EBUSY; } @@ -342,9 +331,6 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast) rec.port_gid = priv->local_gid; rec.pkey = cpu_to_be16(priv->pkey); - mutex_lock(&mcast_mutex); - init_completion(&mcast->done); - set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, &rec, IB_SA_MCMEMBER_REC_MGID | @@ -357,14 +343,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast) if (IS_ERR(mcast->mc)) { ret = PTR_ERR(mcast->mc); clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); - complete(&mcast->done); - ipoib_warn(priv, "ib_sa_join_multicast for sendonly join " - "failed (ret = %d)\n", ret); + ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n", + ret); } else { - ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting " - "sendonly join\n", mcast->mcmember.mgid.raw); + ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n", + mcast->mcmember.mgid.raw); } - mutex_unlock(&mcast_mutex); return ret; } @@ -375,29 +359,18 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work) carrier_on_task); struct ib_port_attr attr; + /* + * Take rtnl_lock to avoid racing with ipoib_stop() and + * turning the carrier back on while a device is being + * removed. + */ if (ib_query_port(priv->ca, priv->port, &attr) || attr.state != IB_PORT_ACTIVE) { ipoib_dbg(priv, "Keeping carrier off until IB port is active\n"); return; } - /* - * Take rtnl_lock to avoid racing with ipoib_stop() and - * turning the carrier back on while a device is being - * removed. However, ipoib_stop() will attempt to flush - * the workqueue while holding the rtnl lock, so loop - * on trylock until either we get the lock or we see - * FLAG_ADMIN_UP go away as that signals that we are bailing - * and can safely ignore the carrier on work. 
- */ - while (!rtnl_trylock()) { - if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) - return; - else - msleep(20); - } - if (!ipoib_cm_admin_enabled(priv->dev)) - dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu)); + rtnl_lock(); netif_carrier_on(priv->dev); rtnl_unlock(); } @@ -412,63 +385,60 @@ static int ipoib_mcast_join_complete(int status, ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n", mcast->mcmember.mgid.raw, status); - /* - * We have to take the mutex to force mcast_join to - * return from ib_sa_multicast_join and set mcast->mc to a - * valid value. Otherwise we were racing with ourselves in - * that we might fail here, but get a valid return from - * ib_sa_multicast_join after we had cleared mcast->mc here, - * resulting in mis-matched joins and leaves and a deadlock - */ - mutex_lock(&mcast_mutex); - /* We trap for port events ourselves. */ - if (status == -ENETRESET) + if (status == -ENETRESET) { + status = 0; goto out; + } if (!status) status = ipoib_mcast_join_finish(mcast, &multicast->rec); if (!status) { mcast->backoff = 1; + mutex_lock(&mcast_mutex); if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(priv->wq, &priv->mcast_task, 0); + queue_delayed_work(ipoib_workqueue, + &priv->mcast_task, 0); + mutex_unlock(&mcast_mutex); /* - * Defer carrier on work to priv->wq to avoid a + * Defer carrier on work to ipoib_workqueue to avoid a * deadlock on rtnl_lock here. */ if (mcast == priv->broadcast) - queue_work(priv->wq, &priv->carrier_on_task); - } else { - if (mcast->logcount++ < 20) { - if (status == -ETIMEDOUT || status == -EAGAIN) { - ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n", - mcast->mcmember.mgid.raw, status); - } else { - ipoib_warn(priv, "multicast join failed for %pI6, status %d\n", - mcast->mcmember.mgid.raw, status); - } - } + queue_work(ipoib_workqueue, &priv->carrier_on_task); - mcast->backoff *= 2; - if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) - mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; + status = 0; + goto out; } -out: + + if (mcast->logcount++ < 20) { + if (status == -ETIMEDOUT || status == -EAGAIN) { + ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n", + mcast->mcmember.mgid.raw, status); + } else { + ipoib_warn(priv, "multicast join failed for %pI6, status %d\n", + mcast->mcmember.mgid.raw, status); + } + } + + mcast->backoff *= 2; + if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) + mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; + + /* Clear the busy flag so we try again */ + status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + + mutex_lock(&mcast_mutex); spin_lock_irq(&priv->lock); - clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); - if (status) - mcast->mc = NULL; - complete(&mcast->done); - if (status == -ENETRESET) - status = 0; - if (status && test_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(priv->wq, &priv->mcast_task, + if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) + queue_delayed_work(ipoib_workqueue, &priv->mcast_task, mcast->backoff * HZ); spin_unlock_irq(&priv->lock); mutex_unlock(&mcast_mutex); - +out: + complete(&mcast->done); return status; } @@ -517,9 +487,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, rec.hop_limit = priv->broadcast->mcmember.hop_limit; } - mutex_lock(&mcast_mutex); - init_completion(&mcast->done); set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + init_completion(&mcast->done); + set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags); + mcast->mc = 
ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, &rec, comp_mask, GFP_KERNEL, ipoib_mcast_join_complete, mcast); @@ -533,11 +504,13 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; + mutex_lock(&mcast_mutex); if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(priv->wq, &priv->mcast_task, + queue_delayed_work(ipoib_workqueue, + &priv->mcast_task, mcast->backoff * HZ); + mutex_unlock(&mcast_mutex); } - mutex_unlock(&mcast_mutex); } void ipoib_mcast_join_task(struct work_struct *work) @@ -574,8 +547,8 @@ void ipoib_mcast_join_task(struct work_struct *work) ipoib_warn(priv, "failed to allocate broadcast group\n"); mutex_lock(&mcast_mutex); if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(priv->wq, &priv->mcast_task, - HZ); + queue_delayed_work(ipoib_workqueue, + &priv->mcast_task, HZ); mutex_unlock(&mcast_mutex); return; } @@ -590,8 +563,7 @@ void ipoib_mcast_join_task(struct work_struct *work) } if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) { - if (IS_ERR_OR_NULL(priv->broadcast->mc) && - !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) + if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) ipoib_mcast_join(dev, priv->broadcast, 0); return; } @@ -599,33 +571,23 @@ void ipoib_mcast_join_task(struct work_struct *work) while (1) { struct ipoib_mcast *mcast = NULL; - /* - * Need the mutex so our flags are consistent, need the - * priv->lock so we don't race with list removals in either - * mcast_dev_flush or mcast_restart_task - */ - mutex_lock(&mcast_mutex); spin_lock_irq(&priv->lock); list_for_each_entry(mcast, &priv->multicast_list, list) { - if (IS_ERR_OR_NULL(mcast->mc) && - !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) && - !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { + if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) + && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) + && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { /* Found the next unjoined group */ break; } } spin_unlock_irq(&priv->lock); - mutex_unlock(&mcast_mutex); if (&mcast->list == &priv->multicast_list) { /* All done */ break; } - if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) - ipoib_mcast_sendonly_join(mcast); - else - ipoib_mcast_join(dev, mcast, 1); + ipoib_mcast_join(dev, mcast, 1); return; } @@ -642,13 +604,13 @@ int ipoib_mcast_start_thread(struct net_device *dev) mutex_lock(&mcast_mutex); if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(priv->wq, &priv->mcast_task, 0); + queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0); mutex_unlock(&mcast_mutex); return 0; } -int ipoib_mcast_stop_thread(struct net_device *dev) +int ipoib_mcast_stop_thread(struct net_device *dev, int flush) { struct ipoib_dev_priv *priv = netdev_priv(dev); @@ -659,7 +621,8 @@ int ipoib_mcast_stop_thread(struct net_device *dev) cancel_delayed_work(&priv->mcast_task); mutex_unlock(&mcast_mutex); - flush_workqueue(priv->wq); + if (flush) + flush_workqueue(ipoib_workqueue); return 0; } @@ -670,9 +633,6 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) int ret = 0; if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) - ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n"); - - if (!IS_ERR_OR_NULL(mcast->mc)) ib_sa_free_multicast(mcast->mc); if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { @@ -725,8 +685,6 @@ void 
ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb) memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid)); __ipoib_mcast_add(dev, mcast); list_add_tail(&mcast->list, &priv->multicast_list); - if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(priv->wq, &priv->mcast_task, 0); } if (!mcast->ah) { @@ -740,6 +698,8 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb) if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) ipoib_dbg_mcast(priv, "no address vector, " "but multicast join already started\n"); + else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) + ipoib_mcast_sendonly_join(mcast); /* * If lookup completes between here and out:, don't @@ -799,12 +759,9 @@ void ipoib_mcast_dev_flush(struct net_device *dev) spin_unlock_irqrestore(&priv->lock, flags); - /* - * make sure the in-flight joins have finished before we attempt - * to leave - */ + /* separate the wait for in-flight joins from the actual leave */ list_for_each_entry_safe(mcast, tmcast, &remove_list, list) - if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) + if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags)) wait_for_completion(&mcast->done); list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { @@ -837,6 +794,8 @@ void ipoib_mcast_restart_task(struct work_struct *work) ipoib_dbg_mcast(priv, "restarting multicast task\n"); + ipoib_mcast_stop_thread(dev, 0); + local_irq_save(flags); netif_addr_lock(dev); spin_lock(&priv->lock); @@ -921,38 +880,14 @@ void ipoib_mcast_restart_task(struct work_struct *work) netif_addr_unlock(dev); local_irq_restore(flags); - /* - * make sure the in-flight joins have finished before we attempt - * to leave - */ - list_for_each_entry_safe(mcast, tmcast, &remove_list, list) - if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) - wait_for_completion(&mcast->done); - - /* - * We have to cancel outside of the spinlock, but we have to - * take the rtnl lock or else we race with the removal of - * entries from the remove list in mcast_dev_flush as part - * of ipoib_stop(). We detect the drop of the ADMIN_UP flag - * to signal that we have hit this particular race, and we - * return since we know we don't need to do anything else - * anyway.
- */ - while (!rtnl_trylock()) { - if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) - return; - else - msleep(20); - } + /* We have to cancel outside of the spinlock */ list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { ipoib_mcast_leave(mcast->dev, mcast); ipoib_mcast_free(mcast); } - /* - * Restart our join task if needed - */ - ipoib_mcast_start_thread(dev); - rtnl_unlock(); + + if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) + ipoib_mcast_start_thread(dev); } #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index b72a753eb41d..c56d5d44c53b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c @@ -145,20 +145,10 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) int ret, size; int i; - /* - * the various IPoIB tasks assume they will never race against - * themselves, so always use a single thread workqueue - */ - priv->wq = create_singlethread_workqueue("ipoib_wq"); - if (!priv->wq) { - printk(KERN_WARNING "ipoib: failed to allocate device WQ\n"); - return -ENODEV; - } - priv->pd = ib_alloc_pd(priv->ca); if (IS_ERR(priv->pd)) { printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name); - goto out_free_wq; + return -ENODEV; } priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE); @@ -252,10 +242,6 @@ out_free_mr: out_free_pd: ib_dealloc_pd(priv->pd); - -out_free_wq: - destroy_workqueue(priv->wq); - priv->wq = NULL; return -ENODEV; } @@ -284,12 +270,6 @@ void ipoib_transport_dev_cleanup(struct net_device *dev) if (ib_dealloc_pd(priv->pd)) ipoib_warn(priv, "ib_dealloc_pd failed\n"); - - if (priv->wq) { - flush_workqueue(priv->wq); - destroy_workqueue(priv->wq); - priv->wq = NULL; - } } void ipoib_event(struct ib_event_handler *handler, diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 2b0468e3df6a..56b96c63dc4b 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -37,6 +37,7 @@ static struct irq_domain *gic_irq_domain; static int gic_shared_intrs; static int gic_vpes; static unsigned int gic_cpu_pin; +static unsigned int timer_cpu_pin; static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller; static void __gic_irq_dispatch(void); @@ -616,6 +617,8 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq, gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val); break; case GIC_LOCAL_INT_TIMER: + /* CONFIG_MIPS_CMP workaround (see __gic_init) */ + val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin; gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val); break; case GIC_LOCAL_INT_PERFCTR: @@ -713,12 +716,36 @@ static void __init __gic_init(unsigned long gic_base_addr, if (cpu_has_veic) { /* Always use vector 1 in EIC mode */ gic_cpu_pin = 0; + timer_cpu_pin = gic_cpu_pin; set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET, __gic_irq_dispatch); } else { gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET; irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec, gic_irq_dispatch); + /* + * With the CMP implementation of SMP (deprecated), other CPUs + * are started by the bootloader and put into a timer based + * waiting poll loop. We must not re-route those CPU's local + * timer interrupts as the wait instruction will never finish, + * so just handle whatever CPU interrupt it is routed to by + * default. + * + * This workaround should be removed when CMP support is + * dropped. 
+ */ + if (IS_ENABLED(CONFIG_MIPS_CMP) && + gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) { + timer_cpu_pin = gic_read(GIC_REG(VPE_LOCAL, + GIC_VPE_TIMER_MAP)) & + GIC_MAP_MSK; + irq_set_chained_handler(MIPS_CPU_IRQ_BASE + + GIC_CPU_PIN_OFFSET + + timer_cpu_pin, + gic_irq_dispatch); + } else { + timer_cpu_pin = gic_cpu_pin; + } } gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS + diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c index 0b380603a578..d7c286656a25 100644 --- a/drivers/isdn/hardware/eicon/message.c +++ b/drivers/isdn/hardware/eicon/message.c @@ -1474,7 +1474,7 @@ static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, add_ai(plci, &parms[5]); sig_req(plci, REJECT, 0); } - else if (Reject == 1 || Reject > 9) + else if (Reject == 1 || Reject >= 9) { add_ai(plci, &parms[5]); sig_req(plci, HANGUP, 0); diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index da3604e73e8a..1695ee5f3ffc 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -72,6 +72,19 @@ __acquires(bitmap->lock) /* this page has not been allocated yet */ spin_unlock_irq(&bitmap->lock); + /* It is possible that this is being called inside a + * prepare_to_wait/finish_wait loop from raid5c:make_request(). + * In general it is not permitted to sleep in that context as it + * can cause the loop to spin freely. + * That doesn't apply here as we can only reach this point + * once with any loop. + * When this function completes, either bp[page].map or + * bp[page].hijacked. In either case, this function will + * abort before getting to this point again. So there is + * no risk of a free-spin, and so it is safe to assert + * that sleeping here is allowed. + */ + sched_annotate_sleep(); mappage = kzalloc(PAGE_SIZE, GFP_NOIO); spin_lock_irq(&bitmap->lock); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index c1b0d52bfcb0..b98765f6f77f 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3195,6 +3195,11 @@ static void handle_stripe_dirtying(struct r5conf *conf, (unsigned long long)sh->sector, rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); } + + if (rcw > disks && rmw > disks && + !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) + set_bit(STRIPE_DELAYED, &sh->state); + /* now if nothing is locked, and if we have enough data, * we can start a write request */ diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index 5e40a8b68cbe..b3b922adc0e4 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c @@ -1415,7 +1415,6 @@ static int caif_hsi_newlink(struct net *src_net, struct net_device *dev, cfhsi = netdev_priv(dev); cfhsi_netlink_parms(data, cfhsi); - dev_net_set(cfhsi->ndev, src_net); get_ops = symbol_get(cfhsi_get_ops); if (!get_ops) { diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index 7a5e4aa5415e..77f1f6048ddd 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -45,7 +45,7 @@ config AMD8111_ETH config LANCE tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" - depends on ISA && ISA_DMA_API + depends on ISA && ISA_DMA_API && !ARM ---help--- If you have a network (Ethernet) card of this type, say Y and read the Ethernet-HOWTO, available from @@ -142,7 +142,7 @@ config PCMCIA_NMCLAN config NI65 tristate "NI6510 support" - depends on ISA && ISA_DMA_API + depends on ISA && ISA_DMA_API && !ARM ---help--- If you have a network (Ethernet) card of this type, say Y and read the Ethernet-HOWTO, available from diff --git 
a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c index 5b22764ba88d..27245efe9f50 100644 --- a/drivers/net/ethernet/amd/nmclan_cs.c +++ b/drivers/net/ethernet/amd/nmclan_cs.c @@ -952,6 +952,8 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) do { /* WARNING: MACE_IR is a READ/CLEAR port! */ status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR); + if (!(status & ~MACE_IMR_DEFAULT) && IntrCnt == MACE_MAX_IR_ITERATIONS) + return IRQ_NONE; pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 7bb5f07dbeef..e5ffb2ccb67d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -523,6 +523,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); + hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN); hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC); hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, HASHTBLSZ); @@ -552,13 +553,14 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) break; } - /* The Queue and Channel counts are zero based so increment them + /* The Queue, Channel and TC counts are zero based so increment them * to get the actual number */ hw_feat->rx_q_cnt++; hw_feat->tx_q_cnt++; hw_feat->rx_ch_cnt++; hw_feat->tx_ch_cnt++; + hw_feat->tc_cnt++; DBGPR("<--xgbe_get_all_hw_features\n"); } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 83a50280bb70..793f3b73eeff 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -369,6 +369,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) break; + /* read fpqnum field after dataaddr field */ + dma_rmb(); if (is_rx_desc(raw_desc)) ret = xgene_enet_rx_frame(ring, raw_desc); else diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig index 7403dff8f14a..905ac5f5d9a6 100644 --- a/drivers/net/ethernet/cirrus/Kconfig +++ b/drivers/net/ethernet/cirrus/Kconfig @@ -32,7 +32,8 @@ config CS89x0 will be called cs89x0. config CS89x0_PLATFORM - bool "CS89x0 platform driver support" + bool "CS89x0 platform driver support" if HAS_IOPORT_MAP + default !HAS_IOPORT_MAP depends on CS89x0 help Say Y to compile the cs89x0 driver as a platform driver. 
This diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 3e1a9c1a67a9..fda12fb32ec7 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1586,7 +1586,7 @@ static int gfar_write_filer_table(struct gfar_private *priv, return -EBUSY; /* Fill regular entries */ - for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); + for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); i++) gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); /* Fill the rest with fall-troughs */ diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 63c807c9b21c..edea13b0ee85 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1907,7 +1907,8 @@ static void igbvf_watchdog_task(struct work_struct *work) static int igbvf_tso(struct igbvf_adapter *adapter, struct igbvf_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) + struct sk_buff *skb, u32 tx_flags, u8 *hdr_len, + __be16 protocol) { struct e1000_adv_tx_context_desc *context_desc; struct igbvf_buffer *buffer_info; @@ -1927,7 +1928,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter, l4len = tcp_hdrlen(skb); *hdr_len += l4len; - if (skb->protocol == htons(ETH_P_IP)) { + if (protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); iph->tot_len = 0; iph->check = 0; @@ -1958,7 +1959,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter, /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); - if (skb->protocol == htons(ETH_P_IP)) + if (protocol == htons(ETH_P_IP)) tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; @@ -1984,7 +1985,8 @@ static int igbvf_tso(struct igbvf_adapter *adapter, static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, struct igbvf_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags) + struct sk_buff *skb, u32 tx_flags, + __be16 protocol) { struct e1000_adv_tx_context_desc *context_desc; unsigned int i; @@ -2011,7 +2013,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); if (skb->ip_summed == CHECKSUM_PARTIAL) { - switch (skb->protocol) { + switch (protocol) { case htons(ETH_P_IP): tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; if (ip_hdr(skb)->protocol == IPPROTO_TCP) @@ -2211,6 +2213,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, u8 hdr_len = 0; int count = 0; int tso = 0; + __be16 protocol = vlan_get_protocol(skb); if (test_bit(__IGBVF_DOWN, &adapter->state)) { dev_kfree_skb_any(skb); @@ -2239,13 +2242,13 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT); } - if (skb->protocol == htons(ETH_P_IP)) + if (protocol == htons(ETH_P_IP)) tx_flags |= IGBVF_TX_FLAGS_IPV4; first = tx_ring->next_to_use; tso = skb_is_gso(skb) ? 
- igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0; + igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0; if (unlikely(tso < 0)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -2253,7 +2256,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, if (tso) tx_flags |= IGBVF_TX_FLAGS_TSO; - else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) && + else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) && (skb->ip_summed == CHECKSUM_PARTIAL)) tx_flags |= IGBVF_TX_FLAGS_CSUM; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 2ed2c7de2304..67b02bde179e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7227,11 +7227,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, if (!vhdr) goto out_drop; - protocol = vhdr->h_vlan_encapsulated_proto; tx_flags |= ntohs(vhdr->h_vlan_TCI) << IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; } + protocol = vlan_get_protocol(skb); if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && adapter->ptp_clock && diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 62a0d8e0f17d..38c7a0be8197 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3099,7 +3099,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; - if (skb->protocol == htons(ETH_P_IP)) { + if (first->protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); iph->tot_len = 0; iph->check = 0; @@ -3156,7 +3156,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 l4_hdr = 0; - switch (skb->protocol) { + switch (first->protocol) { case htons(ETH_P_IP): vlan_macip_lens |= skb_network_header_len(skb); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index bdd4eea2247c..210691c89b6c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -235,7 +235,8 @@ do { \ extern int mlx4_log_num_mgm_entry_size; extern int log_mtts_per_seg; -#define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF) +#define MLX4_MAX_NUM_SLAVES (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \ + MLX4_MFUNC_MAX)) #define ALL_SLAVES 0xff struct mlx4_bitmap { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 18e5de72e9b4..4e1f58cf19ce 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -967,7 +967,12 @@ static int qlcnic_poll(struct napi_struct *napi, int budget) tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); work_done = qlcnic_process_rcv_ring(sds_ring, budget); - if ((work_done < budget) && tx_complete) { + + /* Check if we need a repoll */ + if (!tx_complete) + work_done = budget; + + if (work_done < budget) { napi_complete(&sds_ring->napi); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { qlcnic_enable_sds_intr(adapter, sds_ring); @@ -992,6 +997,9 @@ static int qlcnic_tx_poll(struct napi_struct *napi, int budget) napi_complete(&tx_ring->napi); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) qlcnic_enable_tx_intr(adapter, tx_ring); + } else { + /* As qlcnic_process_cmd_ring() 
returned 0, we need a repoll*/ + work_done = budget; } return work_done; @@ -1950,7 +1958,12 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget) tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); - if ((work_done < budget) && tx_complete) { + + /* Check if we need a repoll */ + if (!tx_complete) + work_done = budget; + + if (work_done < budget) { napi_complete(&sds_ring->napi); qlcnic_enable_sds_intr(adapter, sds_ring); } @@ -1973,7 +1986,12 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget) tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); - if ((work_done < budget) && tx_complete) { + + /* Check if we need a repoll */ + if (!tx_complete) + work_done = budget; + + if (work_done < budget) { napi_complete(&sds_ring->napi); qlcnic_enable_sds_intr(adapter, sds_ring); } @@ -1995,6 +2013,9 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget) napi_complete(&tx_ring->napi); if (test_bit(__QLCNIC_DEV_UP , &adapter->state)) qlcnic_enable_tx_intr(adapter, tx_ring); + } else { + /* need a repoll */ + work_done = budget; } return work_done; diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 6c904a6cad2a..ef5aed3b1225 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2351,23 +2351,29 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev, { struct ql_adapter *qdev = netdev_priv(ndev); int status = 0; + bool need_restart = netif_running(ndev); - status = ql_adapter_down(qdev); - if (status) { - netif_err(qdev, link, qdev->ndev, - "Failed to bring down the adapter\n"); - return status; + if (need_restart) { + status = ql_adapter_down(qdev); + if (status) { + netif_err(qdev, link, qdev->ndev, + "Failed to bring down the adapter\n"); + return status; + } } /* update the features with resent change */ ndev->features = features; - status = ql_adapter_up(qdev); - if (status) { - netif_err(qdev, link, qdev->ndev, - "Failed to bring up the adapter\n"); - return status; + if (need_restart) { + status = ql_adapter_up(qdev); + if (status) { + netif_err(qdev, link, qdev->ndev, + "Failed to bring up the adapter\n"); + return status; + } } + return status; } diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index d2835bf7b4fb..3699b98d5b2c 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -1119,6 +1119,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size; skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; } + nskb->queue_mapping = skb->queue_mapping; dev_kfree_skb(skb); skb = nskb; } diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 9f49c0129a78..7cd4eb38abfa 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -716,7 +716,7 @@ int netvsc_send(struct hv_device *device, u64 req_id; unsigned int section_index = NETVSC_INVALID_INDEX; u32 msg_size = 0; - struct sk_buff *skb; + struct sk_buff *skb = NULL; u16 q_idx = packet->q_idx; @@ -743,8 +743,6 @@ int netvsc_send(struct hv_device *device, packet); skb = (struct sk_buff *) (unsigned long)packet->send_completion_tid; - if (skb) - dev_kfree_skb_any(skb); packet->page_buf_cnt = 0; } } @@ -810,6 +808,13 @@ 
int netvsc_send(struct hv_device *device, packet, ret); } + if (ret != 0) { + if (section_index != NETVSC_INVALID_INDEX) + netvsc_free_send_slot(net_device, section_index); + } else if (skb) { + dev_kfree_skb_any(skb); + } + return ret; } diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 7df221788cd4..919f4fccc322 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -17,7 +17,6 @@ #include <linux/fs.h> #include <linux/uio.h> -#include <net/ipv6.h> #include <net/net_namespace.h> #include <net/rtnetlink.h> #include <net/sock.h> @@ -81,7 +80,7 @@ static struct cdev macvtap_cdev; static const struct proto_ops macvtap_socket_ops; #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ - NETIF_F_TSO6) + NETIF_F_TSO6 | NETIF_F_UFO) #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) @@ -586,11 +585,7 @@ static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q, gso_type = SKB_GSO_TCPV6; break; case VIRTIO_NET_HDR_GSO_UDP: - pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n", - current->comm); gso_type = SKB_GSO_UDP; - if (skb->protocol == htons(ETH_P_IPV6)) - ipv6_proxy_select_ident(skb); break; default: return -EINVAL; @@ -636,6 +631,8 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q, vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (sinfo->gso_type & SKB_GSO_TCPV6) vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; + else if (sinfo->gso_type & SKB_GSO_UDP) + vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP; else BUG(); if (sinfo->gso_type & SKB_GSO_TCP_ECN) @@ -965,6 +962,9 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg) if (arg & TUN_F_TSO6) feature_mask |= NETIF_F_TSO6; } + + if (arg & TUN_F_UFO) + feature_mask |= NETIF_F_UFO; } /* tun/tap driver inverts the usage for TSO offloads, where @@ -975,7 +975,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg) * When user space turns off TSO, we turn off GSO/LRO so that * user-space will not receive TSO frames. */ - if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6)) + if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO)) features |= RX_OFFLOADS; else features &= ~RX_OFFLOADS; @@ -1090,7 +1090,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, case TUNSETOFFLOAD: /* let the user check for future flags */ if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | - TUN_F_TSO_ECN)) + TUN_F_TSO_ECN | TUN_F_UFO)) return -EINVAL; rtnl_lock(); diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c index 602c625d95d5..b5edc7f96a39 100644 --- a/drivers/net/ppp/ppp_deflate.c +++ b/drivers/net/ppp/ppp_deflate.c @@ -246,7 +246,7 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf, /* * See if we managed to reduce the size of the packet. 
*/ - if (olen < isize) { + if (olen < isize && olen <= osize) { state->stats.comp_bytes += olen; state->stats.comp_packets++; } else { diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 8c8dc16839a7..10f9e4021b5a 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -65,7 +65,6 @@ #include <linux/nsproxy.h> #include <linux/virtio_net.h> #include <linux/rcupdate.h> -#include <net/ipv6.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/rtnetlink.h> @@ -187,7 +186,7 @@ struct tun_struct { struct net_device *dev; netdev_features_t set_features; #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ - NETIF_F_TSO6) + NETIF_F_TSO6|NETIF_F_UFO) int vnet_hdr_sz; int sndbuf; @@ -1167,8 +1166,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, break; } - skb_reset_network_header(skb); - if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { pr_debug("GSO!\n"); switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { @@ -1179,20 +1176,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; break; case VIRTIO_NET_HDR_GSO_UDP: - { - static bool warned; - - if (!warned) { - warned = true; - netdev_warn(tun->dev, - "%s: using disabled UFO feature; please fix this program\n", - current->comm); - } skb_shinfo(skb)->gso_type = SKB_GSO_UDP; - if (skb->protocol == htons(ETH_P_IPV6)) - ipv6_proxy_select_ident(skb); break; - } default: tun->dev->stats.rx_frame_errors++; kfree_skb(skb); @@ -1221,6 +1206,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; } + skb_reset_network_header(skb); skb_probe_transport_header(skb, 0); rxhash = skb_get_hash(skb); @@ -1298,6 +1284,8 @@ static ssize_t tun_put_user(struct tun_struct *tun, gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (sinfo->gso_type & SKB_GSO_TCPV6) gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; + else if (sinfo->gso_type & SKB_GSO_UDP) + gso.gso_type = VIRTIO_NET_HDR_GSO_UDP; else { pr_err("unexpected GSO type: " "0x%x, gso_size %d, hdr_len %d\n", @@ -1746,6 +1734,11 @@ static int set_offload(struct tun_struct *tun, unsigned long arg) features |= NETIF_F_TSO6; arg &= ~(TUN_F_TSO4|TUN_F_TSO6); } + + if (arg & TUN_F_UFO) { + features |= NETIF_F_UFO; + arg &= ~TUN_F_UFO; + } } /* This gives the user a way to test for new features in future by diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index 99b69af14274..4a1e9c489f1f 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -77,7 +77,7 @@ static int wait_phy_eeprom_ready(struct usbnet *dev, int phy) int ret; udelay(1); - ret = sr_read_reg(dev, EPCR, &tmp); + ret = sr_read_reg(dev, SR_EPCR, &tmp); if (ret < 0) return ret; @@ -98,15 +98,15 @@ static int sr_share_read_word(struct usbnet *dev, int phy, u8 reg, mutex_lock(&dev->phy_mutex); - sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); - sr_write_reg(dev, EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR); + sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); + sr_write_reg(dev, SR_EPCR, phy ? 
(EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR); ret = wait_phy_eeprom_ready(dev, phy); if (ret < 0) goto out_unlock; - sr_write_reg(dev, EPCR, 0x0); - ret = sr_read(dev, EPDR, 2, value); + sr_write_reg(dev, SR_EPCR, 0x0); + ret = sr_read(dev, SR_EPDR, 2, value); netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n", phy, reg, *value, ret); @@ -123,19 +123,19 @@ static int sr_share_write_word(struct usbnet *dev, int phy, u8 reg, mutex_lock(&dev->phy_mutex); - ret = sr_write(dev, EPDR, 2, &value); + ret = sr_write(dev, SR_EPDR, 2, &value); if (ret < 0) goto out_unlock; - sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); - sr_write_reg(dev, EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) : + sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); + sr_write_reg(dev, SR_EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) : (EPCR_WEP | EPCR_ERPRW)); ret = wait_phy_eeprom_ready(dev, phy); if (ret < 0) goto out_unlock; - sr_write_reg(dev, EPCR, 0x0); + sr_write_reg(dev, SR_EPCR, 0x0); out_unlock: mutex_unlock(&dev->phy_mutex); @@ -188,7 +188,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc) if (loc == MII_BMSR) { u8 value; - sr_read_reg(dev, NSR, &value); + sr_read_reg(dev, SR_NSR, &value); if (value & NSR_LINKST) rc = 1; } @@ -228,7 +228,7 @@ static u32 sr9700_get_link(struct net_device *netdev) int rc = 0; /* Get the Link Status directly */ - sr_read_reg(dev, NSR, &value); + sr_read_reg(dev, SR_NSR, &value); if (value & NSR_LINKST) rc = 1; @@ -281,8 +281,8 @@ static void sr9700_set_multicast(struct net_device *netdev) } } - sr_write_async(dev, MAR, SR_MCAST_SIZE, hashes); - sr_write_reg_async(dev, RCR, rx_ctl); + sr_write_async(dev, SR_MAR, SR_MCAST_SIZE, hashes); + sr_write_reg_async(dev, SR_RCR, rx_ctl); } static int sr9700_set_mac_address(struct net_device *netdev, void *p) @@ -297,7 +297,7 @@ static int sr9700_set_mac_address(struct net_device *netdev, void *p) } memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - sr_write_async(dev, PAR, 6, netdev->dev_addr); + sr_write_async(dev, SR_PAR, 6, netdev->dev_addr); return 0; } @@ -340,7 +340,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf) mii->phy_id_mask = 0x1f; mii->reg_num_mask = 0x1f; - sr_write_reg(dev, NCR, NCR_RST); + sr_write_reg(dev, SR_NCR, NCR_RST); udelay(20); /* read MAC @@ -348,17 +348,17 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf) * EEPROM automatically to PAR. In case there is no EEPROM externally, * a default MAC address is stored in PAR for making chip work properly. */ - if (sr_read(dev, PAR, ETH_ALEN, netdev->dev_addr) < 0) { + if (sr_read(dev, SR_PAR, ETH_ALEN, netdev->dev_addr) < 0) { netdev_err(netdev, "Error reading MAC address\n"); ret = -ENODEV; goto out; } /* power up and reset phy */ - sr_write_reg(dev, PRR, PRR_PHY_RST); + sr_write_reg(dev, SR_PRR, PRR_PHY_RST); /* at least 10ms, here 20ms for safe */ mdelay(20); - sr_write_reg(dev, PRR, 0); + sr_write_reg(dev, SR_PRR, 0); /* at least 1ms, here 2ms for reading right register */ udelay(2 * 1000); diff --git a/drivers/net/usb/sr9700.h b/drivers/net/usb/sr9700.h index fd687c575e74..258b030277e7 100644 --- a/drivers/net/usb/sr9700.h +++ b/drivers/net/usb/sr9700.h @@ -14,13 +14,13 @@ /* sr9700 spec. 
register table on Linux platform */ /* Network Control Reg */ -#define NCR 0x00 +#define SR_NCR 0x00 #define NCR_RST (1 << 0) #define NCR_LBK (3 << 1) #define NCR_FDX (1 << 3) #define NCR_WAKEEN (1 << 6) /* Network Status Reg */ -#define NSR 0x01 +#define SR_NSR 0x01 #define NSR_RXRDY (1 << 0) #define NSR_RXOV (1 << 1) #define NSR_TX1END (1 << 2) @@ -30,7 +30,7 @@ #define NSR_LINKST (1 << 6) #define NSR_SPEED (1 << 7) /* Tx Control Reg */ -#define TCR 0x02 +#define SR_TCR 0x02 #define TCR_CRC_DIS (1 << 1) #define TCR_PAD_DIS (1 << 2) #define TCR_LC_CARE (1 << 3) @@ -38,7 +38,7 @@ #define TCR_EXCECM (1 << 5) #define TCR_LF_EN (1 << 6) /* Tx Status Reg for Packet Index 1 */ -#define TSR1 0x03 +#define SR_TSR1 0x03 #define TSR1_EC (1 << 2) #define TSR1_COL (1 << 3) #define TSR1_LC (1 << 4) @@ -46,7 +46,7 @@ #define TSR1_LOC (1 << 6) #define TSR1_TLF (1 << 7) /* Tx Status Reg for Packet Index 2 */ -#define TSR2 0x04 +#define SR_TSR2 0x04 #define TSR2_EC (1 << 2) #define TSR2_COL (1 << 3) #define TSR2_LC (1 << 4) @@ -54,7 +54,7 @@ #define TSR2_LOC (1 << 6) #define TSR2_TLF (1 << 7) /* Rx Control Reg*/ -#define RCR 0x05 +#define SR_RCR 0x05 #define RCR_RXEN (1 << 0) #define RCR_PRMSC (1 << 1) #define RCR_RUNT (1 << 2) @@ -62,87 +62,87 @@ #define RCR_DIS_CRC (1 << 4) #define RCR_DIS_LONG (1 << 5) /* Rx Status Reg */ -#define RSR 0x06 +#define SR_RSR 0x06 #define RSR_AE (1 << 2) #define RSR_MF (1 << 6) #define RSR_RF (1 << 7) /* Rx Overflow Counter Reg */ -#define ROCR 0x07 +#define SR_ROCR 0x07 #define ROCR_ROC (0x7F << 0) #define ROCR_RXFU (1 << 7) /* Back Pressure Threshold Reg */ -#define BPTR 0x08 +#define SR_BPTR 0x08 #define BPTR_JPT (0x0F << 0) #define BPTR_BPHW (0x0F << 4) /* Flow Control Threshold Reg */ -#define FCTR 0x09 +#define SR_FCTR 0x09 #define FCTR_LWOT (0x0F << 0) #define FCTR_HWOT (0x0F << 4) /* rx/tx Flow Control Reg */ -#define FCR 0x0A +#define SR_FCR 0x0A #define FCR_FLCE (1 << 0) #define FCR_BKPA (1 << 4) #define FCR_TXPEN (1 << 5) #define FCR_TXPF (1 << 6) #define FCR_TXP0 (1 << 7) /* Eeprom & Phy Control Reg */ -#define EPCR 0x0B +#define SR_EPCR 0x0B #define EPCR_ERRE (1 << 0) #define EPCR_ERPRW (1 << 1) #define EPCR_ERPRR (1 << 2) #define EPCR_EPOS (1 << 3) #define EPCR_WEP (1 << 4) /* Eeprom & Phy Address Reg */ -#define EPAR 0x0C +#define SR_EPAR 0x0C #define EPAR_EROA (0x3F << 0) #define EPAR_PHY_ADR_MASK (0x03 << 6) #define EPAR_PHY_ADR (0x01 << 6) /* Eeprom & Phy Data Reg */ -#define EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */ +#define SR_EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */ /* Wakeup Control Reg */ -#define WCR 0x0F +#define SR_WCR 0x0F #define WCR_MAGICST (1 << 0) #define WCR_LINKST (1 << 2) #define WCR_MAGICEN (1 << 3) #define WCR_LINKEN (1 << 5) /* Physical Address Reg */ -#define PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */ +#define SR_PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */ /* Multicast Address Reg */ -#define MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */ +#define SR_MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */ /* 0x1e unused */ /* Phy Reset Reg */ -#define PRR 0x1F +#define SR_PRR 0x1F #define PRR_PHY_RST (1 << 0) /* Tx sdram Write Pointer Address Low */ -#define TWPAL 0x20 +#define SR_TWPAL 0x20 /* Tx sdram Write Pointer Address High */ -#define TWPAH 0x21 +#define SR_TWPAH 0x21 /* Tx sdram Read Pointer Address Low */ -#define TRPAL 0x22 +#define SR_TRPAL 0x22 /* Tx sdram Read Pointer Address High */ -#define TRPAH 0x23 +#define SR_TRPAH 0x23 /* Rx sdram Write Pointer Address Low */ -#define RWPAL 0x24 +#define SR_RWPAL 0x24 /* Rx sdram 
Write Pointer Address High */ -#define RWPAH 0x25 +#define SR_RWPAH 0x25 /* Rx sdram Read Pointer Address Low */ -#define RRPAL 0x26 +#define SR_RRPAL 0x26 /* Rx sdram Read Pointer Address High */ -#define RRPAH 0x27 +#define SR_RRPAH 0x27 /* Vendor ID register */ -#define VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */ +#define SR_VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */ /* Product ID register */ -#define PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */ +#define SR_PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */ /* CHIP Revision register */ -#define CHIPR 0x2C +#define SR_CHIPR 0x2C /* 0x2D --> 0xEF unused */ /* USB Device Address */ -#define USBDA 0xF0 +#define SR_USBDA 0xF0 #define USBDA_USBFA (0x7F << 0) /* RX packet Counter Reg */ -#define RXC 0xF1 +#define SR_RXC 0xF1 /* Tx packet Counter & USB Status Reg */ -#define TXC_USBS 0xF2 +#define SR_TXC_USBS 0xF2 #define TXC_USBS_TXC0 (1 << 0) #define TXC_USBS_TXC1 (1 << 1) #define TXC_USBS_TXC2 (1 << 2) @@ -150,7 +150,7 @@ #define TXC_USBS_SUSFLAG (1 << 6) #define TXC_USBS_RXFAULT (1 << 7) /* USB Control register */ -#define USBC 0xF4 +#define SR_USBC 0xF4 #define USBC_EP3NAK (1 << 4) #define USBC_EP3ACK (1 << 5) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 5ca97713bfb3..059fdf1bf5ee 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -490,17 +490,8 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; break; case VIRTIO_NET_HDR_GSO_UDP: - { - static bool warned; - - if (!warned) { - warned = true; - netdev_warn(dev, - "host using disabled UFO feature; please fix it\n"); - } skb_shinfo(skb)->gso_type = SKB_GSO_UDP; break; - } case VIRTIO_NET_HDR_GSO_TCPV6: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; break; @@ -888,6 +879,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; + else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) + hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; else BUG(); if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) @@ -1748,7 +1741,7 @@ static int virtnet_probe(struct virtio_device *vdev) dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { - dev->hw_features |= NETIF_F_TSO + dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO | NETIF_F_TSO_ECN | NETIF_F_TSO6; } /* Individual feature bits: what can host handle? */ @@ -1758,9 +1751,11 @@ static int virtnet_probe(struct virtio_device *vdev) dev->hw_features |= NETIF_F_TSO6; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) dev->hw_features |= NETIF_F_TSO_ECN; + if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) + dev->hw_features |= NETIF_F_UFO; if (gso) - dev->features |= dev->hw_features & NETIF_F_ALL_TSO; + dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); /* (!csum && gso) case will be fixed by register_netdev() */ } if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) @@ -1798,7 +1793,8 @@ static int virtnet_probe(struct virtio_device *vdev) /* If we can receive ANY GSO packets, we must allocate large ones. 
*/ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || - virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) vi->big_packets = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) @@ -1994,9 +1990,9 @@ static struct virtio_device_id id_table[] = { static unsigned int features[] = { VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, - VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6, + VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, - VIRTIO_NET_F_GUEST_ECN, + VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 7fbd89fbe107..a8c755dcab14 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2432,10 +2432,10 @@ static void vxlan_sock_work(struct work_struct *work) dev_put(vxlan->dev); } -static int vxlan_newlink(struct net *net, struct net_device *dev, +static int vxlan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { - struct vxlan_net *vn = net_generic(net, vxlan_net_id); + struct vxlan_net *vn = net_generic(src_net, vxlan_net_id); struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_rdst *dst = &vxlan->default_dst; __u32 vni; @@ -2445,7 +2445,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, if (!data[IFLA_VXLAN_ID]) return -EINVAL; - vxlan->net = dev_net(dev); + vxlan->net = src_net; vni = nla_get_u32(data[IFLA_VXLAN_ID]); dst->remote_vni = vni; @@ -2481,7 +2481,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, if (data[IFLA_VXLAN_LINK] && (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) { struct net_device *lowerdev - = __dev_get_by_index(net, dst->remote_ifindex); + = __dev_get_by_index(src_net, dst->remote_ifindex); if (!lowerdev) { pr_info("ifindex %d does not exist\n", dst->remote_ifindex); @@ -2557,7 +2557,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; - if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET, + if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET, vxlan->dst_port)) { pr_info("duplicate VNI %u\n", vni); return -EEXIST; diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 94e234975c61..a2fdd15f285a 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -25,7 +25,7 @@ if WAN # There is no way to detect a comtrol sv11 - force it modular for now. config HOSTESS_SV11 tristate "Comtrol Hostess SV-11 support" - depends on ISA && m && ISA_DMA_API && INET && HDLC + depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS help Driver for Comtrol Hostess SV-11 network card which operates on low speed synchronous serial links at up to @@ -37,7 +37,7 @@ config HOSTESS_SV11 # The COSA/SRP driver has not been tested as non-modular yet. config COSA tristate "COSA/SRP sync serial boards support" - depends on ISA && m && ISA_DMA_API && HDLC + depends on ISA && m && ISA_DMA_API && HDLC && VIRT_TO_BUS ---help--- Driver for COSA and SRP synchronous serial boards. 
@@ -87,7 +87,7 @@ config LANMEDIA # There is no way to detect a Sealevel board. Force it modular config SEALEVEL_4021 tristate "Sealevel Systems 4021 support" - depends on ISA && m && ISA_DMA_API && INET && HDLC + depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS help This is a driver for the Sealevel Systems ACB 56 serial I/O adapter. diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 9259a732e8a4..037f74f0fcf6 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -578,6 +578,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, goto err_rx_unbind; } queue->task = task; + get_task_struct(task); task = kthread_create(xenvif_dealloc_kthread, (void *)queue, "%s-dealloc", queue->name); @@ -634,6 +635,7 @@ void xenvif_disconnect(struct xenvif *vif) if (queue->task) { kthread_stop(queue->task); + put_task_struct(queue->task); queue->task = NULL; } diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 908e65e9b821..c8ce701a7efb 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -2109,8 +2109,7 @@ int xenvif_kthread_guest_rx(void *data) */ if (unlikely(vif->disabled && queue->id == 0)) { xenvif_carrier_off(vif); - xenvif_rx_queue_purge(queue); - continue; + break; } if (!skb_queue_empty(&queue->rx_queue)) diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index df781cdf13c1..17ca98657a28 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -283,6 +283,9 @@ static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev, struct msi_msg msg; struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata); + if (desc->msi_attrib.is_msix) + return -EINVAL; + irq = assign_irq(1, desc, &pos); if (irq < 0) return irq; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index e52356aa09b8..903d5078b5ed 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -324,18 +324,52 @@ static void quirk_s3_64M(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); +static void quirk_io(struct pci_dev *dev, int pos, unsigned size, + const char *name) +{ + u32 region; + struct pci_bus_region bus_region; + struct resource *res = dev->resource + pos; + + pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), ®ion); + + if (!region) + return; + + res->name = pci_name(dev); + res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK; + res->flags |= + (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN); + region &= ~(size - 1); + + /* Convert from PCI bus to resource space */ + bus_region.start = region; + bus_region.end = region + size - 1; + pcibios_bus_to_resource(dev->bus, res, &bus_region); + + dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n", + name, PCI_BASE_ADDRESS_0 + (pos << 2), res); +} + /* * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS * ver. 1.33 20070103) don't set the correct ISA PCI region header info. * BAR0 should be 8 bytes; instead, it may be set to something like 8k * (which conflicts w/ BAR1's memory range). + * + * CS553x's ISA PCI BARs may also be read-only (ref: + * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward). 
*/ static void quirk_cs5536_vsa(struct pci_dev *dev) { + static char *name = "CS5536 ISA bridge"; + if (pci_resource_len(dev, 0) != 8) { - struct resource *res = &dev->resource[0]; - res->end = res->start + 8 - 1; - dev_info(&dev->dev, "CS5536 ISA bridge bug detected (incorrect header); workaround applied\n"); + quirk_io(dev, 0, 8, name); /* SMB */ + quirk_io(dev, 1, 256, name); /* GPIO */ + quirk_io(dev, 2, 64, name); /* MFGPT */ + dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n", + name); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c index 1dba62c5cf6a..1efebc9eedfb 100644 --- a/drivers/scsi/device_handler/scsi_dh.c +++ b/drivers/scsi/device_handler/scsi_dh.c @@ -136,11 +136,12 @@ static void __detach_handler (struct kref *kref) struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh; struct scsi_device *sdev = scsi_dh_data->sdev; + scsi_dh->detach(sdev); + spin_lock_irq(sdev->request_queue->queue_lock); sdev->scsi_dh_data = NULL; spin_unlock_irq(sdev->request_queue->queue_lock); - scsi_dh->detach(sdev); sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name); module_put(scsi_dh->module); } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 399516925d80..05ea0d49a3a3 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2800,9 +2800,11 @@ static int sd_revalidate_disk(struct gendisk *disk) */ sd_set_flush_flag(sdkp); - max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), - sdkp->max_xfer_blocks); + max_xfer = sdkp->max_xfer_blocks; max_xfer <<= ilog2(sdp->sector_size) - 9; + + max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), + max_xfer); blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer); set_capacity(disk, sdkp->capacity); sd_config_write_same(sdkp); diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 4cda994d3f40..9b80d54d4ddb 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -342,8 +342,7 @@ static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) /* Only alloc on first setup */ chip = spi_get_ctldata(spi); if (chip == NULL) { - chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data), - GFP_KERNEL); + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); if (!chip) return -ENOMEM; } @@ -382,6 +381,16 @@ static int dspi_setup(struct spi_device *spi) return dspi_setup_transfer(spi, NULL); } +static void dspi_cleanup(struct spi_device *spi) +{ + struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi); + + dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n", + spi->master->bus_num, spi->chip_select); + + kfree(chip); +} + static irqreturn_t dspi_interrupt(int irq, void *dev_id) { struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id; @@ -467,6 +476,7 @@ static int dspi_probe(struct platform_device *pdev) dspi->bitbang.master->setup = dspi_setup; dspi->bitbang.master->dev.of_node = pdev->dev.of_node; + master->cleanup = dspi_cleanup; master->mode_bits = SPI_CPOL | SPI_CPHA; master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) | SPI_BPW_MASK(16); diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 961b97d43b43..fe1b7699fab6 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -823,6 +823,10 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx, struct dma_slave_config slave_config = {}; int ret; + /* use pio mode for i.mx6dl chip 
TKT238285 */ + if (of_machine_is_compatible("fsl,imx6dl")) + return 0; + /* Prepare for TX DMA: */ master->dma_tx = dma_request_slave_channel(dev, "tx"); if (!master->dma_tx) { diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index d415d69dc237..9484d5652ca5 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -650,8 +650,10 @@ static void handle_rx(struct vhost_net *net) break; } /* TODO: Should check and handle checksum. */ + + hdr.num_buffers = cpu_to_vhost16(vq, headcount); if (likely(mergeable) && - memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount, + memcpy_toiovecend(nvq->hdr, (void *)&hdr.num_buffers, offsetof(typeof(hdr), num_buffers), sizeof hdr.num_buffers)) { vq_err(vq, "Failed num_buffers write"); @@ -1140,6 +1140,13 @@ static long aio_read_events_ring(struct kioctx *ctx, long ret = 0; int copy_ret; + /* + * The mutex can block and wake us up and that will cause + * wait_event_interruptible_hrtimeout() to schedule without sleeping + * and repeat. This should be rare enough that it doesn't cause + * performance issues. See the comment in read_events() for more detail. + */ + sched_annotate_sleep(); mutex_lock(&ctx->ring_lock); /* Access to ->ring_pages here is protected by ctx->ring_lock. */ diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 9a02da16f2be..1a9585d4380a 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2591,6 +2591,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, } if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) { + blk_finish_plug(&plug); mutex_unlock(&log_root_tree->log_mutex); ret = root_log_ctx.log_ret; goto out; } diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 9c56ef776407..7febcf2475c5 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -606,9 +606,11 @@ cifs_security_flags_handle_must_flags(unsigned int *flags) *flags = CIFSSEC_MUST_NTLMV2; else if ((*flags & CIFSSEC_MUST_NTLM) == CIFSSEC_MUST_NTLM) *flags = CIFSSEC_MUST_NTLM; - else if ((*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN) + else if (CIFSSEC_MUST_LANMAN && + (*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN) *flags = CIFSSEC_MUST_LANMAN; - else if ((*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT) + else if (CIFSSEC_MUST_PLNTXT && + (*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT) *flags = CIFSSEC_MUST_PLNTXT; *flags |= signflags; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 96b7e9b7706d..74f12877493a 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -366,6 +366,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) struct cifsLockInfo *li, *tmp; struct cifs_fid fid; struct cifs_pending_open open; + bool oplock_break_cancelled; spin_lock(&cifs_file_list_lock); if (--cifs_file->count > 0) { @@ -397,7 +398,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) } spin_unlock(&cifs_file_list_lock); - cancel_work_sync(&cifs_file->oplock_break); + oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break); if (!tcon->need_reconnect && !cifs_file->invalidHandle) { struct TCP_Server_Info *server = tcon->ses->server; @@ -409,6 +410,9 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) _free_xid(xid); } + if (oplock_break_cancelled) + cifs_done_oplock_break(cifsi); + cifs_del_pending_open(&open); /* diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c index 6c1566366a66..a4232ec4f2ba 100644 --- a/fs/cifs/smbencrypt.c +++ b/fs/cifs/smbencrypt.c @@ -221,7 +221,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16, } rc = mdfour(p16, (unsigned 
char *) wpwd, len * sizeof(__le16)); - memset(wpwd, 0, 129 * sizeof(__le16)); + memzero_explicit(wpwd, sizeof(wpwd)); return rc; } diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index 91093cd74f0d..385704027575 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -141,7 +141,6 @@ enum { * @ti_save: Backup of journal_info field of task_struct * @ti_flags: Flags * @ti_count: Nest level - * @ti_garbage: List of inode to be put when releasing semaphore */ struct nilfs_transaction_info { u32 ti_magic; @@ -150,7 +149,6 @@ struct nilfs_transaction_info { one of other filesystems has a bug. */ unsigned short ti_flags; unsigned short ti_count; - struct list_head ti_garbage; }; /* ti_magic */ diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 7ef18fc656c2..469086b9f99b 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -305,7 +305,6 @@ static void nilfs_transaction_lock(struct super_block *sb, ti->ti_count = 0; ti->ti_save = cur_ti; ti->ti_magic = NILFS_TI_MAGIC; - INIT_LIST_HEAD(&ti->ti_garbage); current->journal_info = ti; for (;;) { @@ -332,8 +331,6 @@ static void nilfs_transaction_unlock(struct super_block *sb) up_write(&nilfs->ns_segctor_sem); current->journal_info = ti->ti_save; - if (!list_empty(&ti->ti_garbage)) - nilfs_dispose_list(nilfs, &ti->ti_garbage, 0); } static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci, @@ -746,6 +743,15 @@ static void nilfs_dispose_list(struct the_nilfs *nilfs, } } +static void nilfs_iput_work_func(struct work_struct *work) +{ + struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info, + sc_iput_work); + struct the_nilfs *nilfs = sci->sc_super->s_fs_info; + + nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0); +} + static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs, struct nilfs_root *root) { @@ -1900,8 +1906,8 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci, static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, struct the_nilfs *nilfs) { - struct nilfs_transaction_info *ti = current->journal_info; struct nilfs_inode_info *ii, *n; + int defer_iput = false; spin_lock(&nilfs->ns_inode_lock); list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) { @@ -1912,9 +1918,24 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, clear_bit(NILFS_I_BUSY, &ii->i_state); brelse(ii->i_bh); ii->i_bh = NULL; - list_move_tail(&ii->i_dirty, &ti->ti_garbage); + list_del_init(&ii->i_dirty); + if (!ii->vfs_inode.i_nlink) { + /* + * Defer calling iput() to avoid a deadlock + * over I_SYNC flag for inodes with i_nlink == 0 + */ + list_add_tail(&ii->i_dirty, &sci->sc_iput_queue); + defer_iput = true; + } else { + spin_unlock(&nilfs->ns_inode_lock); + iput(&ii->vfs_inode); + spin_lock(&nilfs->ns_inode_lock); + } } spin_unlock(&nilfs->ns_inode_lock); + + if (defer_iput) + schedule_work(&sci->sc_iput_work); } /* @@ -2583,6 +2604,8 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb, INIT_LIST_HEAD(&sci->sc_segbufs); INIT_LIST_HEAD(&sci->sc_write_logs); INIT_LIST_HEAD(&sci->sc_gc_inodes); + INIT_LIST_HEAD(&sci->sc_iput_queue); + INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func); init_timer(&sci->sc_timer); sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT; @@ -2609,6 +2632,8 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci) ret = nilfs_segctor_construct(sci, SC_LSEG_SR); nilfs_transaction_unlock(sci->sc_super); + flush_work(&sci->sc_iput_work); + } while (ret && retrycount-- > 0); } @@ -2633,6 +2658,9 @@ static void 
nilfs_segctor_destroy(struct nilfs_sc_info *sci) || sci->sc_seq_request != sci->sc_seq_done); spin_unlock(&sci->sc_state_lock); + if (flush_work(&sci->sc_iput_work)) + flag = true; + if (flag || !nilfs_segctor_confirm(sci)) nilfs_segctor_write_out(sci); @@ -2642,6 +2670,12 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci) nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1); } + if (!list_empty(&sci->sc_iput_queue)) { + nilfs_warning(sci->sc_super, __func__, + "iput queue is not empty\n"); + nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1); + } + WARN_ON(!list_empty(&sci->sc_segbufs)); WARN_ON(!list_empty(&sci->sc_write_logs)); diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h index 38a1d0013314..a48d6de1e02c 100644 --- a/fs/nilfs2/segment.h +++ b/fs/nilfs2/segment.h @@ -26,6 +26,7 @@ #include <linux/types.h> #include <linux/fs.h> #include <linux/buffer_head.h> +#include <linux/workqueue.h> #include <linux/nilfs2_fs.h> #include "nilfs.h" @@ -92,6 +93,8 @@ struct nilfs_segsum_pointer { * @sc_nblk_inc: Block count of current generation * @sc_dirty_files: List of files to be written * @sc_gc_inodes: List of GC inodes having blocks to be written + * @sc_iput_queue: list of inodes for which iput should be done + * @sc_iput_work: work struct to defer iput call * @sc_freesegs: array of segment numbers to be freed * @sc_nfreesegs: number of segments on @sc_freesegs * @sc_dsync_inode: inode whose data pages are written for a sync operation @@ -135,6 +138,8 @@ struct nilfs_sc_info { struct list_head sc_dirty_files; struct list_head sc_gc_inodes; + struct list_head sc_iput_queue; + struct work_struct sc_iput_work; __u64 *sc_freesegs; size_t sc_nfreesegs; diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 515a35e2a48a..960e666c51e4 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -472,27 +472,59 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) /** * vlan_get_protocol - get protocol EtherType. * @skb: skbuff to query + * @type: first vlan protocol + * @depth: buffer to store length of eth and vlan tags in bytes * * Returns the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. 
*/ -static inline __be16 vlan_get_protocol(const struct sk_buff *skb) +static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, + int *depth) { - __be16 protocol = 0; - - if (vlan_tx_tag_present(skb) || - skb->protocol != cpu_to_be16(ETH_P_8021Q)) - protocol = skb->protocol; - else { - __be16 proto, *protop; - protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr, - h_vlan_encapsulated_proto), - sizeof(proto), &proto); - if (likely(protop)) - protocol = *protop; + unsigned int vlan_depth = skb->mac_len; + + /* if type is 802.1Q/AD then the header should already be + * present at mac_len - VLAN_HLEN (if mac_len > 0), or at + * ETH_HLEN otherwise + */ + if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { + if (vlan_depth) { + if (WARN_ON(vlan_depth < VLAN_HLEN)) + return 0; + vlan_depth -= VLAN_HLEN; + } else { + vlan_depth = ETH_HLEN; + } + do { + struct vlan_hdr *vh; + + if (unlikely(!pskb_may_pull(skb, + vlan_depth + VLAN_HLEN))) + return 0; + + vh = (struct vlan_hdr *)(skb->data + vlan_depth); + type = vh->h_vlan_encapsulated_proto; + vlan_depth += VLAN_HLEN; + } while (type == htons(ETH_P_8021Q) || + type == htons(ETH_P_8021AD)); } - return protocol; + if (depth) + *depth = vlan_depth; + + return type; +} + +/** + * vlan_get_protocol - get protocol EtherType. + * @skb: skbuff to query + * + * Returns the EtherType of the packet, regardless of whether it is + * vlan encapsulated (normal or hardware accelerated) or not. + */ +static inline __be16 vlan_get_protocol(struct sk_buff *skb) +{ + return __vlan_get_protocol(skb, skb->protocol, NULL); } static inline void vlan_set_encap_proto(struct sk_buff *skb, diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 25c791e295fd..5f3a9aa7225d 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -97,7 +97,7 @@ enum { MLX4_MAX_NUM_PF = 16, MLX4_MAX_NUM_VF = 126, MLX4_MAX_NUM_VF_P_PORT = 64, - MLX4_MFUNC_MAX = 80, + MLX4_MFUNC_MAX = 128, MLX4_MAX_EQ_NUM = 1024, MLX4_MFUNC_EQ_NUM = 4, MLX4_MFUNC_MAX_EQES = 8, diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index e08e21e5f601..c72851328ca9 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -173,7 +173,7 @@ extern void syscall_unregfunc(void); TP_PROTO(data_proto), \ TP_ARGS(data_args), \ TP_CONDITION(cond),,); \ - if (IS_ENABLED(CONFIG_LOCKDEP)) { \ + if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \ rcu_read_lock_sched_notrace(); \ rcu_dereference_sched(__tracepoint_##name.funcs);\ rcu_read_unlock_sched_notrace(); \ diff --git a/include/linux/wait.h b/include/linux/wait.h index 2232ed16635a..37423e0e1379 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -363,7 +363,6 @@ do { \ */ #define wait_event_cmd(wq, condition, cmd1, cmd2) \ do { \ - might_sleep(); \ if (condition) \ break; \ __wait_event_cmd(wq, condition, cmd1, cmd2); \ diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h index 7ee2df083542..dc8fd81412bf 100644 --- a/include/net/flow_keys.h +++ b/include/net/flow_keys.h @@ -22,9 +22,9 @@ struct flow_keys { __be32 ports; __be16 port16[2]; }; - u16 thoff; - u16 n_proto; - u8 ip_proto; + u16 thoff; + __be16 n_proto; + u8 ip_proto; }; bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow, diff --git a/include/net/ip.h b/include/net/ip.h index f7cbd703d15d..09cf5aebb283 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -181,7 +181,7 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg) return 
(arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0; } -void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, +void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, const struct ip_options *sopt, __be32 daddr, __be32 saddr, const struct ip_reply_arg *arg, diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 4292929392b0..6e416f6d3e3c 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -671,6 +671,9 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr)); } +u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst, + struct in6_addr *src); +void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt); void ipv6_proxy_select_ident(struct sk_buff *skb); int ip6_dst_hoplimit(struct dst_entry *dst); @@ -708,7 +711,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, __be32 flowlabel, bool autolabel) { if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) { - __be32 hash; + u32 hash; hash = skb_get_hash(skb); @@ -718,7 +721,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, */ hash ^= hash >> 12; - flowlabel = hash & IPV6_FLOWLABEL_MASK; + flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; } return flowlabel; diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 3ae969e3acf0..9eaaa7884586 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -530,6 +530,8 @@ enum nft_chain_type { int nft_chain_validate_dependency(const struct nft_chain *chain, enum nft_chain_type type); +int nft_chain_validate_hooks(const struct nft_chain *chain, + unsigned int hook_flags); struct nft_stats { u64 bytes; diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 24945cefc4fd..0ffef1a38efc 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -52,6 +52,7 @@ struct netns_ipv4 { struct inet_peer_base *peers; struct tcpm_hash_bucket *tcp_metrics_hash; unsigned int tcp_metrics_hash_log; + struct sock * __percpu *tcp_sk; struct netns_frags frags; #ifdef CONFIG_NETFILTER struct xt_table *iptable_filter; diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 3d282cbb66bf..c605d305c577 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -79,6 +79,9 @@ struct Qdisc { struct netdev_queue *dev_queue; struct gnet_stats_rate_est64 rate_est; + struct gnet_stats_basic_cpu __percpu *cpu_bstats; + struct gnet_stats_queue __percpu *cpu_qstats; + struct Qdisc *next_sched; struct sk_buff *gso_skb; /* @@ -86,15 +89,9 @@ struct Qdisc { */ unsigned long state; struct sk_buff_head q; - union { - struct gnet_stats_basic_packed bstats; - struct gnet_stats_basic_cpu __percpu *cpu_bstats; - } __packed; + struct gnet_stats_basic_packed bstats; unsigned int __state; - union { - struct gnet_stats_queue qstats; - struct gnet_stats_queue __percpu *cpu_qstats; - } __packed; + struct gnet_stats_queue qstats; struct rcu_head rcu_head; int padded; atomic_t refcnt; diff --git a/include/net/tcp.h b/include/net/tcp.h index f50f29faf76f..9d9111ef43ae 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -834,8 +834,8 @@ void tcp_get_available_congestion_control(char *buf, size_t len); void tcp_get_allowed_congestion_control(char *buf, size_t len); int tcp_set_allowed_congestion_control(char *allowed); int tcp_set_congestion_control(struct sock *sk, const char *name); -void tcp_slow_start(struct 
tcp_sock *tp, u32 acked); -void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w); +u32 tcp_slow_start(struct tcp_sock *tp, u32 acked); +void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked); u32 tcp_reno_ssthresh(struct sock *sk); void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 0d74f1de99aa..65994a19e840 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1707,10 +1707,7 @@ static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) { - size_t copy_sz; - - copy_sz = min_t(size_t, len, udata->outlen); - return copy_to_user(udata->outbuf, src, copy_sz) ? -EFAULT : 0; + return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; } /** diff --git a/include/sound/ak4113.h b/include/sound/ak4113.h index 2609048c1d44..3a34f6edc2d1 100644 --- a/include/sound/ak4113.h +++ b/include/sound/ak4113.h @@ -286,7 +286,7 @@ struct ak4113 { ak4113_write_t *write; ak4113_read_t *read; void *private_data; - unsigned int init:1; + atomic_t wq_processing; spinlock_t lock; unsigned char regmap[AK4113_WRITABLE_REGS]; struct snd_kcontrol *kctls[AK4113_CONTROLS]; diff --git a/include/sound/ak4114.h b/include/sound/ak4114.h index 52f02a60dba7..069299a88915 100644 --- a/include/sound/ak4114.h +++ b/include/sound/ak4114.h @@ -168,7 +168,7 @@ struct ak4114 { ak4114_write_t * write; ak4114_read_t * read; void * private_data; - unsigned int init: 1; + atomic_t wq_processing; spinlock_t lock; unsigned char regmap[6]; unsigned char txcsb[5]; diff --git a/include/sound/soc.h b/include/sound/soc.h index b4fca9aed2a2..ac8b333acb4d 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -498,6 +498,7 @@ int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg, unsigned int mask, unsigned int value); #ifdef CONFIG_SND_SOC_AC97_BUS +struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec); struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec); void snd_soc_free_ac97_codec(struct snd_ac97 *ac97); diff --git a/include/trace/events/tlb.h b/include/trace/events/tlb.h index 13391d288107..0e7635765153 100644 --- a/include/trace/events/tlb.h +++ b/include/trace/events/tlb.h @@ -13,11 +13,13 @@ { TLB_LOCAL_SHOOTDOWN, "local shootdown" }, \ { TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" } -TRACE_EVENT(tlb_flush, +TRACE_EVENT_CONDITION(tlb_flush, TP_PROTO(int reason, unsigned long pages), TP_ARGS(reason, pages), + TP_CONDITION(cpu_online(smp_processor_id())), + TP_STRUCT__entry( __field( int, reason) __field(unsigned long, pages) diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 4275b961bf60..867cc5084afb 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -90,7 +90,6 @@ enum { }; enum { - IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE, IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD, IB_USER_VERBS_EX_CMD_DESTROY_FLOW, }; @@ -202,32 +201,6 @@ struct ib_uverbs_query_device_resp { __u8 reserved[4]; }; -enum { - IB_USER_VERBS_EX_QUERY_DEVICE_ODP = 1ULL << 0, -}; - -struct ib_uverbs_ex_query_device { - __u32 comp_mask; - __u32 reserved; -}; - -struct ib_uverbs_odp_caps { - __u64 general_caps; - struct { - __u32 rc_odp_caps; - __u32 uc_odp_caps; - __u32 ud_odp_caps; - } per_transport_caps; - __u32 reserved; -}; - -struct ib_uverbs_ex_query_device_resp { - struct 
ib_uverbs_query_device_resp base; - __u32 comp_mask; - __u32 reserved; - struct ib_uverbs_odp_caps odp_caps; -}; - struct ib_uverbs_query_port { __u64 response; __u8 port_num; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e628cb11b560..5eab11d4b747 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1814,6 +1814,10 @@ void __dl_clear_params(struct task_struct *p) dl_se->dl_period = 0; dl_se->flags = 0; dl_se->dl_bw = 0; + + dl_se->dl_throttled = 0; + dl_se->dl_new = 1; + dl_se->dl_yielded = 0; } /* @@ -1839,7 +1843,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) #endif RB_CLEAR_NODE(&p->dl.rb_node); - hrtimer_init(&p->dl.dl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + init_dl_task_timer(&p->dl); __dl_clear_params(p); INIT_LIST_HEAD(&p->rt.run_list); @@ -2049,6 +2053,9 @@ static inline int dl_bw_cpus(int i) * allocated bandwidth to reflect the new situation. * * This function is called while holding p's rq->lock. + * + * XXX we should delay bw change until the task's 0-lag point, see + * __setparam_dl(). */ static int dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr) @@ -3251,15 +3258,31 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr) { struct sched_dl_entity *dl_se = &p->dl; - init_dl_task_timer(dl_se); dl_se->dl_runtime = attr->sched_runtime; dl_se->dl_deadline = attr->sched_deadline; dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; dl_se->flags = attr->sched_flags; dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); - dl_se->dl_throttled = 0; - dl_se->dl_new = 1; - dl_se->dl_yielded = 0; + + /* + * Changing the parameters of a task is 'tricky' and we're not doing + * the correct thing -- also see task_dead_dl() and switched_from_dl(). + * + * What we SHOULD do is delay the bandwidth release until the 0-lag + * point. This would include retaining the task_struct until that time + * and change dl_overflow() to not immediately decrement the current + * amount. + * + * Instead we retain the current runtime/deadline and let the new + * parameters take effect after the current reservation period lapses. + * This is safe (albeit pessimistic) because the 0-lag point is always + * before the current scheduling deadline. + * + * We can still have temporary overloads because we do not delay the + * change in bandwidth until that time; so admission control is + * not on the safe side. It does however guarantee tasks will never + * consume more than promised. + */ } /* @@ -4642,6 +4665,9 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur, struct dl_bw *cur_dl_b; unsigned long flags; + if (!cpumask_weight(cur)) + return ret; + rcu_read_lock_sched(); cur_dl_b = dl_bw_of(cpumask_any(cur)); trial_cpus = cpumask_weight(trial); diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index b52092f2636d..726470d47f87 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1094,6 +1094,7 @@ static void task_dead_dl(struct task_struct *p) * Since we are TASK_DEAD we won't slip out of the domain! 
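Illustrative aside (not part of the patch): the __setparam_dl()/dl_overflow() comments above revolve around SCHED_DEADLINE bandwidth accounting — each task reserves runtime/period of a CPU, and admission control must keep the running total (dl_b->total_bw) within the available bandwidth. The standalone C sketch below models that admission test in userspace; the 20-bit fixed-point scale, the to_ratio() reimplementation and the task set are assumptions of the sketch, not the kernel's internals.

/* Userspace sketch of deadline-style admission control; all values local. */
#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT 20 /* fixed-point scale assumed for this sketch */

static uint64_t to_ratio(uint64_t period_ns, uint64_t runtime_ns)
{
    return (runtime_ns << BW_SHIFT) / period_ns;
}

int main(void)
{
    /* hypothetical task set: { runtime, period } in nanoseconds */
    uint64_t tasks[][2] = {
        { 2 * 1000 * 1000, 10 * 1000 * 1000 },  /* 20% */
        { 5 * 1000 * 1000, 20 * 1000 * 1000 },  /* 25% */
        { 3 * 1000 * 1000,  5 * 1000 * 1000 },  /* 60% */
    };
    uint64_t total_bw = 0, max_bw = 1ULL << BW_SHIFT; /* 100% of one CPU */

    for (unsigned i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++) {
        uint64_t bw = to_ratio(tasks[i][1], tasks[i][0]);

        if (total_bw + bw > max_bw) {
            printf("task %u rejected (would overflow the bandwidth)\n", i);
            continue;
        }
        total_bw += bw;
        printf("task %u admitted, total %.1f%%\n", i, 100.0 * total_bw / max_bw);
    }
    return 0;
}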
*/ raw_spin_lock_irq(&dl_b->lock); + /* XXX we should retain the bw until 0-lag */ dl_b->total_bw -= p->dl.dl_bw; raw_spin_unlock_irq(&dl_b->lock); @@ -1614,8 +1615,8 @@ static void cancel_dl_timer(struct rq *rq, struct task_struct *p) static void switched_from_dl(struct rq *rq, struct task_struct *p) { + /* XXX we should retain the bw until 0-lag */ cancel_dl_timer(rq, p); - __dl_clear_params(p); /* diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 40667cbf371b..fe331fc391f5 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1730,7 +1730,7 @@ static int preferred_group_nid(struct task_struct *p, int nid) nodes = node_online_map; for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) { unsigned long max_faults = 0; - nodemask_t max_group; + nodemask_t max_group = NODE_MASK_NONE; int a, b; /* Are there nodes at this distance from each other? */ diff --git a/kernel/smpboot.c b/kernel/smpboot.c index f032fb5284e3..40190f28db35 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c @@ -280,6 +280,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread) unsigned int cpu; int ret = 0; + get_online_cpus(); mutex_lock(&smpboot_threads_lock); for_each_online_cpu(cpu) { ret = __smpboot_create_thread(plug_thread, cpu); @@ -292,6 +293,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread) list_add(&plug_thread->list, &hotplug_threads); out: mutex_unlock(&smpboot_threads_lock); + put_online_cpus(); return ret; } EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread); diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 37e50aadd471..d8c724cda37b 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -122,7 +122,7 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base) mono = ktime_get_update_offsets_tick(&off_real, &off_boot, &off_tai); boot = ktime_add(mono, off_boot); xtim = ktime_add(mono, off_real); - tai = ktime_add(xtim, off_tai); + tai = ktime_add(mono, off_tai); base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim; base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono; diff --git a/lib/checksum.c b/lib/checksum.c index 129775eb6de6..8b39e86dbab5 100644 --- a/lib/checksum.c +++ b/lib/checksum.c @@ -181,6 +181,15 @@ csum_partial_copy(const void *src, void *dst, int len, __wsum sum) EXPORT_SYMBOL(csum_partial_copy); #ifndef csum_tcpudp_nofold +static inline u32 from64to32(u64 x) +{ + /* add up 32-bit and 32-bit for 32+c bit */ + x = (x & 0xffffffff) + (x >> 32); + /* add up carry.. */ + x = (x & 0xffffffff) + (x >> 32); + return (u32)x; +} + __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, unsigned short proto, @@ -195,8 +204,7 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, #else s += (proto + len) << 8; #endif - s += (s >> 32); - return (__force __wsum)s; + return (__force __wsum)from64to32(s); } EXPORT_SYMBOL(csum_tcpudp_nofold); #endif diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 683b4782019b..2f6893c2f01b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5773,7 +5773,7 @@ void mem_cgroup_uncharge_list(struct list_head *page_list) * mem_cgroup_migrate - migrate a charge to another page * @oldpage: currently charged page * @newpage: page to transfer the charge to - * @lrucare: both pages might be on the LRU already + * @lrucare: either or both pages might be on the LRU already * * Migrate the charge from @oldpage to @newpage. 
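Illustrative aside (not part of the patch): the lib/checksum.c hunk above replaces a single s += (s >> 32) with from64to32(), which folds twice because the first fold can itself carry out of the low 32 bits. The standalone sketch below shows an input where the single fold loses that carry; the fold_once() name and the example value are the sketch's own.

/* Standalone demo: why one fold of a 64-bit checksum accumulator is not
 * enough. from64to32() mirrors the helper added to lib/checksum.c,
 * fold_once() mirrors the old csum_tcpudp_nofold() behaviour. */
#include <stdio.h>
#include <stdint.h>

static uint32_t fold_once(uint64_t x)
{
    x += x >> 32;                       /* carry out of bit 31 is lost below */
    return (uint32_t)x;
}

static uint32_t from64to32(uint64_t x)
{
    x = (x & 0xffffffff) + (x >> 32);   /* add up the 32-bit halves */
    x = (x & 0xffffffff) + (x >> 32);   /* fold the carry back in */
    return (uint32_t)x;
}

int main(void)
{
    uint64_t s = 0x00000001ffffffffULL; /* high word 1, low word all-ones */

    printf("accumulator : 0x%016llx\n", (unsigned long long)s);
    printf("single fold : 0x%08x\n", fold_once(s));   /* drops the carry */
    printf("double fold : 0x%08x\n", from64to32(s));
    return 0;
}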
* diff --git a/mm/nommu.c b/mm/nommu.c index b51eadf6d952..28bd8c4dff6f 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -59,6 +59,7 @@ #endif void *high_memory; +EXPORT_SYMBOL(high_memory); struct page *mem_map; unsigned long max_mapnr; unsigned long highest_memmap_pfn; diff --git a/mm/pagewalk.c b/mm/pagewalk.c index ad83195521f2..b264bda46e1b 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -199,7 +199,10 @@ int walk_page_range(unsigned long addr, unsigned long end, */ if ((vma->vm_start <= addr) && (vma->vm_flags & VM_PFNMAP)) { - next = vma->vm_end; + if (walk->pte_hole) + err = walk->pte_hole(addr, next, walk); + if (err) + break; pgd = pgd_offset(walk->mm, next); continue; } diff --git a/mm/shmem.c b/mm/shmem.c index 73ba1df7c8ba..993e6ba689cc 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1013,7 +1013,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, */ oldpage = newpage; } else { - mem_cgroup_migrate(oldpage, newpage, false); + mem_cgroup_migrate(oldpage, newpage, true); lru_cache_add_anon(newpage); *pagep = newpage; } diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index b0330aecbf97..3244aead0926 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c @@ -265,22 +265,12 @@ out: data[NFT_REG_VERDICT].verdict = NF_DROP; } -static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain) +static int nft_reject_bridge_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) { - struct nft_base_chain *basechain; - - if (chain->flags & NFT_BASE_CHAIN) { - basechain = nft_base_chain(chain); - - switch (basechain->ops[0].hooknum) { - case NF_BR_PRE_ROUTING: - case NF_BR_LOCAL_IN: - break; - default: - return -EOPNOTSUPP; - } - } - return 0; + return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) | + (1 << NF_BR_LOCAL_IN)); } static int nft_reject_bridge_init(const struct nft_ctx *ctx, @@ -290,7 +280,7 @@ static int nft_reject_bridge_init(const struct nft_ctx *ctx, struct nft_reject *priv = nft_expr_priv(expr); int icmp_code, err; - err = nft_reject_bridge_validate_hooks(ctx->chain); + err = nft_reject_bridge_validate(ctx, expr, NULL); if (err < 0) return err; @@ -341,13 +331,6 @@ nla_put_failure: return -1; } -static int nft_reject_bridge_validate(const struct nft_ctx *ctx, - const struct nft_expr *expr, - const struct nft_data **data) -{ - return nft_reject_bridge_validate_hooks(ctx->chain); -} - static struct nft_expr_type nft_reject_bridge_type; static const struct nft_expr_ops nft_reject_bridge_ops = { .type = &nft_reject_bridge_type, diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 4589ff67bfa9..67a4a36febd1 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -470,7 +470,6 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev, ASSERT_RTNL(); caifdev = netdev_priv(dev); caif_netlink_parms(data, &caifdev->conn_req); - dev_net_set(caifdev->netdev, src_net); ret = register_netdevice(dev); if (ret) diff --git a/net/core/dev.c b/net/core/dev.c index 171420e75b03..7fe82929f509 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2352,7 +2352,6 @@ EXPORT_SYMBOL(skb_checksum_help); __be16 skb_network_protocol(struct sk_buff *skb, int *depth) { - unsigned int vlan_depth = skb->mac_len; __be16 type = skb->protocol; /* Tunnel gso handlers can set protocol to ethernet. 
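Illustrative aside (not part of the patch): the net/core/dev.c hunk that follows makes skb_network_protocol() delegate the nested 802.1Q/802.1ad walk to the new __vlan_get_protocol(). The standalone userspace sketch here performs the same walk over a raw frame buffer to find the inner EtherType and the network-header depth; the frame bytes and the locally re-declared constants are assumptions of the sketch.

/* Userspace walk over stacked VLAN tags, echoing __vlan_get_protocol(). */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>  /* ntohs() */

#define ETH_ALEN     6
#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88A8
#define VLAN_HLEN    4

int main(void)
{
    /* dst MAC, src MAC, 802.1ad outer tag, 802.1Q inner tag, IPv4 EtherType */
    uint8_t frame[] = {
        0,0,0,0,0,1,  0,0,0,0,0,2,
        0x88, 0xA8, 0x00, 0x64,   /* S-VLAN 100 */
        0x81, 0x00, 0x00, 0x0a,   /* C-VLAN 10  */
        0x08, 0x00,               /* ETH_P_IP   */
    };
    size_t off = 2 * ETH_ALEN;
    uint16_t type;

    memcpy(&type, frame + off, sizeof(type));
    type = ntohs(type);
    off += 2;

    while (type == ETH_P_8021Q || type == ETH_P_8021AD) {
        if (off + VLAN_HLEN > sizeof(frame))    /* like pskb_may_pull() failing */
            return 1;
        /* skip the 2-byte TCI, read the encapsulated protocol */
        memcpy(&type, frame + off + 2, sizeof(type));
        type = ntohs(type);
        off += VLAN_HLEN;
    }
    printf("inner EtherType 0x%04x at depth %zu\n", (unsigned)type, off);
    return 0;
}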
*/ @@ -2366,35 +2365,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth) type = eth->h_proto; } - /* if skb->protocol is 802.1Q/AD then the header should already be - * present at mac_len - VLAN_HLEN (if mac_len > 0), or at - * ETH_HLEN otherwise - */ - if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { - if (vlan_depth) { - if (WARN_ON(vlan_depth < VLAN_HLEN)) - return 0; - vlan_depth -= VLAN_HLEN; - } else { - vlan_depth = ETH_HLEN; - } - do { - struct vlan_hdr *vh; - - if (unlikely(!pskb_may_pull(skb, - vlan_depth + VLAN_HLEN))) - return 0; - - vh = (struct vlan_hdr *)(skb->data + vlan_depth); - type = vh->h_vlan_encapsulated_proto; - vlan_depth += VLAN_HLEN; - } while (type == htons(ETH_P_8021Q) || - type == htons(ETH_P_8021AD)); - } - - *depth = vlan_depth; - - return type; + return __vlan_get_protocol(skb, type, depth); } /** @@ -5323,7 +5294,7 @@ void netdev_upper_dev_unlink(struct net_device *dev, } EXPORT_SYMBOL(netdev_upper_dev_unlink); -void netdev_adjacent_add_links(struct net_device *dev) +static void netdev_adjacent_add_links(struct net_device *dev) { struct netdev_adjacent *iter; @@ -5348,7 +5319,7 @@ void netdev_adjacent_add_links(struct net_device *dev) } } -void netdev_adjacent_del_links(struct net_device *dev) +static void netdev_adjacent_del_links(struct net_device *dev) { struct netdev_adjacent *iter; @@ -6656,7 +6627,7 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) if (!queue) return NULL; netdev_init_one_queue(dev, queue, NULL); - queue->qdisc = &noop_qdisc; + RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); queue->qdisc_sleeping = &noop_qdisc; rcu_assign_pointer(dev->ingress_queue, queue); #endif diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 9cf6fe9ddc0c..446cbaf81185 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2895,12 +2895,16 @@ static int rtnl_bridge_notify(struct net_device *dev, u16 flags) goto errout; } + if (!skb->len) + goto errout; + rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); return 0; errout: WARN_ON(err == -EMSGSIZE); kfree_skb(skb); - rtnl_set_sk_err(net, RTNLGRP_LINK, err); + if (err) + rtnl_set_sk_err(net, RTNLGRP_LINK, err); return err; } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index b50861b22b6b..c373c0708d97 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1506,23 +1506,8 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset, /* * Generic function to send a packet as reply to another packet. * Used to send some TCP resets/acks so far. - * - * Use a fake percpu inet socket to avoid false sharing and contention. 
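Illustrative aside (not part of the patch): the ip_output.c hunk that follows drops the shared fake per-cpu inet socket, and the tcp_ipv4.c hunk further down allocates one real control socket per CPU per network namespace (alloc_percpu() plus inet_ctl_sock_create()). The userspace sketch below only mimics the shape of that layout — plain arrays stand in for per_cpu_ptr(), and every name in it is local to the sketch.

/* "One object per CPU, per namespace" in miniature; illustration only. */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct reply_sock { long replies_sent; };

struct netns {
    struct reply_sock *tcp_sk[NR_CPUS];   /* one control socket per CPU */
};

static int netns_init(struct netns *net)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        net->tcp_sk[cpu] = calloc(1, sizeof(struct reply_sock));
        if (!net->tcp_sk[cpu])
            return -1;   /* real code unwinds here, as tcp_sk_init() does */
    }
    return 0;
}

static void send_reset(struct netns *net, int this_cpu)
{
    /* no locking needed: each CPU only ever touches its own socket */
    net->tcp_sk[this_cpu]->replies_sent++;
}

int main(void)
{
    static struct netns net;

    if (netns_init(&net))
        return 1;
    send_reset(&net, 0);
    send_reset(&net, 2);
    send_reset(&net, 2);
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        printf("cpu%d sent %ld\n", cpu, net.tcp_sk[cpu]->replies_sent);
    return 0;
}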
*/ -static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = { - .sk = { - .__sk_common = { - .skc_refcnt = ATOMIC_INIT(1), - }, - .sk_wmem_alloc = ATOMIC_INIT(1), - .sk_allocation = GFP_ATOMIC, - .sk_flags = (1UL << SOCK_USE_WRITE_QUEUE), - }, - .pmtudisc = IP_PMTUDISC_WANT, - .uc_ttl = -1, -}; - -void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, +void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, const struct ip_options *sopt, __be32 daddr, __be32 saddr, const struct ip_reply_arg *arg, @@ -1532,9 +1517,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, struct ipcm_cookie ipc; struct flowi4 fl4; struct rtable *rt = skb_rtable(skb); + struct net *net = sock_net(sk); struct sk_buff *nskb; - struct sock *sk; - struct inet_sock *inet; int err; if (__ip_options_echo(&replyopts.opt.opt, skb, sopt)) @@ -1565,15 +1549,11 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, if (IS_ERR(rt)) return; - inet = &get_cpu_var(unicast_sock); + inet_sk(sk)->tos = arg->tos; - inet->tos = arg->tos; - sk = &inet->sk; sk->sk_priority = skb->priority; sk->sk_protocol = ip_hdr(skb)->protocol; sk->sk_bound_dev_if = arg->bound_dev_if; - sock_net_set(sk, net); - __skb_queue_head_init(&sk->sk_write_queue); sk->sk_sndbuf = sysctl_wmem_default; err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0, &ipc, &rt, MSG_DONTWAIT); @@ -1589,13 +1569,10 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, arg->csumoffset) = csum_fold(csum_add(nskb->csum, arg->csum)); nskb->ip_summed = CHECKSUM_NONE; - skb_orphan(nskb); skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb)); ip_push_pending_frames(sk, &fl4); } out: - put_cpu_var(unicast_sock); - ip_rt_put(rt); } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index d58dd0ec3e53..52e1f2bf0ca2 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -966,6 +966,9 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) if (dst->dev->mtu < mtu) return; + if (rt->rt_pmtu && rt->rt_pmtu < mtu) + return; + if (mtu < ip_rt_min_pmtu) mtu = ip_rt_min_pmtu; diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c index bb395d46a389..c037644eafb7 100644 --- a/net/ipv4/tcp_bic.c +++ b/net/ipv4/tcp_bic.c @@ -150,7 +150,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) tcp_slow_start(tp, acked); else { bictcp_update(ca, tp->snd_cwnd); - tcp_cong_avoid_ai(tp, ca->cnt); + tcp_cong_avoid_ai(tp, ca->cnt, 1); } } diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 27ead0dd16bc..8670e68e2ce6 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -291,26 +291,32 @@ int tcp_set_congestion_control(struct sock *sk, const char *name) * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and * returns the leftover acks to adjust cwnd in congestion avoidance mode. */ -void tcp_slow_start(struct tcp_sock *tp, u32 acked) +u32 tcp_slow_start(struct tcp_sock *tp, u32 acked) { u32 cwnd = tp->snd_cwnd + acked; if (cwnd > tp->snd_ssthresh) cwnd = tp->snd_ssthresh + 1; + acked -= cwnd - tp->snd_cwnd; tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp); + + return acked; } EXPORT_SYMBOL_GPL(tcp_slow_start); -/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */ -void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w) +/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w), + * for every packet that was ACKed. 
+ */ +void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked) { + tp->snd_cwnd_cnt += acked; if (tp->snd_cwnd_cnt >= w) { - if (tp->snd_cwnd < tp->snd_cwnd_clamp) - tp->snd_cwnd++; - tp->snd_cwnd_cnt = 0; - } else { - tp->snd_cwnd_cnt++; + u32 delta = tp->snd_cwnd_cnt / w; + + tp->snd_cwnd_cnt -= delta * w; + tp->snd_cwnd += delta; } + tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp); } EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai); @@ -329,11 +335,13 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked) return; /* In "safe" area, increase. */ - if (tp->snd_cwnd <= tp->snd_ssthresh) - tcp_slow_start(tp, acked); + if (tp->snd_cwnd <= tp->snd_ssthresh) { + acked = tcp_slow_start(tp, acked); + if (!acked) + return; + } /* In dangerous area, increase slowly. */ - else - tcp_cong_avoid_ai(tp, tp->snd_cwnd); + tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked); } EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid); diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 6b6002416a73..4b276d1ed980 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -93,9 +93,7 @@ struct bictcp { u32 epoch_start; /* beginning of an epoch */ u32 ack_cnt; /* number of acks */ u32 tcp_cwnd; /* estimated tcp cwnd */ -#define ACK_RATIO_SHIFT 4 -#define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT) - u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */ + u16 unused; u8 sample_cnt; /* number of samples to decide curr_rtt */ u8 found; /* the exit point is found? */ u32 round_start; /* beginning of each round */ @@ -114,7 +112,6 @@ static inline void bictcp_reset(struct bictcp *ca) ca->bic_K = 0; ca->delay_min = 0; ca->epoch_start = 0; - ca->delayed_ack = 2 << ACK_RATIO_SHIFT; ca->ack_cnt = 0; ca->tcp_cwnd = 0; ca->found = 0; @@ -205,23 +202,30 @@ static u32 cubic_root(u64 a) /* * Compute congestion window to use. */ -static inline void bictcp_update(struct bictcp *ca, u32 cwnd) +static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked) { u32 delta, bic_target, max_cnt; u64 offs, t; - ca->ack_cnt++; /* count the number of ACKs */ + ca->ack_cnt += acked; /* count the number of ACKed packets */ if (ca->last_cwnd == cwnd && (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32) return; + /* The CUBIC function can update ca->cnt at most once per jiffy. + * On all cwnd reduction events, ca->epoch_start is set to 0, + * which will force a recalculation of ca->cnt. 
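Illustrative aside (not part of the patch): with the tcp_cong.c change above, tcp_slow_start() now returns the ACKed packets left over once cwnd reaches ssthresh, and tcp_cong_avoid_ai() consumes that count instead of assuming one segment per call, so stretch ACKs grow cwnd by the right amount. The standalone sketch below re-types that arithmetic as plain C so it can be compiled and traced; struct cc and the starting values are the sketch's own.

/* Userspace re-typing of the acked-aware cwnd arithmetic from tcp_cong.c. */
#include <stdio.h>
#include <stdint.h>

struct cc { uint32_t snd_cwnd, snd_ssthresh, snd_cwnd_cnt, snd_cwnd_clamp; };

static uint32_t slow_start(struct cc *tp, uint32_t acked)
{
    uint32_t cwnd = tp->snd_cwnd + acked;

    if (cwnd > tp->snd_ssthresh)
        cwnd = tp->snd_ssthresh + 1;
    acked -= cwnd - tp->snd_cwnd;               /* leftover ACKed packets */
    tp->snd_cwnd = cwnd < tp->snd_cwnd_clamp ? cwnd : tp->snd_cwnd_clamp;
    return acked;
}

static void cong_avoid_ai(struct cc *tp, uint32_t w, uint32_t acked)
{
    tp->snd_cwnd_cnt += acked;
    if (tp->snd_cwnd_cnt >= w) {
        uint32_t delta = tp->snd_cwnd_cnt / w;

        tp->snd_cwnd_cnt -= delta * w;
        tp->snd_cwnd += delta;
    }
    if (tp->snd_cwnd > tp->snd_cwnd_clamp)
        tp->snd_cwnd = tp->snd_cwnd_clamp;
}

int main(void)
{
    struct cc tp = { .snd_cwnd = 8, .snd_ssthresh = 10,
                     .snd_cwnd_cnt = 0, .snd_cwnd_clamp = 0xffff };
    uint32_t acked = 7;                         /* one stretch ACK, 7 packets */

    if (tp.snd_cwnd <= tp.snd_ssthresh)
        acked = slow_start(&tp, acked);         /* exits slow start at ssthresh */
    if (acked)
        cong_avoid_ai(&tp, tp.snd_cwnd, acked); /* remainder feeds AI */
    printf("cwnd=%u cnt=%u leftover=%u\n", tp.snd_cwnd, tp.snd_cwnd_cnt, acked);
    return 0;
}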
+ */ + if (ca->epoch_start && tcp_time_stamp == ca->last_time) + goto tcp_friendliness; + ca->last_cwnd = cwnd; ca->last_time = tcp_time_stamp; if (ca->epoch_start == 0) { ca->epoch_start = tcp_time_stamp; /* record beginning */ - ca->ack_cnt = 1; /* start counting */ + ca->ack_cnt = acked; /* start counting */ ca->tcp_cwnd = cwnd; /* syn with cubic */ if (ca->last_max_cwnd <= cwnd) { @@ -283,6 +287,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) if (ca->last_max_cwnd == 0 && ca->cnt > 20) ca->cnt = 20; /* increase cwnd 5% per RTT */ +tcp_friendliness: /* TCP Friendly */ if (tcp_friendliness) { u32 scale = beta_scale; @@ -301,7 +306,6 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) } } - ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack; if (ca->cnt == 0) /* cannot be zero */ ca->cnt = 1; } @@ -317,11 +321,12 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) if (tp->snd_cwnd <= tp->snd_ssthresh) { if (hystart && after(ack, ca->end_seq)) bictcp_hystart_reset(sk); - tcp_slow_start(tp, acked); - } else { - bictcp_update(ca, tp->snd_cwnd); - tcp_cong_avoid_ai(tp, ca->cnt); + acked = tcp_slow_start(tp, acked); + if (!acked) + return; } + bictcp_update(ca, tp->snd_cwnd, acked); + tcp_cong_avoid_ai(tp, ca->cnt, acked); } static u32 bictcp_recalc_ssthresh(struct sock *sk) @@ -411,20 +416,10 @@ static void hystart_update(struct sock *sk, u32 delay) */ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) { - const struct inet_connection_sock *icsk = inet_csk(sk); const struct tcp_sock *tp = tcp_sk(sk); struct bictcp *ca = inet_csk_ca(sk); u32 delay; - if (icsk->icsk_ca_state == TCP_CA_Open) { - u32 ratio = ca->delayed_ack; - - ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT; - ratio += cnt; - - ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT); - } - /* Some calls are for duplicates without timetamps */ if (rtt_us < 0) return; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index a3f72d7fc06c..d22f54482bab 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -683,7 +683,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) arg.bound_dev_if = sk->sk_bound_dev_if; arg.tos = ip_hdr(skb)->tos; - ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt, + ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), + skb, &TCP_SKB_CB(skb)->header.h4.opt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len); @@ -767,7 +768,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, if (oif) arg.bound_dev_if = oif; arg.tos = tos; - ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt, + ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), + skb, &TCP_SKB_CB(skb)->header.h4.opt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len); @@ -2428,14 +2430,39 @@ struct proto tcp_prot = { }; EXPORT_SYMBOL(tcp_prot); +static void __net_exit tcp_sk_exit(struct net *net) +{ + int cpu; + + for_each_possible_cpu(cpu) + inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu)); + free_percpu(net->ipv4.tcp_sk); +} + static int __net_init tcp_sk_init(struct net *net) { + int res, cpu; + + net->ipv4.tcp_sk = alloc_percpu(struct sock *); + if (!net->ipv4.tcp_sk) + return -ENOMEM; + + for_each_possible_cpu(cpu) { + struct sock *sk; + + res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW, + IPPROTO_TCP, net); + if (res) + goto fail; + *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk; + } net->ipv4.sysctl_tcp_ecn = 2; return 0; -} -static void __net_exit tcp_sk_exit(struct net 
*net) -{ +fail: + tcp_sk_exit(net); + + return res; } static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list) diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c index 6824afb65d93..333bcb2415ff 100644 --- a/net/ipv4/tcp_scalable.c +++ b/net/ipv4/tcp_scalable.c @@ -25,7 +25,8 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked) if (tp->snd_cwnd <= tp->snd_ssthresh) tcp_slow_start(tp, acked); else - tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)); + tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT), + 1); } static u32 tcp_scalable_ssthresh(struct sock *sk) diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c index a4d2d2d88dca..112151eeee45 100644 --- a/net/ipv4/tcp_veno.c +++ b/net/ipv4/tcp_veno.c @@ -159,7 +159,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked) /* In the "non-congestive state", increase cwnd * every rtt. */ - tcp_cong_avoid_ai(tp, tp->snd_cwnd); + tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1); } else { /* In the "congestive state", increase cwnd * every other rtt. diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c index cd7273218598..17d35662930d 100644 --- a/net/ipv4/tcp_yeah.c +++ b/net/ipv4/tcp_yeah.c @@ -92,7 +92,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked) } else { /* Reno */ - tcp_cong_avoid_ai(tp, tp->snd_cwnd); + tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1); } /* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt. diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 13cda4c6313b..01ccc28a686f 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -417,7 +417,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, if (code == ICMPV6_HDR_FIELD) teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data); - if (teli && teli == info - 2) { + if (teli && teli == be32_to_cpu(info) - 2) { tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; if (tel->encap_limit == 0) { net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", @@ -429,7 +429,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, } break; case ICMPV6_PKT_TOOBIG: - mtu = info - offset; + mtu = be32_to_cpu(info) - offset; if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; t->dev->mtu = mtu; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index ce69a12ae48c..d28f2a2efb32 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -537,20 +537,6 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) skb_copy_secmark(to, from); } -static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt) -{ - static u32 ip6_idents_hashrnd __read_mostly; - u32 hash, id; - - net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd)); - - hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd); - hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash); - - id = ip_idents_reserve(hash, 1); - fhdr->identification = htonl(id); -} - int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) { struct sk_buff *frag; diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index 97f41a3e68d9..54520a0bd5e3 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -9,6 +9,24 @@ #include <net/addrconf.h> #include <net/secure_seq.h> +u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst, struct in6_addr *src) +{ + u32 hash, id; + + hash = __ipv6_addr_jhash(dst, hashrnd); + hash = __ipv6_addr_jhash(src, hash); + + /* Treat id of 0 as unset and if we get 
0 back from ip_idents_reserve, + * set the hight order instead thus minimizing possible future + * collisions. + */ + id = ip_idents_reserve(hash, 1); + if (unlikely(!id)) + id = 1 << 31; + + return id; +} + /* This function exists only for tap drivers that must support broken * clients requesting UFO without specifying an IPv6 fragment ID. * @@ -22,7 +40,7 @@ void ipv6_proxy_select_ident(struct sk_buff *skb) static u32 ip6_proxy_idents_hashrnd __read_mostly; struct in6_addr buf[2]; struct in6_addr *addrs; - u32 hash, id; + u32 id; addrs = skb_header_pointer(skb, skb_network_offset(skb) + @@ -34,14 +52,25 @@ void ipv6_proxy_select_ident(struct sk_buff *skb) net_get_random_once(&ip6_proxy_idents_hashrnd, sizeof(ip6_proxy_idents_hashrnd)); - hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd); - hash = __ipv6_addr_jhash(&addrs[0], hash); - - id = ip_idents_reserve(hash, 1); - skb_shinfo(skb)->ip6_frag_id = htonl(id); + id = __ipv6_select_ident(ip6_proxy_idents_hashrnd, + &addrs[1], &addrs[0]); + skb_shinfo(skb)->ip6_frag_id = id; } EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident); +void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt) +{ + static u32 ip6_idents_hashrnd __read_mostly; + u32 id; + + net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd)); + + id = __ipv6_select_ident(ip6_idents_hashrnd, &rt->rt6i_dst.addr, + &rt->rt6i_src.addr); + fhdr->identification = htonl(id); +} +EXPORT_SYMBOL(ipv6_select_ident); + int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) { u16 offset = sizeof(struct ipv6hdr); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 213546bd6d5d..cdbfe5af6187 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -1506,12 +1506,12 @@ static bool ipip6_netlink_encap_parms(struct nlattr *data[], if (data[IFLA_IPTUN_ENCAP_SPORT]) { ret = true; - ipencap->sport = nla_get_u16(data[IFLA_IPTUN_ENCAP_SPORT]); + ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]); } if (data[IFLA_IPTUN_ENCAP_DPORT]) { ret = true; - ipencap->dport = nla_get_u16(data[IFLA_IPTUN_ENCAP_DPORT]); + ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]); } return ret; @@ -1707,9 +1707,9 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev) if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) || - nla_put_u16(skb, IFLA_IPTUN_ENCAP_SPORT, + nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) || - nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT, + nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) || nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags)) diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index b6aa8ed18257..a56276996b72 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -52,6 +52,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); + /* Set the IPv6 fragment id if not set yet */ + if (!skb_shinfo(skb)->ip6_frag_id) + ipv6_proxy_select_ident(skb); + segs = NULL; goto out; } @@ -108,7 +112,11 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); fptr->nexthdr = nexthdr; fptr->reserved = 0; - fptr->identification = skb_shinfo(skb)->ip6_frag_id; + if (skb_shinfo(skb)->ip6_frag_id) + fptr->identification = skb_shinfo(skb)->ip6_frag_id; + else + ipv6_select_ident(fptr, + (struct rt6_info *)skb_dst(skb)); /* Fragment the skb. 
ipv6 header and the remaining fields of the * fragment header are updated in ipv6_gso_segment() diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 990decba1fe4..b87ca32efa0b 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -659,16 +659,24 @@ static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user) return err; } -static int ip_vs_route_me_harder(int af, struct sk_buff *skb) +static int ip_vs_route_me_harder(int af, struct sk_buff *skb, + unsigned int hooknum) { + if (!sysctl_snat_reroute(skb)) + return 0; + /* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */ + if (NF_INET_LOCAL_IN == hooknum) + return 0; #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { - if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0) + struct dst_entry *dst = skb_dst(skb); + + if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) && + ip6_route_me_harder(skb) != 0) return 1; } else #endif - if ((sysctl_snat_reroute(skb) || - skb_rtable(skb)->rt_flags & RTCF_LOCAL) && + if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) && ip_route_me_harder(skb, RTN_LOCAL) != 0) return 1; @@ -791,7 +799,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb, union nf_inet_addr *snet, __u8 protocol, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, - unsigned int offset, unsigned int ihl) + unsigned int offset, unsigned int ihl, + unsigned int hooknum) { unsigned int verdict = NF_DROP; @@ -821,7 +830,7 @@ static int handle_response_icmp(int af, struct sk_buff *skb, #endif ip_vs_nat_icmp(skb, pp, cp, 1); - if (ip_vs_route_me_harder(af, skb)) + if (ip_vs_route_me_harder(af, skb, hooknum)) goto out; /* do the statistics and put it back */ @@ -916,7 +925,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related, snet.ip = iph->saddr; return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp, - pp, ciph.len, ihl); + pp, ciph.len, ihl, hooknum); } #ifdef CONFIG_IP_VS_IPV6 @@ -981,7 +990,8 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related, snet.in6 = ciph.saddr.in6; writable = ciph.len; return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp, - pp, writable, sizeof(struct ipv6hdr)); + pp, writable, sizeof(struct ipv6hdr), + hooknum); } #endif @@ -1040,7 +1050,8 @@ static inline bool is_new_conn(const struct sk_buff *skb, */ static unsigned int handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, - struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) + struct ip_vs_conn *cp, struct ip_vs_iphdr *iph, + unsigned int hooknum) { struct ip_vs_protocol *pp = pd->pp; @@ -1078,7 +1089,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, * if it came from this machine itself. So re-compute * the routing information. 
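Illustrative aside (not part of the patch): several netfilter hunks further down replace open-coded hook switches with nft_chain_validate_hooks(), which tests a base chain's hook number against a caller-supplied bitmask. The standalone sketch below shows that test; the hook constants are re-declared so it builds on its own and follow the usual uapi numbering.

/* The bitmask test behind nft_chain_validate_hooks(), in miniature. */
#include <stdio.h>
#include <errno.h>

enum {
    NF_INET_PRE_ROUTING  = 0,
    NF_INET_LOCAL_IN     = 1,
    NF_INET_FORWARD      = 2,
    NF_INET_LOCAL_OUT    = 3,
    NF_INET_POST_ROUTING = 4,
};

static int validate_hooks(unsigned int chain_hooknum, unsigned int allowed_flags)
{
    if ((1u << chain_hooknum) & allowed_flags)
        return 0;
    return -EOPNOTSUPP;
}

int main(void)
{
    /* masquerade is only valid on postrouting */
    unsigned int masq_ok = 1u << NF_INET_POST_ROUTING;
    /* redirect: prerouting or local out */
    unsigned int redir_ok = (1u << NF_INET_PRE_ROUTING) | (1u << NF_INET_LOCAL_OUT);

    printf("masq on postrouting : %d\n", validate_hooks(NF_INET_POST_ROUTING, masq_ok));
    printf("masq on local in    : %d\n", validate_hooks(NF_INET_LOCAL_IN, masq_ok));
    printf("redir on prerouting : %d\n", validate_hooks(NF_INET_PRE_ROUTING, redir_ok));
    return 0;
}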
*/ - if (ip_vs_route_me_harder(af, skb)) + if (ip_vs_route_me_harder(af, skb, hooknum)) goto drop; IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT"); @@ -1181,7 +1192,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af) cp = pp->conn_out_get(af, skb, &iph, 0); if (likely(cp)) - return handle_response(af, skb, pd, cp, &iph); + return handle_response(af, skb, pd, cp, &iph, hooknum); if (sysctl_nat_icmp_send(net) && (pp->protocol == IPPROTO_TCP || pp->protocol == IPPROTO_UDP || diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 3b3ddb4fb9ee..1ff04bcd4871 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1134,9 +1134,11 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr) /* Restore old counters on this cpu, no problem. Per-cpu statistics * are not exposed to userspace. */ + preempt_disable(); stats = this_cpu_ptr(newstats); stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES])); stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS])); + preempt_enable(); return newstats; } @@ -1262,8 +1264,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN, sizeof(struct nft_trans_chain)); - if (trans == NULL) + if (trans == NULL) { + free_percpu(stats); return -ENOMEM; + } nft_trans_chain_stats(trans) = stats; nft_trans_chain_update(trans) = true; @@ -1319,8 +1323,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, hookfn = type->hooks[hooknum]; basechain = kzalloc(sizeof(*basechain), GFP_KERNEL); - if (basechain == NULL) + if (basechain == NULL) { + module_put(type->owner); return -ENOMEM; + } if (nla[NFTA_CHAIN_COUNTERS]) { stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]); @@ -3753,6 +3759,24 @@ int nft_chain_validate_dependency(const struct nft_chain *chain, } EXPORT_SYMBOL_GPL(nft_chain_validate_dependency); +int nft_chain_validate_hooks(const struct nft_chain *chain, + unsigned int hook_flags) +{ + struct nft_base_chain *basechain; + + if (chain->flags & NFT_BASE_CHAIN) { + basechain = nft_base_chain(chain); + + if ((1 << basechain->ops[0].hooknum) & hook_flags) + return 0; + + return -EOPNOTSUPP; + } + + return 0; +} +EXPORT_SYMBOL_GPL(nft_chain_validate_hooks); + /* * Loop detection - walk through the ruleset beginning at the destination chain * of a new jump until either the source chain is reached (loop) or all diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c index d1ffd5eb3a9b..9aea747b43ea 100644 --- a/net/netfilter/nft_masq.c +++ b/net/netfilter/nft_masq.c @@ -21,6 +21,21 @@ const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = { }; EXPORT_SYMBOL_GPL(nft_masq_policy); +int nft_masq_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) +{ + int err; + + err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); + if (err < 0) + return err; + + return nft_chain_validate_hooks(ctx->chain, + (1 << NF_INET_POST_ROUTING)); +} +EXPORT_SYMBOL_GPL(nft_masq_validate); + int nft_masq_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) @@ -28,8 +43,8 @@ int nft_masq_init(const struct nft_ctx *ctx, struct nft_masq *priv = nft_expr_priv(expr); int err; - err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); - if (err < 0) + err = nft_masq_validate(ctx, expr, NULL); + if (err) return err; if (tb[NFTA_MASQ_FLAGS] == 
NULL) @@ -60,12 +75,5 @@ nla_put_failure: } EXPORT_SYMBOL_GPL(nft_masq_dump); -int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, - const struct nft_data **data) -{ - return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); -} -EXPORT_SYMBOL_GPL(nft_masq_validate); - MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>"); diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index aff54fb1c8a0..a0837c6c9283 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c @@ -88,17 +88,40 @@ static const struct nla_policy nft_nat_policy[NFTA_NAT_MAX + 1] = { [NFTA_NAT_FLAGS] = { .type = NLA_U32 }, }; -static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, - const struct nlattr * const tb[]) +static int nft_nat_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) { struct nft_nat *priv = nft_expr_priv(expr); - u32 family; int err; err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); if (err < 0) return err; + switch (priv->type) { + case NFT_NAT_SNAT: + err = nft_chain_validate_hooks(ctx->chain, + (1 << NF_INET_POST_ROUTING) | + (1 << NF_INET_LOCAL_IN)); + break; + case NFT_NAT_DNAT: + err = nft_chain_validate_hooks(ctx->chain, + (1 << NF_INET_PRE_ROUTING) | + (1 << NF_INET_LOCAL_OUT)); + break; + } + + return err; +} + +static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + struct nft_nat *priv = nft_expr_priv(expr); + u32 family; + int err; + if (tb[NFTA_NAT_TYPE] == NULL || (tb[NFTA_NAT_REG_ADDR_MIN] == NULL && tb[NFTA_NAT_REG_PROTO_MIN] == NULL)) @@ -115,6 +138,10 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, return -EINVAL; } + err = nft_nat_validate(ctx, expr, NULL); + if (err < 0) + return err; + if (tb[NFTA_NAT_FAMILY] == NULL) return -EINVAL; @@ -219,13 +246,6 @@ nla_put_failure: return -1; } -static int nft_nat_validate(const struct nft_ctx *ctx, - const struct nft_expr *expr, - const struct nft_data **data) -{ - return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); -} - static struct nft_expr_type nft_nat_type; static const struct nft_expr_ops nft_nat_ops = { .type = &nft_nat_type, diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c index 9e8093f28311..d7e9e93a4e90 100644 --- a/net/netfilter/nft_redir.c +++ b/net/netfilter/nft_redir.c @@ -23,6 +23,22 @@ const struct nla_policy nft_redir_policy[NFTA_REDIR_MAX + 1] = { }; EXPORT_SYMBOL_GPL(nft_redir_policy); +int nft_redir_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) +{ + int err; + + err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); + if (err < 0) + return err; + + return nft_chain_validate_hooks(ctx->chain, + (1 << NF_INET_PRE_ROUTING) | + (1 << NF_INET_LOCAL_OUT)); +} +EXPORT_SYMBOL_GPL(nft_redir_validate); + int nft_redir_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) @@ -30,7 +46,7 @@ int nft_redir_init(const struct nft_ctx *ctx, struct nft_redir *priv = nft_expr_priv(expr); int err; - err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); + err = nft_redir_validate(ctx, expr, NULL); if (err < 0) return err; @@ -88,12 +104,5 @@ nla_put_failure: } EXPORT_SYMBOL_GPL(nft_redir_dump); -int nft_redir_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, - const struct nft_data **data) -{ - return 
nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); -} -EXPORT_SYMBOL_GPL(nft_redir_validate); - MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>"); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 02fdde28dada..75532efa51cd 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1438,7 +1438,7 @@ static void netlink_undo_bind(int group, long unsigned int groups, for (undo = 0; undo < group; undo++) if (test_bit(undo, &groups)) - nlk->netlink_unbind(sock_net(sk), undo); + nlk->netlink_unbind(sock_net(sk), undo + 1); } static int netlink_bind(struct socket *sock, struct sockaddr *addr, @@ -1476,7 +1476,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, for (group = 0; group < nlk->ngroups; group++) { if (!test_bit(group, &groups)) continue; - err = nlk->netlink_bind(net, group); + err = nlk->netlink_bind(net, group + 1); if (!err) continue; netlink_undo_bind(group, groups, sk); diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c index c3b0cd43eb56..c173f69e1479 100644 --- a/net/rds/sysctl.c +++ b/net/rds/sysctl.c @@ -71,14 +71,14 @@ static struct ctl_table rds_sysctl_rds_table[] = { { .procname = "max_unacked_packets", .data = &rds_sysctl_max_unacked_packets, - .maxlen = sizeof(unsigned long), + .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "max_unacked_bytes", .data = &rds_sysctl_max_unacked_bytes, - .maxlen = sizeof(unsigned long), + .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index aad6a679fb13..baef987fe2c0 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -556,8 +556,9 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst, } EXPORT_SYMBOL(tcf_exts_change); -#define tcf_exts_first_act(ext) \ - list_first_entry(&(exts)->actions, struct tc_action, list) +#define tcf_exts_first_act(ext) \ + list_first_entry_or_null(&(exts)->actions, \ + struct tc_action, list) int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) { @@ -603,7 +604,7 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts) { #ifdef CONFIG_NET_CLS_ACT struct tc_action *a = tcf_exts_first_act(exts); - if (tcf_action_copy_stats(skb, a, 1) < 0) + if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0) return -1; #endif return 0; diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 9b05924cc386..333cd94ba381 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -670,8 +670,14 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt) if (tb[TCA_FQ_FLOW_PLIMIT]) q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]); - if (tb[TCA_FQ_QUANTUM]) - q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]); + if (tb[TCA_FQ_QUANTUM]) { + u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]); + + if (quantum > 0) + q->quantum = quantum; + else + err = -EINVAL; + } if (tb[TCA_FQ_INITIAL_QUANTUM]) q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]); diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index e49e231cef52..06320c8c1c86 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2608,7 +2608,7 @@ do_addr_param: addr_param = param.v + sizeof(sctp_addip_param_t); - af = sctp_get_af_specific(param_type2af(param.p->type)); + af = sctp_get_af_specific(param_type2af(addr_param->p.type)); if (af == NULL) break; diff --git a/sound/i2c/other/ak4113.c b/sound/i2c/other/ak4113.c index 1a3a6fa27158..c6bba99a90b2 100644 --- 
a/sound/i2c/other/ak4113.c +++ b/sound/i2c/other/ak4113.c @@ -56,8 +56,7 @@ static inline unsigned char reg_read(struct ak4113 *ak4113, unsigned char reg) static void snd_ak4113_free(struct ak4113 *chip) { - chip->init = 1; /* don't schedule new work */ - mb(); + atomic_inc(&chip->wq_processing); /* don't schedule new work */ cancel_delayed_work_sync(&chip->work); kfree(chip); } @@ -89,6 +88,7 @@ int snd_ak4113_create(struct snd_card *card, ak4113_read_t *read, chip->write = write; chip->private_data = private_data; INIT_DELAYED_WORK(&chip->work, ak4113_stats); + atomic_set(&chip->wq_processing, 0); for (reg = 0; reg < AK4113_WRITABLE_REGS ; reg++) chip->regmap[reg] = pgm[reg]; @@ -139,13 +139,11 @@ static void ak4113_init_regs(struct ak4113 *chip) void snd_ak4113_reinit(struct ak4113 *chip) { - chip->init = 1; - mb(); - flush_delayed_work(&chip->work); + if (atomic_inc_return(&chip->wq_processing) == 1) + cancel_delayed_work_sync(&chip->work); ak4113_init_regs(chip); /* bring up statistics / event queing */ - chip->init = 0; - if (chip->kctls[0]) + if (atomic_dec_and_test(&chip->wq_processing)) schedule_delayed_work(&chip->work, HZ / 10); } EXPORT_SYMBOL_GPL(snd_ak4113_reinit); @@ -632,8 +630,9 @@ static void ak4113_stats(struct work_struct *work) { struct ak4113 *chip = container_of(work, struct ak4113, work.work); - if (!chip->init) + if (atomic_inc_return(&chip->wq_processing) == 1) snd_ak4113_check_rate_and_errors(chip, chip->check_flags); - schedule_delayed_work(&chip->work, HZ / 10); + if (atomic_dec_and_test(&chip->wq_processing)) + schedule_delayed_work(&chip->work, HZ / 10); } diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c index c7f56339415d..b70e6eccbd03 100644 --- a/sound/i2c/other/ak4114.c +++ b/sound/i2c/other/ak4114.c @@ -66,8 +66,7 @@ static void reg_dump(struct ak4114 *ak4114) static void snd_ak4114_free(struct ak4114 *chip) { - chip->init = 1; /* don't schedule new work */ - mb(); + atomic_inc(&chip->wq_processing); /* don't schedule new work */ cancel_delayed_work_sync(&chip->work); kfree(chip); } @@ -100,6 +99,7 @@ int snd_ak4114_create(struct snd_card *card, chip->write = write; chip->private_data = private_data; INIT_DELAYED_WORK(&chip->work, ak4114_stats); + atomic_set(&chip->wq_processing, 0); for (reg = 0; reg < 6; reg++) chip->regmap[reg] = pgm[reg]; @@ -152,13 +152,11 @@ static void ak4114_init_regs(struct ak4114 *chip) void snd_ak4114_reinit(struct ak4114 *chip) { - chip->init = 1; - mb(); - flush_delayed_work(&chip->work); + if (atomic_inc_return(&chip->wq_processing) == 1) + cancel_delayed_work_sync(&chip->work); ak4114_init_regs(chip); /* bring up statistics / event queing */ - chip->init = 0; - if (chip->kctls[0]) + if (atomic_dec_and_test(&chip->wq_processing)) schedule_delayed_work(&chip->work, HZ / 10); } @@ -612,10 +610,10 @@ static void ak4114_stats(struct work_struct *work) { struct ak4114 *chip = container_of(work, struct ak4114, work.work); - if (!chip->init) + if (atomic_inc_return(&chip->wq_processing) == 1) snd_ak4114_check_rate_and_errors(chip, chip->check_flags); - - schedule_delayed_work(&chip->work, HZ / 10); + if (atomic_dec_and_test(&chip->wq_processing)) + schedule_delayed_work(&chip->work, HZ / 10); } EXPORT_SYMBOL(snd_ak4114_create); diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c index 99ff35e2a25d..35e44e463cfe 100644 --- a/sound/soc/atmel/atmel_ssc_dai.c +++ b/sound/soc/atmel/atmel_ssc_dai.c @@ -348,7 +348,6 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream, struct 
atmel_pcm_dma_params *dma_params; int dir, channels, bits; u32 tfmr, rfmr, tcmr, rcmr; - int start_event; int ret; int fslen, fslen_ext; @@ -457,19 +456,10 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream, * The SSC transmit clock is obtained from the BCLK signal on * on the TK line, and the SSC receive clock is * generated from the transmit clock. - * - * For single channel data, one sample is transferred - * on the falling edge of the LRC clock. - * For two channel data, one sample is - * transferred on both edges of the LRC clock. */ - start_event = ((channels == 1) - ? SSC_START_FALLING_RF - : SSC_START_EDGE_RF); - rcmr = SSC_BF(RCMR_PERIOD, 0) | SSC_BF(RCMR_STTDLY, START_DELAY) - | SSC_BF(RCMR_START, start_event) + | SSC_BF(RCMR_START, SSC_START_FALLING_RF) | SSC_BF(RCMR_CKI, SSC_CKI_RISING) | SSC_BF(RCMR_CKO, SSC_CKO_NONE) | SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ? @@ -478,14 +468,14 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream, rfmr = SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE) | SSC_BF(RFMR_FSOS, SSC_FSOS_NONE) | SSC_BF(RFMR_FSLEN, 0) - | SSC_BF(RFMR_DATNB, 0) + | SSC_BF(RFMR_DATNB, (channels - 1)) | SSC_BIT(RFMR_MSBF) | SSC_BF(RFMR_LOOP, 0) | SSC_BF(RFMR_DATLEN, (bits - 1)); tcmr = SSC_BF(TCMR_PERIOD, 0) | SSC_BF(TCMR_STTDLY, START_DELAY) - | SSC_BF(TCMR_START, start_event) + | SSC_BF(TCMR_START, SSC_START_FALLING_RF) | SSC_BF(TCMR_CKI, SSC_CKI_FALLING) | SSC_BF(TCMR_CKO, SSC_CKO_NONE) | SSC_BF(TCMR_CKS, ssc->clk_from_rk_pin ? @@ -495,7 +485,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream, | SSC_BF(TFMR_FSDEN, 0) | SSC_BF(TFMR_FSOS, SSC_FSOS_NONE) | SSC_BF(TFMR_FSLEN, 0) - | SSC_BF(TFMR_DATNB, 0) + | SSC_BF(TFMR_DATNB, (channels - 1)) | SSC_BIT(TFMR_MSBF) | SSC_BF(TFMR_DATDEF, 0) | SSC_BF(TFMR_DATLEN, (bits - 1)); @@ -512,7 +502,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream, rcmr = SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period) | SSC_BF(RCMR_STTDLY, 1) | SSC_BF(RCMR_START, SSC_START_RISING_RF) - | SSC_BF(RCMR_CKI, SSC_CKI_RISING) + | SSC_BF(RCMR_CKI, SSC_CKI_FALLING) | SSC_BF(RCMR_CKO, SSC_CKO_NONE) | SSC_BF(RCMR_CKS, SSC_CKS_DIV); @@ -527,7 +517,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream, tcmr = SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period) | SSC_BF(TCMR_STTDLY, 1) | SSC_BF(TCMR_START, SSC_START_RISING_RF) - | SSC_BF(TCMR_CKI, SSC_CKI_RISING) + | SSC_BF(TCMR_CKI, SSC_CKI_FALLING) | SSC_BF(TCMR_CKO, SSC_CKO_CONTINUOUS) | SSC_BF(TCMR_CKS, SSC_CKS_DIV); @@ -556,7 +546,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream, rcmr = SSC_BF(RCMR_PERIOD, 0) | SSC_BF(RCMR_STTDLY, START_DELAY) | SSC_BF(RCMR_START, SSC_START_RISING_RF) - | SSC_BF(RCMR_CKI, SSC_CKI_RISING) + | SSC_BF(RCMR_CKI, SSC_CKI_FALLING) | SSC_BF(RCMR_CKO, SSC_CKO_NONE) | SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ? 
SSC_CKS_PIN : SSC_CKS_CLOCK); diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c index c3f2decd643c..1ff726c29249 100644 --- a/sound/soc/codecs/rt5640.c +++ b/sound/soc/codecs/rt5640.c @@ -2124,6 +2124,7 @@ MODULE_DEVICE_TABLE(of, rt5640_of_match); static struct acpi_device_id rt5640_acpi_match[] = { { "INT33CA", 0 }, { "10EC5640", 0 }, + { "10EC5642", 0 }, { }, }; MODULE_DEVICE_TABLE(acpi, rt5640_acpi_match); diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index 29cf7ce610f4..aa98be32bb60 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -483,21 +483,21 @@ static int sgtl5000_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) /* setting i2s data format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_DSP_A: - i2sctl |= SGTL5000_I2S_MODE_PCM; + i2sctl |= SGTL5000_I2S_MODE_PCM << SGTL5000_I2S_MODE_SHIFT; break; case SND_SOC_DAIFMT_DSP_B: - i2sctl |= SGTL5000_I2S_MODE_PCM; + i2sctl |= SGTL5000_I2S_MODE_PCM << SGTL5000_I2S_MODE_SHIFT; i2sctl |= SGTL5000_I2S_LRALIGN; break; case SND_SOC_DAIFMT_I2S: - i2sctl |= SGTL5000_I2S_MODE_I2S_LJ; + i2sctl |= SGTL5000_I2S_MODE_I2S_LJ << SGTL5000_I2S_MODE_SHIFT; break; case SND_SOC_DAIFMT_RIGHT_J: - i2sctl |= SGTL5000_I2S_MODE_RJ; + i2sctl |= SGTL5000_I2S_MODE_RJ << SGTL5000_I2S_MODE_SHIFT; i2sctl |= SGTL5000_I2S_LRPOL; break; case SND_SOC_DAIFMT_LEFT_J: - i2sctl |= SGTL5000_I2S_MODE_I2S_LJ; + i2sctl |= SGTL5000_I2S_MODE_I2S_LJ << SGTL5000_I2S_MODE_SHIFT; i2sctl |= SGTL5000_I2S_LRALIGN; break; default: @@ -1462,6 +1462,9 @@ static int sgtl5000_i2c_probe(struct i2c_client *client, if (ret) return ret; + /* Need 8 clocks before I2C accesses */ + udelay(1); + /* read chip information */ ret = regmap_read(sgtl5000->regmap, SGTL5000_CHIP_ID, ®); if (ret) diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c index b7ebce054b4e..dd222b10ce13 100644 --- a/sound/soc/codecs/tlv320aic3x.c +++ b/sound/soc/codecs/tlv320aic3x.c @@ -1046,7 +1046,7 @@ static int aic3x_prepare(struct snd_pcm_substream *substream, delay += aic3x->tdm_delay; /* Configure data delay */ - snd_soc_write(codec, AIC3X_ASD_INTF_CTRLC, aic3x->tdm_delay); + snd_soc_write(codec, AIC3X_ASD_INTF_CTRLC, delay); return 0; } diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c index b9211b42f6e9..b115ed815db9 100644 --- a/sound/soc/codecs/wm8731.c +++ b/sound/soc/codecs/wm8731.c @@ -717,6 +717,8 @@ static int wm8731_i2c_probe(struct i2c_client *i2c, if (wm8731 == NULL) return -ENOMEM; + mutex_init(&wm8731->lock); + wm8731->regmap = devm_regmap_init_i2c(i2c, &wm8731_regmap); if (IS_ERR(wm8731->regmap)) { ret = PTR_ERR(wm8731->regmap); diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c index 3eddb18fefd1..5cc457ef8894 100644 --- a/sound/soc/codecs/wm9705.c +++ b/sound/soc/codecs/wm9705.c @@ -344,23 +344,27 @@ static int wm9705_soc_probe(struct snd_soc_codec *codec) struct snd_ac97 *ac97; int ret = 0; - ac97 = snd_soc_new_ac97_codec(codec); + ac97 = snd_soc_alloc_ac97_codec(codec); if (IS_ERR(ac97)) { ret = PTR_ERR(ac97); dev_err(codec->dev, "Failed to register AC97 codec\n"); return ret; } - snd_soc_codec_set_drvdata(codec, ac97); - ret = wm9705_reset(codec); if (ret) - goto reset_err; + goto err_put_device; + + ret = device_add(&ac97->dev); + if (ret) + goto err_put_device; + + snd_soc_codec_set_drvdata(codec, ac97); return 0; -reset_err: - snd_soc_free_ac97_codec(ac97); +err_put_device: + put_device(&ac97->dev); return ret; } diff --git 
a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c index e04643d2bb24..9517571e820d 100644 --- a/sound/soc/codecs/wm9712.c +++ b/sound/soc/codecs/wm9712.c @@ -666,7 +666,7 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec) struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec); int ret = 0; - wm9712->ac97 = snd_soc_new_ac97_codec(codec); + wm9712->ac97 = snd_soc_alloc_ac97_codec(codec); if (IS_ERR(wm9712->ac97)) { ret = PTR_ERR(wm9712->ac97); dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret); @@ -675,15 +675,19 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec) ret = wm9712_reset(codec, 0); if (ret < 0) - goto reset_err; + goto err_put_device; + + ret = device_add(&wm9712->ac97->dev); + if (ret) + goto err_put_device; /* set alc mux to none */ ac97_write(codec, AC97_VIDEO, ac97_read(codec, AC97_VIDEO) | 0x3000); return 0; -reset_err: - snd_soc_free_ac97_codec(wm9712->ac97); +err_put_device: + put_device(&wm9712->ac97->dev); return ret; } diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c index 71b9d5b0734d..6ab1122a3872 100644 --- a/sound/soc/codecs/wm9713.c +++ b/sound/soc/codecs/wm9713.c @@ -1225,7 +1225,7 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec) struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec); int ret = 0, reg; - wm9713->ac97 = snd_soc_new_ac97_codec(codec); + wm9713->ac97 = snd_soc_alloc_ac97_codec(codec); if (IS_ERR(wm9713->ac97)) return PTR_ERR(wm9713->ac97); @@ -1234,7 +1234,11 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec) wm9713_reset(codec, 0); ret = wm9713_reset(codec, 1); if (ret < 0) - goto reset_err; + goto err_put_device; + + ret = device_add(&wm9713->ac97->dev); + if (ret) + goto err_put_device; /* unmute the adc - move to kcontrol */ reg = ac97_read(codec, AC97_CD) & 0x7fff; @@ -1242,8 +1246,8 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec) return 0; -reset_err: - snd_soc_free_ac97_codec(wm9713->ac97); +err_put_device: + put_device(&wm9713->ac97->dev); return ret; } diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c index 5bf14040c24a..8156cc1accb7 100644 --- a/sound/soc/intel/sst-haswell-ipc.c +++ b/sound/soc/intel/sst-haswell-ipc.c @@ -651,11 +651,11 @@ static void hsw_notification_work(struct work_struct *work) } /* tell DSP that notification has been handled */ - sst_dsp_shim_update_bits_unlocked(hsw->dsp, SST_IPCD, + sst_dsp_shim_update_bits(hsw->dsp, SST_IPCD, SST_IPCD_BUSY | SST_IPCD_DONE, SST_IPCD_DONE); /* unmask busy interrupt */ - sst_dsp_shim_update_bits_unlocked(hsw->dsp, SST_IMRX, SST_IMRX_BUSY, 0); + sst_dsp_shim_update_bits(hsw->dsp, SST_IMRX, SST_IMRX_BUSY, 0); } static struct ipc_message *reply_find_msg(struct sst_hsw *hsw, u32 header) diff --git a/sound/soc/intel/sst/sst_acpi.c b/sound/soc/intel/sst/sst_acpi.c index 2ac72eb5e75d..b3360139c41a 100644 --- a/sound/soc/intel/sst/sst_acpi.c +++ b/sound/soc/intel/sst/sst_acpi.c @@ -350,7 +350,7 @@ static struct sst_machines sst_acpi_bytcr[] = { /* Cherryview-based platforms: CherryTrail and Braswell */ static struct sst_machines sst_acpi_chv[] = { - {"10EC5670", "cht-bsw", "cht-bsw-rt5672", NULL, "fw_sst_22a8.bin", + {"10EC5670", "cht-bsw", "cht-bsw-rt5672", NULL, "intel/fw_sst_22a8.bin", &chv_platform_data }, {}, }; diff --git a/sound/soc/soc-ac97.c b/sound/soc/soc-ac97.c index 2e10e9a38376..08d7259bbaab 100644 --- a/sound/soc/soc-ac97.c +++ b/sound/soc/soc-ac97.c @@ -48,15 +48,18 @@ static void soc_ac97_device_release(struct device *dev) } /** - * 
snd_soc_new_ac97_codec - initailise AC97 device - * @codec: audio codec + * snd_soc_alloc_ac97_codec() - Allocate new a AC'97 device + * @codec: The CODEC for which to create the AC'97 device * - * Initialises AC97 codec resources for use by ad-hoc devices only. + * Allocated a new snd_ac97 device and intializes it, but does not yet register + * it. The caller is responsible to either call device_add(&ac97->dev) to + * register the device, or to call put_device(&ac97->dev) to free the device. + * + * Returns: A snd_ac97 device or a PTR_ERR in case of an error. */ -struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec) +struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec) { struct snd_ac97 *ac97; - int ret; ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL); if (ac97 == NULL) @@ -73,7 +76,28 @@ struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec) codec->component.card->snd_card->number, 0, codec->component.name); - ret = device_register(&ac97->dev); + device_initialize(&ac97->dev); + + return ac97; +} +EXPORT_SYMBOL(snd_soc_alloc_ac97_codec); + +/** + * snd_soc_new_ac97_codec - initailise AC97 device + * @codec: audio codec + * + * Initialises AC97 codec resources for use by ad-hoc devices only. + */ +struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec) +{ + struct snd_ac97 *ac97; + int ret; + + ac97 = snd_soc_alloc_ac97_codec(codec); + if (IS_ERR(ac97)) + return ac97; + + ret = device_add(&ac97->dev); if (ret) { put_device(&ac97->dev); return ERR_PTR(ret); diff --git a/tools/lib/lockdep/.gitignore b/tools/lib/lockdep/.gitignore new file mode 100644 index 000000000000..cc0e7a9f99e3 --- /dev/null +++ b/tools/lib/lockdep/.gitignore @@ -0,0 +1 @@ +liblockdep.so.* diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile index 52f9279c6c13..4b866c54f624 100644 --- a/tools/lib/lockdep/Makefile +++ b/tools/lib/lockdep/Makefile @@ -104,7 +104,7 @@ N = export Q VERBOSE -INCLUDES = -I. -I/usr/local/include -I./uinclude -I./include -I../../include $(CONFIG_INCLUDES) +INCLUDES = -I. -I./uinclude -I./include -I../../include $(CONFIG_INCLUDES) # Set compile option CFLAGS if not set elsewhere CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g |
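Illustrative aside (not part of the patch): the soc-ac97.c change above splits device creation into snd_soc_alloc_ac97_codec(), which only does device_initialize(), and a later device_add(), so the wm9705/wm9712/wm9713 probes can reset the chip before the device becomes visible and can unwind with put_device() on any failure. The kernel-context sketch below condenses that probe shape using only calls that appear in the patch; example_ac97_probe() and my_codec_reset() are placeholders, and the snippet is not buildable outside a kernel tree.

/* Condensed sketch of the probe pattern introduced above; my_codec_reset()
 * stands in for the driver-specific reset (wm9705_reset() and friends). */
static int example_ac97_probe(struct snd_soc_codec *codec)
{
    struct snd_ac97 *ac97;
    int ret;

    ac97 = snd_soc_alloc_ac97_codec(codec);   /* initialized, not yet added */
    if (IS_ERR(ac97))
        return PTR_ERR(ac97);

    ret = my_codec_reset(codec);              /* safe: device not visible yet */
    if (ret)
        goto err_put_device;

    ret = device_add(&ac97->dev);             /* publish the device */
    if (ret)
        goto err_put_device;

    snd_soc_codec_set_drvdata(codec, ac97);
    return 0;

err_put_device:
    put_device(&ac97->dev);                   /* drops the device_initialize() ref */
    return ret;
}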