314 files changed, 3071 insertions(+), 1831 deletions(-)
diff --git a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt index d3058768b23d..c53e0b08032f 100644 --- a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt +++ b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt @@ -35,7 +35,7 @@ Example: device_type = "dma"; reg = <0x0 0x1f270000 0x0 0x10000>, <0x0 0x1f200000 0x0 0x10000>, - <0x0 0x1b008000 0x0 0x2000>, + <0x0 0x1b000000 0x0 0x400000>, <0x0 0x1054a000 0x0 0x100>; interrupts = <0x0 0x82 0x4>, <0x0 0xb8 0x4>, diff --git a/Documentation/devicetree/bindings/phy/ti-phy.txt b/Documentation/devicetree/bindings/phy/ti-phy.txt index 305e3df3d9b1..9cf9446eaf2e 100644 --- a/Documentation/devicetree/bindings/phy/ti-phy.txt +++ b/Documentation/devicetree/bindings/phy/ti-phy.txt @@ -82,6 +82,9 @@ Optional properties: - id: If there are multiple instance of the same type, in order to differentiate between each instance "id" can be used (e.g., multi-lane PCIe PHY). If "id" is not provided, it is set to default value of '1'. + - syscon-pllreset: Handle to system control region that contains the + CTRL_CORE_SMA_SW_0 register and register offset to the CTRL_CORE_SMA_SW_0 + register that contains the SATA_PLL_SOFT_RESET bit. Only valid for sata_phy. This is usually a subnode of ocp2scp to which it is connected. @@ -100,3 +103,16 @@ usb3phy@4a084400 { "sysclk", "refclk"; }; + +sata_phy: phy@4A096000 { + compatible = "ti,phy-pipe3-sata"; + reg = <0x4A096000 0x80>, /* phy_rx */ + <0x4A096400 0x64>, /* phy_tx */ + <0x4A096800 0x40>; /* pll_ctrl */ + reg-names = "phy_rx", "phy_tx", "pll_ctrl"; + ctrl-module = <&omap_control_sata>; + clocks = <&sys_clkin1>, <&sata_ref_clk>; + clock-names = "sysclk", "refclk"; + syscon-pllreset = <&scm_conf 0x3fc>; + #phy-cells = <0>; +}; diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt index c86f2f1ae4f6..1fec1135791d 100644 --- a/Documentation/input/alps.txt +++ b/Documentation/input/alps.txt @@ -119,8 +119,10 @@ ALPS Absolute Mode - Protocol Version 2 byte 5: 0 z6 z5 z4 z3 z2 z1 z0 Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for -the DualPoint Stick. For non interleaved dualpoint devices the pointingstick -buttons get reported separately in the PSM, PSR and PSL bits. +the DualPoint Stick. The M, R and L bits signal the combined status of both +the pointingstick and touchpad buttons, except for Dell dualpoint devices +where the pointingstick buttons get reported separately in the PSM, PSR +and PSL bits. 
Dualpoint device -- interleaved packet format --------------------------------------------- diff --git a/MAINTAINERS b/MAINTAINERS index 98ede02a96f2..ca51eba9fe5d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5600,6 +5600,7 @@ F: kernel/irq/ IRQCHIP DRIVERS M: Thomas Gleixner <tglx@linutronix.de> M: Jason Cooper <jason@lakedaemon.net> +M: Marc Zyngier <marc.zyngier@arm.com> L: linux-kernel@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core @@ -5608,11 +5609,14 @@ F: Documentation/devicetree/bindings/interrupt-controller/ F: drivers/irqchip/ IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) -M: Benjamin Herrenschmidt <benh@kernel.crashing.org> +M: Jiang Liu <jiang.liu@linux.intel.com> +M: Marc Zyngier <marc.zyngier@arm.com> S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core F: Documentation/IRQ-domain.txt F: include/linux/irqdomain.h F: kernel/irq/irqdomain.c +F: kernel/irq/msi.c ISAPNP M: Jaroslav Kysela <perex@perex.cz> @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 2 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = -rc6 NAME = Hurr durr I'ma sheep # *DOCUMENTATION* @@ -597,6 +597,11 @@ endif # $(dot-config) # Defaults to vmlinux, but the arch makefile usually adds further targets all: vmlinux +# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default +# values of the respective KBUILD_* variables +ARCH_CPPFLAGS := +ARCH_AFLAGS := +ARCH_CFLAGS := include arch/$(SRCARCH)/Makefile KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,) @@ -848,10 +853,10 @@ export mod_strip_cmd mod_compress_cmd = true ifdef CONFIG_MODULE_COMPRESS ifdef CONFIG_MODULE_COMPRESS_GZIP - mod_compress_cmd = gzip -n + mod_compress_cmd = gzip -n -f endif # CONFIG_MODULE_COMPRESS_GZIP ifdef CONFIG_MODULE_COMPRESS_XZ - mod_compress_cmd = xz + mod_compress_cmd = xz -f endif # CONFIG_MODULE_COMPRESS_XZ endif # CONFIG_MODULE_COMPRESS export mod_compress_cmd diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 91cf4055acab..bd4670d1b89b 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -313,11 +313,11 @@ config ARC_PAGE_SIZE_8K config ARC_PAGE_SIZE_16K bool "16KB" - depends on ARC_MMU_V3 + depends on ARC_MMU_V3 || ARC_MMU_V4 config ARC_PAGE_SIZE_4K bool "4KB" - depends on ARC_MMU_V3 + depends on ARC_MMU_V3 || ARC_MMU_V4 endchoice @@ -365,6 +365,11 @@ config ARC_HAS_LLSC default y depends on !ARC_CANT_LLSC +config ARC_STAR_9000923308 + bool "Workaround for llock/scond livelock" + default y + depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC + config ARC_HAS_SWAPE bool "Insn: SWAPE (endian-swap)" default y @@ -379,6 +384,10 @@ config ARC_HAS_LL64 dest operands with 2 possible source operands. 
default y +config ARC_HAS_DIV_REM + bool "Insn: div, divu, rem, remu" + default y + config ARC_HAS_RTC bool "Local 64-bit r/o cycle counter" default n diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 46d87310220d..8a27a48304a4 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -36,8 +36,16 @@ cflags-$(atleast_gcc44) += -fsection-anchors cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape +ifdef CONFIG_ISA_ARCV2 + ifndef CONFIG_ARC_HAS_LL64 -cflags-$(CONFIG_ISA_ARCV2) += -mno-ll64 +cflags-y += -mno-ll64 +endif + +ifndef CONFIG_ARC_HAS_DIV_REM +cflags-y += -mno-div-rem +endif + endif cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index 070f58827a5c..c8f57b8449dc 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h @@ -89,11 +89,10 @@ #define ECR_C_BIT_DTLB_LD_MISS 8 #define ECR_C_BIT_DTLB_ST_MISS 9 - /* Auxiliary registers */ #define AUX_IDENTITY 4 #define AUX_INTR_VEC_BASE 0x25 - +#define AUX_NON_VOL 0x5e /* * Floating Pt Registers @@ -240,9 +239,9 @@ struct bcr_extn_xymem { struct bcr_perip { #ifdef CONFIG_CPU_BIG_ENDIAN - unsigned int start:8, pad2:8, sz:8, pad:8; + unsigned int start:8, pad2:8, sz:8, ver:8; #else - unsigned int pad:8, sz:8, pad2:8, start:8; + unsigned int ver:8, sz:8, pad2:8, start:8; #endif }; diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 03484cb4d16d..87d18ae53115 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -23,33 +23,60 @@ #define atomic_set(v, i) (((v)->counter) = (i)) -#ifdef CONFIG_ISA_ARCV2 -#define PREFETCHW " prefetchw [%1] \n" -#else -#define PREFETCHW +#ifdef CONFIG_ARC_STAR_9000923308 + +#define SCOND_FAIL_RETRY_VAR_DEF \ + unsigned int delay = 1, tmp; \ + +#define SCOND_FAIL_RETRY_ASM \ + " bz 4f \n" \ + " ; --- scond fail delay --- \n" \ + " mov %[tmp], %[delay] \n" /* tmp = delay */ \ + "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \ + " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \ + " rol %[delay], %[delay] \n" /* delay *= 2 */ \ + " b 1b \n" /* start over */ \ + "4: ; --- success --- \n" \ + +#define SCOND_FAIL_RETRY_VARS \ + ,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \ + +#else /* !CONFIG_ARC_STAR_9000923308 */ + +#define SCOND_FAIL_RETRY_VAR_DEF + +#define SCOND_FAIL_RETRY_ASM \ + " bnz 1b \n" \ + +#define SCOND_FAIL_RETRY_VARS + #endif #define ATOMIC_OP(op, c_op, asm_op) \ static inline void atomic_##op(int i, atomic_t *v) \ { \ - unsigned int temp; \ + unsigned int val; \ + SCOND_FAIL_RETRY_VAR_DEF \ \ __asm__ __volatile__( \ - "1: \n" \ - PREFETCHW \ - " llock %0, [%1] \n" \ - " " #asm_op " %0, %0, %2 \n" \ - " scond %0, [%1] \n" \ - " bnz 1b \n" \ - : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \ - : "r"(&v->counter), "ir"(i) \ + "1: llock %[val], [%[ctr]] \n" \ + " " #asm_op " %[val], %[val], %[i] \n" \ + " scond %[val], [%[ctr]] \n" \ + " \n" \ + SCOND_FAIL_RETRY_ASM \ + \ + : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \ + SCOND_FAIL_RETRY_VARS \ + : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \ + [i] "ir" (i) \ : "cc"); \ } \ #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ static inline int atomic_##op##_return(int i, atomic_t *v) \ { \ - unsigned int temp; \ + unsigned int val; \ + SCOND_FAIL_RETRY_VAR_DEF \ \ /* \ * Explicit full memory barrier needed before/after as \ @@ -58,19 +85,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ 
smp_mb(); \ \ __asm__ __volatile__( \ - "1: \n" \ - PREFETCHW \ - " llock %0, [%1] \n" \ - " " #asm_op " %0, %0, %2 \n" \ - " scond %0, [%1] \n" \ - " bnz 1b \n" \ - : "=&r"(temp) \ - : "r"(&v->counter), "ir"(i) \ + "1: llock %[val], [%[ctr]] \n" \ + " " #asm_op " %[val], %[val], %[i] \n" \ + " scond %[val], [%[ctr]] \n" \ + " \n" \ + SCOND_FAIL_RETRY_ASM \ + \ + : [val] "=&r" (val) \ + SCOND_FAIL_RETRY_VARS \ + : [ctr] "r" (&v->counter), \ + [i] "ir" (i) \ : "cc"); \ \ smp_mb(); \ \ - return temp; \ + return val; \ } #else /* !CONFIG_ARC_HAS_LLSC */ @@ -150,6 +179,9 @@ ATOMIC_OP(and, &=, and) #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP +#undef SCOND_FAIL_RETRY_VAR_DEF +#undef SCOND_FAIL_RETRY_ASM +#undef SCOND_FAIL_RETRY_VARS /** * __atomic_add_unless - add unless the number is a given value diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h index 91694ec1ce95..69095da1fcfd 100644 --- a/arch/arc/include/asm/ptrace.h +++ b/arch/arc/include/asm/ptrace.h @@ -20,20 +20,20 @@ struct pt_regs { /* Real registers */ - long bta; /* bta_l1, bta_l2, erbta */ + unsigned long bta; /* bta_l1, bta_l2, erbta */ - long lp_start, lp_end, lp_count; + unsigned long lp_start, lp_end, lp_count; - long status32; /* status32_l1, status32_l2, erstatus */ - long ret; /* ilink1, ilink2 or eret */ - long blink; - long fp; - long r26; /* gp */ + unsigned long status32; /* status32_l1, status32_l2, erstatus */ + unsigned long ret; /* ilink1, ilink2 or eret */ + unsigned long blink; + unsigned long fp; + unsigned long r26; /* gp */ - long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; + unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; - long sp; /* user/kernel sp depending on where we came from */ - long orig_r0; + unsigned long sp; /* User/Kernel depending on where we came from */ + unsigned long orig_r0; /* * To distinguish bet excp, syscall, irq @@ -55,13 +55,13 @@ struct pt_regs { unsigned long event; }; - long user_r25; + unsigned long user_r25; }; #else struct pt_regs { - long orig_r0; + unsigned long orig_r0; union { struct { @@ -76,26 +76,26 @@ struct pt_regs { unsigned long event; }; - long bta; /* bta_l1, bta_l2, erbta */ + unsigned long bta; /* bta_l1, bta_l2, erbta */ - long user_r25; + unsigned long user_r25; - long r26; /* gp */ - long fp; - long sp; /* user/kernel sp depending on where we came from */ + unsigned long r26; /* gp */ + unsigned long fp; + unsigned long sp; /* user/kernel sp depending on where we came from */ - long r12; + unsigned long r12; /*------- Below list auto saved by h/w -----------*/ - long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11; + unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11; - long blink; - long lp_end, lp_start, lp_count; + unsigned long blink; + unsigned long lp_end, lp_start, lp_count; - long ei, ldi, jli; + unsigned long ei, ldi, jli; - long ret; - long status32; + unsigned long ret; + unsigned long status32; }; #endif @@ -103,10 +103,10 @@ struct pt_regs { /* Callee saved registers - need to be saved only when you are scheduled out */ struct callee_regs { - long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13; + unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13; }; -#define instruction_pointer(regs) (unsigned long)((regs)->ret) +#define instruction_pointer(regs) ((regs)->ret) #define profile_pc(regs) instruction_pointer(regs) /* return 1 if user mode or 0 if kernel mode */ @@ -142,7 +142,7 @@ struct callee_regs { static inline long 
regs_return_value(struct pt_regs *regs) { - return regs->r0; + return (long)regs->r0; } #endif /* !__ASSEMBLY__ */ diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h index e1651df6a93d..db8c59d1eaeb 100644 --- a/arch/arc/include/asm/spinlock.h +++ b/arch/arc/include/asm/spinlock.h @@ -18,9 +18,518 @@ #define arch_spin_unlock_wait(x) \ do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0) +#ifdef CONFIG_ARC_HAS_LLSC + +/* + * A normal LLOCK/SCOND based system, w/o need for livelock workaround + */ +#ifndef CONFIG_ARC_STAR_9000923308 + static inline void arch_spin_lock(arch_spinlock_t *lock) { - unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__; + unsigned int val; + + smp_mb(); + + __asm__ __volatile__( + "1: llock %[val], [%[slock]] \n" + " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */ + " scond %[LOCKED], [%[slock]] \n" /* acquire */ + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : [slock] "r" (&(lock->slock)), + [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_spin_trylock(arch_spinlock_t *lock) +{ + unsigned int val, got_it = 0; + + smp_mb(); + + __asm__ __volatile__( + "1: llock %[val], [%[slock]] \n" + " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */ + " scond %[LOCKED], [%[slock]] \n" /* acquire */ + " bnz 1b \n" + " mov %[got_it], 1 \n" + "4: \n" + " \n" + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + : [slock] "r" (&(lock->slock)), + [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + smp_mb(); + + lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__; + + smp_mb(); +} + +/* + * Read-write spinlocks, allowing multiple readers but only one writer. + * Unfair locking as Writers could be starved indefinitely by Reader(s) + */ + +static inline void arch_read_lock(arch_rwlock_t *rw) +{ + unsigned int val; + + smp_mb(); + + /* + * zero means writer holds the lock exclusively, deny Reader. + * Otherwise grant lock to first/subseq reader + * + * if (rw->counter > 0) { + * rw->counter--; + * ret = 1; + * } + */ + + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */ + " sub %[val], %[val], 1 \n" /* reader lock */ + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : [rwlock] "r" (&(rw->counter)), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_read_trylock(arch_rwlock_t *rw) +{ + unsigned int val, got_it = 0; + + smp_mb(); + + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */ + " sub %[val], %[val], 1 \n" /* counter-- */ + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" /* retry if collided with someone */ + " mov %[got_it], 1 \n" + " \n" + "4: ; --- done --- \n" + + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + : [rwlock] "r" (&(rw->counter)), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_write_lock(arch_rwlock_t *rw) +{ + unsigned int val; + + smp_mb(); + + /* + * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__), + * deny writer. Otherwise if unlocked grant to writer + * Hence the claim that Linux rwlocks are unfair to writers. + * (can be starved for an indefinite time by readers). 
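 *
 * A minimal sketch of the counter encoding, inferred from the
 * spinlock_types.h change in this same diff (__ARCH_RW_LOCK_UNLOCKED__
 * is 0x01000000):
 *
 *	counter == 0x01000000		-> unlocked
 *	0 < counter < 0x01000000	-> read-held, one decrement
 *					   per reader
 *	counter == 0			-> write-held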
+ * + * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) { + * rw->counter = 0; + * ret = 1; + * } + */ + + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */ + " mov %[val], %[WR_LOCKED] \n" + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : [rwlock] "r" (&(rw->counter)), + [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_write_trylock(arch_rwlock_t *rw) +{ + unsigned int val, got_it = 0; + + smp_mb(); + + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */ + " mov %[val], %[WR_LOCKED] \n" + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" /* retry if collided with someone */ + " mov %[got_it], 1 \n" + " \n" + "4: ; --- done --- \n" + + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + : [rwlock] "r" (&(rw->counter)), + [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_read_unlock(arch_rwlock_t *rw) +{ + unsigned int val; + + smp_mb(); + + /* + * rw->counter++; + */ + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " add %[val], %[val], 1 \n" + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : [rwlock] "r" (&(rw->counter)) + : "memory", "cc"); + + smp_mb(); +} + +static inline void arch_write_unlock(arch_rwlock_t *rw) +{ + smp_mb(); + + rw->counter = __ARCH_RW_LOCK_UNLOCKED__; + + smp_mb(); +} + +#else /* CONFIG_ARC_STAR_9000923308 */ + +/* + * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping + * coherency transactions in the SCU. The exclusive line state keeps rotating + * among contenting cores leading to a never ending cycle. So break the cycle + * by deferring the retry of failed exclusive access (SCOND). The actual delay + * needed is function of number of contending cores as well as the unrelated + * coherency traffic from other cores. To keep the code simple, start off with + * small delay of 1 which would suffice most cases and in case of contention + * double the delay. Eventually the delay is sufficient such that the coherency + * pipeline is drained, thus a subsequent exclusive access would succeed. 
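 *
 * In C terms, the retry policy amounts to roughly the following
 * minimal sketch (scond_failed() is a hypothetical stand-in for the
 * real llock/scond attempt; the actual implementation is the
 * SCOND_FAIL_RETRY_* asm macros below):
 *
 *	unsigned int delay = 1, tmp;
 *
 *	while (scond_failed()) {
 *		for (tmp = delay; tmp != 0; tmp--)
 *			;		/* spin to drain coherency traffic */
 *		delay *= 2;		/* 'rol' doubles the backoff */
 *	}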
+ */ + +#define SCOND_FAIL_RETRY_VAR_DEF \ + unsigned int delay, tmp; \ + +#define SCOND_FAIL_RETRY_ASM \ + " ; --- scond fail delay --- \n" \ + " mov %[tmp], %[delay] \n" /* tmp = delay */ \ + "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \ + " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \ + " rol %[delay], %[delay] \n" /* delay *= 2 */ \ + " b 1b \n" /* start over */ \ + " \n" \ + "4: ; --- done --- \n" \ + +#define SCOND_FAIL_RETRY_VARS \ + ,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \ + +static inline void arch_spin_lock(arch_spinlock_t *lock) +{ + unsigned int val; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[slock]] \n" + " breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */ + " scond %[LOCKED], [%[slock]] \n" /* acquire */ + " bz 4f \n" /* done */ + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val) + SCOND_FAIL_RETRY_VARS + : [slock] "r" (&(lock->slock)), + [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_spin_trylock(arch_spinlock_t *lock) +{ + unsigned int val, got_it = 0; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[slock]] \n" + " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */ + " scond %[LOCKED], [%[slock]] \n" /* acquire */ + " bz.d 4f \n" + " mov.z %[got_it], 1 \n" /* got it */ + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + SCOND_FAIL_RETRY_VARS + : [slock] "r" (&(lock->slock)), + [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + smp_mb(); + + lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__; + + smp_mb(); +} + +/* + * Read-write spinlocks, allowing multiple readers but only one writer. + * Unfair locking as Writers could be starved indefinitely by Reader(s) + */ + +static inline void arch_read_lock(arch_rwlock_t *rw) +{ + unsigned int val; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + /* + * zero means writer holds the lock exclusively, deny Reader. 
+ * Otherwise grant lock to first/subseq reader + * + * if (rw->counter > 0) { + * rw->counter--; + * ret = 1; + * } + */ + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[rwlock]] \n" + " brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */ + " sub %[val], %[val], 1 \n" /* reader lock */ + " scond %[val], [%[rwlock]] \n" + " bz 4f \n" /* done */ + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val) + SCOND_FAIL_RETRY_VARS + : [rwlock] "r" (&(rw->counter)), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_read_trylock(arch_rwlock_t *rw) +{ + unsigned int val, got_it = 0; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[rwlock]] \n" + " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */ + " sub %[val], %[val], 1 \n" /* counter-- */ + " scond %[val], [%[rwlock]] \n" + " bz.d 4f \n" + " mov.z %[got_it], 1 \n" /* got it */ + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + SCOND_FAIL_RETRY_VARS + : [rwlock] "r" (&(rw->counter)), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_write_lock(arch_rwlock_t *rw) +{ + unsigned int val; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + /* + * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__), + * deny writer. Otherwise if unlocked grant to writer + * Hence the claim that Linux rwlocks are unfair to writers. + * (can be starved for an indefinite time by readers). + * + * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) { + * rw->counter = 0; + * ret = 1; + * } + */ + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[rwlock]] \n" + " brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */ + " mov %[val], %[WR_LOCKED] \n" + " scond %[val], [%[rwlock]] \n" + " bz 4f \n" + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val) + SCOND_FAIL_RETRY_VARS + : [rwlock] "r" (&(rw->counter)), + [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_write_trylock(arch_rwlock_t *rw) +{ + unsigned int val, got_it = 0; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[rwlock]] \n" + " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */ + " mov %[val], %[WR_LOCKED] \n" + " scond %[val], [%[rwlock]] \n" + " bz.d 4f \n" + " mov.z %[got_it], 1 \n" /* got it */ + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + SCOND_FAIL_RETRY_VARS + : [rwlock] "r" (&(rw->counter)), + [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_read_unlock(arch_rwlock_t *rw) +{ + unsigned int val; + + smp_mb(); + + /* + * rw->counter++; + */ + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " add %[val], %[val], 1 \n" + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : [rwlock] "r" (&(rw->counter)) + : "memory", "cc"); + + smp_mb(); +} + +static inline void arch_write_unlock(arch_rwlock_t *rw) +{ + unsigned int val; + + smp_mb(); + + /* + * rw->counter = __ARCH_RW_LOCK_UNLOCKED__; + */ + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " scond %[UNLOCKED], [%[rwlock]]\n" + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : 
[rwlock] "r" (&(rw->counter)), + [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__) + : "memory", "cc"); + + smp_mb(); +} + +#undef SCOND_FAIL_RETRY_VAR_DEF +#undef SCOND_FAIL_RETRY_ASM +#undef SCOND_FAIL_RETRY_VARS + +#endif /* CONFIG_ARC_STAR_9000923308 */ + +#else /* !CONFIG_ARC_HAS_LLSC */ + +static inline void arch_spin_lock(arch_spinlock_t *lock) +{ + unsigned int val = __ARCH_SPIN_LOCK_LOCKED__; /* * This smp_mb() is technically superfluous, we only need the one @@ -33,7 +542,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) __asm__ __volatile__( "1: ex %0, [%1] \n" " breq %0, %2, 1b \n" - : "+&r" (tmp) + : "+&r" (val) : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__) : "memory"); @@ -48,26 +557,27 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) smp_mb(); } +/* 1 - lock taken successfully */ static inline int arch_spin_trylock(arch_spinlock_t *lock) { - unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__; + unsigned int val = __ARCH_SPIN_LOCK_LOCKED__; smp_mb(); __asm__ __volatile__( "1: ex %0, [%1] \n" - : "+r" (tmp) + : "+r" (val) : "r"(&(lock->slock)) : "memory"); smp_mb(); - return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__); + return (val == __ARCH_SPIN_LOCK_UNLOCKED__); } static inline void arch_spin_unlock(arch_spinlock_t *lock) { - unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__; + unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__; /* * RELEASE barrier: given the instructions avail on ARCv2, full barrier @@ -77,7 +587,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) __asm__ __volatile__( " ex %0, [%1] \n" - : "+r" (tmp) + : "+r" (val) : "r"(&(lock->slock)) : "memory"); @@ -90,19 +600,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) /* * Read-write spinlocks, allowing multiple readers but only one writer. + * Unfair locking as Writers could be starved indefinitely by Reader(s) * * The spinlock itself is contained in @counter and access to it is * serialized with @lock_mutex. - * - * Unfair locking as Writers could be starved indefinitely by Reader(s) */ -/* Would read_trylock() succeed? */ -#define arch_read_can_lock(x) ((x)->counter > 0) - -/* Would write_trylock() succeed? 
*/ -#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__) - /* 1 - lock taken successfully */ static inline int arch_read_trylock(arch_rwlock_t *rw) { @@ -173,6 +676,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) arch_spin_unlock(&(rw->lock_mutex)); } +#endif + +#define arch_read_can_lock(x) ((x)->counter > 0) +#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__) + #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h index 662627ced4f2..4e1ef5f650c6 100644 --- a/arch/arc/include/asm/spinlock_types.h +++ b/arch/arc/include/asm/spinlock_types.h @@ -26,7 +26,9 @@ typedef struct { */ typedef struct { volatile unsigned int counter; +#ifndef CONFIG_ARC_HAS_LLSC arch_spinlock_t lock_mutex; +#endif } arch_rwlock_t; #define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000 diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h index 76a7739aab1c..0b3ef63d4a03 100644 --- a/arch/arc/include/uapi/asm/ptrace.h +++ b/arch/arc/include/uapi/asm/ptrace.h @@ -32,20 +32,20 @@ */ struct user_regs_struct { - long pad; + unsigned long pad; struct { - long bta, lp_start, lp_end, lp_count; - long status32, ret, blink, fp, gp; - long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; - long sp; + unsigned long bta, lp_start, lp_end, lp_count; + unsigned long status32, ret, blink, fp, gp; + unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; + unsigned long sp; } scratch; - long pad2; + unsigned long pad2; struct { - long r25, r24, r23, r22, r21, r20; - long r19, r18, r17, r16, r15, r14, r13; + unsigned long r25, r24, r23, r22, r21, r20; + unsigned long r19, r18, r17, r16, r15, r14, r13; } callee; - long efa; /* break pt addr, for break points in delay slots */ - long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */ + unsigned long efa; /* break pt addr, for break points in delay slots */ + unsigned long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */ }; #endif /* !__ASSEMBLY__ */ diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index 18cc01591c96..cabde9dc0696 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -47,6 +47,7 @@ static void read_arc_build_cfg_regs(void) struct bcr_perip uncached_space; struct bcr_generic bcr; struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; + unsigned long perip_space; FIX_PTR(cpu); READ_BCR(AUX_IDENTITY, cpu->core); @@ -56,7 +57,12 @@ static void read_arc_build_cfg_regs(void) cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE); READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space); - BUG_ON((uncached_space.start << 24) != ARC_UNCACHED_ADDR_SPACE); + if (uncached_space.ver < 3) + perip_space = uncached_space.start << 24; + else + perip_space = read_aux_reg(AUX_NON_VOL) & 0xF0000000; + + BUG_ON(perip_space != ARC_UNCACHED_ADDR_SPACE); READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy); @@ -330,6 +336,10 @@ static void arc_chk_core_config(void) pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n"); else if (!cpu->extn.fpu_dp && fpu_enabled) panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n"); + + if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic && + !IS_ENABLED(CONFIG_ARC_STAR_9000923308)) + panic("llock/scond livelock workaround missing\n"); } /* diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c index 3364d2bbc515..4294761a2b3e 100644 
--- a/arch/arc/kernel/time.c +++ b/arch/arc/kernel/time.c @@ -203,34 +203,24 @@ static int arc_clkevent_set_next_event(unsigned long delta, return 0; } -static void arc_clkevent_set_mode(enum clock_event_mode mode, - struct clock_event_device *dev) +static int arc_clkevent_set_periodic(struct clock_event_device *dev) { - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - /* - * At X Hz, 1 sec = 1000ms -> X cycles; - * 10ms -> X / 100 cycles - */ - arc_timer_event_setup(arc_get_core_freq() / HZ); - break; - case CLOCK_EVT_MODE_ONESHOT: - break; - default: - break; - } - - return; + /* + * At X Hz, 1 sec = 1000ms -> X cycles; + * 10ms -> X / 100 cycles + */ + arc_timer_event_setup(arc_get_core_freq() / HZ); + return 0; } static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = { - .name = "ARC Timer0", - .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, - .mode = CLOCK_EVT_MODE_UNUSED, - .rating = 300, - .irq = TIMER0_IRQ, /* hardwired, no need for resources */ - .set_next_event = arc_clkevent_set_next_event, - .set_mode = arc_clkevent_set_mode, + .name = "ARC Timer0", + .features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_PERIODIC, + .rating = 300, + .irq = TIMER0_IRQ, /* hardwired, no need for resources */ + .set_next_event = arc_clkevent_set_next_event, + .set_state_periodic = arc_clkevent_set_periodic, }; static irqreturn_t timer_irq_handler(int irq, void *dev_id) @@ -240,7 +230,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id) * irq_set_chip_and_handler() asked for handle_percpu_devid_irq() */ struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); - int irq_reenable = evt->mode == CLOCK_EVT_MODE_PERIODIC; + int irq_reenable = clockevent_state_periodic(evt); /* * Any write to CTRL reg ACks the interrupt, we rewrite the diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S index 1b2b3acfed52..0cab0b8a57c5 100644 --- a/arch/arc/lib/memcpy-archs.S +++ b/arch/arc/lib/memcpy-archs.S @@ -206,7 +206,7 @@ unalignedOffby3: ld.ab r6, [r1, 4] prefetch [r1, 28] ;Prefetch the next read location ld.ab r8, [r1,4] - prefetch [r3, 32] ;Prefetch the next write location + prefetchw [r3, 32] ;Prefetch the next write location SHIFT_1 (r7, r6, 8) or r7, r7, r5 diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S index 92d573c734b5..365b18364815 100644 --- a/arch/arc/lib/memset-archs.S +++ b/arch/arc/lib/memset-archs.S @@ -10,12 +10,6 @@ #undef PREALLOC_NOT_AVAIL -#ifdef PREALLOC_NOT_AVAIL -#define PREWRITE(A,B) prefetchw [(A),(B)] -#else -#define PREWRITE(A,B) prealloc [(A),(B)] -#endif - ENTRY(memset) prefetchw [r0] ; Prefetch the write location mov.f 0, r2 @@ -51,9 +45,15 @@ ENTRY(memset) ;;; Convert len to Dwords, unfold x8 lsr.f lp_count, lp_count, 6 + lpnz @.Lset64bytes ;; LOOP START - PREWRITE(r3, 64) ;Prefetch the next write location +#ifdef PREALLOC_NOT_AVAIL + prefetchw [r3, 64] ;Prefetch the next write location +#else + prealloc [r3, 64] +#endif +#ifdef CONFIG_ARC_HAS_LL64 std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] @@ -62,16 +62,45 @@ ENTRY(memset) std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] +#else + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] +#endif .Lset64bytes: lsr.f lp_count, r2, 5 ;Last remaining max 124 
bytes lpnz .Lset32bytes ;; LOOP START prefetchw [r3, 32] ;Prefetch the next write location +#ifdef CONFIG_ARC_HAS_LL64 std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] +#else + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] +#endif .Lset32bytes: and.f lp_count, r2, 0x1F ;Last remaining 31 bytes diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c index 99f7da513a48..e7769c3ab5f2 100644 --- a/arch/arc/plat-axs10x/axs10x.c +++ b/arch/arc/plat-axs10x/axs10x.c @@ -389,6 +389,21 @@ axs103_set_freq(unsigned int id, unsigned int fd, unsigned int od) static void __init axs103_early_init(void) { + /* + * AXS103 configurations for SMP/QUAD configurations share device tree + * which defaults to 90 MHz. However recent failures of Quad config + * revealed P&R timing violations so clamp it down to safe 50 MHz + * Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack + * + * This hack is really hacky as of now. Fix it properly by getting the + * number of cores as return value of platform's early SMP callback + */ +#ifdef CONFIG_ARC_MCIP + unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F; + if (num_cores > 2) + arc_set_core_freq(50 * 1000000); +#endif + switch (arc_get_core_freq()/1000000) { case 33: axs103_set_freq(1, 1, 1); diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 8f1e25bcecbd..4a0718ccf68e 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -1140,6 +1140,7 @@ ctrl-module = <&omap_control_sata>; clocks = <&sys_clkin1>, <&sata_ref_clk>; clock-names = "sysclk", "refclk"; + syscon-pllreset = <&scm_conf 0x3fc>; #phy-cells = <0>; }; diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi index d7201333e3bc..2db99433e17f 100644 --- a/arch/arm/boot/dts/exynos3250.dtsi +++ b/arch/arm/boot/dts/exynos3250.dtsi @@ -138,8 +138,8 @@ mipi_phy: video-phy@10020710 { compatible = "samsung,s5pv210-mipi-video-phy"; - reg = <0x10020710 8>; #phy-cells = <1>; + syscon = <&pmu_system_controller>; }; pd_cam: cam-power-domain@10023C00 { diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts index e0abfc3324d1..e050d85cdacd 100644 --- a/arch/arm/boot/dts/exynos4210-origen.dts +++ b/arch/arm/boot/dts/exynos4210-origen.dts @@ -127,6 +127,10 @@ }; }; +&cpu0 { + cpu0-supply = <&buck1_reg>; +}; + &fimd { pinctrl-0 = <&lcd_en &lcd_clk &lcd_data24 &pwm0_out>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts index 98f3ce65cb9a..ba34886f8b65 100644 --- a/arch/arm/boot/dts/exynos4210-trats.dts +++ b/arch/arm/boot/dts/exynos4210-trats.dts @@ -188,6 +188,10 @@ }; }; +&cpu0 { + cpu0-supply = <&varm_breg>; +}; + &dsi_0 { vddcore-supply = <&vusb_reg>; vddio-supply = <&vmipi_reg>; diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts index d4f2b11319dd..775892b2cc6a 100644 --- a/arch/arm/boot/dts/exynos4210-universal_c210.dts +++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts @@ -548,6 +548,10 @@ }; }; +&cpu0 { + cpu0-supply = <&vdd_arm_reg>; +}; + &pinctrl_1 { hdmi_hpd: hdmi-hpd { samsung,pins = "gpx3-7"; diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi index 10d3c173396e..3e5ba665d200 100644 --- a/arch/arm/boot/dts/exynos4210.dtsi +++ 
b/arch/arm/boot/dts/exynos4210.dtsi @@ -40,6 +40,18 @@ device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <0x900>; + clocks = <&clock CLK_ARM_CLK>; + clock-names = "cpu"; + clock-latency = <160000>; + + operating-points = < + 1200000 1250000 + 1000000 1150000 + 800000 1075000 + 500000 975000 + 400000 975000 + 200000 950000 + >; cooling-min-level = <4>; cooling-max-level = <2>; #cooling-cells = <2>; /* min followed by max */ diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi index b6478e97d6a7..e6540b5cfa4c 100644 --- a/arch/arm/boot/dts/imx35.dtsi +++ b/arch/arm/boot/dts/imx35.dtsi @@ -286,8 +286,8 @@ can1: can@53fe4000 { compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan"; reg = <0x53fe4000 0x1000>; - clocks = <&clks 33>; - clock-names = "ipg"; + clocks = <&clks 33>, <&clks 33>; + clock-names = "ipg", "per"; interrupts = <43>; status = "disabled"; }; @@ -295,8 +295,8 @@ can2: can@53fe8000 { compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan"; reg = <0x53fe8000 0x1000>; - clocks = <&clks 34>; - clock-names = "ipg"; + clocks = <&clks 34>, <&clks 34>; + clock-names = "ipg", "per"; interrupts = <44>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/k2e-clocks.dtsi index 4773d6af66a0..d56d68fe7ffc 100644 --- a/arch/arm/boot/dts/k2e-clocks.dtsi +++ b/arch/arm/boot/dts/k2e-clocks.dtsi @@ -13,9 +13,8 @@ clocks { #clock-cells = <0>; compatible = "ti,keystone,main-pll-clock"; clocks = <&refclksys>; - reg = <0x02620350 4>, <0x02310110 4>; - reg-names = "control", "multiplier"; - fixed-postdiv = <2>; + reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>; + reg-names = "control", "multiplier", "post-divider"; }; papllclk: papllclk@2620358 { diff --git a/arch/arm/boot/dts/k2hk-clocks.dtsi b/arch/arm/boot/dts/k2hk-clocks.dtsi index d5adee3c0067..af9b7190533a 100644 --- a/arch/arm/boot/dts/k2hk-clocks.dtsi +++ b/arch/arm/boot/dts/k2hk-clocks.dtsi @@ -22,9 +22,8 @@ clocks { #clock-cells = <0>; compatible = "ti,keystone,main-pll-clock"; clocks = <&refclksys>; - reg = <0x02620350 4>, <0x02310110 4>; - reg-names = "control", "multiplier"; - fixed-postdiv = <2>; + reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>; + reg-names = "control", "multiplier", "post-divider"; }; papllclk: papllclk@2620358 { diff --git a/arch/arm/boot/dts/k2l-clocks.dtsi b/arch/arm/boot/dts/k2l-clocks.dtsi index eb1e3e29f073..ef8464bb11ff 100644 --- a/arch/arm/boot/dts/k2l-clocks.dtsi +++ b/arch/arm/boot/dts/k2l-clocks.dtsi @@ -22,9 +22,8 @@ clocks { #clock-cells = <0>; compatible = "ti,keystone,main-pll-clock"; clocks = <&refclksys>; - reg = <0x02620350 4>, <0x02310110 4>; - reg-names = "control", "multiplier"; - fixed-postdiv = <2>; + reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>; + reg-names = "control", "multiplier", "post-divider"; }; papllclk: papllclk@2620358 { diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts index 3d0b8755caee..3d25dba143a5 100644 --- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts +++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts @@ -17,6 +17,7 @@ }; aliases { + serial1 = &uart1; stmpe-i2c0 = &stmpe0; stmpe-i2c1 = &stmpe1; }; diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts index 85d3b95dfdba..3c140d05f796 100644 --- a/arch/arm/boot/dts/ste-nomadik-s8815.dts +++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts @@ -15,6 +15,10 @@ bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk"; }; + aliases { + serial1 = &uart1; + }; + src@101e0000 { /* 
These chrystal drivers are not used on this board */ disable-sxtalo; diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi index 9a5f2ba139b7..ef794a33b4dc 100644 --- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi +++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi @@ -757,6 +757,7 @@ clock-names = "uartclk", "apb_pclk"; pinctrl-names = "default"; pinctrl-0 = <&uart0_default_mux>; + status = "disabled"; }; uart1: uart@101fb000 { diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index d78c12e7cb5e..486cc4ded190 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct device_node *np, * registers. This address is needed early so the OCP registers that * are part of the device's address space can be ioremapped properly. * + * If SYSC access is not needed, the registers will not be remapped + * and non-availability of MPU access is not treated as an error. + * * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and * -ENXIO on absent or invalid register target address space. */ @@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, _save_mpu_port_index(oh); + /* if we don't need sysc access we don't need to ioremap */ + if (!oh->class->sysc) + return 0; + + /* we can't continue without MPU PORT if we need sysc access */ if (oh->_int_flags & _HWMOD_NO_MPU_PORT) return -ENXIO; @@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, oh->name); /* Extract the IO space from device tree blob */ - if (!np) + if (!np) { + pr_err("omap_hwmod: %s: no dt node\n", oh->name); return -ENXIO; + } va_start = of_iomap(np, index + oh->mpu_rt_idx); } else { @@ -2456,13 +2466,11 @@ static int __init _init(struct omap_hwmod *oh, void *data) oh->name, np->name); } - if (oh->class->sysc) { - r = _init_mpu_rt_base(oh, NULL, index, np); - if (r < 0) { - WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", - oh->name); - return 0; - } + r = _init_mpu_rt_base(oh, NULL, index, np); + if (r < 0) { + WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", + oh->name); + return 0; } r = _init_clocks(oh, NULL); diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index 2606c6608bd8..562247bced49 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c @@ -827,8 +827,7 @@ static struct omap_hwmod_class_sysconfig dra7xx_gpmc_sysc = { .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), - .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | - SIDLE_SMART_WKUP), + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; @@ -844,7 +843,7 @@ static struct omap_hwmod dra7xx_gpmc_hwmod = { .class = &dra7xx_gpmc_hwmod_class, .clkdm_name = "l3main1_clkdm", /* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */ - .flags = HWMOD_SWSUP_SIDLE | DEBUG_OMAP_GPMC_HWMOD_FLAGS, + .flags = DEBUG_OMAP_GPMC_HWMOD_FLAGS, .main_clk = "l3_iclk_div", .prcm = { .omap4 = { diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index 0689c3fb56e3..58093edeea2e 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi @@ -823,7 +823,7 @@ device_type = "dma"; reg = <0x0 0x1f270000 0x0 0x10000>, <0x0 
0x1f200000 0x0 0x10000>, - <0x0 0x1b008000 0x0 0x2000>, + <0x0 0x1b000000 0x0 0x400000>, <0x0 0x1054a000 0x0 0x100>; interrupts = <0x0 0x82 0x4>, <0x0 0xb8 0x4>, diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 1670f15ef69e..948f0ad2de23 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) * Other callers might not initialize the si_lsb field, * so check explicitely for the right codes here. */ - if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) + if (from->si_signo == SIGBUS && + (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)) err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); #endif break; @@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) { - memset(to, 0, sizeof *to); - if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) || copy_from_user(to->_sifields._pad, from->_sifields._pad, SI_PAD_SIZE)) diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index ec37ab3f524f..97bc68f4c689 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -199,16 +199,15 @@ up_fail: */ void update_vsyscall(struct timekeeper *tk) { - struct timespec xtime_coarse; u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter"); ++vdso_data->tb_seq_count; smp_wmb(); - xtime_coarse = __current_kernel_time(); vdso_data->use_syscall = use_syscall; - vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec; - vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec; + vdso_data->xtime_coarse_sec = tk->xtime_sec; + vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >> + tk->tkr_mono.shift; vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index cee5f93e5712..199a8357838c 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -151,7 +151,6 @@ config BMIPS_GENERIC select BCM7120_L2_IRQ select BRCMSTB_L2_IRQ select IRQ_MIPS_CPU - select RAW_IRQ_ACCESSORS select DMA_NONCOHERENT select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c index 01a644f174dd..1ba21204ebe0 100644 --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c @@ -190,6 +190,7 @@ int get_c0_perfcount_int(void) { return ATH79_MISC_IRQ(5); } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 56f5d080ef9d..b7fa9ae28c36 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -42,7 +42,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id) cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action); if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h deleted file mode 100644 index 11d3b572b1b3..000000000000 --- a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H -#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H - -#include <asm/bmips.h> - -#define plat_post_dma_flush bmips_post_dma_flush - 
-#include <asm/mach-generic/dma-coherence.h> - -#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */ diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 9d8106758142..ae8569475264 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) * Make sure the buddy is global too (if it's !none, * it better already be global) */ +#ifdef CONFIG_SMP + /* + * For SMP, multiple CPUs can race, so we need to do + * this atomically. + */ +#ifdef CONFIG_64BIT +#define LL_INSN "lld" +#define SC_INSN "scd" +#else /* CONFIG_32BIT */ +#define LL_INSN "ll" +#define SC_INSN "sc" +#endif + unsigned long page_global = _PAGE_GLOBAL; + unsigned long tmp; + + __asm__ __volatile__ ( + " .set push\n" + " .set noreorder\n" + "1: " LL_INSN " %[tmp], %[buddy]\n" + " bnez %[tmp], 2f\n" + " or %[tmp], %[tmp], %[global]\n" + " " SC_INSN " %[tmp], %[buddy]\n" + " beqz %[tmp], 1b\n" + " nop\n" + "2:\n" + " .set pop" + : [buddy] "+m" (buddy->pte), + [tmp] "=&r" (tmp) + : [global] "r" (page_global)); +#else /* !CONFIG_SMP */ if (pte_none(*buddy)) pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL; +#endif /* CONFIG_SMP */ } #endif } diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index 16f1ea9ab191..03722d4326a1 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -83,8 +83,6 @@ static inline void __cpu_die(unsigned int cpu) extern void play_dead(void); #endif -extern asmlinkage void smp_call_function_interrupt(void); - static inline void arch_send_call_function_single_ipi(int cpu) { extern struct plat_smp_ops *mp_ops; /* private */ diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index 28d6d9364bd1..a71da576883c 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h @@ -152,6 +152,31 @@ .set noreorder bltz k0, 8f move k1, sp +#ifdef CONFIG_EVA + /* + * Flush interAptiv's Return Prediction Stack (RPS) by writing + * EntryHi. Toggling Config7.RPS is slower and less portable. + * + * The RPS isn't automatically flushed when exceptions are + * taken, which can result in kernel mode speculative accesses + * to user addresses if the RPS mispredicts. That's harmless + * when user and kernel share the same address space, but with + * EVA the same user segments may be unmapped to kernel mode, + * even containing sensitive MMIO regions or invalid memory. + * + * This can happen when the kernel sets the return address to + * ret_from_* and jr's to the exception handler, which looks + * more like a tail call than a function call. If nested calls + * don't evict the last user address in the RPS, it will + * mispredict the return and fetch from a user controlled + * address into the icache. + * + * More recent EVA-capable cores with MAAR to restrict + * speculative accesses aren't affected. + */ + MFC0 k0, CP0_ENTRYHI + MTC0 k0, CP0_ENTRYHI +#endif .set reorder /* Called from user mode, new stack. 
*/ get_saved_sp diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index 3e4491aa6d6b..789d7bf4fef3 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, unsigned long __user *user_mask_ptr) { unsigned int real_len; - cpumask_t mask; + cpumask_t allowed, mask; int retval; struct task_struct *p; @@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, if (retval) goto out_unlock; - cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask); + cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed); + cpumask_and(&mask, &allowed, cpu_active_mask); out_unlock: read_unlock(&tasklist_lock); diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index b130033838ba..5fcec3032f38 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -38,7 +38,7 @@ char *mips_get_machine_name(void) return mips_machine_name; } -#ifdef CONFIG_OF +#ifdef CONFIG_USE_OF void __init early_init_dt_add_memory_arch(u64 base, u64 size) { return add_memory_region(base, size, BOOT_MEM_RAM); diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S index 74bab9ddd0e1..c6bbf2165051 100644 --- a/arch/mips/kernel/relocate_kernel.S +++ b/arch/mips/kernel/relocate_kernel.S @@ -24,7 +24,7 @@ LEAF(relocate_new_kernel) process_entry: PTR_L s2, (s0) - PTR_ADD s0, s0, SZREG + PTR_ADDIU s0, s0, SZREG /* * In case of a kdump/crash kernel, the indirection page is not @@ -61,9 +61,9 @@ copy_word: /* copy page word by word */ REG_L s5, (s2) REG_S s5, (s4) - PTR_ADD s4, s4, SZREG - PTR_ADD s2, s2, SZREG - LONG_SUB s6, s6, 1 + PTR_ADDIU s4, s4, SZREG + PTR_ADDIU s2, s2, SZREG + LONG_ADDIU s6, s6, -1 beq s6, zero, process_entry b copy_word b process_entry diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 19a7705f2a01..5d7f2634996f 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) { - memset(to, 0, sizeof *to); - if (copy_from_user(to, from, 3*sizeof(int)) || copy_from_user(to->_sifields._pad, from->_sifields._pad, SI_PAD_SIZE32)) diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 336708ae5c5b..78cf8c2f1de0 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -284,7 +284,7 @@ static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id) if (action == 0) scheduler_ipi(); else - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } @@ -336,7 +336,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id) if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index d0744cc77ea7..a31896c33716 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -192,16 +192,6 @@ asmlinkage void start_secondary(void) cpu_startup_entry(CPUHP_ONLINE); } -/* - * Call into both interrupt handlers, as we share the IPI for them - */ -void __irq_entry smp_call_function_interrupt(void) -{ - irq_enter(); - generic_smp_call_function_interrupt(); - irq_exit(); -} - static void 
stop_this_cpu(void *dummy) { /* diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index e207a43b5f8f..8ea28e6ab37d 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task, void show_stack(struct task_struct *task, unsigned long *sp) { struct pt_regs regs; + mm_segment_t old_fs = get_fs(); if (sp) { regs.regs[29] = (unsigned long)sp; regs.regs[31] = 0; @@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp) prepare_frametrace(®s); } } + /* + * show_stack() deals exclusively with kernel mode, so be sure to access + * the stack in the kernel (not user) address space. + */ + set_fs(KERNEL_DS); show_stacktrace(task, ®s); + set_fs(old_fs); } static void show_code(unsigned int __user *pc) @@ -1519,6 +1526,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs) const int field = 2 * sizeof(unsigned long); int multi_match = regs->cp0_status & ST0_TS; enum ctx_state prev_state; + mm_segment_t old_fs = get_fs(); prev_state = exception_enter(); show_regs(regs); @@ -1540,8 +1548,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs) dump_tlb_all(); } + if (!user_mode(regs)) + set_fs(KERNEL_DS); + show_code((unsigned int __user *) regs->cp0_epc); + set_fs(old_fs); + /* * Some chips may have other causes of machine check (e.g. SB1 * graduation timer) diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index af84bef0c90d..eb3efd137fd1 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -438,7 +438,7 @@ do { \ : "memory"); \ } while(0) -#define StoreDW(addr, value, res) \ +#define _StoreDW(addr, value, res) \ do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index 6ab10573490d..2c218c3bbca5 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -293,7 +293,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } @@ -466,6 +466,7 @@ int get_c0_perfcount_int(void) { return ltq_perfcount_irq; } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c index 509877c6e9d9..1a4738a8f2d3 100644 --- a/arch/mips/loongson64/loongson-3/smp.c +++ b/arch/mips/loongson64/loongson-3/smp.c @@ -266,8 +266,11 @@ void loongson3_ipi_interrupt(struct pt_regs *regs) if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); - if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + if (action & SMP_CALL_FUNCTION) { + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); + } if (action & SMP_ASK_C0COUNT) { BUG_ON(cpu != 0); diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 77d96db8253c..aab218c36e0d 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -160,18 +160,18 @@ static inline void setup_protection_map(void) protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); - protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); + protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT); protection_map[5] = 
__pgprot(_page_cachable_default | _PAGE_PRESENT); - protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); + protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT); protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT); protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ); protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE); - protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); + protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT); protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT); - protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ); + protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE); protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE); } else { diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 36c0f26fac6b..852a41c6da45 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -133,7 +133,8 @@ good_area: #endif goto bad_area; } - if (!(vma->vm_flags & VM_READ)) { + if (!(vma->vm_flags & VM_READ) && + exception_epc(regs) != address) { #if 0 pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n", raw_smp_processor_id(), diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index d1392f8f5811..fa8f591f3713 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c @@ -222,7 +222,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index 5625b190edc0..b7bf721eabf5 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -154,6 +154,7 @@ int get_c0_perfcount_int(void) return mips_cpu_perf_irq; } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { @@ -171,14 +172,17 @@ unsigned int get_c0_compare_int(void) static void __init init_rtc(void) { - /* stop the clock whilst setting it up */ - CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL); + unsigned char freq, ctrl; - /* 32KHz time base */ - CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT); + /* Set 32KHz time base if not already set */ + freq = CMOS_READ(RTC_FREQ_SELECT); + if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ) + CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT); - /* start the clock */ - CMOS_WRITE(RTC_24H, RTC_CONTROL); + /* Ensure SET bit is clear so RTC can run */ + ctrl = CMOS_READ(RTC_CONTROL); + if (ctrl & RTC_SET) + CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL); } void __init plat_time_init(void) diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c index e1d69895fb1d..a120b7a5a8fe 100644 --- a/arch/mips/mti-sead3/sead3-time.c +++ b/arch/mips/mti-sead3/sead3-time.c @@ -77,6 +77,7 @@ int get_c0_perfcount_int(void) return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; return -1; } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { diff --git a/arch/mips/netlogic/common/smp.c 
b/arch/mips/netlogic/common/smp.c index dc3e327fbbac..f5fff228b347 100644 --- a/arch/mips/netlogic/common/smp.c +++ b/arch/mips/netlogic/common/smp.c @@ -86,7 +86,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc) { clear_c0_eimr(irq); ack_c0_eirr(irq); - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); set_c0_eimr(irq); } diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c index 42181c7105df..f8d3e081b2eb 100644 --- a/arch/mips/paravirt/paravirt-smp.c +++ b/arch/mips/paravirt/paravirt-smp.c @@ -114,7 +114,7 @@ static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id) static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c index 7c73fcb92a10..8a377346f0ca 100644 --- a/arch/mips/pistachio/time.c +++ b/arch/mips/pistachio/time.c @@ -26,6 +26,7 @@ int get_c0_perfcount_int(void) { return gic_get_c0_perfcount_int(); } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); int get_c0_fdc_int(void) { diff --git a/arch/mips/pmcs-msp71xx/msp_smp.c b/arch/mips/pmcs-msp71xx/msp_smp.c index 10170580a2de..ffa0f7101a97 100644 --- a/arch/mips/pmcs-msp71xx/msp_smp.c +++ b/arch/mips/pmcs-msp71xx/msp_smp.c @@ -44,7 +44,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c index 53707aacc0f8..8c624a8b9ea2 100644 --- a/arch/mips/ralink/irq.c +++ b/arch/mips/ralink/irq.c @@ -89,6 +89,7 @@ int get_c0_perfcount_int(void) { return rt_perfcount_irq; } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c index 3fbaef97a1b8..16ec4e12daa3 100644 --- a/arch/mips/sgi-ip27/ip27-irq.c +++ b/arch/mips/sgi-ip27/ip27-irq.c @@ -107,10 +107,14 @@ static void ip27_do_irq_mask0(void) scheduler_ipi(); } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ); - smp_call_function_interrupt(); + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ); - smp_call_function_interrupt(); + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); } else #endif { diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c index af7d44edd9a8..4c71aea25663 100644 --- a/arch/mips/sibyte/bcm1480/smp.c +++ b/arch/mips/sibyte/bcm1480/smp.c @@ -29,8 +29,6 @@ #include <asm/sibyte/bcm1480_regs.h> #include <asm/sibyte/bcm1480_int.h> -extern void smp_call_function_interrupt(void); - /* * These are routines for dealing with the bcm1480 smp capabilities * independent of board/firmware @@ -184,6 +182,9 @@ void bcm1480_mailbox_interrupt(void) if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); - if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + if (action & SMP_CALL_FUNCTION) { + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); + } } diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c index c0c4b3f88a08..1cf66f5ff23d 100644 --- a/arch/mips/sibyte/sb1250/smp.c +++ b/arch/mips/sibyte/sb1250/smp.c @@ -172,6 +172,9 @@ void sb1250_mailbox_interrupt(void) if (action 
& SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); - if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + if (action & SMP_CALL_FUNCTION) { + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); + } } diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index d3a831ac0f92..da50e0c9c57e 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s) int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) { - memset(to, 0, sizeof *to); - if (copy_from_user(to, from, 3*sizeof(int)) || copy_from_user(to->_sifields._pad, from->_sifields._pad, SI_PAD_SIZE32)) diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index 5cf5e6ea213b..7cf0df859d05 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -1478,7 +1478,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe) } /* Unmask the event */ - if (eeh_enabled()) + if (ret == EEH_NEXT_ERR_NONE && eeh_enabled()) enable_irq(eeh_event_irq); return ret; diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 5738d315248b..85cbc96eff6c 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -2220,7 +2220,7 @@ static void pnv_pci_ioda_setup_opal_tce_kill(struct pnv_phb *phb) static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift, unsigned levels, unsigned long limit, - unsigned long *current_offset) + unsigned long *current_offset, unsigned long *total_allocated) { struct page *tce_mem = NULL; __be64 *addr, *tmp; @@ -2236,6 +2236,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift, } addr = page_address(tce_mem); memset(addr, 0, allocated); + *total_allocated += allocated; --levels; if (!levels) { @@ -2245,7 +2246,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift, for (i = 0; i < entries; ++i) { tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift, - levels, limit, current_offset); + levels, limit, current_offset, total_allocated); if (!tmp) break; @@ -2267,7 +2268,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, struct iommu_table *tbl) { void *addr; - unsigned long offset = 0, level_shift; + unsigned long offset = 0, level_shift, total_allocated = 0; const unsigned window_shift = ilog2(window_size); unsigned entries_shift = window_shift - page_shift; unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT); @@ -2286,7 +2287,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, /* Allocate TCE table */ addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, - levels, tce_table_size, &offset); + levels, tce_table_size, &offset, &total_allocated); /* addr==NULL means that the first level allocation failed */ if (!addr) @@ -2308,7 +2309,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, page_shift); tbl->it_level_size = 1ULL << (level_shift - 3); tbl->it_indirect_levels = levels - 1; - tbl->it_allocated_size = offset; + tbl->it_allocated_size = total_allocated; pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n", window_size, tce_table_size, bus_offset); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 2078f92d15ac..f32f843a3631 100644 --- a/arch/s390/kvm/kvm-s390.c +++ 
b/arch/s390/kvm/kvm-s390.c @@ -1742,10 +1742,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu) static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) { - if (!vcpu->requests) - return 0; retry: kvm_s390_vcpu_request_handled(vcpu); + if (!vcpu->requests) + return 0; /* * We use MMU_RELOAD just to re-arm the ipte notifier for the * guest prefix page. gmap_ipte_notify will wait on the ptl lock. diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h index 1f0aa2024e94..6424249d5f78 100644 --- a/arch/sparc/include/asm/visasm.h +++ b/arch/sparc/include/asm/visasm.h @@ -28,16 +28,10 @@ * Must preserve %o5 between VISEntryHalf and VISExitHalf */ #define VISEntryHalf \ - rd %fprs, %o5; \ - andcc %o5, FPRS_FEF, %g0; \ - be,pt %icc, 297f; \ - sethi %hi(298f), %g7; \ - sethi %hi(VISenterhalf), %g1; \ - jmpl %g1 + %lo(VISenterhalf), %g0; \ - or %g7, %lo(298f), %g7; \ - clr %o5; \ -297: wr %o5, FPRS_FEF, %fprs; \ -298: + VISEntry + +#define VISExitHalf \ + VISExit #define VISEntryHalfFast(fail_label) \ rd %fprs, %o5; \ @@ -47,7 +41,7 @@ ba,a,pt %xcc, fail_label; \ 297: wr %o5, FPRS_FEF, %fprs; -#define VISExitHalf \ +#define VISExitHalfFast \ wr %o5, 0, %fprs; #ifndef __ASSEMBLY__ diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S index 140527a20e7d..83aeeb1dffdb 100644 --- a/arch/sparc/lib/NG4memcpy.S +++ b/arch/sparc/lib/NG4memcpy.S @@ -240,8 +240,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ add %o0, 0x40, %o0 bne,pt %icc, 1b LOAD(prefetch, %g1 + 0x200, #n_reads_strong) +#ifdef NON_USER_COPY + VISExitHalfFast +#else VISExitHalf - +#endif brz,pn %o2, .Lexit cmp %o2, 19 ble,pn %icc, .Lsmall_unaligned diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S index b320ae9e2e2e..a063d84336d6 100644 --- a/arch/sparc/lib/VISsave.S +++ b/arch/sparc/lib/VISsave.S @@ -44,9 +44,8 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3 stx %g3, [%g6 + TI_GSR] 2: add %g6, %g1, %g3 - cmp %o5, FPRS_DU - be,pn %icc, 6f - sll %g1, 3, %g1 + mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5 + sll %g1, 3, %g1 stb %o5, [%g3 + TI_FPSAVED] rd %gsr, %g2 add %g6, %g1, %g3 @@ -80,65 +79,3 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3 .align 32 80: jmpl %g7 + %g0, %g0 nop - -6: ldub [%g3 + TI_FPSAVED], %o5 - or %o5, FPRS_DU, %o5 - add %g6, TI_FPREGS+0x80, %g2 - stb %o5, [%g3 + TI_FPSAVED] - - sll %g1, 5, %g1 - add %g6, TI_FPREGS+0xc0, %g3 - wr %g0, FPRS_FEF, %fprs - membar #Sync - stda %f32, [%g2 + %g1] ASI_BLK_P - stda %f48, [%g3 + %g1] ASI_BLK_P - membar #Sync - ba,pt %xcc, 80f - nop - - .align 32 -80: jmpl %g7 + %g0, %g0 - nop - - .align 32 -VISenterhalf: - ldub [%g6 + TI_FPDEPTH], %g1 - brnz,a,pn %g1, 1f - cmp %g1, 1 - stb %g0, [%g6 + TI_FPSAVED] - stx %fsr, [%g6 + TI_XFSR] - clr %o5 - jmpl %g7 + %g0, %g0 - wr %g0, FPRS_FEF, %fprs - -1: bne,pn %icc, 2f - srl %g1, 1, %g1 - ba,pt %xcc, vis1 - sub %g7, 8, %g7 -2: addcc %g6, %g1, %g3 - sll %g1, 3, %g1 - andn %o5, FPRS_DU, %g2 - stb %g2, [%g3 + TI_FPSAVED] - - rd %gsr, %g2 - add %g6, %g1, %g3 - stx %g2, [%g3 + TI_GSR] - add %g6, %g1, %g2 - stx %fsr, [%g2 + TI_XFSR] - sll %g1, 5, %g1 -3: andcc %o5, FPRS_DL, %g0 - be,pn %icc, 4f - add %g6, TI_FPREGS, %g2 - - add %g6, TI_FPREGS+0x40, %g3 - membar #Sync - stda %f0, [%g2 + %g1] ASI_BLK_P - stda %f16, [%g3 + %g1] ASI_BLK_P - membar #Sync - ba,pt %xcc, 4f - nop - - .align 32 -4: and %o5, FPRS_DU, %o5 - jmpl %g7 + %g0, %g0 - wr %o5, FPRS_FEF, %fprs diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c index 1d649a95660c..8069ce12f20b 100644 --- a/arch/sparc/lib/ksyms.c +++ b/arch/sparc/lib/ksyms.c @@ 
-135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page); void VISenter(void); EXPORT_SYMBOL(VISenter); -/* CRYPTO code needs this */ -void VISenterhalf(void); -EXPORT_SYMBOL(VISenterhalf); - extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, unsigned long *); diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c index e8c2c04143cd..c667e104a0c2 100644 --- a/arch/tile/kernel/compat_signal.c +++ b/arch/tile/kernel/compat_signal.c @@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo))) return -EFAULT; - memset(to, 0, sizeof(*to)); - err = __get_user(to->si_signo, &from->si_signo); err |= __get_user(to->si_errno, &from->si_errno); err |= __get_user(to->si_code, &from->si_code); diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 2c82bd150d43..7d69afd8b6fa 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -1193,6 +1193,10 @@ static efi_status_t setup_e820(struct boot_params *params, unsigned int e820_type = 0; unsigned long m = efi->efi_memmap; +#ifdef CONFIG_X86_64 + m |= (u64)efi->efi_memmap_hi << 32; +#endif + d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size)); switch (d->type) { case EFI_RESERVED_TYPE: diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index a0bf89fd2647..4e10d73cf018 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -280,21 +280,6 @@ static inline void clear_LDT(void) set_ldt(NULL, 0); } -/* - * load one particular LDT into the current CPU - */ -static inline void load_LDT_nolock(mm_context_t *pc) -{ - set_ldt(pc->ldt, pc->size); -} - -static inline void load_LDT(mm_context_t *pc) -{ - preempt_disable(); - load_LDT_nolock(pc); - preempt_enable(); -} - static inline unsigned long get_desc_base(const struct desc_struct *desc) { return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h index 09b9620a73b4..364d27481a52 100644 --- a/arch/x86/include/asm/mmu.h +++ b/arch/x86/include/asm/mmu.h @@ -9,8 +9,7 @@ * we put the segment information here. */ typedef struct { - void *ldt; - int size; + struct ldt_struct *ldt; #ifdef CONFIG_X86_64 /* True if mm supports a task running in 32 bit compatibility mode. */ diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 804a3a6030ca..984abfe47edc 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -34,6 +34,50 @@ static inline void load_mm_cr4(struct mm_struct *mm) {} #endif /* + * ldt_structs can be allocated, used, and freed, but they are never + * modified while live. + */ +struct ldt_struct { + /* + * Xen requires page-aligned LDTs with special permissions. This is + * needed to prevent us from installing evil descriptors such as + * call gates. On native, we could merge the ldt_struct and LDT + * allocations, but it's not worth trying to optimize. + */ + struct desc_struct *entries; + int size; +}; + +static inline void load_mm_ldt(struct mm_struct *mm) +{ + struct ldt_struct *ldt; + + /* lockless_dereference synchronizes with smp_store_release */ + ldt = lockless_dereference(mm->context.ldt); + + /* + * Any change to mm->context.ldt is followed by an IPI to all + * CPUs with the mm active. 
The LDT will not be freed until + * after the IPI is handled by all such CPUs. This means that, + * if the ldt_struct changes before we return, the values we see + * will be safe, and the new values will be loaded before we run + * any user code. + * + * NB: don't try to convert this to use RCU without extreme care. + * We would still need IRQs off, because we don't want to change + * the local LDT after an IPI loaded a newer value than the one + * that we can see. + */ + + if (unlikely(ldt)) + set_ldt(ldt->entries, ldt->size); + else + clear_LDT(); + + DEBUG_LOCKS_WARN_ON(preemptible()); +} + +/* * Used for LDT copy/destruction. */ int init_new_context(struct task_struct *tsk, struct mm_struct *mm); @@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, * was called and then modify_ldt changed * prev->context.ldt but suppressed an IPI to this CPU. * In this case, prev->context.ldt != NULL, because we - * never free an LDT while the mm still exists. That - * means that next->context.ldt != prev->context.ldt, - * because mms never share an LDT. + * never set context.ldt to NULL while the mm still + * exists. That means that next->context.ldt != + * prev->context.ldt, because mms never share an LDT. */ if (unlikely(prev->context.ldt != next->context.ldt)) - load_LDT_nolock(&next->context); + load_mm_ldt(next); } #ifdef CONFIG_SMP else { @@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, load_cr3(next->pgd); trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); load_mm_cr4(next); - load_LDT_nolock(&next->context); + load_mm_ldt(next); } } #endif diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h index 6fe6b182c998..9dfce4e0417d 100644 --- a/arch/x86/include/asm/sigcontext.h +++ b/arch/x86/include/asm/sigcontext.h @@ -57,9 +57,9 @@ struct sigcontext { unsigned long ip; unsigned long flags; unsigned short cs; - unsigned short __pad2; /* Was called gs, but was always zero. */ - unsigned short __pad1; /* Was called fs, but was always zero. */ - unsigned short ss; + unsigned short gs; + unsigned short fs; + unsigned short __pad0; unsigned long err; unsigned long trapno; unsigned long oldmask; diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h index 0e8a973de9ee..40836a9a7250 100644 --- a/arch/x86/include/uapi/asm/sigcontext.h +++ b/arch/x86/include/uapi/asm/sigcontext.h @@ -177,24 +177,9 @@ struct sigcontext { __u64 rip; __u64 eflags; /* RFLAGS */ __u16 cs; - - /* - * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"), - * Linux saved and restored fs and gs in these slots. This - * was counterproductive, as fsbase and gsbase were never - * saved, so arch_prctl was presumably unreliable. - * - * If these slots are ever needed for any other purpose, there - * is some risk that very old 64-bit binaries could get - * confused. I doubt that many such binaries still work, - * though, since the same patch in 2.5.64 also removed the - * 64-bit set_thread_area syscall, so it appears that there is - * no TLS API that works in both pre- and post-2.5.64 kernels. - */ - __u16 __pad2; /* Was gs. */ - __u16 __pad1; /* Was fs. 
*/ - - __u16 ss; + __u16 gs; + __u16 fs; + __u16 __pad0; __u64 err; __u64 trapno; __u64 oldmask; diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 845dc0df2002..206052e55517 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -943,7 +943,7 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info) */ if (irq < nr_legacy_irqs() && data->count == 1) { if (info->ioapic_trigger != data->trigger) - mp_register_handler(irq, data->trigger); + mp_register_handler(irq, info->ioapic_trigger); data->entry.trigger = data->trigger = info->ioapic_trigger; data->entry.polarity = data->polarity = info->ioapic_polarity; } diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 922c5e0cea4c..cb9e5df42dd2 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1410,7 +1410,7 @@ void cpu_init(void) load_sp0(t, ¤t->thread); set_tss_desc(cpu, t); load_TR_desc(); - load_LDT(&init_mm.context); + load_mm_ldt(&init_mm); clear_all_debug_regs(); dbg_restore_debug_regs(); @@ -1459,7 +1459,7 @@ void cpu_init(void) load_sp0(t, thread); set_tss_desc(cpu, t); load_TR_desc(); - load_LDT(&init_mm.context); + load_mm_ldt(&init_mm); t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 3658de47900f..9469dfa55607 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -2179,21 +2179,25 @@ static unsigned long get_segment_base(unsigned int segment) int idx = segment >> 3; if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) { + struct ldt_struct *ldt; + if (idx > LDT_ENTRIES) return 0; - if (idx > current->active_mm->context.size) + /* IRQs are off, so this synchronizes with smp_store_release */ + ldt = lockless_dereference(current->active_mm->context.ldt); + if (!ldt || idx > ldt->size) return 0; - desc = current->active_mm->context.ldt; + desc = &ldt->entries[idx]; } else { if (idx > GDT_ENTRIES) return 0; - desc = raw_cpu_ptr(gdt_page.gdt); + desc = raw_cpu_ptr(gdt_page.gdt) + idx; } - return get_desc_base(desc + idx); + return get_desc_base(desc); } #ifdef CONFIG_COMPAT diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index c37886d759cc..2bcc0525f1c1 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -12,6 +12,7 @@ #include <linux/string.h> #include <linux/mm.h> #include <linux/smp.h> +#include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/uaccess.h> @@ -20,82 +21,82 @@ #include <asm/mmu_context.h> #include <asm/syscalls.h> -#ifdef CONFIG_SMP +/* context.lock is held for us, so we don't need any locking. */ static void flush_ldt(void *current_mm) { - if (current->active_mm == current_mm) - load_LDT(¤t->active_mm->context); + mm_context_t *pc; + + if (current->active_mm != current_mm) + return; + + pc = ¤t->active_mm->context; + set_ldt(pc->ldt->entries, pc->ldt->size); } -#endif -static int alloc_ldt(mm_context_t *pc, int mincount, int reload) +/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. 
*/ +static struct ldt_struct *alloc_ldt_struct(int size) { - void *oldldt, *newldt; - int oldsize; - - if (mincount <= pc->size) - return 0; - oldsize = pc->size; - mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) & - (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1)); - if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE) - newldt = vmalloc(mincount * LDT_ENTRY_SIZE); + struct ldt_struct *new_ldt; + int alloc_size; + + if (size > LDT_ENTRIES) + return NULL; + + new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL); + if (!new_ldt) + return NULL; + + BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct)); + alloc_size = size * LDT_ENTRY_SIZE; + + /* + * Xen is very picky: it requires a page-aligned LDT that has no + * trailing nonzero bytes in any page that contains LDT descriptors. + * Keep it simple: zero the whole allocation and never allocate less + * than PAGE_SIZE. + */ + if (alloc_size > PAGE_SIZE) + new_ldt->entries = vzalloc(alloc_size); else - newldt = (void *)__get_free_page(GFP_KERNEL); - - if (!newldt) - return -ENOMEM; + new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (oldsize) - memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE); - oldldt = pc->ldt; - memset(newldt + oldsize * LDT_ENTRY_SIZE, 0, - (mincount - oldsize) * LDT_ENTRY_SIZE); + if (!new_ldt->entries) { + kfree(new_ldt); + return NULL; + } - paravirt_alloc_ldt(newldt, mincount); + new_ldt->size = size; + return new_ldt; +} -#ifdef CONFIG_X86_64 - /* CHECKME: Do we really need this ? */ - wmb(); -#endif - pc->ldt = newldt; - wmb(); - pc->size = mincount; - wmb(); - - if (reload) { -#ifdef CONFIG_SMP - preempt_disable(); - load_LDT(pc); - if (!cpumask_equal(mm_cpumask(current->mm), - cpumask_of(smp_processor_id()))) - smp_call_function(flush_ldt, current->mm, 1); - preempt_enable(); -#else - load_LDT(pc); -#endif - } - if (oldsize) { - paravirt_free_ldt(oldldt, oldsize); - if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE) - vfree(oldldt); - else - put_page(virt_to_page(oldldt)); - } - return 0; +/* After calling this, the LDT is immutable. */ +static void finalize_ldt_struct(struct ldt_struct *ldt) +{ + paravirt_alloc_ldt(ldt->entries, ldt->size); } -static inline int copy_ldt(mm_context_t *new, mm_context_t *old) +/* context.lock is held */ +static void install_ldt(struct mm_struct *current_mm, + struct ldt_struct *ldt) { - int err = alloc_ldt(new, old->size, 0); - int i; + /* Synchronizes with lockless_dereference in load_mm_ldt. */ + smp_store_release(¤t_mm->context.ldt, ldt); + + /* Activate the LDT for all CPUs using current_mm. 
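Taken together, alloc_ldt_struct(), finalize_ldt_struct() and install_ldt() turn every LDT modification into copy-and-publish: build a fresh table, make it immutable, publish it with a release store plus an IPI broadcast, then free the old one. A condensed, hypothetical sketch of that write side (update_one_entry() is an illustrative wrapper, error paths trimmed; write_ldt() further below follows the same sequence):

    /* Hypothetical illustration of the alloc -> finalize -> install flow. */
    static int update_one_entry(struct mm_struct *mm, int idx, struct desc_struct val)
    {
        struct ldt_struct *old_ldt = mm->context.ldt;   /* context.lock held */
        int oldsize = old_ldt ? old_ldt->size : 0;
        struct ldt_struct *new_ldt = alloc_ldt_struct(max(idx + 1, oldsize));

        if (!new_ldt)
            return -ENOMEM;
        if (old_ldt)
            memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
        new_ldt->entries[idx] = val;
        finalize_ldt_struct(new_ldt);   /* from here on the table is immutable */
        install_ldt(mm, new_ldt);       /* smp_store_release() + IPI to mm users */
        free_ldt_struct(old_ldt);       /* safe: every CPU reloaded via the IPI */
        return 0;
    }

Readers such as load_mm_ldt() never take context.lock; they rely solely on the lockless_dereference()/smp_store_release() pairing shown above.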
*/ + on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true); +} - if (err < 0) - return err; +static void free_ldt_struct(struct ldt_struct *ldt) +{ + if (likely(!ldt)) + return; - for (i = 0; i < old->size; i++) - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); - return 0; + paravirt_free_ldt(ldt->entries, ldt->size); + if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE) + vfree(ldt->entries); + else + kfree(ldt->entries); + kfree(ldt); } /* @@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old) */ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { + struct ldt_struct *new_ldt; struct mm_struct *old_mm; int retval = 0; mutex_init(&mm->context.lock); - mm->context.size = 0; old_mm = current->mm; - if (old_mm && old_mm->context.size > 0) { - mutex_lock(&old_mm->context.lock); - retval = copy_ldt(&mm->context, &old_mm->context); - mutex_unlock(&old_mm->context.lock); + if (!old_mm) { + mm->context.ldt = NULL; + return 0; } + + mutex_lock(&old_mm->context.lock); + if (!old_mm->context.ldt) { + mm->context.ldt = NULL; + goto out_unlock; + } + + new_ldt = alloc_ldt_struct(old_mm->context.ldt->size); + if (!new_ldt) { + retval = -ENOMEM; + goto out_unlock; + } + + memcpy(new_ldt->entries, old_mm->context.ldt->entries, + new_ldt->size * LDT_ENTRY_SIZE); + finalize_ldt_struct(new_ldt); + + mm->context.ldt = new_ldt; + +out_unlock: + mutex_unlock(&old_mm->context.lock); return retval; } @@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) */ void destroy_context(struct mm_struct *mm) { - if (mm->context.size) { -#ifdef CONFIG_X86_32 - /* CHECKME: Can this ever happen ? */ - if (mm == current->active_mm) - clear_LDT(); -#endif - paravirt_free_ldt(mm->context.ldt, mm->context.size); - if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE) - vfree(mm->context.ldt); - else - put_page(virt_to_page(mm->context.ldt)); - mm->context.size = 0; - } + free_ldt_struct(mm->context.ldt); + mm->context.ldt = NULL; } static int read_ldt(void __user *ptr, unsigned long bytecount) { - int err; + int retval; unsigned long size; struct mm_struct *mm = current->mm; - if (!mm->context.size) - return 0; + mutex_lock(&mm->context.lock); + + if (!mm->context.ldt) { + retval = 0; + goto out_unlock; + } + if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES) bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES; - mutex_lock(&mm->context.lock); - size = mm->context.size * LDT_ENTRY_SIZE; + size = mm->context.ldt->size * LDT_ENTRY_SIZE; if (size > bytecount) size = bytecount; - err = 0; - if (copy_to_user(ptr, mm->context.ldt, size)) - err = -EFAULT; - mutex_unlock(&mm->context.lock); - if (err < 0) - goto error_return; + if (copy_to_user(ptr, mm->context.ldt->entries, size)) { + retval = -EFAULT; + goto out_unlock; + } + if (size != bytecount) { - /* zero-fill the rest */ - if (clear_user(ptr + size, bytecount - size) != 0) { - err = -EFAULT; - goto error_return; + /* Zero-fill the rest and pretend we read bytecount bytes. 
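In other words, a short LDT is padded rather than short-read. A hypothetical user-space view of that contract (a fragment for illustration; glibc provides no modify_ldt() wrapper, hence the raw syscall(2)):

    /* Hypothetical demo: func 0 reads the LDT; the tail of buf is zeroed
     * and the full requested size is reported back. */
    #include <asm/ldt.h>        /* LDT_ENTRY_SIZE, from the uapi header */
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned char buf[LDT_ENTRY_SIZE * 4];
        long n = syscall(SYS_modify_ldt, 0 /* read */, buf, sizeof(buf));
        /* With any LDT installed: n == sizeof(buf), and bytes past the
         * last populated entry arrive zeroed. With no LDT at all: n == 0. */
        return n < 0;
    }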
*/ + if (clear_user(ptr + size, bytecount - size)) { + retval = -EFAULT; + goto out_unlock; } } - return bytecount; -error_return: - return err; + retval = bytecount; + +out_unlock: + mutex_unlock(&mm->context.lock); + return retval; } static int read_default_ldt(void __user *ptr, unsigned long bytecount) @@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) struct desc_struct ldt; int error; struct user_desc ldt_info; + int oldsize, newsize; + struct ldt_struct *new_ldt, *old_ldt; error = -EINVAL; if (bytecount != sizeof(ldt_info)) @@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) goto out; } - mutex_lock(&mm->context.lock); - if (ldt_info.entry_number >= mm->context.size) { - error = alloc_ldt(¤t->mm->context, - ldt_info.entry_number + 1, 1); - if (error < 0) - goto out_unlock; - } - - /* Allow LDTs to be cleared by the user. */ - if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { - if (oldmode || LDT_empty(&ldt_info)) { - memset(&ldt, 0, sizeof(ldt)); - goto install; + if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) || + LDT_empty(&ldt_info)) { + /* The user wants to clear the entry. */ + memset(&ldt, 0, sizeof(ldt)); + } else { + if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { + error = -EINVAL; + goto out; } + + fill_ldt(&ldt, &ldt_info); + if (oldmode) + ldt.avl = 0; } - if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { - error = -EINVAL; + mutex_lock(&mm->context.lock); + + old_ldt = mm->context.ldt; + oldsize = old_ldt ? old_ldt->size : 0; + newsize = max((int)(ldt_info.entry_number + 1), oldsize); + + error = -ENOMEM; + new_ldt = alloc_ldt_struct(newsize); + if (!new_ldt) goto out_unlock; - } - fill_ldt(&ldt, &ldt_info); - if (oldmode) - ldt.avl = 0; + if (old_ldt) + memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE); + new_ldt->entries[ldt_info.entry_number] = ldt; + finalize_ldt_struct(new_ldt); - /* Install the new entry ... */ -install: - write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt); + install_ldt(mm, new_ldt); + free_ldt_struct(old_ldt); error = 0; out_unlock: diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 71d7849a07f7..f6b916387590 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -121,11 +121,11 @@ void __show_regs(struct pt_regs *regs, int all) void release_thread(struct task_struct *dead_task) { if (dead_task->mm) { - if (dead_task->mm->context.size) { + if (dead_task->mm->context.ldt) { pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n", dead_task->comm, dead_task->mm->context.ldt, - dead_task->mm->context.size); + dead_task->mm->context.ldt->size); BUG(); } } diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 206996c1669d..71820c42b6ce 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -93,8 +93,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) COPY(r15); #endif /* CONFIG_X86_64 */ +#ifdef CONFIG_X86_32 COPY_SEG_CPL3(cs); COPY_SEG_CPL3(ss); +#else /* !CONFIG_X86_32 */ + /* Kernel saves and restores only the CS segment register on signals, + * which is the bare minimum needed to allow mixed 32/64-bit code. + * App's signal handler can save/restore other segments if needed. 
*/ + COPY_SEG_CPL3(cs); +#endif /* CONFIG_X86_32 */ get_user_ex(tmpflags, &sc->flags); regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); @@ -154,9 +161,8 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, #else /* !CONFIG_X86_32 */ put_user_ex(regs->flags, &sc->flags); put_user_ex(regs->cs, &sc->cs); - put_user_ex(0, &sc->__pad2); - put_user_ex(0, &sc->__pad1); - put_user_ex(regs->ss, &sc->ss); + put_user_ex(0, &sc->gs); + put_user_ex(0, &sc->fs); #endif /* CONFIG_X86_32 */ put_user_ex(fpstate, &sc->fpstate); @@ -451,19 +457,9 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig, regs->sp = (unsigned long)frame; - /* - * Set up the CS and SS registers to run signal handlers in - * 64-bit mode, even if the handler happens to be interrupting - * 32-bit or 16-bit code. - * - * SS is subtle. In 64-bit mode, we don't need any particular - * SS descriptor, but we do need SS to be valid. It's possible - * that the old SS is entirely bogus -- this can happen if the - * signal we're trying to deliver is #GP or #SS caused by a bad - * SS value. - */ + /* Set up the CS register to run signal handlers in 64-bit mode, + even if the handler happens to be interrupting 32-bit code. */ regs->cs = __USER_CS; - regs->ss = __USER_DS; return 0; } diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index 9b4d51d0c0d0..6273324186ac 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c @@ -5,6 +5,7 @@ #include <linux/mm.h> #include <linux/ptrace.h> #include <asm/desc.h> +#include <asm/mmu_context.h> unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) { @@ -30,10 +31,11 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re seg &= ~7UL; mutex_lock(&child->mm->context.lock); - if (unlikely((seg >> 3) >= child->mm->context.size)) + if (unlikely(!child->mm->context.ldt || + (seg >> 3) >= child->mm->context.ldt->size)) addr = -1L; /* bogus selector, access would fault */ else { - desc = child->mm->context.ldt + seg; + desc = &child->mm->context.ldt->entries[seg]; base = get_desc_base(desc); /* 16-bit code segment? */ diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c index dc0a84a6f309..9e8bf13572e6 100644 --- a/arch/x86/kvm/mtrr.c +++ b/arch/x86/kvm/mtrr.c @@ -672,16 +672,16 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) if (iter.mtrr_disabled) return mtrr_disabled_type(); + /* not contained in any MTRRs. */ + if (type == -1) + return mtrr_default_type(mtrr_state); + /* * We just check one page, partially covered by MTRRs is * impossible. */ WARN_ON(iter.partial_map); - /* not contained in any MTRRs. 
*/ - if (type == -1) - return mtrr_default_type(mtrr_state); - return type; } EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type); diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index cfba30f27392..e4308fe6afe8 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -972,6 +972,11 @@ u64 efi_mem_attributes(unsigned long phys_addr) static int __init arch_parse_efi_cmdline(char *str) { + if (!str) { + pr_warn("need at least one option\n"); + return -EINVAL; + } + if (parse_option_str(str, "old_map")) set_bit(EFI_OLD_MEMMAP, &efi.flags); if (parse_option_str(str, "debug")) diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 0d7dd1f5ac36..9ab52791fed5 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -22,6 +22,7 @@ #include <asm/fpu/internal.h> #include <asm/debugreg.h> #include <asm/cpu.h> +#include <asm/mmu_context.h> #ifdef CONFIG_X86_32 __visible unsigned long saved_context_ebx; @@ -153,7 +154,7 @@ static void fix_processor_context(void) syscall_init(); /* This sets MSR_*STAR and related */ #endif load_TR_desc(); /* This does ltr */ - load_LDT(¤t->active_mm->context); /* This does lldt */ + load_mm_ldt(current->active_mm); /* This does lldt */ fpu__resume_cpu(); } diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 7322755f337a..4b6e29ac0968 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile @@ -13,13 +13,13 @@ CFLAGS_mmu.o := $(nostackp) obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ time.o xen-asm.o xen-asm_$(BITS).o \ grant-table.o suspend.o platform-pci-unplug.o \ - p2m.o + p2m.o apic.o obj-$(CONFIG_EVENT_TRACING) += trace.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o -obj-$(CONFIG_XEN_DOM0) += apic.o vga.o +obj-$(CONFIG_XEN_DOM0) += vga.o obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o obj-$(CONFIG_XEN_EFI) += efi.o diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 0b95c9b8283f..11d6fb4e8483 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot) pte_t pte; unsigned long pfn; struct page *page; + unsigned char dummy; ptep = lookup_address((unsigned long)v, &level); BUG_ON(ptep == NULL); @@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot) pte = pfn_pte(pfn, prot); + /* + * Careful: update_va_mapping() will fail if the virtual address + * we're poking isn't populated in the page tables. We don't + * need to worry about the direct map (that's always in the page + * tables), but we need to be careful about vmap space. In + * particular, the top level page table can lazily propagate + * entries between processes, so if we've switched mms since we + * vmapped the target in the first place, we might not have the + * top-level page table entry populated. + * + * We disable preemption because we want the same mm active when + * we probe the target and when we issue the hypercall. We'll + * have the same nominal mm, but if we're a kernel thread, lazy + * mm dropping could change our pgd. + * + * Out of an abundance of caution, this uses __get_user() to fault + * in the target address just in case there's some obscure case + * in which the target address isn't readable. + */ + + preempt_disable(); + + pagefault_disable(); /* Avoid warnings due to being atomic. 
*/ + __get_user(dummy, (unsigned char __user __force *)v); + pagefault_enable(); + if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0)) BUG(); @@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot) BUG(); } else kmap_flush_unused(); + + preempt_enable(); } static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries) @@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries) const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE; int i; + /* + * We need to mark all the aliases of the LDT pages RO. We + * don't need to call vm_flush_aliases(), though, since that's + * only responsible for flushing aliases out of the TLBs, not the + * page tables, and Xen will flush the TLB for us if needed. + * + * To avoid confusing future readers: none of this is necessary + * to load the LDT. The hypervisor only checks this when the + * LDT is faulted in due to subsequent descriptor access. + */ + for(i = 0; i < entries; i += entries_per_page) set_aliased_prot(ldt + i, PAGE_KERNEL_RO); } diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index c20fe29e65f4..2292721b1d10 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -101,17 +101,15 @@ struct dom0_vga_console_info; #ifdef CONFIG_XEN_DOM0 void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size); -void __init xen_init_apic(void); #else static inline void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size) { } -static inline void __init xen_init_apic(void) -{ -} #endif +void __init xen_init_apic(void); + #ifdef CONFIG_XEN_EFI extern void xen_efi_init(void); #else diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index 81751a49d8bf..56486d92c4e7 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map, if (!blk) return -ENOMEM; - present = krealloc(rbnode->cache_present, - BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL); - if (!present) { - kfree(blk); - return -ENOMEM; + if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) { + present = krealloc(rbnode->cache_present, + BITS_TO_LONGS(blklen) * sizeof(*present), + GFP_KERNEL); + if (!present) { + kfree(blk); + return -ENOMEM; + } + + memset(present + BITS_TO_LONGS(rbnode->blklen), 0, + (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen)) + * sizeof(*present)); + } else { + present = rbnode->cache_present; } /* insert the register value in the correct place in the rbnode block */ diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index d94529d5c8e9..bc67a93aa4f4 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -523,6 +523,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
# define rbd_assert(expr) ((void) 0) #endif /* !RBD_DEBUG */ +static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request); static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request); static void rbd_img_parent_read(struct rbd_obj_request *obj_request); static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); @@ -1818,6 +1819,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request) obj_request_done_set(obj_request); } +static void rbd_osd_call_callback(struct rbd_obj_request *obj_request) +{ + dout("%s: obj %p\n", __func__, obj_request); + + if (obj_request_img_data_test(obj_request)) + rbd_osd_copyup_callback(obj_request); + else + obj_request_done_set(obj_request); +} + static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, struct ceph_msg *msg) { @@ -1866,6 +1877,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, rbd_osd_discard_callback(obj_request); break; case CEPH_OSD_OP_CALL: + rbd_osd_call_callback(obj_request); + break; case CEPH_OSD_OP_NOTIFY_ACK: case CEPH_OSD_OP_WATCH: rbd_osd_trivial_callback(obj_request); @@ -2530,13 +2543,15 @@ out_unwind: } static void -rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) +rbd_osd_copyup_callback(struct rbd_obj_request *obj_request) { struct rbd_img_request *img_request; struct rbd_device *rbd_dev; struct page **pages; u32 page_count; + dout("%s: obj %p\n", __func__, obj_request); + rbd_assert(obj_request->type == OBJ_REQUEST_BIO || obj_request->type == OBJ_REQUEST_NODATA); rbd_assert(obj_request_img_data_test(obj_request)); @@ -2563,9 +2578,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) if (!obj_request->result) obj_request->xferred = obj_request->length; - /* Finish up with the normal image object callback */ - - rbd_img_obj_callback(obj_request); + obj_request_done_set(obj_request); } static void @@ -2650,7 +2663,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) /* All set, send it off. */ - orig_request->callback = rbd_img_obj_copyup_callback; osdc = &rbd_dev->rbd_client->client->osdc; img_result = rbd_obj_request_submit(osdc, orig_request); if (!img_result) diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index ced96777b677..954c0029fb3b 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -369,8 +369,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif) return; } - if (work_pending(&blkif->persistent_purge_work)) { - pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n"); + if (work_busy(&blkif->persistent_purge_work)) { + pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n"); return; } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 6d89ed35d80c..7a8a73f1fc04 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -179,6 +179,7 @@ static DEFINE_SPINLOCK(minor_lock); ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) static int blkfront_setup_indirect(struct blkfront_info *info); +static int blkfront_gather_backend_features(struct blkfront_info *info); static int get_id_from_freelist(struct blkfront_info *info) { @@ -1128,8 +1129,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, * Add the used indirect page back to the list of * available pages for indirect grefs. 
*/ - indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); - list_add(&indirect_page->lru, &info->indirect_pages); + if (!info->feature_persistent) { + indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); + list_add(&indirect_page->lru, &info->indirect_pages); + } s->indirect_grants[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->indirect_grants[i]->node, &info->grants); } @@ -1519,7 +1522,7 @@ static int blkif_recover(struct blkfront_info *info) info->shadow_free = info->ring.req_prod_pvt; info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff; - rc = blkfront_setup_indirect(info); + rc = blkfront_gather_backend_features(info); if (rc) { kfree(copy); return rc; @@ -1720,20 +1723,13 @@ static void blkfront_setup_discard(struct blkfront_info *info) static int blkfront_setup_indirect(struct blkfront_info *info) { - unsigned int indirect_segments, segs; + unsigned int segs; int err, i; - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-max-indirect-segments", "%u", &indirect_segments, - NULL); - if (err) { - info->max_indirect_segments = 0; + if (info->max_indirect_segments == 0) segs = BLKIF_MAX_SEGMENTS_PER_REQUEST; - } else { - info->max_indirect_segments = min(indirect_segments, - xen_blkif_max_segments); + else segs = info->max_indirect_segments; - } err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info)); if (err) @@ -1797,6 +1793,68 @@ out_of_memory: } /* + * Gather all backend feature-* + */ +static int blkfront_gather_backend_features(struct blkfront_info *info) +{ + int err; + int barrier, flush, discard, persistent; + unsigned int indirect_segments; + + info->feature_flush = 0; + + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-barrier", "%d", &barrier, + NULL); + + /* + * If there's no "feature-barrier" defined, then it means + * we're dealing with a very old backend which writes + * synchronously; nothing to do. + * + * If there are barriers, then we use flush. + */ + if (!err && barrier) + info->feature_flush = REQ_FLUSH | REQ_FUA; + /* + * And if there is "feature-flush-cache" use that above + * barriers. + */ + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-flush-cache", "%d", &flush, + NULL); + + if (!err && flush) + info->feature_flush = REQ_FLUSH; + + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-discard", "%d", &discard, + NULL); + + if (!err && discard) + blkfront_setup_discard(info); + + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-persistent", "%u", &persistent, + NULL); + if (err) + info->feature_persistent = 0; + else + info->feature_persistent = persistent; + + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-max-indirect-segments", "%u", &indirect_segments, + NULL); + if (err) + info->max_indirect_segments = 0; + else + info->max_indirect_segments = min(indirect_segments, + xen_blkif_max_segments); + + return blkfront_setup_indirect(info); +} + +/* * Invoked when the backend is finally 'ready' (and has told produced * the details about the physical device - #sectors, size, etc). 
*/ @@ -1807,7 +1865,6 @@ static void blkfront_connect(struct blkfront_info *info) unsigned int physical_sector_size; unsigned int binfo; int err; - int barrier, flush, discard, persistent; switch (info->connected) { case BLKIF_STATE_CONNECTED: @@ -1864,48 +1921,7 @@ static void blkfront_connect(struct blkfront_info *info) if (err != 1) physical_sector_size = sector_size; - info->feature_flush = 0; - - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-barrier", "%d", &barrier, - NULL); - - /* - * If there's no "feature-barrier" defined, then it means - * we're dealing with a very old backend which writes - * synchronously; nothing to do. - * - * If there are barriers, then we use flush. - */ - if (!err && barrier) - info->feature_flush = REQ_FLUSH | REQ_FUA; - /* - * And if there is "feature-flush-cache" use that above - * barriers. - */ - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-flush-cache", "%d", &flush, - NULL); - - if (!err && flush) - info->feature_flush = REQ_FLUSH; - - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-discard", "%d", &discard, - NULL); - - if (!err && discard) - blkfront_setup_discard(info); - - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-persistent", "%u", &persistent, - NULL); - if (err) - info->feature_persistent = 0; - else - info->feature_persistent = persistent; - - err = blkfront_setup_indirect(info); + err = blkfront_gather_backend_features(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s", info->xbdev->otherend); diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index da8faf78536a..5643b65cee20 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused) static void start_khwrngd(void) { hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng"); - if (hwrng_fill == ERR_PTR(-ENOMEM)) { + if (IS_ERR(hwrng_fill)) { pr_err("hwrng_fill thread creation failed"); hwrng_fill = NULL; } diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index e362860c2b50..cd593c1f66dc 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -20,7 +20,7 @@ #include <asm/clock.h> #include <asm/idle.h> -#include <asm/mach-loongson/loongson.h> +#include <asm/mach-loongson64/loongson.h> static uint nowait; diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 7ba495f75370..402631a19a11 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -905,7 +905,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt) crypt->mode |= NPE_OP_NOT_IN_PLACE; /* This was never tested by Intel * for more than one dst buffer, I think. 
*/ - BUG_ON(req->dst->length < nbytes); req_ctx->dst = NULL; if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook, flags, DMA_FROM_DEVICE)) diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 067402c7c2a9..df427c0e9e7b 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -73,7 +73,8 @@ ICP_QAT_HW_CIPHER_KEY_CONVERT, \ ICP_QAT_HW_CIPHER_DECRYPT) -static atomic_t active_dev; +static DEFINE_MUTEX(algs_lock); +static unsigned int active_devs; struct qat_alg_buf { uint32_t len; @@ -1280,7 +1281,10 @@ static struct crypto_alg qat_algs[] = { { int qat_algs_register(void) { - if (atomic_add_return(1, &active_dev) == 1) { + int ret = 0; + + mutex_lock(&algs_lock); + if (++active_devs == 1) { int i; for (i = 0; i < ARRAY_SIZE(qat_algs); i++) @@ -1289,21 +1293,25 @@ int qat_algs_register(void) CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC : CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; - return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); + ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); } - return 0; + mutex_unlock(&algs_lock); + return ret; } int qat_algs_unregister(void) { - if (atomic_sub_return(1, &active_dev) == 0) - return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); - return 0; + int ret = 0; + + mutex_lock(&algs_lock); + if (--active_devs == 0) + ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); + mutex_unlock(&algs_lock); + return ret; } int qat_algs_init(void) { - atomic_set(&active_dev, 0); crypto_get_default_rng(); return 0; } diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 59892126d175..d3629b7482dd 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -48,6 +48,8 @@ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) +#define ATC_MAX_DSCR_TRIALS 10 + /* * Initial number of descriptors to allocate for each channel. This could * be increased during dma usage. @@ -285,28 +287,19 @@ static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan, * * @current_len: the number of bytes left before reading CTRLA * @ctrla: the value of CTRLA - * @desc: the descriptor containing the transfer width */ -static inline int atc_calc_bytes_left(int current_len, u32 ctrla, - struct at_desc *desc) +static inline int atc_calc_bytes_left(int current_len, u32 ctrla) { - return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width); -} + u32 btsize = (ctrla & ATC_BTSIZE_MAX); + u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla); -/** - * atc_calc_bytes_left_from_reg - calculates the number of bytes left according - * to the current value of CTRLA. - * - * @current_len: the number of bytes left before reading CTRLA - * @atchan: the channel to read CTRLA for - * @desc: the descriptor containing the transfer width - */ -static inline int atc_calc_bytes_left_from_reg(int current_len, - struct at_dma_chan *atchan, struct at_desc *desc) -{ - u32 ctrla = channel_readl(atchan, CTRLA); - - return atc_calc_bytes_left(current_len, ctrla, desc); + /* + * According to the datasheet, when reading the Control A Register + * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the + * number of transfers completed on the Source Interface. + * So btsize is always a number of source width transfers. 
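A concrete instance of the formula that follows may help (illustrative numbers, not taken from any datasheet): with word-wide source transfers the CTRLA width field decodes to 2, so each completed transfer accounts for 4 bytes:

    /* Illustrative only: mirrors atc_calc_bytes_left() with made-up values. */
    int current_len = 100;   /* bytes left before reading CTRLA */
    u32 src_width = 2;       /* ATC_REG_TO_SRC_WIDTH(ctrla) for word transfers */
    u32 btsize = 10;         /* word transfers already completed at the source */
    int residue = current_len - (btsize << src_width);   /* 100 - 40 = 60 bytes */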
+ */ + return current_len - (btsize << src_width); } /** @@ -320,7 +313,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) struct at_desc *desc_first = atc_first_active(atchan); struct at_desc *desc; int ret; - u32 ctrla, dscr; + u32 ctrla, dscr, trials; /* * If the cookie doesn't match to the currently running transfer then @@ -346,15 +339,82 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) * the channel's DSCR register and compare it against the value * of the hardware linked list structure of each child * descriptor. + * + * The CTRLA register provides us with the amount of data + * already read from the source for the current child + * descriptor. So we can compute a more accurate residue by also + * removing the number of bytes corresponding to this amount of + * data. + * + * However, the DSCR and CTRLA registers cannot both be read + * atomically. Hence a race condition may occur: the first + * register read may refer to one child descriptor whereas the + * second read may refer to a later child descriptor in the list + * because of the DMA transfer progression in between the two + * reads. + * + * One solution could have been to pause the DMA transfer, read + * the DSCR and CTRLA then resume the DMA transfer. Nonetheless, + * this approach presents some drawbacks: + * - If the DMA transfer is paused, RX overruns or TX underruns + * are more likely to occur depending on the system latency. + * Taking the USART driver as an example, it uses a cyclic DMA + * transfer to read data from the Receive Holding Register + * (RHR) to avoid RX overruns since the RHR is not protected + * by any FIFO on most Atmel SoCs. So pausing the DMA transfer + * to compute the residue would break the USART driver design. + * - The atc_pause() function masks interrupts but we'd rather + * avoid doing so for system latency reasons. + * + * Then we'd rather use another solution: the DSCR is read a + * first time, the CTRLA is read in turn, next the DSCR is read + * a second time. If the two consecutive read values of the DSCR + * are the same then we assume both refer to the very same + * child descriptor, as does the CTRLA value read in between. + * For cyclic transfers, the assumption is that a full loop + * is "not so fast". + * If the two DSCR values are different, we read again the CTRLA + * then the DSCR till two consecutive read values from DSCR are + * equal or till the maximum number of trials is reached. + * It is very unlikely that this algorithm fails to find a + * stable value for DSCR. */ - ctrla = channel_readl(atchan, CTRLA); - rmb(); /* ensure CTRLA is read before DSCR */ dscr = channel_readl(atchan, DSCR); + rmb(); /* ensure DSCR is read before CTRLA */ + ctrla = channel_readl(atchan, CTRLA); + for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) { + u32 new_dscr; + + rmb(); /* ensure DSCR is read after CTRLA */ + new_dscr = channel_readl(atchan, DSCR); + + /* + * If the DSCR register value has not changed inside the + * DMA controller since the previous read, we assume + * that both the dscr and ctrla values refer to the + * very same descriptor. + */ + if (likely(new_dscr == dscr)) + break; + + /* + * DSCR has changed inside the DMA controller, so the + * previously read value of CTRLA may refer to an already + * processed descriptor and hence could be outdated. + * We need to update ctrla to match the current + * descriptor. 
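This update-and-retry step is an instance of the general stable-snapshot idiom for two registers that cannot be read atomically as a pair; a generic sketch of the idea (reg_read(), REG_A, REG_B and MAX_TRIES are placeholders, not driver symbols):

    /* Generic sketch, not driver code: obtain a consistent (a, b) pair. */
    u32 a = reg_read(REG_A), a2, b;
    unsigned int tries;

    for (tries = 0; tries < MAX_TRIES; ++tries) {
        b = reg_read(REG_B);
        a2 = reg_read(REG_A);
        if (a2 == a)    /* REG_A stable: b belongs to the same snapshot */
            break;
        a = a2;         /* hardware moved on; retry against the new value */
    }

The driver additionally places rmb() barriers between the reads so they cannot be reordered by the CPU.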
- ctrla = channel_readl(atchan, CTRLA); - rmb(); /* ensure CTRLA is read before DSCR */ dscr = channel_readl(atchan, DSCR); + rmb(); /* ensure DSCR is read before CTRLA */ + ctrla = channel_readl(atchan, CTRLA); + for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) { + u32 new_dscr; + + rmb(); /* ensure DSCR is read after CTRLA */ + new_dscr = channel_readl(atchan, DSCR); + + /* + * If the DSCR register value has not changed inside the + * DMA controller since the previous read, we assume + * that both the dscr and ctrla values refer to the + * very same descriptor. + */ + if (likely(new_dscr == dscr)) + break; + + /* + * DSCR has changed inside the DMA controller, so the + * previously read value of CTRLA may refer to an already + * processed descriptor and hence could be outdated. + * We need to update ctrla to match the current + * descriptor. + */ + dscr = new_dscr; + rmb(); /* ensure DSCR is read before CTRLA */ + ctrla = channel_readl(atchan, CTRLA); + } + if (unlikely(trials >= ATC_MAX_DSCR_TRIALS)) + return -ETIMEDOUT; /* for the first descriptor we can be more accurate */ if (desc_first->lli.dscr == dscr) - return atc_calc_bytes_left(ret, ctrla, desc_first); + return atc_calc_bytes_left(ret, ctrla); ret -= desc_first->len; list_for_each_entry(desc, &desc_first->tx_list, desc_node) { @@ -365,16 +425,14 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) } /* - * For the last descriptor in the chain we can calculate + * For the current descriptor in the chain we can calculate * the remaining bytes using the channel's register. - * Note that the transfer width of the first and last - * descriptor may differ. */ - if (!desc->lli.dscr) - ret = atc_calc_bytes_left_from_reg(ret, atchan, desc); + ret = atc_calc_bytes_left(ret, ctrla); } else { /* single transfer */ - ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first); + ctrla = channel_readl(atchan, CTRLA); + ret = atc_calc_bytes_left(ret, ctrla); } return ret; @@ -726,7 +784,6 @@ atc_prep_dma_interleaved(struct dma_chan *chan, desc->txd.cookie = -EBUSY; desc->total_len = desc->len = len; - desc->tx_width = dwidth; /* set end-of-link to the last link descriptor of list*/ set_desc_eol(desc); @@ -804,10 +861,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, first->txd.cookie = -EBUSY; first->total_len = len; - /* set transfer width for the calculation of the residue */ - first->tx_width = src_width; - prev->tx_width = src_width; - /* set end-of-link to the last link descriptor of list*/ set_desc_eol(desc); @@ -956,10 +1009,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, first->txd.cookie = -EBUSY; first->total_len = total_len; - /* set transfer width for the calculation of the residue */ - first->tx_width = reg_width; - prev->tx_width = reg_width; - /* first link descriptor of list is responsible of flags */ first->txd.flags = flags; /* client is in control of this ack */ @@ -1077,12 +1126,6 @@ atc_prep_dma_sg(struct dma_chan *chan, desc->txd.cookie = 0; desc->len = len; - /* - * Although we only need the transfer width for the first and - * the last descriptor, its easier to set it to all descriptors.
- */ - desc->tx_width = src_width; - atc_desc_chain(&first, &prev, desc); /* update the lengths and addresses for the next loop cycle */ @@ -1256,7 +1299,6 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, /* First descriptor of the chain embedds additional information */ first->txd.cookie = -EBUSY; first->total_len = buf_len; - first->tx_width = reg_width; return &first->txd; diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index bc8d5ebedd19..7f5a08230f76 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -112,6 +112,7 @@ #define ATC_SRC_WIDTH_BYTE (0x0 << 24) #define ATC_SRC_WIDTH_HALFWORD (0x1 << 24) #define ATC_SRC_WIDTH_WORD (0x2 << 24) +#define ATC_REG_TO_SRC_WIDTH(r) (((r) >> 24) & 0x3) #define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */ #define ATC_DST_WIDTH(x) ((x) << 28) #define ATC_DST_WIDTH_BYTE (0x0 << 28) @@ -182,7 +183,6 @@ struct at_lli { * @txd: support for the async_tx api * @desc_node: node on the channed descriptors list * @len: descriptor byte count - * @tx_width: transfer width * @total_len: total transaction byte count */ struct at_desc { @@ -194,7 +194,6 @@ struct at_desc { struct dma_async_tx_descriptor txd; struct list_head desc_node; size_t len; - u32 tx_width; size_t total_len; /* Interleaved data */ diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index cf1213de7865..40afa2a16cfc 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -359,18 +359,19 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, * descriptor view 2 since some fields of the configuration register * depend on transfer size and src/dest addresses. */ - if (at_xdmac_chan_is_cyclic(atchan)) { + if (at_xdmac_chan_is_cyclic(atchan)) reg = AT_XDMAC_CNDC_NDVIEW_NDV1; - at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); - } else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) { + else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) reg = AT_XDMAC_CNDC_NDVIEW_NDV3; - } else { - /* - * No need to write AT_XDMAC_CC reg, it will be done when the - * descriptor is fecthed. - */ + else reg = AT_XDMAC_CNDC_NDVIEW_NDV2; - } + /* + * Even if the register will be updated from the configuration in the + * descriptor when using view 2 or higher, the PROT bit won't be set + * properly. This bit can be modified only by using the channel + * configuration register. + */ + at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); reg |= AT_XDMAC_CNDC_NDDUP | AT_XDMAC_CNDC_NDSUP @@ -681,15 +682,16 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, desc->lld.mbr_sa = mem; desc->lld.mbr_da = atchan->sconfig.dst_addr; } - desc->lld.mbr_cfg = atchan->cfg; - dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); + dwidth = at_xdmac_get_dwidth(atchan->cfg); fixed_dwidth = IS_ALIGNED(len, 1 << dwidth) - ? at_xdmac_get_dwidth(desc->lld.mbr_cfg) + ? 
dwidth : AT_XDMAC_CC_DWIDTH_BYTE; desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */ | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */ | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */ | (len >> fixed_dwidth); /* microblock length */ + desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) | + AT_XDMAC_CC_DWIDTH(fixed_dwidth); dev_dbg(chan2dev(chan), "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index fbaf1ead2597..f1325f62563e 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -162,10 +162,11 @@ static void mv_chan_set_mode(struct mv_xor_chan *chan, config &= ~0x7; config |= op_mode; - if (IS_ENABLED(__BIG_ENDIAN)) - config |= XOR_DESCRIPTOR_SWAP; - else - config &= ~XOR_DESCRIPTOR_SWAP; +#if defined(__BIG_ENDIAN) + config |= XOR_DESCRIPTOR_SWAP; +#else + config &= ~XOR_DESCRIPTOR_SWAP; +#endif writel_relaxed(config, XOR_CONFIG(chan)); chan->current_type = type; diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index f513f77b1d85..ecab4ea059b4 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2328,7 +2328,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) desc->txd.callback = last->txd.callback; desc->txd.callback_param = last->txd.callback_param; } - last->last = false; + desc->last = false; dma_cookie_assign(&desc->txd); @@ -2623,6 +2623,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, desc->rqcfg.brst_len = 1; desc->rqcfg.brst_len = get_burst_len(desc, len); + desc->bytes_requested = len; desc->txd.flags = flags; diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index 7d2c17d8d30f..6f80432a3f0a 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c @@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) spin_lock_irqsave(&vc->lock, flags); cookie = dma_cookie_assign(tx); - list_move_tail(&vd->node, &vc->desc_submitted); + list_add_tail(&vd->node, &vc->desc_submitted); spin_unlock_irqrestore(&vc->lock, flags); dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", @@ -83,10 +83,8 @@ static void vchan_complete(unsigned long arg) cb_data = vd->tx.callback_param; list_del(&vd->node); - if (async_tx_test_ack(&vd->tx)) - list_add(&vd->node, &vc->desc_allocated); - else - vc->desc_free(vd); + + vc->desc_free(vd); if (cb) cb(cb_data); @@ -98,13 +96,9 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) while (!list_empty(head)) { struct virt_dma_desc *vd = list_first_entry(head, struct virt_dma_desc, node); - if (async_tx_test_ack(&vd->tx)) { - list_move_tail(&vd->node, &vc->desc_allocated); - } else { - dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); - list_del(&vd->node); - vc->desc_free(vd); - } + list_del(&vd->node); + dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); + vc->desc_free(vd); } } EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); @@ -114,7 +108,6 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) dma_cookie_init(&vc->chan); spin_lock_init(&vc->lock); - INIT_LIST_HEAD(&vc->desc_allocated); INIT_LIST_HEAD(&vc->desc_submitted); INIT_LIST_HEAD(&vc->desc_issued); INIT_LIST_HEAD(&vc->desc_completed); diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index 189e75dbcb15..181b95267866 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h @@ -29,7 +29,6 @@ struct virt_dma_chan { 
spinlock_t lock; /* protected by vc.lock */ - struct list_head desc_allocated; struct list_head desc_submitted; struct list_head desc_issued; struct list_head desc_completed; @@ -56,16 +55,11 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan struct virt_dma_desc *vd, unsigned long tx_flags) { extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); - unsigned long flags; dma_async_tx_descriptor_init(&vd->tx, &vc->chan); vd->tx.flags = tx_flags; vd->tx.tx_submit = vchan_tx_submit; - spin_lock_irqsave(&vc->lock, flags); - list_add_tail(&vd->node, &vc->desc_allocated); - spin_unlock_irqrestore(&vc->lock, flags); - return &vd->tx; } @@ -128,8 +122,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) } /** - * vchan_get_all_descriptors - obtain all allocated, submitted and issued - * descriptors + * vchan_get_all_descriptors - obtain all submitted and issued descriptors * vc: virtual channel to get descriptors from * head: list of descriptors found * @@ -141,7 +134,6 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, struct list_head *head) { - list_splice_tail_init(&vc->desc_allocated, head); list_splice_tail_init(&vc->desc_submitted, head); list_splice_tail_init(&vc->desc_issued, head); list_splice_tail_init(&vc->desc_completed, head); @@ -149,14 +141,11 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) { - struct virt_dma_desc *vd; unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&vc->lock, flags); vchan_get_all_descriptors(vc, &head); - list_for_each_entry(vd, &head, node) - async_tx_clear_ack(&vd->tx); spin_unlock_irqrestore(&vc->lock, flags); vchan_dma_desc_free_list(vc, &head); diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index 620fd55ec766..dff22ab01851 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -111,6 +111,7 @@ #define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070 #define XGENE_DMA_BLK_MEM_RDY 0xD074 #define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF +#define XGENE_DMA_RING_CMD_SM_OFFSET 0x8000 /* X-Gene SoC EFUSE csr register and bit defination */ #define XGENE_SOC_JTAG1_SHADOW 0x18 @@ -1887,6 +1888,8 @@ static int xgene_dma_get_resources(struct platform_device *pdev, return -ENOMEM; } + pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET; + /* Get efuse csr region */ res = platform_get_resource(pdev, IORESOURCE_MEM, 3); if (!res) { diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c index 3515b381c131..711d8ad74f11 100644 --- a/drivers/edac/ppc4xx_edac.c +++ b/drivers/edac/ppc4xx_edac.c @@ -920,7 +920,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1) */ for (row = 0; row < mci->nr_csrows; row++) { - struct csrow_info *csi = &mci->csrows[row]; + struct csrow_info *csi = mci->csrows[row]; /* * Get the configuration settings for this diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c index 080d5cc27055..eebdf2a33bfe 100644 --- a/drivers/extcon/extcon-palmas.c +++ b/drivers/extcon/extcon-palmas.c @@ -200,7 +200,6 @@ static int palmas_usb_probe(struct platform_device *pdev) status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev); if (status) { dev_err(&pdev->dev, "failed to register extcon device\n"); - kfree(palmas_usb->edev->name); return status; } @@ -214,7 +213,6 @@ static int palmas_usb_probe(struct 
platform_device *pdev) if (status < 0) { dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", palmas_usb->id_irq, status); - kfree(palmas_usb->edev->name); return status; } } @@ -229,7 +227,6 @@ static int palmas_usb_probe(struct platform_device *pdev) if (status < 0) { dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", palmas_usb->vbus_irq, status); - kfree(palmas_usb->edev->name); return status; } } @@ -239,15 +236,6 @@ static int palmas_usb_probe(struct platform_device *pdev) return 0; } -static int palmas_usb_remove(struct platform_device *pdev) -{ - struct palmas_usb *palmas_usb = platform_get_drvdata(pdev); - - kfree(palmas_usb->edev->name); - - return 0; -} - #ifdef CONFIG_PM_SLEEP static int palmas_usb_suspend(struct device *dev) { @@ -288,7 +276,6 @@ static const struct of_device_id of_palmas_match_tbl[] = { static struct platform_driver palmas_usb_driver = { .probe = palmas_usb_probe, - .remove = palmas_usb_remove, .driver = { .name = "palmas-usb", .of_match_table = of_palmas_match_tbl, diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index 76157ab9faf3..43b57b02d050 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -124,25 +124,35 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id return -EINVAL; } -static int find_cable_index_by_name(struct extcon_dev *edev, const char *name) +static int find_cable_id_by_name(struct extcon_dev *edev, const char *name) { - unsigned int id = EXTCON_NONE; + int id = -EINVAL; int i = 0; - if (edev->max_supported == 0) - return -EINVAL; - - /* Find the the number of extcon cable */ + /* Find the id of the extcon cable */ while (extcon_name[i]) { if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) { id = i; break; } + i++; } - if (id == EXTCON_NONE) + return id; +} + +static int find_cable_index_by_name(struct extcon_dev *edev, const char *name) +{ + int id; + if (edev->max_supported == 0) return -EINVAL; + /* Find the index of the extcon cable */ + id = find_cable_id_by_name(edev, name); + if (id < 0) + return id; + return find_cable_index_by_id(edev, id); } @@ -228,9 +238,11 @@ static ssize_t cable_state_show(struct device *dev, struct extcon_cable *cable = container_of(attr, struct extcon_cable, attr_state); + int i = cable->cable_index; + return sprintf(buf, "%d\n", extcon_get_cable_state_(cable->edev, - cable->cable_index)); + cable->edev->supported_cable[i])); } /** @@ -263,20 +275,25 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state) spin_lock_irqsave(&edev->lock, flags); if (edev->state != ((edev->state & ~mask) | (state & mask))) { + u32 old_state; + if (check_mutually_exclusive(edev, (edev->state & ~mask) | (state & mask))) { spin_unlock_irqrestore(&edev->lock, flags); return -EPERM; } - for (index = 0; index < edev->max_supported; index++) { - if (is_extcon_changed(edev->state, state, index, &attached)) - raw_notifier_call_chain(&edev->nh[index], attached, edev); - } - + old_state = edev->state; edev->state &= ~mask; edev->state |= state & mask; + for (index = 0; index < edev->max_supported; index++) { + if (is_extcon_changed(old_state, edev->state, index, + &attached)) + raw_notifier_call_chain(&edev->nh[index], + attached, edev); + } + /* This could be in interrupt handler */ prop_buf = (char *)get_zeroed_page(GFP_ATOMIC); if (prop_buf) {
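The reordering in extcon_update_state() above follows the usual snapshot-then-notify pattern: save the old bitmask, commit the update, then fire one notification per bit that actually changed. A stripped-down illustration in plain C, where notify_cable() is a made-up stand-in for raw_notifier_call_chain() and locking is left out:

#include <stdint.h>
#include <stdio.h>

static void notify_cable(int index, int attached)
{
	printf("cable %d %s\n", index, attached ? "attached" : "detached");
}

static void update_state(uint32_t *state, uint32_t mask, uint32_t new_bits,
			 int max_supported)
{
	uint32_t old_state = *state;	/* snapshot before mutating */
	int index;

	*state = (*state & ~mask) | (new_bits & mask);

	/* Compare the committed state against the snapshot, per cable. */
	for (index = 0; index < max_supported; index++) {
		uint32_t bit = 1u << index;

		if ((old_state ^ *state) & bit)
			notify_cable(index, !!(*state & bit));
	}
}

For instance, update_state(&s, 0x3, 0x1, 2) with s initially 0 reports only "cable 0 attached", since bit 1 is unchanged by the update.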
@@ -361,8 +378,13 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state_); */ int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name) { - return extcon_get_cable_state_(edev, find_cable_index_by_name - (edev, cable_name)); + int id; + + id = find_cable_id_by_name(edev, cable_name); + if (id < 0) + return id; + + return extcon_get_cable_state_(edev, id); } EXPORT_SYMBOL_GPL(extcon_get_cable_state); @@ -404,8 +426,13 @@ EXPORT_SYMBOL_GPL(extcon_set_cable_state_); int extcon_set_cable_state(struct extcon_dev *edev, const char *cable_name, bool cable_state) { - return extcon_set_cable_state_(edev, find_cable_index_by_name - (edev, cable_name), cable_state); + int id; + + id = find_cable_id_by_name(edev, cable_name); + if (id < 0) + return id; + + return extcon_set_cable_state_(edev, id, cable_state); } EXPORT_SYMBOL_GPL(extcon_set_cable_state); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 9fa8084a7c8d..d6144e3b97c5 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -58,6 +58,11 @@ bool efi_runtime_disabled(void) static int __init parse_efi_cmdline(char *str) { + if (!str) { + pr_warn("need at least one option\n"); + return -EINVAL; + } + if (parse_option_str(str, "noruntime")) disable_runtime = true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 31b00f91cfcd..f7b49d5ce4b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1130,6 +1130,9 @@ struct amdgpu_gfx { uint32_t me_feature_version; uint32_t ce_feature_version; uint32_t pfp_feature_version; + uint32_t rlc_feature_version; + uint32_t mec_feature_version; + uint32_t mec2_feature_version; struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; unsigned num_gfx_rings; struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; @@ -1639,6 +1642,7 @@ struct amdgpu_sdma { /* SDMA firmware */ const struct firmware *fw; uint32_t fw_version; + uint32_t feature_version; struct amdgpu_ring ring; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 9736892bcdf9..3bfe67de8349 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -317,16 +317,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file break; case AMDGPU_INFO_FW_GFX_RLC: fw_info.ver = adev->gfx.rlc_fw_version; - fw_info.feature = 0; + fw_info.feature = adev->gfx.rlc_feature_version; break; case AMDGPU_INFO_FW_GFX_MEC: - if (info->query_fw.index == 0) + if (info->query_fw.index == 0) { fw_info.ver = adev->gfx.mec_fw_version; - else if (info->query_fw.index == 1) + fw_info.feature = adev->gfx.mec_feature_version; + } else if (info->query_fw.index == 1) { fw_info.ver = adev->gfx.mec2_fw_version; + fw_info.feature = adev->gfx.mec2_feature_version; + } else return -EINVAL; - fw_info.feature = 0; break; case AMDGPU_INFO_FW_SMC: fw_info.ver = adev->pm.fw_version; @@ -336,7 +337,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (info->query_fw.index >= 2) return -EINVAL; fw_info.ver = adev->sdma[info->query_fw.index].fw_version; - fw_info.feature = 0; + fw_info.feature = adev->sdma[info->query_fw.index].feature_version; break; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 2f7a5efa21c2..f5c22556ec2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -374,7 +374,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) unsigned height_in_mb = ALIGN(height / 16, 2); unsigned fs_in_mb = width_in_mb 
* height_in_mb; - unsigned image_size, tmp, min_dpb_size, num_dpb_buffer; + unsigned image_size, tmp, min_dpb_size, num_dpb_buffer, min_ctx_size; image_size = width * height; image_size += image_size / 2; @@ -466,6 +466,8 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2; min_dpb_size = image_size * num_dpb_buffer; + min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16) + * 16 * num_dpb_buffer + 52 * 1024; break; default: @@ -486,6 +488,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) buf_sizes[0x1] = dpb_size; buf_sizes[0x2] = image_size; + buf_sizes[0x4] = min_ctx_size; return 0; } @@ -628,6 +631,13 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) return -EINVAL; } + } else if (cmd == 0x206) { + if ((end - start) < ctx->buf_sizes[4]) { + DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd, + (unsigned)(end - start), + ctx->buf_sizes[4]); + return -EINVAL; + } } else if ((cmd != 0x100) && (cmd != 0x204)) { DRM_ERROR("invalid UVD command %X!\n", cmd); return -EINVAL; @@ -755,9 +765,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) struct amdgpu_uvd_cs_ctx ctx = {}; unsigned buf_sizes[] = { [0x00000000] = 2048, - [0x00000001] = 32 * 1024 * 1024, - [0x00000002] = 2048 * 1152 * 3, + [0x00000001] = 0xFFFFFFFF, + [0x00000002] = 0xFFFFFFFF, [0x00000003] = 2048, + [0x00000004] = 0xFFFFFFFF, }; struct amdgpu_ib *ib = &parser->ibs[ib_idx]; int r;
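The new context-buffer bound above is easiest to sanity-check by plugging in numbers; the stream parameters below are illustrative only, not values taken from the patch:

#include <stdio.h>

int main(void)
{
	unsigned width = 3840, height = 2160;	/* sample 4K decode */
	unsigned num_dpb_buffer = 8;		/* sample DPB depth */

	/* Same expression as in amdgpu_uvd_cs_msg_decode() above. */
	unsigned min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
				* 16 * num_dpb_buffer + 52 * 1024;

	printf("min_ctx_size = %u bytes (~%u MiB)\n",
	       min_ctx_size, min_ctx_size >> 20);
	return 0;
}

That works out to a few megabytes for a 4K stream, which is why command 0x206 gets its own buf_sizes[4] check in amdgpu_uvd_cs_pass2() instead of reusing the image-size bound.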
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index ab83cc1ca4cc..15df46c93f0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -500,6 +500,7 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev) amdgpu_ucode_print_sdma_hdr(&hdr->header); fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); fw_data = (const __le32 *) (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 2db6ab0a543d..0d8bf2cb1956 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -3080,6 +3080,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev) mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version); + adev->gfx.mec_feature_version = le32_to_cpu( + mec_hdr->ucode_feature_version); gfx_v7_0_cp_compute_enable(adev, false); @@ -3102,6 +3104,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev) mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version); + adev->gfx.mec2_feature_version = le32_to_cpu( + mec2_hdr->ucode_feature_version); /* MEC2 */ fw_data = (const __le32 *) @@ -4066,6 +4070,8 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data; amdgpu_ucode_print_rlc_hdr(&hdr->header); adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->gfx.rlc_feature_version = le32_to_cpu( + hdr->ucode_feature_version); gfx_v7_0_rlc_stop(adev); @@ -5122,7 +5128,7 @@ static void gfx_v7_0_print_status(void *handle) dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n", RREG32(mmCP_HPD_EOP_CONTROL)); - for (queue = 0; queue < 8; i++) { + for (queue = 0; queue < 8; queue++) { cik_srbm_select(adev, me, pipe, queue, 0); dev_info(adev->dev, " queue: %d\n", queue); dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n", diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 9e1d4ddbf475..20e2cfd521d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -587,6 +587,7 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) int err; struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; + const struct gfx_firmware_header_v1_0 *cp_hdr; DRM_DEBUG("\n"); @@ -611,6 +612,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->gfx.pfp_fw); if (err) goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; + adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); @@ -619,6 +623,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->gfx.me_fw); if (err) goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; + adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); @@ -627,12 +634,18 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->gfx.ce_fw); if (err) goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; + adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); if (err) goto out; err = amdgpu_ucode_validate(adev->gfx.rlc_fw); + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data; + adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); @@ -641,6 +654,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->gfx.mec_fw); if (err) goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; + adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); @@ -648,6 +664,12 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->gfx.mec2_fw); if (err) goto out; + cp_hdr = (const 
struct gfx_firmware_header_v1_0 *) + adev->gfx.mec2_fw->data; + adev->gfx.mec2_fw_version = le32_to_cpu( + cp_hdr->header.ucode_version); + adev->gfx.mec2_feature_version = le32_to_cpu( + cp_hdr->ucode_feature_version); } else { err = 0; adev->gfx.mec2_fw = NULL; @@ -1983,6 +2005,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) adev->gfx.config.max_shader_engines = 1; adev->gfx.config.max_tile_pipes = 2; adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_backends_per_se = 2; switch (adev->pdev->revision) { case 0xc4: @@ -1991,7 +2014,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) case 0xcc: /* B10 */ adev->gfx.config.max_cu_per_sh = 8; - adev->gfx.config.max_backends_per_se = 2; break; case 0xc5: case 0x81: @@ -2000,14 +2022,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) case 0xcd: /* B8 */ adev->gfx.config.max_cu_per_sh = 6; - adev->gfx.config.max_backends_per_se = 2; break; case 0xc6: case 0xca: case 0xce: /* B6 */ adev->gfx.config.max_cu_per_sh = 6; - adev->gfx.config.max_backends_per_se = 2; break; case 0xc7: case 0x87: @@ -2015,7 +2035,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) default: /* B4 */ adev->gfx.config.max_cu_per_sh = 4; - adev->gfx.config.max_backends_per_se = 1; break; } @@ -2275,7 +2294,6 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev) hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; amdgpu_ucode_print_rlc_hdr(&hdr->header); - adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version); fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); @@ -2361,12 +2379,6 @@ static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev) amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); amdgpu_ucode_print_gfx_hdr(&me_hdr->header); - adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version); - adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version); - adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version); - adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version); - adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version); - adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version); gfx_v8_0_cp_gfx_enable(adev, false); @@ -2622,7 +2634,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev) mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); - adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version); fw_data = (const __le32 *) (adev->gfx.mec_fw->data + @@ -2641,7 +2652,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev) mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); - adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version); fw_data = (const __le32 *) (adev->gfx.mec2_fw->data + @@ -3125,7 +3135,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2); WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, - 0x7FFFF << 2); + AMDGPU_DOORBELL_MEC_RING7 << 2); } tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index d7895885fe0c..a988dfb1d394 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -121,6 +121,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev) int err, i; struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; + const struct sdma_firmware_header_v1_0 *hdr; DRM_DEBUG("\n"); @@ -142,6 +143,9 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->sdma[i].fw); if (err) goto out; + hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; + adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); if (adev->firmware.smu_load) { info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; @@ -541,8 +545,6 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev) hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; amdgpu_ucode_print_sdma_hdr(&hdr->header); fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; - adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); - fw_data = (const __le32 *) (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 7bb37b93993f..2b86569b18d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -159,6 +159,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) int err, i; struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; + const struct sdma_firmware_header_v1_0 *hdr; DRM_DEBUG("\n"); @@ -183,6 +184,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->sdma[i].fw); if (err) goto out; + hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; + adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); if (adev->firmware.smu_load) { info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; @@ -630,8 +634,6 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev) hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; amdgpu_ucode_print_sdma_hdr(&hdr->header); fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; - adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); - fw_data = (const __le32 *) (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 5b59d5ad7d1c..9dcc7280e572 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -196,7 +196,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) } funcs = connector->helper_private; - new_encoder = funcs->best_encoder(connector); + + if (funcs->atomic_best_encoder) + new_encoder = funcs->atomic_best_encoder(connector, + connector_state); + else + new_encoder = funcs->best_encoder(connector); if (!new_encoder) { DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n", @@ -229,6 +234,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) } } + if (WARN_ON(!connector_state->crtc)) + return -EINVAL; + connector_state->best_encoder = new_encoder; idx = drm_crtc_index(connector_state->crtc); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c 
b/drivers/gpu/drm/drm_dp_mst_topology.c index 778bbb6425b8..b0487c9f018c 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1294,7 +1294,6 @@ retry: goto retry; } DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret); - WARN(1, "fail\n"); return -EIO; } diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index f9cc68fbd2a3..b50fa0afd907 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -75,7 +75,7 @@ module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600) module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); static void store_vblank(struct drm_device *dev, int crtc, - unsigned vblank_count_inc, + u32 vblank_count_inc, struct timeval *t_vblank) { struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index fe1599d75f14..424228be79ae 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -606,8 +606,6 @@ static void tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr, uint8_t *buf, size_t size) { - buf[PB(0)] = tda998x_cksum(buf, size); - reg_clear(priv, REG_DIP_IF_FLAGS, bit); reg_write_range(priv, addr, buf, size); reg_set(priv, REG_DIP_IF_FLAGS, bit); @@ -627,6 +625,8 @@ tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p) buf[PB(4)] = p->audio_frame[4]; buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */ + buf[PB(0)] = tda998x_cksum(buf, sizeof(buf)); + tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf, sizeof(buf)); } diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 198fc3c3291b..3dcd59e694db 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1075,15 +1075,34 @@ parse_device_mapping(struct drm_i915_private *dev_priv, const union child_device_config *p_child; union child_device_config *child_dev_ptr; int i, child_device_num, count; - u16 block_size; + u8 expected_size; + u16 block_size; p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); if (!p_defs) { DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n"); return; } - if (p_defs->child_dev_size < sizeof(*p_child)) { - DRM_ERROR("General definiton block child device size is too small.\n"); + if (bdb->version < 195) { + expected_size = 33; + } else if (bdb->version == 195) { + expected_size = 37; + } else if (bdb->version <= 197) { + expected_size = 38; + } else { + expected_size = 38; + DRM_DEBUG_DRIVER("Expected child_device_config size for BDB version %u not known; assuming %u\n", + bdb->version, expected_size); + } + + if (expected_size > sizeof(*p_child)) { + DRM_ERROR("child_device_config cannot fit in p_child\n"); + return; + } + + if (p_defs->child_dev_size != expected_size) { + DRM_ERROR("Size mismatch; child_device_config size=%u (expected %u); bdb->version: %u\n", + p_defs->child_dev_size, expected_size, bdb->version); return; } /* get the block size of general definitions */ @@ -1130,7 +1149,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv, child_dev_ptr = dev_priv->vbt.child_dev + count; count++; - memcpy(child_dev_ptr, p_child, sizeof(*p_child)); + memcpy(child_dev_ptr, p_child, p_defs->child_dev_size); } return; } diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 6e4cc5334f47..600afdbef8c9 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ 
b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -357,6 +357,16 @@ intel_dp_mst_mode_valid(struct drm_connector *connector, return MODE_OK; } +static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector, + struct drm_connector_state *state) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + struct intel_crtc *crtc = to_intel_crtc(state->crtc); + + return &intel_dp->mst_encoders[crtc->pipe]->base.base; +} + static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); @@ -367,6 +377,7 @@ static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connecto static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = { .get_modes = intel_dp_mst_get_modes, .mode_valid = intel_dp_mst_mode_valid, + .atomic_best_encoder = intel_mst_atomic_best_encoder, .best_encoder = intel_mst_best_encoder, }; diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 3511bbaba505..e3c63640df73 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -462,12 +462,15 @@ out: static void hidinput_cleanup_battery(struct hid_device *dev) { + const struct power_supply_desc *psy_desc; + if (!dev->battery) return; + psy_desc = dev->battery->desc; power_supply_unregister(dev->battery); - kfree(dev->battery->desc->name); - kfree(dev->battery->desc); + kfree(psy_desc->name); + kfree(psy_desc); dev->battery = NULL; } #else /* !CONFIG_HID_BATTERY_STRENGTH */ diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c index 94167310e15a..b905d501e752 100644 --- a/drivers/hid/hid-uclogic.c +++ b/drivers/hid/hid-uclogic.c @@ -858,7 +858,7 @@ static int uclogic_tablet_enable(struct hid_device *hdev) for (p = drvdata->rdesc; p <= drvdata->rdesc + drvdata->rsize - 4;) { if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D && - p[3] < sizeof(params)) { + p[3] < ARRAY_SIZE(params)) { v = params[p[3]]; put_unaligned(cpu_to_le32(v), (s32 *)p); p += 4; diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 44958d79d598..01b937e63cf3 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -1284,6 +1284,39 @@ fail_register_pen_input: return error; } +/* + * Not all devices report physical dimensions from HID. + * Compute the default from hardcoded logical dimension + * and resolution before driver overwrites them. 
+ */ +static void wacom_set_default_phy(struct wacom_features *features) +{ + if (features->x_resolution) { + features->x_phy = (features->x_max * 100) / + features->x_resolution; + features->y_phy = (features->y_max * 100) / + features->y_resolution; + } +} + +static void wacom_calculate_res(struct wacom_features *features) +{ + /* set unit to "100th of a mm" for devices not reported by HID */ + if (!features->unit) { + features->unit = 0x11; + features->unitExpo = -3; + } + + features->x_resolution = wacom_calc_hid_res(features->x_max, + features->x_phy, + features->unit, + features->unitExpo); + features->y_resolution = wacom_calc_hid_res(features->y_max, + features->y_phy, + features->unit, + features->unitExpo); +} + static void wacom_wireless_work(struct work_struct *work) { struct wacom *wacom = container_of(work, struct wacom, work); @@ -1341,6 +1374,8 @@ static void wacom_wireless_work(struct work_struct *work) if (wacom_wac1->features.type != INTUOSHT && wacom_wac1->features.type != BAMBOO_PT) wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD; + wacom_set_default_phy(&wacom_wac1->features); + wacom_calculate_res(&wacom_wac1->features); snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen", wacom_wac1->features.name); snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad", @@ -1359,7 +1394,9 @@ static void wacom_wireless_work(struct work_struct *work) wacom_wac2->features = *((struct wacom_features *)id->driver_data); wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3; + wacom_set_default_phy(&wacom_wac2->features); wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096; + wacom_calculate_res(&wacom_wac2->features); snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX, "%s (WL) Finger",wacom_wac2->features.name); snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX, @@ -1407,39 +1444,6 @@ void wacom_battery_work(struct work_struct *work) } } -/* - * Not all devices report physical dimensions from HID. - * Compute the default from hardcoded logical dimension - * and resolution before driver overwrites them. - */ -static void wacom_set_default_phy(struct wacom_features *features) -{ - if (features->x_resolution) { - features->x_phy = (features->x_max * 100) / - features->x_resolution; - features->y_phy = (features->y_max * 100) / - features->y_resolution; - } -} - -static void wacom_calculate_res(struct wacom_features *features) -{ - /* set unit to "100th of a mm" for devices not reported by HID */ - if (!features->unit) { - features->unit = 0x11; - features->unitExpo = -3; - } - - features->x_resolution = wacom_calc_hid_res(features->x_max, - features->x_phy, - features->unit, - features->unitExpo); - features->y_resolution = wacom_calc_hid_res(features->y_max, - features->y_phy, - features->unit, - features->unitExpo); -} - static size_t wacom_compute_pktlen(struct hid_device *hdev) { struct hid_report_enum *report_enum; diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index 37c16afe007a..c8487894b312 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -929,6 +929,21 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = { MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); +static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = { + { + /* + * CPU fan speed going up and down on Dell Studio XPS 8100 + * for unknown reasons. 
+ */ + .ident = "Dell Studio XPS 8100", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"), + }, + }, + { } +}; + /* * Probe for the presence of a supported laptop. */ @@ -940,7 +955,8 @@ static int __init i8k_probe(void) /* * Get DMI information */ - if (!dmi_check_system(i8k_dmi_table)) { + if (!dmi_check_system(i8k_dmi_table) || + dmi_check_system(i8k_blacklist_dmi_table)) { if (!ignore_dmi && !force) return -ENODEV; diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c index 9b55e673b67c..85d106fe3ce8 100644 --- a/drivers/hwmon/g762.c +++ b/drivers/hwmon/g762.c @@ -582,6 +582,7 @@ static const struct of_device_id g762_dt_match[] = { { .compatible = "gmt,g763" }, { }, }; +MODULE_DEVICE_TABLE(of, g762_dt_match); /* * Grab clock (a required property), enable it, get (fixed) clock frequency diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c index 6153df735e82..08ff89d222e5 100644 --- a/drivers/hwmon/nct7904.c +++ b/drivers/hwmon/nct7904.c @@ -575,6 +575,7 @@ static const struct i2c_device_id nct7904_id[] = { {"nct7904", 0}, {} }; +MODULE_DEVICE_TABLE(i2c, nct7904_id); static struct i2c_driver nct7904_driver = { .class = I2C_CLASS_HWMON, diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c index af162b4c7a6d..025686d41640 100644 --- a/drivers/i2c/busses/i2c-bfin-twi.c +++ b/drivers/i2c/busses/i2c-bfin-twi.c @@ -692,7 +692,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, iface); - dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Contoller, " + dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Controller, " "regs_base@%p\n", iface->regs_base); return 0; @@ -735,6 +735,6 @@ subsys_initcall(i2c_bfin_twi_init); module_exit(i2c_bfin_twi_exit); MODULE_AUTHOR("Bryan Wu, Sonic Zhang"); -MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Contoller Driver"); +MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Controller Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:i2c-bfin-twi"); diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index d1c22e3fdd14..fc9bf7f30e35 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1247,7 +1247,14 @@ static void omap_i2c_prepare_recovery(struct i2c_adapter *adap) u32 reg; reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG); + /* enable test mode */ reg |= OMAP_I2C_SYSTEST_ST_EN; + /* select SDA/SCL IO mode */ + reg |= 3 << OMAP_I2C_SYSTEST_TMODE_SHIFT; + /* set SCL to high-impedance state (reset value is 0) */ + reg |= OMAP_I2C_SYSTEST_SCL_O; + /* set SDA to high-impedance state (reset value is 0) */ + reg |= OMAP_I2C_SYSTEST_SDA_O; omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg); } @@ -1257,7 +1264,11 @@ static void omap_i2c_unprepare_recovery(struct i2c_adapter *adap) u32 reg; reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG); + /* restore reset values */ reg &= ~OMAP_I2C_SYSTEST_ST_EN; + reg &= ~OMAP_I2C_SYSTEST_TMODE_MASK; + reg &= ~OMAP_I2C_SYSTEST_SCL_O; + reg &= ~OMAP_I2C_SYSTEST_SDA_O; omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg); } diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index e6d4935161e4..c83e4d13cfc5 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -567,6 +567,9 @@ static int i2c_generic_recovery(struct i2c_adapter *adap) if (bri->prepare_recovery) bri->prepare_recovery(adap); + bri->set_scl(adap, val); + ndelay(RECOVERY_NDELAY); + /* * By this time SCL is high, as we need to give 9 
falling-rising edges */ @@ -597,7 +600,6 @@ static int i2c_generic_recovery(struct i2c_adapter *adap) int i2c_generic_scl_recovery(struct i2c_adapter *adap) { - adap->bus_recovery_info->set_scl(adap, 1); return i2c_generic_recovery(adap); } EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery); @@ -1338,13 +1340,17 @@ static int of_dev_node_match(struct device *dev, void *data) struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) { struct device *dev; + struct i2c_client *client; - dev = bus_find_device(&i2c_bus_type, NULL, node, - of_dev_node_match); + dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match); if (!dev) return NULL; - return i2c_verify_client(dev); + client = i2c_verify_client(dev); + if (!client) + put_device(dev); + + return client; } EXPORT_SYMBOL(of_find_i2c_device_by_node); @@ -1352,13 +1358,17 @@ EXPORT_SYMBOL(of_find_i2c_device_by_node); struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node) { struct device *dev; + struct i2c_adapter *adapter; - dev = bus_find_device(&i2c_bus_type, NULL, node, - of_dev_node_match); + dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match); if (!dev) return NULL; - return i2c_verify_adapter(dev); + adapter = i2c_verify_adapter(dev); + if (!adapter) + put_device(dev); + + return adapter; } EXPORT_SYMBOL(of_find_i2c_adapter_by_node); #else
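The nine "falling-rising edges" in the comment above are the standard I2C un-wedge sequence: pulse SCL until a stuck slave finishes the byte it thinks it is sending and releases SDA. A bare-bones sketch of that loop in plain C, where set_scl(), get_sda() and delay_half_period() are hypothetical helpers; the real core drives these through struct i2c_bus_recovery_info and also honors the prepare/unprepare hooks shown above:

/* Hypothetical line helpers; the real ones come from the adapter. */
extern void set_scl(int val);
extern int get_sda(void);
extern void delay_half_period(void);

static int recover_bus(void)
{
	int i;

	set_scl(1);			/* start with SCL released/high */
	delay_half_period();

	for (i = 0; i < 9; i++) {
		if (get_sda())		/* slave released SDA: bus is free */
			return 0;
		set_scl(0);
		delay_half_period();
		set_scl(1);
		delay_half_period();
	}
	return get_sda() ? 0 : -1;	/* still stuck after 9 clocks */
}

This also shows what the i2c-core hunk above changes: the initial SCL raise now happens inside i2c_generic_recovery(), after the prepare_recovery() hook, so an adapter that needs setup first (such as the OMAP SYSTEST test mode above) gets it before the first edge is driven.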
"codec"); + if (node) { of_node_put(node); return true; } diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 113d6f1516a5..4d246861d692 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -20,6 +20,7 @@ #include <linux/input/mt.h> #include <linux/serio.h> #include <linux/libps2.h> +#include <linux/dmi.h> #include "psmouse.h" #include "alps.h" @@ -99,6 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = { #define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */ #define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with 6-byte ALPS packet */ +#define ALPS_DELL 0x100 /* device is a Dell laptop */ #define ALPS_BUTTONPAD 0x200 /* device is a clickpad */ static const struct alps_model_info alps_model_data[] = { @@ -251,9 +253,9 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse) return; } - /* Non interleaved V2 dualpoint has separate stick button bits */ + /* Dell non interleaved V2 dualpoint has separate stick button bits */ if (priv->proto_version == ALPS_PROTO_V2 && - priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) { + priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) { left |= packet[0] & 1; right |= packet[0] & 2; middle |= packet[0] & 4; @@ -2550,6 +2552,8 @@ static int alps_set_protocol(struct psmouse *psmouse, priv->byte0 = protocol->byte0; priv->mask0 = protocol->mask0; priv->flags = protocol->flags; + if (dmi_name_in_vendors("Dell")) + priv->flags |= ALPS_DELL; priv->x_max = 2000; priv->y_max = 1400; diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 22b9ca901f4e..2955f1d0ca6c 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -783,19 +783,26 @@ static int elantech_packet_check_v4(struct psmouse *psmouse) struct elantech_data *etd = psmouse->private; unsigned char *packet = psmouse->packet; unsigned char packet_type = packet[3] & 0x03; + unsigned int ic_version; bool sanity_check; if (etd->tp_dev && (packet[3] & 0x0f) == 0x06) return PACKET_TRACKPOINT; + /* This represents the version of IC body. */ + ic_version = (etd->fw_version & 0x0f0000) >> 16; + /* * Sanity check based on the constant bits of a packet. * The constant bits change depending on the value of - * the hardware flag 'crc_enabled' but are the same for - * every packet, regardless of the type. + * the hardware flag 'crc_enabled' and the version of + * the IC body, but are the same for every packet, + * regardless of the type. */ if (etd->crc_enabled) sanity_check = ((packet[3] & 0x08) == 0x00); + else if (ic_version == 7 && etd->samples[1] == 0x2A) + sanity_check = ((packet[3] & 0x1c) == 0x10); else sanity_check = ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0x1c) == 0x10); @@ -1116,6 +1123,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, * Avatar AVIU-145A2 0x361f00 ? 
clickpad * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons + * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*) @@ -1651,6 +1659,16 @@ int elantech_init(struct psmouse *psmouse) etd->capabilities[0], etd->capabilities[1], etd->capabilities[2]); + if (etd->hw_version != 1) { + if (etd->send_cmd(psmouse, ETP_SAMPLE_QUERY, etd->samples)) { + psmouse_err(psmouse, "failed to query sample data\n"); + goto init_fail; + } + psmouse_info(psmouse, + "Elan sample query result %02x, %02x, %02x\n", + etd->samples[0], etd->samples[1], etd->samples[2]); + } + if (elantech_set_absolute_mode(psmouse)) { psmouse_err(psmouse, "failed to put touchpad into absolute mode.\n"); diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h index f965d1569cc3..e1cbf409d9c8 100644 --- a/drivers/input/mouse/elantech.h +++ b/drivers/input/mouse/elantech.h @@ -129,6 +129,7 @@ struct elantech_data { unsigned char reg_26; unsigned char debug; unsigned char capabilities[3]; + unsigned char samples[3]; bool paritycheck; bool jumpy_cursor; bool reports_pressure; diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index b7d54d428b5e..ff4be0515a0d 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -538,7 +538,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c index 1a57e88a38f7..cd35079c8c98 100644 --- a/drivers/macintosh/ans-lcd.c +++ b/drivers/macintosh/ans-lcd.c @@ -7,7 +7,7 @@ #include <linux/kernel.h> #include <linux/miscdevice.h> #include <linux/fcntl.h> -#include <linux/init.h> +#include <linux/module.h> #include <linux/delay.h> #include <linux/fs.h> diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index 32814371b8d3..aa1b41ca40f7 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c @@ -1471,5 +1471,3 @@ module_exit(mq_exit); MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("mq cache policy"); - -MODULE_ALIAS("dm-cache-default"); diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c index 48a4a826ae07..200366c62231 100644 --- a/drivers/md/dm-cache-policy-smq.c +++ b/drivers/md/dm-cache-policy-smq.c @@ -1789,3 +1789,5 @@ module_exit(smq_exit); MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("smq cache policy"); + +MODULE_ALIAS("dm-cache-default"); diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 48dfe3c4d6aa..6ba47cfb1443 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -1293,8 +1293,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd) return r; disk_super = dm_block_data(copy); - dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root)); - dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root)); + dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root)); + dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root)); dm_sm_dec_block(pmd->metadata_sm, held_root); 
return dm_tm_unlock(pmd->tm, copy); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ab37ae114e94..0d7ab20c58df 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1729,7 +1729,8 @@ static int dm_merge_bvec(struct request_queue *q, struct mapped_device *md = q->queuedata; struct dm_table *map = dm_get_live_table_fast(md); struct dm_target *ti; - sector_t max_sectors, max_size = 0; + sector_t max_sectors; + int max_size = 0; if (unlikely(!map)) goto out; @@ -1742,18 +1743,10 @@ static int dm_merge_bvec(struct request_queue *q, * Find maximum amount of I/O that won't need splitting */ max_sectors = min(max_io_len(bvm->bi_sector, ti), - (sector_t) queue_max_sectors(q)); + (sector_t) BIO_MAX_SECTORS); max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; - - /* - * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t - * to the targets' merge function since it holds sectors not bytes). - * Just doing this as an interim fix for stable@ because the more - * comprehensive cleanup of switching to sector_t will impact every - * DM target that implements a ->merge hook. - */ - if (max_size > INT_MAX) - max_size = INT_MAX; + if (max_size < 0) + max_size = 0; /* * merge_bvec_fn() returns number of bytes @@ -1761,13 +1754,13 @@ static int dm_merge_bvec(struct request_queue *q, * max is precomputed maximal io size */ if (max_size && ti->type->merge) - max_size = ti->type->merge(ti, bvm, biovec, (int) max_size); + max_size = ti->type->merge(ti, bvm, biovec, max_size); /* * If the target doesn't support merge method and some of the devices - * provided their merge_bvec method (we know this by looking for the - * max_hw_sectors that dm_set_device_limits may set), then we can't - * allow bios with multiple vector entries. So always set max_size - * to 0, and the code below allows just one page. + * provided their merge_bvec method (we know this by looking at + * queue_max_hw_sectors), then we can't allow bios with multiple vector + * entries. So always set max_size to 0, and the code below allows + * just one page. */ else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9) max_size = 0; diff --git a/drivers/md/md.c b/drivers/md/md.c index 0c2a4e8b873c..e25f00f0138a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5759,7 +5759,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg) char *ptr; int err; - file = kmalloc(sizeof(*file), GFP_NOIO); + file = kzalloc(sizeof(*file), GFP_NOIO); if (!file) return -ENOMEM; diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h index bf2b80d5c470..8731b6ea026b 100644 --- a/drivers/md/persistent-data/dm-btree-internal.h +++ b/drivers/md/persistent-data/dm-btree-internal.h @@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key); extern struct dm_block_validator btree_node_validator; +/* + * Value type for upper levels of multi-level btrees. 
+ */ +extern void init_le64_type(struct dm_transaction_manager *tm, + struct dm_btree_value_type *vt); + #endif /* DM_BTREE_INTERNAL_H */ diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index 9836c0ae897c..4222f774cf36 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c @@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info, return r; } -static struct dm_btree_value_type le64_type = { - .context = NULL, - .size = sizeof(__le64), - .inc = NULL, - .dec = NULL, - .equal = NULL -}; - int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, uint64_t *keys, dm_block_t *new_root) { @@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, int index = 0, r = 0; struct shadow_spine spine; struct btree_node *n; + struct dm_btree_value_type le64_vt; + init_le64_type(info->tm, &le64_vt); init_shadow_spine(&spine, info); for (level = 0; level < info->levels; level++) { r = remove_raw(&spine, info, (level == last_level ? - &info->value_type : &le64_type), + &info->value_type : &le64_vt), root, keys[level], (unsigned *)&index); if (r < 0) break; @@ -654,11 +648,13 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root, int index = 0, r = 0; struct shadow_spine spine; struct btree_node *n; + struct dm_btree_value_type le64_vt; uint64_t k; + init_le64_type(info->tm, &le64_vt); init_shadow_spine(&spine, info); for (level = 0; level < last_level; level++) { - r = remove_raw(&spine, info, &le64_type, + r = remove_raw(&spine, info, &le64_vt, root, keys[level], (unsigned *) &index); if (r < 0) goto out; @@ -689,6 +685,7 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root, value_ptr(n, index)); delete_at(n, index); + keys[last_level] = k + 1ull; } else r = -ENODATA; diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c index 1b5e13ec7f96..0dee514ba4c5 100644 --- a/drivers/md/persistent-data/dm-btree-spine.c +++ b/drivers/md/persistent-data/dm-btree-spine.c @@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s) { return s->root; } + +static void le64_inc(void *context, const void *value_le) +{ + struct dm_transaction_manager *tm = context; + __le64 v_le; + + memcpy(&v_le, value_le, sizeof(v_le)); + dm_tm_inc(tm, le64_to_cpu(v_le)); +} + +static void le64_dec(void *context, const void *value_le) +{ + struct dm_transaction_manager *tm = context; + __le64 v_le; + + memcpy(&v_le, value_le, sizeof(v_le)); + dm_tm_dec(tm, le64_to_cpu(v_le)); +} + +static int le64_equal(void *context, const void *value1_le, const void *value2_le) +{ + __le64 v1_le, v2_le; + + memcpy(&v1_le, value1_le, sizeof(v1_le)); + memcpy(&v2_le, value2_le, sizeof(v2_le)); + return v1_le == v2_le; +} + +void init_le64_type(struct dm_transaction_manager *tm, + struct dm_btree_value_type *vt) +{ + vt->context = tm; + vt->size = sizeof(__le64); + vt->inc = le64_inc; + vt->dec = le64_dec; + vt->equal = le64_equal; +} diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index fdd3793e22f9..c7726cebc495 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root, struct btree_node *n; struct dm_btree_value_type le64_type; - le64_type.context = NULL; - le64_type.size = sizeof(__le64); - le64_type.inc = NULL; - le64_type.dec = NULL; 
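Taken together, these btree hunks give the interior levels of multi-level btrees a real value type. Interior nodes store __le64 roots of child trees; once those children are reference-counted, the inc/dec hooks need the transaction manager, which is why the old file-scope le64_type with NULL callbacks and no context could not stay. Each caller now builds the type per operation, as in the dm_btree_remove() hunk above:

	struct dm_btree_value_type le64_vt;

	/* context = tm, so shadowing or deleting an interior node adjusts
	 * the refcount of every child root it references */
	init_le64_type(info->tm, &le64_vt);

	r = remove_raw(&spine, info,
		       (level == last_level ? &info->value_type : &le64_vt),
		       root, keys[level], (unsigned *)&index);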
- le64_type.equal = NULL; - + init_le64_type(info->tm, &le64_type); init_shadow_spine(&spine, info); for (level = 0; level < (info->levels - 1); level++) { diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 94f5b55069e0..967a4ed73929 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1476,6 +1476,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev) { char b[BDEVNAME_SIZE]; struct r1conf *conf = mddev->private; + unsigned long flags; /* * If it is not operational, then we have already marked it as dead @@ -1495,14 +1496,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev) return; } set_bit(Blocked, &rdev->flags); + spin_lock_irqsave(&conf->device_lock, flags); if (test_and_clear_bit(In_sync, &rdev->flags)) { - unsigned long flags; - spin_lock_irqsave(&conf->device_lock, flags); mddev->degraded++; set_bit(Faulty, &rdev->flags); - spin_unlock_irqrestore(&conf->device_lock, flags); } else set_bit(Faulty, &rdev->flags); + spin_unlock_irqrestore(&conf->device_lock, flags); /* * if recovery is running, make sure it aborts. */ @@ -1568,7 +1568,10 @@ static int raid1_spare_active(struct mddev *mddev) * Find all failed disks within the RAID1 configuration * and mark them readable. * Called under mddev lock, so rcu protection not needed. + * device_lock used to avoid races with raid1_end_read_request + * which expects 'In_sync' flags and ->degraded to be consistent. */ + spin_lock_irqsave(&conf->device_lock, flags); for (i = 0; i < conf->raid_disks; i++) { struct md_rdev *rdev = conf->mirrors[i].rdev; struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; @@ -1599,7 +1602,6 @@ static int raid1_spare_active(struct mddev *mddev) sysfs_notify_dirent_safe(rdev->sysfs_state); } } - spin_lock_irqsave(&conf->device_lock, flags); mddev->degraded -= count; spin_unlock_irqrestore(&conf->device_lock, flags); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 643d217bfa13..f757023fc458 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2256,7 +2256,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) static int drop_one_stripe(struct r5conf *conf) { struct stripe_head *sh; - int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS; + int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK; spin_lock_irq(conf->hash_locks + hash); sh = get_free_stripe(conf, hash); @@ -6388,7 +6388,8 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink, if (mutex_trylock(&conf->cache_size_mutex)) { ret= 0; - while (ret < sc->nr_to_scan) { + while (ret < sc->nr_to_scan && + conf->max_nr_stripes > conf->min_nr_stripes) { if (drop_one_stripe(conf) == 0) { ret = SHRINK_STOP; break; diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 653815950aa2..3f68dd251ce8 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -115,7 +115,7 @@ config MFD_CROS_EC_I2C config MFD_CROS_EC_SPI tristate "ChromeOS Embedded Controller (SPI)" - depends on MFD_CROS_EC && CROS_EC_PROTO && SPI && OF + depends on MFD_CROS_EC && CROS_EC_PROTO && SPI ---help--- If you say Y here, you get support for talking to the ChromeOS EC diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index bebf58a06a6b..a72ddb295078 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -651,7 +651,7 @@ static int arizona_runtime_suspend(struct device *dev) arizona->has_fully_powered_off = true; - disable_irq(arizona->irq); + disable_irq_nosync(arizona->irq); arizona_enable_reset(arizona); 
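The one-word arizona change above (disable_irq to disable_irq_nosync) is about synchronization, not spelling: disable_irq() additionally waits for any handler still running, so if the runtime-suspend path can be entered from the driver's own IRQ thread (for example via a synchronous runtime-PM put at the end of the handler), that wait can never finish. A sketch of the hazard; the two function names here are hypothetical:

	static irqreturn_t codec_irq_thread(int irq, void *data)
	{
		struct arizona *arizona = data;

		/* ... handle the event, then drop the PM reference; a
		 * synchronous put may run the suspend callback right here */
		pm_runtime_put_sync(arizona->dev);
		return IRQ_HANDLED;
	}

	static int codec_runtime_suspend(struct device *dev)
	{
		struct arizona *arizona = dev_get_drvdata(dev);

		/* disable_irq(arizona->irq) would wait for codec_irq_thread()
		 * to return - a self-deadlock when called from that thread */
		disable_irq_nosync(arizona->irq);
		return 0;
	}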
regulator_bulk_disable(arizona->num_core_supplies, arizona->core_supplies); @@ -1141,10 +1141,6 @@ int arizona_dev_init(struct arizona *arizona) arizona->pdata.gpio_defaults[i]); } - pm_runtime_set_autosuspend_delay(arizona->dev, 100); - pm_runtime_use_autosuspend(arizona->dev); - pm_runtime_enable(arizona->dev); - /* Chip default */ if (!arizona->pdata.clk32k_src) arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2; @@ -1245,11 +1241,17 @@ int arizona_dev_init(struct arizona *arizona) arizona->pdata.spk_fmt[i]); } + pm_runtime_set_active(arizona->dev); + pm_runtime_enable(arizona->dev); + /* Set up for interrupts */ ret = arizona_irq_init(arizona); if (ret != 0) goto err_reset; + pm_runtime_set_autosuspend_delay(arizona->dev, 100); + pm_runtime_use_autosuspend(arizona->dev); + arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error", arizona_clkgen_err, arizona); arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked", @@ -1278,10 +1280,6 @@ int arizona_dev_init(struct arizona *arizona) goto err_irq; } -#ifdef CONFIG_PM - regulator_disable(arizona->dcvdd); -#endif - return 0; err_irq: diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 2d3db81be099..6ded3dc36644 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -438,9 +438,6 @@ static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj, { struct at24_data *at24; - if (unlikely(off >= attr->size)) - return -EFBIG; - at24 = dev_get_drvdata(container_of(kobj, struct device, kobj)); return at24_write(at24, buf, off, count); } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 0c627b4733ca..2d7d72c88519 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -786,6 +786,7 @@ static bool bond_should_notify_peers(struct bonding *bond) slave ? slave->dev->name : "NULL"); if (!slave || !bond->send_peer_notif || + !netif_carrier_ok(bond->dev) || test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) return false; diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 2d1ce3c5d0dd..753887d02b46 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1763,16 +1763,9 @@ vortex_open(struct net_device *dev) vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); } if (i != RX_RING_SIZE) { - int j; pr_emerg("%s: no memory for rx ring\n", dev->name); - for (j = 0; j < i; j++) { - if (vp->rx_skbuff[j]) { - dev_kfree_skb(vp->rx_skbuff[j]); - vp->rx_skbuff[j] = NULL; - } - } retval = -ENOMEM; - goto err_free_irq; + goto err_free_skb; } /* Wrap the ring. 
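The arizona probe hunks above reorder initialization into the conventional runtime-PM bring-up sequence: declare the already-powered state, enable runtime PM, set up interrupts, and only then permit autosuspend. The resulting skeleton, exactly as the diff leaves it:

	pm_runtime_set_active(arizona->dev);	/* chip is powered after reset */
	pm_runtime_enable(arizona->dev);

	ret = arizona_irq_init(arizona);	/* IRQs before any autosuspend */
	if (ret != 0)
		goto err_reset;

	pm_runtime_set_autosuspend_delay(arizona->dev, 100);
	pm_runtime_use_autosuspend(arizona->dev);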
*/ vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma); @@ -1782,7 +1775,13 @@ vortex_open(struct net_device *dev) if (!retval) goto out; -err_free_irq: +err_free_skb: + for (i = 0; i < RX_RING_SIZE; i++) { + if (vp->rx_skbuff[i]) { + dev_kfree_skb(vp->rx_skbuff[i]); + vp->rx_skbuff[i] = NULL; + } + } free_irq(dev->irq, dev); err: if (vortex_debug > 1) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 1637de6caf46..a2bb1f0934d5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -264,9 +264,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, if (likely(skb)) { (*pkts_compl)++; (*bytes_compl) += skb->len; + dev_kfree_skb_any(skb); } - dev_kfree_skb_any(skb); tx_buf->first_bd = 0; tx_buf->skb = NULL; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 6f909077b919..aeb7ce64452e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1723,6 +1723,22 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, offset += sizeof(u32); data_buf += sizeof(u32); written_so_far += sizeof(u32); + + /* At end of each 4Kb page, release nvram lock to allow MFW + * chance to take it for its own use. + */ + if ((cmd_flags & MCPR_NVM_COMMAND_LAST) && + (written_so_far < buf_size)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "Releasing NVM lock after offset 0x%x\n", + (u32)(offset - sizeof(u32))); + bnx2x_release_nvram_lock(bp); + usleep_range(1000, 2000); + rc = bnx2x_acquire_nvram_lock(bp); + if (rc) + return rc; + } + cmd_flags = 0; } diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 0612b19f6313..506047c38607 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -676,6 +676,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) if (!next_cmpl->valid) break; } + packets++; /* TODO: BNA_CQ_EF_LOCAL ? */ if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | @@ -692,7 +693,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) else bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len); - packets++; rcb->rxq->rx_packets++; rcb->rxq->rx_bytes += totlen; ccb->bytes_per_intr += totlen; diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 358442087878..9b35d142f47a 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -16,7 +16,6 @@ if NET_VENDOR_CAVIUM config THUNDER_NIC_PF tristate "Thunder Physical function driver" depends on 64BIT - default ARCH_THUNDER select THUNDER_NIC_BGX ---help--- This driver supports Thunder's NIC physical function. 
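The bnx2x hunk above encodes a fairness pattern: a long NVRAM write holds a hardware lock that the management firmware (MFW) also needs, so the driver yields it at every 4 KB page boundary and re-acquires it before continuing. Reduced to its essentials; write_one_dword() is a hypothetical stand-in for the driver's per-word helper, and the per-page FIRST/LAST flag bookkeeping is elided:

	while (written_so_far < buf_size) {
		rc = write_one_dword(bp, offset, data_buf, cmd_flags);
		if (rc)
			return rc;
		offset += 4; data_buf += 4; written_so_far += 4;

		/* end of a 4KB page with more left: give the MFW a window */
		if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
		    written_so_far < buf_size) {
			bnx2x_release_nvram_lock(bp);
			usleep_range(1000, 2000);
			rc = bnx2x_acquire_nvram_lock(bp);
			if (rc)
				return rc;
		}
		cmd_flags = 0;	/* recomputed for the next page (elided) */
	}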
@@ -29,14 +28,12 @@ config THUNDER_NIC_PF config THUNDER_NIC_VF tristate "Thunder Virtual function driver" depends on 64BIT - default ARCH_THUNDER ---help--- This driver supports Thunder's NIC virtual function config THUNDER_NIC_BGX tristate "Thunder MAC interface driver (BGX)" depends on 64BIT - default ARCH_THUNDER select PHYLIB select MDIO_OCTEON ---help--- diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index ce075d11fb3f..6700dcc9960e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2778,10 +2778,11 @@ int t4_setup_debugfs(struct adapter *adap) EXT_MEM1_SIZE_G(size)); } } else { - if (i & EXT_MEM_ENABLE_F) + if (i & EXT_MEM_ENABLE_F) { size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); add_debugfs_mem(adap, "mc", MEM_MC, EXT_MEM_SIZE_G(size)); + } } de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap, diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 36d835bd5f3c..7d178bdb112e 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -620,6 +620,11 @@ enum be_if_flags { BE_IF_FLAGS_VLAN_PROMISCUOUS |\ BE_IF_FLAGS_MCAST_PROMISCUOUS) +#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\ + BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED) + +#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS) + /* An RX interface is an object with one or more MAC addresses and * filtering capabilities. */ struct be_cmd_req_if_create { diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index d86bc5d52246..15cc3a1f12ff 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -273,6 +273,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) return 0; + /* if device is not running, copy MAC to netdev->dev_addr */ + if (!netif_running(netdev)) + goto done; + /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT * privilege or if PF did not provision the new MAC address. 
* On BE3, this cmd will always fail if the VF doesn't have the @@ -307,9 +311,9 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) status = -EPERM; goto err; } - - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - dev_info(dev, "MAC address changed to %pM\n", mac); +done: + ether_addr_copy(netdev->dev_addr, addr->sa_data); + dev_info(dev, "MAC address changed to %pM\n", addr->sa_data); return 0; err: dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data); @@ -2452,10 +2456,24 @@ static void be_eq_clean(struct be_eq_obj *eqo) be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0); } -static void be_rx_cq_clean(struct be_rx_obj *rxo) +/* Free posted rx buffers that were not used */ +static void be_rxq_clean(struct be_rx_obj *rxo) { - struct be_rx_page_info *page_info; struct be_queue_info *rxq = &rxo->q; + struct be_rx_page_info *page_info; + + while (atomic_read(&rxq->used) > 0) { + page_info = get_rx_page_info(rxo); + put_page(page_info->page); + memset(page_info, 0, sizeof(*page_info)); + } + BUG_ON(atomic_read(&rxq->used)); + rxq->tail = 0; + rxq->head = 0; +} + +static void be_rx_cq_clean(struct be_rx_obj *rxo) +{ struct be_queue_info *rx_cq = &rxo->cq; struct be_rx_compl_info *rxcp; struct be_adapter *adapter = rxo->adapter; @@ -2492,16 +2510,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo) /* After cleanup, leave the CQ in unarmed state */ be_cq_notify(adapter, rx_cq->id, false, 0); - - /* Then free posted rx buffers that were not used */ - while (atomic_read(&rxq->used) > 0) { - page_info = get_rx_page_info(rxo); - put_page(page_info->page); - memset(page_info, 0, sizeof(*page_info)); - } - BUG_ON(atomic_read(&rxq->used)); - rxq->tail = 0; - rxq->head = 0; } static void be_tx_compl_clean(struct be_adapter *adapter) @@ -2581,8 +2589,8 @@ static void be_evt_queues_destroy(struct be_adapter *adapter) be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); napi_hash_del(&eqo->napi); netif_napi_del(&eqo->napi); + free_cpumask_var(eqo->affinity_mask); } - free_cpumask_var(eqo->affinity_mask); be_queue_free(adapter, &eqo->q); } } @@ -2599,13 +2607,7 @@ static int be_evt_queues_create(struct be_adapter *adapter) for_all_evt_queues(adapter, eqo, i) { int numa_node = dev_to_node(&adapter->pdev->dev); - if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) - return -ENOMEM; - cpumask_set_cpu(cpumask_local_spread(i, numa_node), - eqo->affinity_mask); - netif_napi_add(adapter->netdev, &eqo->napi, be_poll, - BE_NAPI_WEIGHT); - napi_hash_add(&eqo->napi); + aic = &adapter->aic_obj[i]; eqo->adapter = adapter; eqo->idx = i; @@ -2621,6 +2623,14 @@ static int be_evt_queues_create(struct be_adapter *adapter) rc = be_cmd_eq_create(adapter, eqo); if (rc) return rc; + + if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) + return -ENOMEM; + cpumask_set_cpu(cpumask_local_spread(i, numa_node), + eqo->affinity_mask); + netif_napi_add(adapter->netdev, &eqo->napi, be_poll, + BE_NAPI_WEIGHT); + napi_hash_add(&eqo->napi); } return 0; } @@ -3359,13 +3369,54 @@ static void be_rx_qs_destroy(struct be_adapter *adapter) for_all_rx_queues(adapter, rxo, i) { q = &rxo->q; if (q->created) { + /* If RXQs are destroyed while in an "out of buffer" + * state, there is a possibility of an HW stall on + * Lancer. So, post 64 buffers to each queue to relieve + * the "out of buffer" condition. + * Make sure there's space in the RXQ before posting. 
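be_rx_cq_clean() used to drain completions and free leftover posted buffers in one pass; splitting the second half into be_rxq_clean() lets the Lancer workaround described just below re-post buffers between the drain and the final free. The destroy sequence then reads as follows, condensed from the next hunk with the roles annotated:

	if (lancer_chip(adapter)) {
		be_rx_cq_clean(rxo);			/* drain completions */
		if (atomic_read(&q->used) == 0)		/* still out of buffers? */
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}
	be_cmd_rxq_destroy(adapter, q);
	be_rx_cq_clean(rxo);		/* completions from the HW flush */
	be_rxq_clean(rxo);		/* free what was never consumed */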
+ */ + if (lancer_chip(adapter)) { + be_rx_cq_clean(rxo); + if (atomic_read(&q->used) == 0) + be_post_rx_frags(rxo, GFP_KERNEL, + MAX_RX_POST); + } + be_cmd_rxq_destroy(adapter, q); be_rx_cq_clean(rxo); + be_rxq_clean(rxo); } be_queue_free(adapter, q); } } +static void be_disable_if_filters(struct be_adapter *adapter) +{ + be_cmd_pmac_del(adapter, adapter->if_handle, + adapter->pmac_id[0], 0); + + be_clear_uc_list(adapter); + + /* The IFACE flags are enabled in the open path and cleared + * in the close path. When a VF gets detached from the host and + * assigned to a VM the following happens: + * - VF's IFACE flags get cleared in the detach path + * - IFACE create is issued by the VF in the attach path + * Due to a bug in the BE3/Skyhawk-R FW + * (Lancer FW doesn't have the bug), the IFACE capability flags + * specified along with the IFACE create cmd issued by a VF are not + * honoured by FW. As a consequence, if a *new* driver + * (that enables/disables IFACE flags in open/close) + * is loaded in the host and an *old* driver is * used by a VM/VF, + * the IFACE gets created *without* the needed flags. + * To avoid this, disable RX-filter flags only for Lancer. + */ + if (lancer_chip(adapter)) { + be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF); + adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS; + } +} + static int be_close(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); @@ -3378,6 +3429,8 @@ static int be_close(struct net_device *netdev) if (!(adapter->flags & BE_FLAGS_SETUP_DONE)) return 0; + be_disable_if_filters(adapter); + be_roce_dev_close(adapter); if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { @@ -3397,7 +3450,6 @@ static int be_close(struct net_device *netdev) be_tx_compl_clean(adapter); be_rx_qs_destroy(adapter); - be_clear_uc_list(adapter); for_all_evt_queues(adapter, eqo, i) { if (msix_enabled(adapter)) @@ -3482,6 +3534,31 @@ static int be_rx_qs_create(struct be_adapter *adapter) return 0; } +static int be_enable_if_filters(struct be_adapter *adapter) +{ + int status; + + status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON); + if (status) + return status; + + /* For BE3 VFs, the PF programs the initial MAC address */ + if (!(BEx_chip(adapter) && be_virtfn(adapter))) { + status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr, + adapter->if_handle, + &adapter->pmac_id[0], 0); + if (status) + return status; + } + + if (adapter->vlans_added) + be_vid_config(adapter); + + be_set_rx_mode(adapter->netdev); + + return 0; +} + static int be_open(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); @@ -3495,6 +3572,10 @@ static int be_open(struct net_device *netdev) if (status) goto err; + status = be_enable_if_filters(adapter); + if (status) + goto err; + status = be_irq_register(adapter); if (status) goto err; @@ -3685,16 +3766,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter) } } -static void be_mac_clear(struct be_adapter *adapter) -{ - if (adapter->pmac_id) { - be_cmd_pmac_del(adapter, adapter->if_handle, - adapter->pmac_id[0], 0); - kfree(adapter->pmac_id); - adapter->pmac_id = NULL; - } -} - #ifdef CONFIG_BE2NET_VXLAN static void be_disable_vxlan_offloads(struct be_adapter *adapter) { @@ -3769,8 +3840,8 @@ static int be_clear(struct be_adapter *adapter) #ifdef CONFIG_BE2NET_VXLAN be_disable_vxlan_offloads(adapter); #endif - /* delete the primary mac along with the uc-mac list */ - be_mac_clear(adapter); + kfree(adapter->pmac_id); + adapter->pmac_id = NULL; be_cmd_if_destroy(adapter, 
adapter->if_handle, 0); @@ -3781,25 +3852,11 @@ static int be_clear(struct be_adapter *adapter) return 0; } -static int be_if_create(struct be_adapter *adapter, u32 *if_handle, - u32 cap_flags, u32 vf) -{ - u32 en_flags; - - en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | - BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | - BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS; - - en_flags &= cap_flags; - - return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf); -} - static int be_vfs_if_create(struct be_adapter *adapter) { struct be_resources res = {0}; + u32 cap_flags, en_flags, vf; struct be_vf_cfg *vf_cfg; - u32 cap_flags, vf; int status; /* If a FW profile exists, then cap_flags are updated */ @@ -3820,8 +3877,12 @@ static int be_vfs_if_create(struct be_adapter *adapter) } } - status = be_if_create(adapter, &vf_cfg->if_handle, - cap_flags, vf + 1); + en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | + BE_IF_FLAGS_BROADCAST | + BE_IF_FLAGS_MULTICAST | + BE_IF_FLAGS_PASS_L3L4_ERRORS); + status = be_cmd_if_create(adapter, cap_flags, en_flags, + &vf_cfg->if_handle, vf + 1); if (status) return status; } @@ -4193,15 +4254,8 @@ static int be_mac_setup(struct be_adapter *adapter) memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); - } else { - /* Maybe the HW was reset; dev_addr must be re-programmed */ - memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN); } - /* For BE3-R VFs, the PF programs the initial MAC address */ - if (!(BEx_chip(adapter) && be_virtfn(adapter))) - be_cmd_pmac_add(adapter, mac, adapter->if_handle, - &adapter->pmac_id[0], 0); return 0; } @@ -4341,6 +4395,7 @@ static int be_func_init(struct be_adapter *adapter) static int be_setup(struct be_adapter *adapter) { struct device *dev = &adapter->pdev->dev; + u32 en_flags; int status; status = be_func_init(adapter); @@ -4363,8 +4418,11 @@ static int be_setup(struct be_adapter *adapter) if (status) goto err; - status = be_if_create(adapter, &adapter->if_handle, - be_if_cap_flags(adapter), 0); + /* will enable all the needed filter flags in be_open() */ + en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS; + en_flags = en_flags & be_if_cap_flags(adapter); + status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags, + &adapter->if_handle, 0); if (status) goto err; @@ -4390,11 +4448,6 @@ static int be_setup(struct be_adapter *adapter) dev_err(dev, "Please upgrade firmware to version >= 4.0\n"); } - if (adapter->vlans_added) - be_vid_config(adapter); - - be_set_rx_mode(adapter->netdev); - status = be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); if (status) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index d8e07bb7e601..787da8e54e99 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3438,6 +3438,7 @@ fec_probe(struct platform_device *pdev) pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 56316db6c5a6..cf8e54652df9 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -586,7 +586,8 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) frag = 
skb_shinfo(skb)->frags; while (nr_frags) { CBDC_SC(bdp, - BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC); + BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST | + BD_ENET_TX_TC); CBDS_SC(bdp, BD_ENET_TX_READY); if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index b34214e2df5f..016743e355de 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -110,7 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep) } #define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB) -#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB) +#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF) #define FEC_RX_EVENT (FEC_ENET_RXF) #define FEC_TX_EVENT (FEC_ENET_TXF) #define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \ diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 555e461b0cfe..6bdc89179b72 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -902,27 +902,6 @@ static int gfar_check_filer_hardware(struct gfar_private *priv) return 0; } -static int gfar_comp_asc(const void *a, const void *b) -{ - return memcmp(a, b, 4); -} - -static int gfar_comp_desc(const void *a, const void *b) -{ - return -memcmp(a, b, 4); -} - -static void gfar_swap(void *a, void *b, int size) -{ - u32 *_a = a; - u32 *_b = b; - - swap(_a[0], _b[0]); - swap(_a[1], _b[1]); - swap(_a[2], _b[2]); - swap(_a[3], _b[3]); -} - /* Write a mask to filer cache */ static void gfar_set_mask(u32 mask, struct filer_table *tab) { @@ -1272,310 +1251,6 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule, return 0; } -/* Copy size filer entries */ -static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0], - struct gfar_filer_entry src[0], s32 size) -{ - while (size > 0) { - size--; - dst[size].ctrl = src[size].ctrl; - dst[size].prop = src[size].prop; - } -} - -/* Delete the contents of the filer-table between start and end - * and collapse them - */ -static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab) -{ - int length; - - if (end > MAX_FILER_CACHE_IDX || end < begin) - return -EINVAL; - - end++; - length = end - begin; - - /* Copy */ - while (end < tab->index) { - tab->fe[begin].ctrl = tab->fe[end].ctrl; - tab->fe[begin++].prop = tab->fe[end++].prop; - - } - /* Fill up with don't cares */ - while (begin < tab->index) { - tab->fe[begin].ctrl = 0x60; - tab->fe[begin].prop = 0xFFFFFFFF; - begin++; - } - - tab->index -= length; - return 0; -} - -/* Make space on the wanted location */ -static int gfar_expand_filer_entries(u32 begin, u32 length, - struct filer_table *tab) -{ - if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || - begin > MAX_FILER_CACHE_IDX) - return -EINVAL; - - gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]), - tab->index - length + 1); - - tab->index += length; - return 0; -} - -static int gfar_get_next_cluster_start(int start, struct filer_table *tab) -{ - for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); - start++) { - if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) == - (RQFCR_AND | RQFCR_CLE)) - return start; - } - return -1; -} - -static int gfar_get_next_cluster_end(int start, struct filer_table *tab) -{ - for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); - start++) { - if 
((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) == - (RQFCR_CLE)) - return start; - } - return -1; -} - -/* Uses hardwares clustering option to reduce - * the number of filer table entries - */ -static void gfar_cluster_filer(struct filer_table *tab) -{ - s32 i = -1, j, iend, jend; - - while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) { - j = i; - while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) { - /* The cluster entries self and the previous one - * (a mask) must be identical! - */ - if (tab->fe[i].ctrl != tab->fe[j].ctrl) - break; - if (tab->fe[i].prop != tab->fe[j].prop) - break; - if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl) - break; - if (tab->fe[i - 1].prop != tab->fe[j - 1].prop) - break; - iend = gfar_get_next_cluster_end(i, tab); - jend = gfar_get_next_cluster_end(j, tab); - if (jend == -1 || iend == -1) - break; - - /* First we make some free space, where our cluster - * element should be. Then we copy it there and finally - * delete in from its old location. - */ - if (gfar_expand_filer_entries(iend, (jend - j), tab) == - -EINVAL) - break; - - gfar_copy_filer_entries(&(tab->fe[iend + 1]), - &(tab->fe[jend + 1]), jend - j); - - if (gfar_trim_filer_entries(jend - 1, - jend + (jend - j), - tab) == -EINVAL) - return; - - /* Mask out cluster bit */ - tab->fe[iend].ctrl &= ~(RQFCR_CLE); - } - } -} - -/* Swaps the masked bits of a1<>a2 and b1<>b2 */ -static void gfar_swap_bits(struct gfar_filer_entry *a1, - struct gfar_filer_entry *a2, - struct gfar_filer_entry *b1, - struct gfar_filer_entry *b2, u32 mask) -{ - u32 temp[4]; - temp[0] = a1->ctrl & mask; - temp[1] = a2->ctrl & mask; - temp[2] = b1->ctrl & mask; - temp[3] = b2->ctrl & mask; - - a1->ctrl &= ~mask; - a2->ctrl &= ~mask; - b1->ctrl &= ~mask; - b2->ctrl &= ~mask; - - a1->ctrl |= temp[1]; - a2->ctrl |= temp[0]; - b1->ctrl |= temp[3]; - b2->ctrl |= temp[2]; -} - -/* Generate a list consisting of masks values with their start and - * end of validity and block as indicator for parts belonging - * together (glued by ANDs) in mask_table - */ -static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table, - struct filer_table *tab) -{ - u32 i, and_index = 0, block_index = 1; - - for (i = 0; i < tab->index; i++) { - - /* LSByte of control = 0 sets a mask */ - if (!(tab->fe[i].ctrl & 0xF)) { - mask_table[and_index].mask = tab->fe[i].prop; - mask_table[and_index].start = i; - mask_table[and_index].block = block_index; - if (and_index >= 1) - mask_table[and_index - 1].end = i - 1; - and_index++; - } - /* cluster starts and ends will be separated because they should - * hold their position - */ - if (tab->fe[i].ctrl & RQFCR_CLE) - block_index++; - /* A not set AND indicates the end of a depended block */ - if (!(tab->fe[i].ctrl & RQFCR_AND)) - block_index++; - } - - mask_table[and_index - 1].end = i - 1; - - return and_index; -} - -/* Sorts the entries of mask_table by the values of the masks. 
- * Important: The 0xFF80 flags of the first and last entry of a - * block must hold their position (which queue, CLusterEnable, ReJEct, - * AND) - */ -static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table, - struct filer_table *temp_table, u32 and_index) -{ - /* Pointer to compare function (_asc or _desc) */ - int (*gfar_comp)(const void *, const void *); - - u32 i, size = 0, start = 0, prev = 1; - u32 old_first, old_last, new_first, new_last; - - gfar_comp = &gfar_comp_desc; - - for (i = 0; i < and_index; i++) { - if (prev != mask_table[i].block) { - old_first = mask_table[start].start + 1; - old_last = mask_table[i - 1].end; - sort(mask_table + start, size, - sizeof(struct gfar_mask_entry), - gfar_comp, &gfar_swap); - - /* Toggle order for every block. This makes the - * thing more efficient! - */ - if (gfar_comp == gfar_comp_desc) - gfar_comp = &gfar_comp_asc; - else - gfar_comp = &gfar_comp_desc; - - new_first = mask_table[start].start + 1; - new_last = mask_table[i - 1].end; - - gfar_swap_bits(&temp_table->fe[new_first], - &temp_table->fe[old_first], - &temp_table->fe[new_last], - &temp_table->fe[old_last], - RQFCR_QUEUE | RQFCR_CLE | - RQFCR_RJE | RQFCR_AND); - - start = i; - size = 0; - } - size++; - prev = mask_table[i].block; - } -} - -/* Reduces the number of masks needed in the filer table to save entries - * This is done by sorting the masks of a depended block. A depended block is - * identified by gluing ANDs or CLE. The sorting order toggles after every - * block. Of course entries in scope of a mask must change their location with - * it. - */ -static int gfar_optimize_filer_masks(struct filer_table *tab) -{ - struct filer_table *temp_table; - struct gfar_mask_entry *mask_table; - - u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0; - s32 ret = 0; - - /* We need a copy of the filer table because - * we want to change its order - */ - temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL); - if (temp_table == NULL) - return -ENOMEM; - - mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1, - sizeof(struct gfar_mask_entry), GFP_KERNEL); - - if (mask_table == NULL) { - ret = -ENOMEM; - goto end; - } - - and_index = gfar_generate_mask_table(mask_table, tab); - - gfar_sort_mask_table(mask_table, temp_table, and_index); - - /* Now we can copy the data from our duplicated filer table to - * the real one in the order the mask table says - */ - for (i = 0; i < and_index; i++) { - size = mask_table[i].end - mask_table[i].start + 1; - gfar_copy_filer_entries(&(tab->fe[j]), - &(temp_table->fe[mask_table[i].start]), size); - j += size; - } - - /* And finally we just have to check for duplicated masks and drop the - * second ones - */ - for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { - if (tab->fe[i].ctrl == 0x80) { - previous_mask = i++; - break; - } - } - for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { - if (tab->fe[i].ctrl == 0x80) { - if (tab->fe[i].prop == tab->fe[previous_mask].prop) { - /* Two identical ones found! - * So drop the second one! - */ - gfar_trim_filer_entries(i, i, tab); - } else - /* Not identical! 
*/ - previous_mask = i; - } - } - - kfree(mask_table); -end: kfree(temp_table); - return ret; -} - /* Write the bit-pattern from software's buffer to hardware registers */ static int gfar_write_filer_table(struct gfar_private *priv, struct filer_table *tab) @@ -1585,11 +1260,10 @@ static int gfar_write_filer_table(struct gfar_private *priv, return -EBUSY; /* Fill regular entries */ - for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); - i++) + for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++) gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); /* Fill the rest with fall-troughs */ - for (; i < MAX_FILER_IDX - 1; i++) + for (; i < MAX_FILER_IDX; i++) gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF); /* Last entry must be default accept * because that's what people expect @@ -1623,7 +1297,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv) { struct ethtool_flow_spec_container *j; struct filer_table *tab; - s32 i = 0; s32 ret = 0; /* So index is set to zero, too! */ @@ -1648,17 +1321,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv) } } - i = tab->index; - - /* Optimizations to save entries */ - gfar_cluster_filer(tab); - gfar_optimize_filer_masks(tab); - - pr_debug("\tSummary:\n" - "\tData on hardware: %d\n" - "\tCompression rate: %d%%\n", - tab->index, 100 - (100 * tab->index) / i); - /* Write everything to hardware */ ret = gfar_write_filer_table(priv, tab); if (ret == -EBUSY) { @@ -1724,13 +1386,14 @@ static int gfar_add_cls(struct gfar_private *priv, } process: + priv->rx_list.count++; ret = gfar_process_filer_changes(priv); if (ret) goto clean_list; - priv->rx_list.count++; return ret; clean_list: + priv->rx_list.count--; list_del(&temp->list); clean_mem: kfree(temp); diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 3e8b1bfb1f2e..d9884fd15b45 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -27,6 +27,8 @@ #include <linux/of_address.h> #include <linux/phy.h> #include <linux/clk.h> +#include <linux/hrtimer.h> +#include <linux/ktime.h> #include <uapi/linux/ppp_defs.h> #include <net/ip.h> #include <net/ipv6.h> @@ -299,6 +301,7 @@ /* Coalescing */ #define MVPP2_TXDONE_COAL_PKTS_THRESH 15 +#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL #define MVPP2_RX_COAL_PKTS 32 #define MVPP2_RX_COAL_USEC 100 @@ -660,6 +663,14 @@ struct mvpp2_pcpu_stats { u64 tx_bytes; }; +/* Per-CPU port control */ +struct mvpp2_port_pcpu { + struct hrtimer tx_done_timer; + bool timer_scheduled; + /* Tasklet for egress finalization */ + struct tasklet_struct tx_done_tasklet; +}; + struct mvpp2_port { u8 id; @@ -679,6 +690,9 @@ struct mvpp2_port { u32 pending_cause_rx; struct napi_struct napi; + /* Per-CPU port control */ + struct mvpp2_port_pcpu __percpu *pcpu; + /* Flags */ unsigned long flags; @@ -776,6 +790,9 @@ struct mvpp2_txq_pcpu { /* Array of transmitted skb */ struct sk_buff **tx_skb; + /* Array of transmitted buffers' physical addresses */ + dma_addr_t *tx_buffs; + /* Index of last TX DMA descriptor that was inserted */ int txq_put_index; @@ -913,8 +930,6 @@ struct mvpp2_bm_pool { /* Occupied buffers indicator */ atomic_t in_use; int in_use_thresh; - - spinlock_t lock; }; struct mvpp2_buff_hdr { @@ -963,9 +978,13 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) } static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu, - struct sk_buff *skb) + struct sk_buff *skb, + struct mvpp2_tx_desc *tx_desc) { 
txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb; + if (skb) + txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] = + tx_desc->buf_phys_addr; txq_pcpu->txq_put_index++; if (txq_pcpu->txq_put_index == txq_pcpu->size) txq_pcpu->txq_put_index = 0; @@ -3376,7 +3395,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev, bm_pool->pkt_size = 0; bm_pool->buf_num = 0; atomic_set(&bm_pool->in_use, 0); - spin_lock_init(&bm_pool->lock); return 0; } @@ -3647,7 +3665,6 @@ static struct mvpp2_bm_pool * mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, int pkt_size) { - unsigned long flags = 0; struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; int num; @@ -3656,8 +3673,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, return NULL; } - spin_lock_irqsave(&new_pool->lock, flags); - if (new_pool->type == MVPP2_BM_FREE) new_pool->type = type; @@ -3686,8 +3701,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, if (num != pkts_num) { WARN(1, "pool %d: %d of %d allocated\n", new_pool->id, num, pkts_num); - /* We need to undo the bufs_add() allocations */ - spin_unlock_irqrestore(&new_pool->lock, flags); return NULL; } } @@ -3695,15 +3708,12 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, mvpp2_bm_pool_bufsize_set(port->priv, new_pool, MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); - spin_unlock_irqrestore(&new_pool->lock, flags); - return new_pool; } /* Initialize pools for swf */ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) { - unsigned long flags = 0; int rxq; if (!port->pool_long) { @@ -3714,9 +3724,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) if (!port->pool_long) return -ENOMEM; - spin_lock_irqsave(&port->pool_long->lock, flags); port->pool_long->port_map |= (1 << port->id); - spin_unlock_irqrestore(&port->pool_long->lock, flags); for (rxq = 0; rxq < rxq_number; rxq++) mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); @@ -3730,9 +3738,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) if (!port->pool_short) return -ENOMEM; - spin_lock_irqsave(&port->pool_short->lock, flags); port->pool_short->port_map |= (1 << port->id); - spin_unlock_irqrestore(&port->pool_short->lock, flags); for (rxq = 0; rxq < rxq_number; rxq++) mvpp2_rxq_short_pool_set(port, rxq, @@ -3806,7 +3812,6 @@ static void mvpp2_interrupts_unmask(void *arg) mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), (MVPP2_CAUSE_MISC_SUM_MASK | - MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK | MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)); } @@ -4382,23 +4387,6 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, rxq->time_coal = usec; } -/* Set threshold for TX_DONE pkts coalescing */ -static void mvpp2_tx_done_pkts_coal_set(void *arg) -{ - struct mvpp2_port *port = arg; - int queue; - u32 val; - - for (queue = 0; queue < txq_number; queue++) { - struct mvpp2_tx_queue *txq = port->txqs[queue]; - - val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) & - MVPP2_TRANSMITTED_THRESH_MASK; - mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val); - } -} - /* Free Tx queue skbuffs */ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, @@ -4407,8 +4395,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, int i; for (i = 0; i < num; i++) { - struct mvpp2_tx_desc *tx_desc = txq->descs + - txq_pcpu->txq_get_index; + dma_addr_t buf_phys_addr = + 
txq_pcpu->tx_buffs[txq_pcpu->txq_get_index]; struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index]; mvpp2_txq_inc_get(txq_pcpu); @@ -4416,8 +4404,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, if (!skb) continue; - dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr, - tx_desc->data_size, DMA_TO_DEVICE); + dma_unmap_single(port->dev->dev.parent, buf_phys_addr, + skb_headlen(skb), DMA_TO_DEVICE); dev_kfree_skb_any(skb); } } @@ -4433,7 +4421,7 @@ static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, u32 cause) { - int queue = fls(cause >> 16) - 1; + int queue = fls(cause) - 1; return port->txqs[queue]; } @@ -4460,6 +4448,29 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, netif_tx_wake_queue(nq); } +static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause) +{ + struct mvpp2_tx_queue *txq; + struct mvpp2_txq_pcpu *txq_pcpu; + unsigned int tx_todo = 0; + + while (cause) { + txq = mvpp2_get_tx_queue(port, cause); + if (!txq) + break; + + txq_pcpu = this_cpu_ptr(txq->pcpu); + + if (txq_pcpu->count) { + mvpp2_txq_done(port, txq, txq_pcpu); + tx_todo += txq_pcpu->count; + } + + cause &= ~(1 << txq->log_id); + } + return tx_todo; +} + /* Rx/Tx queue initialization/cleanup methods */ /* Allocate and initialize descriptors for aggr TXQ */ @@ -4649,12 +4660,13 @@ static int mvpp2_txq_init(struct mvpp2_port *port, txq_pcpu->tx_skb = kmalloc(txq_pcpu->size * sizeof(*txq_pcpu->tx_skb), GFP_KERNEL); - if (!txq_pcpu->tx_skb) { - dma_free_coherent(port->dev->dev.parent, - txq->size * MVPP2_DESC_ALIGNED_SIZE, - txq->descs, txq->descs_phys); - return -ENOMEM; - } + if (!txq_pcpu->tx_skb) + goto error; + + txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size * + sizeof(dma_addr_t), GFP_KERNEL); + if (!txq_pcpu->tx_buffs) + goto error; txq_pcpu->count = 0; txq_pcpu->reserved_num = 0; @@ -4663,6 +4675,19 @@ static int mvpp2_txq_init(struct mvpp2_port *port, } return 0; + +error: + for_each_present_cpu(cpu) { + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + kfree(txq_pcpu->tx_skb); + kfree(txq_pcpu->tx_buffs); + } + + dma_free_coherent(port->dev->dev.parent, + txq->size * MVPP2_DESC_ALIGNED_SIZE, + txq->descs, txq->descs_phys); + + return -ENOMEM; } /* Free allocated TXQ resources */ @@ -4675,6 +4700,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, for_each_present_cpu(cpu) { txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); kfree(txq_pcpu->tx_skb); + kfree(txq_pcpu->tx_buffs); } if (txq->descs) @@ -4805,7 +4831,6 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port) goto err_cleanup; } - on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1); on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); return 0; @@ -4887,6 +4912,49 @@ static void mvpp2_link_event(struct net_device *dev) } } +static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu) +{ + ktime_t interval; + + if (!port_pcpu->timer_scheduled) { + port_pcpu->timer_scheduled = true; + interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS); + hrtimer_start(&port_pcpu->tx_done_timer, interval, + HRTIMER_MODE_REL_PINNED); + } +} + +static void mvpp2_tx_proc_cb(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + unsigned int tx_todo, cause; + + if (!netif_running(dev)) + return; + port_pcpu->timer_scheduled = false; + + /* Process all the Tx queues */ 
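After the rework above, mvpp2_get_tx_queue() expects a cause bitmap already normalized so that queue 0 sits at bit 0 - mvpp2_tx_proc_cb() builds it as (1 << txq_number) - 1 - which is why the ">> 16" shift is gone. mvpp2_tx_done() then reaps queues highest-first. A userspace model, with fls() spelled out the way the kernel helper behaves (1-based index of the most significant set bit, 0 for an empty word):

	#include <stdio.h>

	static int fls32(unsigned int v)
	{
		return v ? 32 - __builtin_clz(v) : 0;
	}

	int main(void)
	{
		unsigned int cause = (1u << 4) - 1; /* txq_number == 4, all pending */

		while (cause) {
			int queue = fls32(cause) - 1;	/* as mvpp2_get_tx_queue() */

			printf("reap txq %d\n", queue);	/* prints 3, 2, 1, 0 */
			cause &= ~(1u << queue);	/* as mvpp2_tx_done() */
		}
		return 0;
	}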
+ cause = (1 << txq_number) - 1; + tx_todo = mvpp2_tx_done(port, cause); + + /* Set the timer in case not all the packets were processed */ + if (tx_todo) + mvpp2_timer_set(port_pcpu); +} + +static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) +{ + struct mvpp2_port_pcpu *port_pcpu = container_of(timer, + struct mvpp2_port_pcpu, + tx_done_timer); + + tasklet_schedule(&port_pcpu->tx_done_tasklet); + + return HRTIMER_NORESTART; +} + /* Main RX/TX processing routines */ /* Display more error info */ @@ -5144,11 +5212,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, if (i == (skb_shinfo(skb)->nr_frags - 1)) { /* Last descriptor */ tx_desc->command = MVPP2_TXD_L_DESC; - mvpp2_txq_inc_put(txq_pcpu, skb); + mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc); } else { /* Descriptor in the middle: Not First, Not Last */ tx_desc->command = 0; - mvpp2_txq_inc_put(txq_pcpu, NULL); + mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc); } } @@ -5214,12 +5282,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) /* First and Last descriptor */ tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; tx_desc->command = tx_cmd; - mvpp2_txq_inc_put(txq_pcpu, skb); + mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc); } else { /* First but not Last */ tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; tx_desc->command = tx_cmd; - mvpp2_txq_inc_put(txq_pcpu, NULL); + mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc); /* Continue with other skb fragments */ if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { @@ -5255,6 +5323,17 @@ out: dev_kfree_skb_any(skb); } + /* Finalize TX processing */ + if (txq_pcpu->count >= txq->done_pkts_coal) + mvpp2_txq_done(port, txq, txq_pcpu); + + /* Set the timer in case not all frags were processed */ + if (txq_pcpu->count <= frags && txq_pcpu->count > 0) { + struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + + mvpp2_timer_set(port_pcpu); + } + return NETDEV_TX_OK; } @@ -5268,10 +5347,11 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause) netdev_err(dev, "tx fifo underrun error\n"); } -static void mvpp2_txq_done_percpu(void *arg) +static int mvpp2_poll(struct napi_struct *napi, int budget) { - struct mvpp2_port *port = arg; - u32 cause_rx_tx, cause_tx, cause_misc; + u32 cause_rx_tx, cause_rx, cause_misc; + int rx_done = 0; + struct mvpp2_port *port = netdev_priv(napi->dev); /* Rx/Tx cause register * @@ -5285,7 +5365,7 @@ static void mvpp2_txq_done_percpu(void *arg) */ cause_rx_tx = mvpp2_read(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); - cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; + cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; if (cause_misc) { @@ -5297,26 +5377,6 @@ static void mvpp2_txq_done_percpu(void *arg) cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); } - /* Release TX descriptors */ - if (cause_tx) { - struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx); - struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); - - if (txq_pcpu->count) - mvpp2_txq_done(port, txq, txq_pcpu); - } -} - -static int mvpp2_poll(struct napi_struct *napi, int budget) -{ - u32 cause_rx_tx, cause_rx; - int rx_done = 0; - struct mvpp2_port *port = netdev_priv(napi->dev); - - on_each_cpu(mvpp2_txq_done_percpu, port, 1); - - cause_rx_tx = mvpp2_read(port->priv, - MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; /* Process RX packets */ @@ -5561,6 +5621,8 @@ err_cleanup_rxqs: static int 
mvpp2_stop(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_port_pcpu *port_pcpu; + int cpu; mvpp2_stop_dev(port); mvpp2_phy_disconnect(port); @@ -5569,6 +5631,13 @@ static int mvpp2_stop(struct net_device *dev) on_each_cpu(mvpp2_interrupts_mask, port, 1); free_irq(port->irq, port); + for_each_present_cpu(cpu) { + port_pcpu = per_cpu_ptr(port->pcpu, cpu); + + hrtimer_cancel(&port_pcpu->tx_done_timer); + port_pcpu->timer_scheduled = false; + tasklet_kill(&port_pcpu->tx_done_tasklet); + } mvpp2_cleanup_rxqs(port); mvpp2_cleanup_txqs(port); @@ -5784,7 +5853,6 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev, txq->done_pkts_coal = c->tx_max_coalesced_frames; } - on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1); return 0; } @@ -6035,6 +6103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, { struct device_node *phy_node; struct mvpp2_port *port; + struct mvpp2_port_pcpu *port_pcpu; struct net_device *dev; struct resource *res; const char *dt_mac_addr; @@ -6044,7 +6113,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, int features; int phy_mode; int priv_common_regs_num = 2; - int err, i; + int err, i, cpu; dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number, rxq_number); @@ -6135,6 +6204,24 @@ static int mvpp2_port_probe(struct platform_device *pdev, } mvpp2_port_power_up(port); + port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); + if (!port->pcpu) { + err = -ENOMEM; + goto err_free_txq_pcpu; + } + + for_each_present_cpu(cpu) { + port_pcpu = per_cpu_ptr(port->pcpu, cpu); + + hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; + port_pcpu->timer_scheduled = false; + + tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb, + (unsigned long)dev); + } + netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT); features = NETIF_F_SG | NETIF_F_IP_CSUM; dev->features = features | NETIF_F_RXCSUM; @@ -6144,7 +6231,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, err = register_netdev(dev); if (err < 0) { dev_err(&pdev->dev, "failed to register netdev\n"); - goto err_free_txq_pcpu; + goto err_free_port_pcpu; } netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); @@ -6153,6 +6240,8 @@ static int mvpp2_port_probe(struct platform_device *pdev, priv->port_list[id] = port; return 0; +err_free_port_pcpu: + free_percpu(port->pcpu); err_free_txq_pcpu: for (i = 0; i < txq_number; i++) free_percpu(port->txqs[i]->pcpu); @@ -6171,6 +6260,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port) int i; unregister_netdev(port->dev); + free_percpu(port->pcpu); free_percpu(port->stats); for (i = 0; i < txq_number; i++) free_percpu(port->txqs[i]->pcpu); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 603a8b0908ee..03aabdd79abe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -391,6 +391,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) /* disable cmdif checksum */ MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); + MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); + err = set_caps(dev, set_ctx, set_sz); query_ex: diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 3df51faf18ae..f790f61ea78a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ 
-4875,10 +4875,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_46: case RTL_GIGA_MAC_VER_47: case RTL_GIGA_MAC_VER_48: + RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; case RTL_GIGA_MAC_VER_49: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: - RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); + RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); break; default: RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST); diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index af050759eb44..cf9c0b241d07 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -4927,6 +4927,7 @@ static void rocker_remove_ports(const struct rocker *rocker) rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, ROCKER_OP_FLAG_REMOVE); unregister_netdev(rocker_port->dev); + free_netdev(rocker_port->dev); } kfree(rocker->ports); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 333489f0fd24..9d89bdbf029f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -42,7 +42,7 @@ #define NSS_COMMON_CLK_DIV_MASK 0x7f #define NSS_COMMON_CLK_SRC_CTRL 0x14 -#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (1 << x) +#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (x) /* Mode is coded on 1 bit but is different depending on the MAC ID: * MAC0: QSGMII=0 RGMII=1 * MAC1: QSGMII=0 SGMII=0 RGMII=1 @@ -308,7 +308,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) /* Configure the clock src according to the mode */ regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val); - val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id); + val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id)); switch (gmac->phy_mode) { case PHY_INTERFACE_MODE_RGMII: val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) << diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index a8a730641bbb..bb1bb72121c0 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -85,7 +85,6 @@ struct netcp_intf { struct list_head rxhook_list_head; unsigned int rx_queue_id; void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN]; - u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN]; struct napi_struct rx_napi; struct napi_struct tx_napi; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 29ae672917b7..1a5aca55ea9f 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -34,6 +34,7 @@ #define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD) #define NETCP_NAPI_WEIGHT 64 #define NETCP_TX_TIMEOUT (5 * HZ) +#define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN) #define NETCP_MIN_PACKET_SIZE ETH_ZLEN #define NETCP_MAX_MCAST_ADDR 16 @@ -815,30 +816,28 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) if (likely(fdq == 0)) { unsigned int primary_buf_len; /* Allocate a primary receive queue entry */ - buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET; + buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET; primary_buf_len = SKB_DATA_ALIGN(buf_len) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - if (primary_buf_len <= PAGE_SIZE) { - bufptr = netdev_alloc_frag(primary_buf_len); - pad[1] = primary_buf_len; - } else { - bufptr = kmalloc(primary_buf_len, GFP_ATOMIC | - GFP_DMA32 | __GFP_COLD); - pad[1] = 0; - } + bufptr = 
netdev_alloc_frag(primary_buf_len); + pad[1] = primary_buf_len; if (unlikely(!bufptr)) { - dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n"); + dev_warn_ratelimited(netcp->ndev_dev, + "Primary RX buffer alloc failed\n"); goto fail; } dma = dma_map_single(netcp->dev, bufptr, buf_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(netcp->dev, dma))) + goto fail; + pad[0] = (u32)bufptr; } else { /* Allocate a secondary receive queue entry */ - page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD); + page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD); if (unlikely(!page)) { dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n"); goto fail; @@ -1021,7 +1020,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp) /* Map the linear buffer */ dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE); - if (unlikely(!dma_addr)) { + if (unlikely(dma_mapping_error(dev, dma_addr))) { dev_err(netcp->ndev_dev, "Failed to map skb buffer\n"); return NULL; } @@ -1557,8 +1556,8 @@ static int netcp_setup_navigator_resources(struct net_device *ndev) knav_queue_disable_notify(netcp->rx_queue); /* open Rx FDQs */ - for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && - netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) { + for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i]; + ++i) { snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i); netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0); if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) { @@ -1952,14 +1951,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device, netcp->rx_queue_depths[0] = 128; } - ret = of_property_read_u32_array(node_interface, "rx-buffer-size", - netcp->rx_buffer_sizes, - KNAV_DMA_FDQ_PER_CHAN); - if (ret) { - dev_err(dev, "missing \"rx-buffer-size\" parameter\n"); - netcp->rx_buffer_sizes[0] = 1536; - } - ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2); if (ret < 0) { dev_err(dev, "missing \"rx-pool\" parameter\n"); diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 2ffbf13471d0..216bfd350169 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -728,11 +728,12 @@ static int mkiss_open(struct tty_struct *tty) dev->type = ARPHRD_AX25; /* Perform the low-level AX25 initialization. 
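Both netcp fixes above converge on the same rule: a dma_addr_t of zero can be a valid bus address, and a failed mapping may come back as a nonzero error cookie, so neither "no check at all" (the RX path) nor "if (!dma_addr)" (the TX path) is portable - only dma_mapping_error() is. The corrected shape, as now used in both paths:

	dma = dma_map_single(netcp->dev, bufptr, buf_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(netcp->dev, dma)))
		goto fail;	/* never hand an unmapped address to the HW */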
*/ - if ((err = ax_open(ax->dev))) { + err = ax_open(ax->dev); + if (err) goto out_free_netdev; - } - if (register_netdev(dev)) + err = register_netdev(dev); + if (err) goto out_free_buffers; /* after register_netdev() - because else printk smashes the kernel */ diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index 3cc316cb7e6b..d8757bf9ad75 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -102,6 +102,12 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len); + if (len < 0) { + ndev->stats.rx_errors++; + ndev->stats.rx_length_errors++; + goto enqueue_again; + } + skb_put(skb, len); skb->protocol = eth_type_trans(skb, ndev); skb->ip_summed = CHECKSUM_NONE; @@ -121,6 +127,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, return; } +enqueue_again: rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN); if (rc) { dev_kfree_skb(skb); @@ -184,7 +191,7 @@ static int ntb_netdev_open(struct net_device *ndev) rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data, ndev->mtu + ETH_HLEN); - if (rc == -EINVAL) { + if (rc) { dev_kfree_skb(skb); goto err; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 66f08f622dc6..546b669fbfdd 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1756,9 +1756,9 @@ static int virtnet_probe(struct virtio_device *vdev) /* Do we support "hardware" checksums? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { /* This opens up the world of extra features. */ - dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; + dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; if (csum) - dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; + dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 7193b7304fdd..848ea6a399f2 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -589,7 +589,8 @@ static int cosa_probe(int base, int irq, int dma) chan->netdev->base_addr = chan->cosa->datareg; chan->netdev->irq = chan->cosa->irq; chan->netdev->dma = chan->cosa->dma; - if (register_hdlc_device(chan->netdev)) { + err = register_hdlc_device(chan->netdev); + if (err) { netdev_warn(chan->netdev, "register_hdlc_device() failed\n"); free_netdev(chan->netdev); diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c index 25d1cbd34306..b2f0d245bcf3 100644 --- a/drivers/net/wireless/b43/tables_nphy.c +++ b/drivers/net/wireless/b43/tables_nphy.c @@ -3728,7 +3728,7 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev) switch (phy->rev) { case 6: case 5: - if (sprom->fem.ghz5.extpa_gain == 3) + if (sprom->fem.ghz2.extpa_gain == 3) return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g; /* fall through */ case 4: diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 5000bfcded61..5514ad6d4e54 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -1023,7 +1023,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, cmd->scan_priority = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); - if (iwl_mvm_scan_total_iterations(params) == 0) + if (iwl_mvm_scan_total_iterations(params) == 1) cmd->ooc_priority = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); else 
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 6203c4ad9bba..9e144e71da0b 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -478,10 +478,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_WAKE_ME); - else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) + else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { + iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE | CSR_HW_IF_CONFIG_REG_ENABLE_PME); + mdelay(1); + iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + } mdelay(5); } @@ -575,6 +581,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) if (ret >= 0) return 0; + iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + msleep(1); + for (iter = 0; iter < 10; iter++) { /* If HW is not ready, prepare the conditions to check again */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, @@ -582,8 +592,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) do { ret = iwl_pcie_set_hw_ready(trans); - if (ret >= 0) - return 0; + if (ret >= 0) { + ret = 0; + goto out; + } usleep_range(200, 1000); t += 200; @@ -593,6 +605,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) IWL_ERR(trans, "Couldn't prepare the card\n"); +out: + iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + return ret; } diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 2b86c2135de3..607acb53c847 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -1875,8 +1875,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, /* start timer if queue currently empty */ if (q->read_ptr == q->write_ptr) { - if (txq->wd_timeout) - mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); + if (txq->wd_timeout) { + /* + * If the TXQ is active, then set the timer, if not, + * set the timer in remainder so that the timer will + * be armed with the right value when the station will + * wake up. 
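
Restated plainly, the arm-or-defer logic this comment describes is: arm the stuck timer now if the queue is not frozen, otherwise save the timeout as a remainder so the timer can be armed with the correct value when the station wakes. A hedged sketch; the struct is hypothetical and only the field names mirror the hunk:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct txq_sketch {
	bool frozen;
	unsigned long wd_timeout;		/* in jiffies */
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
};

static void arm_or_defer(struct txq_sketch *q)
{
	if (!q->frozen)
		mod_timer(&q->stuck_timer, jiffies + q->wd_timeout);
	else
		q->frozen_expiry_remainder = q->wd_timeout;
}
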
+ */ + if (!txq->frozen) + mod_timer(&txq->stuck_timer, + jiffies + txq->wd_timeout); + else + txq->frozen_expiry_remainder = txq->wd_timeout; + } IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id); iwl_trans_pcie_ref(trans); } diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c index b6cc9ff47fc2..1c6788aecc62 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c @@ -172,6 +172,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common) (struct rsi_91x_sdiodev *)adapter->rsi_dev; u32 len; u32 num_blocks; + const u8 *fw; const struct firmware *fw_entry = NULL; u32 block_size = dev->tx_blk_size; int status = 0; @@ -200,6 +201,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common) return status; } + /* Copy firmware into DMA-accessible memory */ + fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL); + if (!fw) + return -ENOMEM; len = fw_entry->size; if (len % 4) @@ -210,7 +215,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common) rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len); rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks); - status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks); + status = rsi_copy_to_card(common, fw, len, num_blocks); + kfree(fw); release_firmware(fw_entry); return status; } diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c index 1106ce76707e..30c2cf7fa93b 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c @@ -146,7 +146,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common) return status; } + /* Copy firmware into DMA-accessible memory */ fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL); + if (!fw) + return -ENOMEM; len = fw_entry->size; if (len % 4) @@ -158,6 +161,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common) rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks); status = rsi_copy_to_card(common, fw, len, num_blocks); + kfree(fw); release_firmware(fw_entry); return status; } diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c index 3b3a88b53b11..585d0883c7e5 100644 --- a/drivers/net/wireless/rtlwifi/core.c +++ b/drivers/net/wireless/rtlwifi/core.c @@ -1015,9 +1015,12 @@ static void send_beacon_frame(struct ieee80211_hw *hw, { struct rtl_priv *rtlpriv = rtl_priv(hw); struct sk_buff *skb = ieee80211_beacon_get(hw, vif); + struct rtl_tcb_desc tcb_desc; - if (skb) - rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL); + if (skb) { + memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); + rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc); + } } static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c index 1017f02d7bf7..7bf88d9dcdc3 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c @@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444); module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444); +module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444); module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog, 
bool, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 1a83e190fc15..28577a31549d 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -61,6 +61,12 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue, void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) { atomic_dec(&queue->inflight_packets); + + /* Wake the dealloc thread _after_ decrementing inflight_packets so + * that if kthread_stop() has already been called, the dealloc thread + * does not wait forever with nothing to wake it. + */ + wake_up(&queue->dealloc_wq); } int xenvif_schedulable(struct xenvif *vif) diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 7d50711476fe..3f44b522b831 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -810,23 +810,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size) static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue, struct sk_buff *skb, struct xen_netif_tx_request *txp, - struct gnttab_map_grant_ref *gop) + struct gnttab_map_grant_ref *gop, + unsigned int frag_overflow, + struct sk_buff *nskb) { struct skb_shared_info *shinfo = skb_shinfo(skb); skb_frag_t *frags = shinfo->frags; u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; int start; pending_ring_idx_t index; - unsigned int nr_slots, frag_overflow = 0; + unsigned int nr_slots; - /* At this point shinfo->nr_frags is in fact the number of - * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX. - */ - if (shinfo->nr_frags > MAX_SKB_FRAGS) { - frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS; - BUG_ON(frag_overflow > MAX_SKB_FRAGS); - shinfo->nr_frags = MAX_SKB_FRAGS; - } nr_slots = shinfo->nr_frags; /* Skip first skb fragment if it is on same page as header fragment. */ @@ -841,13 +835,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que } if (frag_overflow) { - struct sk_buff *nskb = xenvif_alloc_skb(0); - if (unlikely(nskb == NULL)) { - if (net_ratelimit()) - netdev_err(queue->vif->dev, - "Can't allocate the frag_list skb.\n"); - return NULL; - } shinfo = skb_shinfo(nskb); frags = shinfo->frags; @@ -1175,9 +1162,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, unsigned *copy_ops, unsigned *map_ops) { - struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop; - struct sk_buff *skb; + struct gnttab_map_grant_ref *gop = queue->tx_map_ops; + struct sk_buff *skb, *nskb; int ret; + unsigned int frag_overflow; while (skb_queue_len(&queue->tx_queue) < budget) { struct xen_netif_tx_request txreq; @@ -1265,6 +1253,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, break; } + skb_shinfo(skb)->nr_frags = ret; + if (data_len < txreq.size) + skb_shinfo(skb)->nr_frags++; + /* At this point shinfo->nr_frags is in fact the number of + * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX. 
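
The relocated check that follows caps the slot count at MAX_SKB_FRAGS and records the overflow that must spill into a frag_list skb, which is now allocated before any grant ops are queued so a failure can still be unwound with a plain tx_err. A hedged sketch of just the capping step (illustrative helper, not the driver code):

#include <linux/skbuff.h>

static unsigned int cap_slots(unsigned int nr_slots, unsigned int *overflow)
{
	*overflow = 0;
	if (nr_slots > MAX_SKB_FRAGS) {
		*overflow = nr_slots - MAX_SKB_FRAGS;
		nr_slots = MAX_SKB_FRAGS;	/* rest goes to the frag_list skb */
	}
	return nr_slots;
}
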
+ */ + frag_overflow = 0; + nskb = NULL; + if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) { + frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS; + BUG_ON(frag_overflow > MAX_SKB_FRAGS); + skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS; + nskb = xenvif_alloc_skb(0); + if (unlikely(nskb == NULL)) { + kfree_skb(skb); + xenvif_tx_err(queue, &txreq, idx); + if (net_ratelimit()) + netdev_err(queue->vif->dev, + "Can't allocate the frag_list skb.\n"); + break; + } + } + if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct xen_netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; @@ -1272,6 +1283,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, if (xenvif_set_skb_gso(queue->vif, skb, gso)) { /* Failure in xenvif_set_skb_gso is fatal. */ kfree_skb(skb); + kfree_skb(nskb); break; } } @@ -1294,9 +1306,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, (*copy_ops)++; - skb_shinfo(skb)->nr_frags = ret; if (data_len < txreq.size) { - skb_shinfo(skb)->nr_frags++; frag_set_pending_idx(&skb_shinfo(skb)->frags[0], pending_idx); xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop); @@ -1310,13 +1320,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, queue->pending_cons++; - request_gop = xenvif_get_requests(queue, skb, txfrags, gop); - if (request_gop == NULL) { - kfree_skb(skb); - xenvif_tx_err(queue, &txreq, idx); - break; - } - gop = request_gop; + gop = xenvif_get_requests(queue, skb, txfrags, gop, + frag_overflow, nskb); __skb_queue_tail(&queue->tx_queue, skb); @@ -1536,7 +1541,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success) smp_wmb(); queue->dealloc_prod++; } while (ubuf); - wake_up(&queue->dealloc_wq); spin_unlock_irqrestore(&queue->callback_lock, flags); if (likely(zerocopy_success)) diff --git a/drivers/ntb/ntb.c b/drivers/ntb/ntb.c index 23435f2a5486..2e2530743831 100644 --- a/drivers/ntb/ntb.c +++ b/drivers/ntb/ntb.c @@ -114,7 +114,7 @@ int ntb_register_device(struct ntb_dev *ntb) ntb->dev.bus = &ntb_bus; ntb->dev.parent = &ntb->pdev->dev; ntb->dev.release = ntb_dev_release; - dev_set_name(&ntb->dev, pci_name(ntb->pdev)); + dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev)); ntb->ctx = NULL; ntb->ctx_ops = NULL; diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index efe3ad4122f2..1c6386d5f79c 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -142,10 +142,11 @@ struct ntb_transport_qp { void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); + struct list_head rx_post_q; struct list_head rx_pend_q; struct list_head rx_free_q; - spinlock_t ntb_rx_pend_q_lock; - spinlock_t ntb_rx_free_q_lock; + /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */ + spinlock_t ntb_rx_q_lock; void *rx_buff; unsigned int rx_index; unsigned int rx_max_entry; @@ -211,6 +212,8 @@ struct ntb_transport_ctx { bool link_is_up; struct delayed_work link_work; struct work_struct link_cleanup; + + struct dentry *debugfs_node_dir; }; enum { @@ -436,13 +439,17 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count, char *buf; ssize_t ret, out_offset, out_count; + qp = filp->private_data; + + if (!qp || !qp->link_is_up) + return 0; + out_count = 1000; buf = kmalloc(out_count, GFP_KERNEL); if (!buf) return -ENOMEM; - qp = filp->private_data; out_offset = 0; out_offset += snprintf(buf + out_offset, out_count - out_offset, "NTB QP stats\n"); @@ -534,6 +541,27 @@ out: return entry; } +static struct ntb_queue_entry 
*ntb_list_mv(spinlock_t *lock, + struct list_head *list, + struct list_head *to_list) +{ + struct ntb_queue_entry *entry; + unsigned long flags; + + spin_lock_irqsave(lock, flags); + + if (list_empty(list)) { + entry = NULL; + } else { + entry = list_first_entry(list, struct ntb_queue_entry, entry); + list_move_tail(&entry->entry, to_list); + } + + spin_unlock_irqrestore(lock, flags); + + return entry; +} + static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, unsigned int qp_num) { @@ -601,13 +629,16 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw) } static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, - unsigned int size) + resource_size_t size) { struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; struct pci_dev *pdev = nt->ndev->pdev; - unsigned int xlat_size, buff_size; + size_t xlat_size, buff_size; int rc; + if (!size) + return -EINVAL; + xlat_size = round_up(size, mw->xlat_align_size); buff_size = round_up(size, mw->xlat_align); @@ -627,7 +658,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, if (!mw->virt_addr) { mw->xlat_size = 0; mw->buff_size = 0; - dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n", + dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n", buff_size); return -ENOMEM; } @@ -867,6 +898,8 @@ static void ntb_qp_link_work(struct work_struct *work) if (qp->event_handler) qp->event_handler(qp->cb_data, qp->link_is_up); + + tasklet_schedule(&qp->rxc_db_work); } else if (nt->link_is_up) schedule_delayed_work(&qp->link_work, msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT)); @@ -923,12 +956,12 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, qp->tx_max_frame = min(transport_mtu, tx_size / 2); qp->tx_max_entry = tx_size / qp->tx_max_frame; - if (nt_debugfs_dir) { + if (nt->debugfs_node_dir) { char debugfs_name[4]; snprintf(debugfs_name, 4, "qp%d", qp_num); qp->debugfs_dir = debugfs_create_dir(debugfs_name, - nt_debugfs_dir); + nt->debugfs_node_dir); qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR, qp->debugfs_dir, qp, @@ -941,10 +974,10 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work); INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work); - spin_lock_init(&qp->ntb_rx_pend_q_lock); - spin_lock_init(&qp->ntb_rx_free_q_lock); + spin_lock_init(&qp->ntb_rx_q_lock); spin_lock_init(&qp->ntb_tx_free_q_lock); + INIT_LIST_HEAD(&qp->rx_post_q); INIT_LIST_HEAD(&qp->rx_pend_q); INIT_LIST_HEAD(&qp->rx_free_q); INIT_LIST_HEAD(&qp->tx_free_q); @@ -1031,6 +1064,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) goto err2; } + if (nt_debugfs_dir) { + nt->debugfs_node_dir = + debugfs_create_dir(pci_name(ndev->pdev), + nt_debugfs_dir); + } + for (i = 0; i < qp_count; i++) { rc = ntb_transport_init_queue(nt, i); if (rc) @@ -1107,22 +1146,47 @@ static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev) kfree(nt); } -static void ntb_rx_copy_callback(void *data) +static void ntb_complete_rxc(struct ntb_transport_qp *qp) { - struct ntb_queue_entry *entry = data; - struct ntb_transport_qp *qp = entry->qp; - void *cb_data = entry->cb_data; - unsigned int len = entry->len; - struct ntb_payload_header *hdr = entry->rx_hdr; + struct ntb_queue_entry *entry; + void *cb_data; + unsigned int len; + unsigned long irqflags; + + spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags); + + while (!list_empty(&qp->rx_post_q)) { + entry = list_first_entry(&qp->rx_post_q, + struct ntb_queue_entry, 
entry); + if (!(entry->flags & DESC_DONE_FLAG)) + break; + + entry->rx_hdr->flags = 0; + iowrite32(entry->index, &qp->rx_info->entry); - hdr->flags = 0; + cb_data = entry->cb_data; + len = entry->len; - iowrite32(entry->index, &qp->rx_info->entry); + list_move_tail(&entry->entry, &qp->rx_free_q); - ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q); + spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); - if (qp->rx_handler && qp->client_ready) - qp->rx_handler(qp, qp->cb_data, cb_data, len); + if (qp->rx_handler && qp->client_ready) + qp->rx_handler(qp, qp->cb_data, cb_data, len); + + spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags); + } + + spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); +} + +static void ntb_rx_copy_callback(void *data) +{ + struct ntb_queue_entry *entry = data; + + entry->flags |= DESC_DONE_FLAG; + + ntb_complete_rxc(entry->qp); } static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset) @@ -1138,19 +1202,18 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset) ntb_rx_copy_callback(entry); } -static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, - size_t len) +static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset) { struct dma_async_tx_descriptor *txd; struct ntb_transport_qp *qp = entry->qp; struct dma_chan *chan = qp->dma_chan; struct dma_device *device; - size_t pay_off, buff_off; + size_t pay_off, buff_off, len; struct dmaengine_unmap_data *unmap; dma_cookie_t cookie; void *buf = entry->buf; - entry->len = len; + len = entry->len; if (!chan) goto err; @@ -1226,7 +1289,6 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp) struct ntb_payload_header *hdr; struct ntb_queue_entry *entry; void *offset; - int rc; offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index; hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header); @@ -1255,65 +1317,43 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp) return -EIO; } - entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); + entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q); if (!entry) { dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n"); qp->rx_err_no_buf++; - - rc = -ENOMEM; - goto err; + return -EAGAIN; } + entry->rx_hdr = hdr; + entry->index = qp->rx_index; + if (hdr->len > entry->len) { dev_dbg(&qp->ndev->pdev->dev, "receive buffer overflow! Wanted %d got %d\n", hdr->len, entry->len); qp->rx_err_oflow++; - rc = -EIO; - goto err; - } + entry->len = -EIO; + entry->flags |= DESC_DONE_FLAG; - dev_dbg(&qp->ndev->pdev->dev, - "RX OK index %u ver %u size %d into buf size %d\n", - qp->rx_index, hdr->ver, hdr->len, entry->len); + ntb_complete_rxc(qp); + } else { + dev_dbg(&qp->ndev->pdev->dev, + "RX OK index %u ver %u size %d into buf size %d\n", + qp->rx_index, hdr->ver, hdr->len, entry->len); - qp->rx_bytes += hdr->len; - qp->rx_pkts++; + qp->rx_bytes += hdr->len; + qp->rx_pkts++; - entry->index = qp->rx_index; - entry->rx_hdr = hdr; + entry->len = hdr->len; - ntb_async_rx(entry, offset, hdr->len); + ntb_async_rx(entry, offset); + } qp->rx_index++; qp->rx_index %= qp->rx_max_entry; return 0; - -err: - /* FIXME: if this syncrhonous update of the rx_index gets ahead of - * asyncrhonous ntb_rx_copy_callback of previous entry, there are three - * scenarios: - * - * 1) The peer might miss this update, but observe the update - * from the memcpy completion callback. In this case, the buffer will - * not be freed on the peer to be reused for a different packet. 
The - * successful rx of a later packet would clear the condition, but the - * condition could persist if several rx fail in a row. - * - * 2) The peer may observe this update before the asyncrhonous copy of - * prior packets is completed. The peer may overwrite the buffers of - * the prior packets before they are copied. - * - * 3) Both: the peer may observe the update, and then observe the index - * decrement by the asynchronous completion callback. Who knows what - * badness that will cause. - */ - hdr->flags = 0; - iowrite32(qp->rx_index, &qp->rx_info->entry); - - return rc; } static void ntb_transport_rxc_db(unsigned long data) @@ -1333,7 +1373,7 @@ static void ntb_transport_rxc_db(unsigned long data) break; } - if (qp->dma_chan) + if (i && qp->dma_chan) dma_async_issue_pending(qp->dma_chan); if (i == qp->rx_max_entry) { @@ -1609,7 +1649,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev, goto err1; entry->qp = qp; - ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, + ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q); } @@ -1634,7 +1674,7 @@ err2: while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) kfree(entry); err1: - while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) + while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) kfree(entry); if (qp->dma_chan) dma_release_channel(qp->dma_chan); @@ -1652,7 +1692,6 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue); */ void ntb_transport_free_queue(struct ntb_transport_qp *qp) { - struct ntb_transport_ctx *nt = qp->transport; struct pci_dev *pdev; struct ntb_queue_entry *entry; u64 qp_bit; @@ -1689,18 +1728,23 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp) qp->tx_handler = NULL; qp->event_handler = NULL; - while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) + while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) kfree(entry); - while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) { - dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n"); + while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) { + dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n"); + kfree(entry); + } + + while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) { + dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n"); kfree(entry); } while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) kfree(entry); - nt->qp_bitmap_free |= qp_bit; + qp->transport->qp_bitmap_free |= qp_bit; dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num); } @@ -1724,14 +1768,14 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len) if (!qp || qp->client_ready) return NULL; - entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); + entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q); if (!entry) return NULL; buf = entry->cb_data; *len = entry->len; - ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q); + ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q); return buf; } @@ -1757,15 +1801,18 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, if (!qp) return -EINVAL; - entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q); + entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q); if (!entry) return -ENOMEM; entry->cb_data = cb; entry->buf = data; entry->len = len; + entry->flags = 0; + + ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q); - 
ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q); + tasklet_schedule(&qp->rxc_db_work); return 0; } diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c index e17c539e4f6f..2dad7e820ff0 100644 --- a/drivers/phy/phy-sun4i-usb.c +++ b/drivers/phy/phy-sun4i-usb.c @@ -212,6 +212,7 @@ void sun4i_usb_phy_set_squelch_detect(struct phy *_phy, bool enabled) sun4i_usb_phy_write(phy, PHY_SQUELCH_DETECT, enabled ? 0 : 2, 2); } +EXPORT_SYMBOL_GPL(sun4i_usb_phy_set_squelch_detect); static struct phy_ops sun4i_usb_phy_ops = { .init = sun4i_usb_phy_init, diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c index 3510b81db3fa..08020dc2c7c8 100644 --- a/drivers/phy/phy-ti-pipe3.c +++ b/drivers/phy/phy-ti-pipe3.c @@ -28,6 +28,8 @@ #include <linux/delay.h> #include <linux/phy/omap_control_phy.h> #include <linux/of_platform.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> #define PLL_STATUS 0x00000004 #define PLL_GO 0x00000008 @@ -52,6 +54,8 @@ #define PLL_LOCK 0x2 #define PLL_IDLE 0x1 +#define SATA_PLL_SOFT_RESET BIT(18) + /* * This is an Empirical value that works, need to confirm the actual * value required for the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status @@ -82,6 +86,9 @@ struct ti_pipe3 { struct clk *refclk; struct clk *div_clk; struct pipe3_dpll_map *dpll_map; + struct regmap *dpll_reset_syscon; /* ctrl. reg. access */ + unsigned int dpll_reset_reg; /* reg. index within syscon */ + bool sata_refclk_enabled; }; static struct pipe3_dpll_map dpll_map_usb[] = { @@ -249,8 +256,11 @@ static int ti_pipe3_exit(struct phy *x) u32 val; unsigned long timeout; - /* SATA DPLL can't be powered down due to Errata i783 */ - if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) + /* If dpll_reset_syscon is not present we won't power down SATA DPLL + * due to Errata i783 + */ + if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") && + !phy->dpll_reset_syscon) return 0; /* PCIe doesn't have internal DPLL */ @@ -276,6 +286,14 @@ static int ti_pipe3_exit(struct phy *x) } } + /* i783: SATA needs control bit toggle after PLL unlock */ + if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) { + regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg, + SATA_PLL_SOFT_RESET, SATA_PLL_SOFT_RESET); + regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg, + SATA_PLL_SOFT_RESET, 0); + } + ti_pipe3_disable_clocks(phy); return 0; @@ -350,6 +368,21 @@ static int ti_pipe3_probe(struct platform_device *pdev) } } else { phy->wkupclk = ERR_PTR(-ENODEV); + phy->dpll_reset_syscon = syscon_regmap_lookup_by_phandle(node, + "syscon-pllreset"); + if (IS_ERR(phy->dpll_reset_syscon)) { + dev_info(&pdev->dev, + "can't get syscon-pllreset, sata dpll won't idle\n"); + phy->dpll_reset_syscon = NULL; + } else { + if (of_property_read_u32_index(node, + "syscon-pllreset", 1, + &phy->dpll_reset_reg)) { + dev_err(&pdev->dev, + "couldn't get pllreset reg. 
offset\n"); + return -EINVAL; + } + } } if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { @@ -402,10 +435,16 @@ static int ti_pipe3_probe(struct platform_device *pdev) platform_set_drvdata(pdev, phy); pm_runtime_enable(phy->dev); - /* Prevent auto-disable of refclk for SATA PHY due to Errata i783 */ - if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) - if (!IS_ERR(phy->refclk)) + + /* + * Prevent auto-disable of refclk for SATA PHY due to Errata i783 + */ + if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) { + if (!IS_ERR(phy->refclk)) { clk_prepare_enable(phy->refclk); + phy->sata_refclk_enabled = true; + } + } generic_phy = devm_phy_create(phy->dev, NULL, &ops); if (IS_ERR(generic_phy)) @@ -472,8 +511,18 @@ static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy) { if (!IS_ERR(phy->wkupclk)) clk_disable_unprepare(phy->wkupclk); - if (!IS_ERR(phy->refclk)) + if (!IS_ERR(phy->refclk)) { clk_disable_unprepare(phy->refclk); + /* + * SATA refclk needs an additional disable as we left it + * on in probe to avoid Errata i783 + */ + if (phy->sata_refclk_enabled) { + clk_disable_unprepare(phy->refclk); + phy->sata_refclk_enabled = false; + } + } + if (!IS_ERR(phy->div_clk)) clk_disable_unprepare(phy->div_clk); } diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig index cb1329919527..3271cd1abe7c 100644 --- a/drivers/platform/chrome/Kconfig +++ b/drivers/platform/chrome/Kconfig @@ -4,7 +4,6 @@ menuconfig CHROME_PLATFORMS bool "Platform support for Chrome hardware" - depends on X86 || ARM ---help--- Say Y here to get to see options for platform support for various Chromebooks and Chromeboxes. This option alone does diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 882744852aac..a9aa38903efe 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -599,9 +599,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd, { struct ipr_trace_entry *trace_entry; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + unsigned int trace_index; - trace_entry = &ioa_cfg->trace[atomic_add_return - (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES]; + trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK; + trace_entry = &ioa_cfg->trace[trace_index]; trace_entry->time = jiffies; trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; trace_entry->type = type; @@ -1051,10 +1052,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd, static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg) { + unsigned int hrrq; + if (ioa_cfg->hrrq_num == 1) - return 0; - else - return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1; + hrrq = 0; + else { + hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index); + hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1; + } + return hrrq; } /** @@ -6263,21 +6269,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); - unsigned long hrrq_flags; + unsigned long lock_flags; scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { scsi_dma_unmap(scsi_cmd); - spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags); + spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); scsi_cmd->scsi_done(scsi_cmd); - spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags); + spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags); } 
else { - spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + spin_lock(&ipr_cmd->hrrq->_lock); ipr_erp_start(ioa_cfg, ipr_cmd); - spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags); + spin_unlock(&ipr_cmd->hrrq->_lock); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); } } diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 73790a1d0969..6b97ee45c7b4 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -1486,6 +1486,7 @@ struct ipr_ioa_cfg { #define IPR_NUM_TRACE_INDEX_BITS 8 #define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS) +#define IPR_TRACE_INDEX_MASK (IPR_NUM_TRACE_ENTRIES - 1) #define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES) char trace_start[8]; #define IPR_TRACE_START_LABEL "trace" diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 106884a5444e..cfadccef045c 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -944,7 +944,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses, scmd->sdb.length); scmd->sdb.table.sgl = &ses->sense_sgl; scmd->sc_data_direction = DMA_FROM_DEVICE; - scmd->sdb.table.nents = 1; + scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1; scmd->cmnd[0] = REQUEST_SENSE; scmd->cmnd[4] = scmd->sdb.length; scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index b1a263137a23..448ebdaa3d69 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -583,7 +583,7 @@ static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask) static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq) { - if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS) + if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS) return; __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free); } @@ -597,8 +597,8 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq) if (mq) { if (nents <= SCSI_MAX_SG_SEGMENTS) { - sdb->table.nents = nents; - sg_init_table(sdb->table.sgl, sdb->table.nents); + sdb->table.nents = sdb->table.orig_nents = nents; + sg_init_table(sdb->table.sgl, nents); return 0; } first_chunk = sdb->table.sgl; diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c index bfa42620a3f6..940781183fac 100644 --- a/drivers/staging/comedi/drivers/das1800.c +++ b/drivers/staging/comedi/drivers/das1800.c @@ -1266,6 +1266,7 @@ static const struct das1800_board *das1800_probe(struct comedi_device *dev) if (index == das1801hc || index == das1802hc) return board; index = das1801hc; + break; default: dev_err(dev->class_dev, "Board model: probe returned 0x%x (unknown, please report)\n", diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c index 9c934e6d2ea1..c61add46b426 100644 --- a/drivers/staging/lustre/lustre/obdclass/debug.c +++ b/drivers/staging/lustre/lustre/obdclass/debug.c @@ -40,7 +40,7 @@ #define DEBUG_SUBSYSTEM D_OTHER -#include <linux/unaligned/access_ok.h> +#include <asm/unaligned.h> #include "../include/obd_support.h" #include "../include/lustre_debug.h" diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index b0c8e235b982..69bdc8f29b59 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -1483,8 +1483,9 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, } } - if (changed & 
BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) { - if (conf->assoc) { + if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) && + priv->op_mode != NL80211_IFTYPE_AP) { + if (conf->assoc && conf->beacon_rate) { CARDbUpdateTSF(priv, conf->beacon_rate->hw_value, conf->sync_tsf); diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index d5dd357ba57c..b49f97c734d0 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c @@ -405,7 +405,6 @@ static SIMPLE_DEV_PM_OPS(hisi_thermal_pm_ops, static struct platform_driver hisi_thermal_driver = { .driver = { .name = "hisi_thermal", - .owner = THIS_MODULE, .pm = &hisi_thermal_pm_ops, .of_match_table = of_hisi_thermal_match, }, diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c index 4672250b329f..63a448f9d93b 100644 --- a/drivers/thermal/power_allocator.c +++ b/drivers/thermal/power_allocator.c @@ -229,7 +229,8 @@ static int allocate_power(struct thermal_zone_device *tz, struct thermal_instance *instance; struct power_allocator_params *params = tz->governor_data; u32 *req_power, *max_power, *granted_power, *extra_actor_power; - u32 total_req_power, max_allocatable_power; + u32 *weighted_req_power; + u32 total_req_power, max_allocatable_power, total_weighted_req_power; u32 total_granted_power, power_range; int i, num_actors, total_weight, ret = 0; int trip_max_desired_temperature = params->trip_max_desired_temperature; @@ -247,16 +248,17 @@ static int allocate_power(struct thermal_zone_device *tz, } /* - * We need to allocate three arrays of the same size: - * req_power, max_power and granted_power. They are going to - * be needed until this function returns. Allocate them all - * in one go to simplify the allocation and deallocation - * logic. + * We need to allocate five arrays of the same size: + * req_power, max_power, granted_power, extra_actor_power and + * weighted_req_power. They are going to be needed until this + * function returns. Allocate them all in one go to simplify + * the allocation and deallocation logic. 
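
In sketch form, the single-allocation idiom this comment describes: one kcalloc() sized for five num_actors-long u32 arrays, carved up with pointer arithmetic so a single kfree() releases everything. The helper itself is illustrative; only the array names mirror the hunk:

#include <linux/slab.h>

static u32 *alloc_power_arrays(int num_actors, u32 **max_power,
			       u32 **granted_power, u32 **extra_actor_power,
			       u32 **weighted_req_power)
{
	u32 *req_power = kcalloc(num_actors * 5, sizeof(*req_power),
				 GFP_KERNEL);

	if (!req_power)
		return NULL;

	*max_power          = &req_power[1 * num_actors];
	*granted_power      = &req_power[2 * num_actors];
	*extra_actor_power  = &req_power[3 * num_actors];
	*weighted_req_power = &req_power[4 * num_actors];
	return req_power;	/* kfree(req_power) frees all five */
}
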
*/ BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power)); BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power)); BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power)); - req_power = devm_kcalloc(&tz->device, num_actors * 4, + BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power)); + req_power = devm_kcalloc(&tz->device, num_actors * 5, sizeof(*req_power), GFP_KERNEL); if (!req_power) { ret = -ENOMEM; @@ -266,8 +268,10 @@ static int allocate_power(struct thermal_zone_device *tz, max_power = &req_power[num_actors]; granted_power = &req_power[2 * num_actors]; extra_actor_power = &req_power[3 * num_actors]; + weighted_req_power = &req_power[4 * num_actors]; i = 0; + total_weighted_req_power = 0; total_req_power = 0; max_allocatable_power = 0; @@ -289,13 +293,14 @@ static int allocate_power(struct thermal_zone_device *tz, else weight = instance->weight; - req_power[i] = frac_to_int(weight * req_power[i]); + weighted_req_power[i] = frac_to_int(weight * req_power[i]); if (power_actor_get_max_power(cdev, tz, &max_power[i])) continue; total_req_power += req_power[i]; max_allocatable_power += max_power[i]; + total_weighted_req_power += weighted_req_power[i]; i++; } @@ -303,8 +308,9 @@ static int allocate_power(struct thermal_zone_device *tz, power_range = pid_controller(tz, current_temp, control_temp, max_allocatable_power); - divvy_up_power(req_power, max_power, num_actors, total_req_power, - power_range, granted_power, extra_actor_power); + divvy_up_power(weighted_req_power, max_power, num_actors, + total_weighted_req_power, power_range, granted_power, + extra_actor_power); total_granted_power = 0; i = 0; diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig index c8e35c1a43dc..e0da3865e060 100644 --- a/drivers/thermal/samsung/Kconfig +++ b/drivers/thermal/samsung/Kconfig @@ -1,6 +1,6 @@ config EXYNOS_THERMAL tristate "Exynos thermal management unit driver" - depends on OF + depends on THERMAL_OF help If you say yes here you get support for the TMU (Thermal Management Unit) driver for SAMSUNG EXYNOS series of SoCs. 
This driver initialises diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index 531f4b179871..c96ff10b869e 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -1296,7 +1296,6 @@ static struct thermal_zone_of_device_ops exynos_sensor_ops = { static int exynos_tmu_probe(struct platform_device *pdev) { - struct exynos_tmu_platform_data *pdata; struct exynos_tmu_data *data; int ret; @@ -1318,8 +1317,6 @@ static int exynos_tmu_probe(struct platform_device *pdev) if (ret) goto err_sensor; - pdata = data->pdata; - INIT_WORK(&data->irq_work, exynos_tmu_work); data->clk = devm_clk_get(&pdev->dev, "tmu_apbif"); @@ -1392,6 +1389,8 @@ err_clk_sec: if (!IS_ERR(data->clk_sec)) clk_unprepare(data->clk_sec); err_sensor: + if (!IS_ERR_OR_NULL(data->regulator)) + regulator_disable(data->regulator); thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd); return ret; diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 04659bfb888b..4ca211be4c0f 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -1333,6 +1333,7 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz, return -ENODEV; unbind: + device_remove_file(&tz->device, &pos->weight_attr); device_remove_file(&tz->device, &pos->attr); sysfs_remove_link(&tz->device.kobj, pos->name); release_idr(&tz->idr, &tz->lock, pos->id); diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 74fea4fa41b1..3ad48e1c0c57 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -1024,7 +1024,18 @@ static struct platform_driver ci_hdrc_driver = { }, }; -module_platform_driver(ci_hdrc_driver); +static int __init ci_hdrc_platform_register(void) +{ + ci_hdrc_host_driver_init(); + return platform_driver_register(&ci_hdrc_driver); +} +module_init(ci_hdrc_platform_register); + +static void __exit ci_hdrc_platform_unregister(void) +{ + platform_driver_unregister(&ci_hdrc_driver); +} +module_exit(ci_hdrc_platform_unregister); MODULE_ALIAS("platform:ci_hdrc"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c index 6cf87b8b13a8..7161439def19 100644 --- a/drivers/usb/chipidea/host.c +++ b/drivers/usb/chipidea/host.c @@ -249,9 +249,12 @@ int ci_hdrc_host_init(struct ci_hdrc *ci) rdrv->name = "host"; ci->roles[CI_ROLE_HOST] = rdrv; + return 0; +} + +void ci_hdrc_host_driver_init(void) +{ ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides); orig_bus_suspend = ci_ehci_hc_driver.bus_suspend; ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend; - - return 0; } diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h index 5707bf379bfb..0f12f131bdd3 100644 --- a/drivers/usb/chipidea/host.h +++ b/drivers/usb/chipidea/host.h @@ -5,6 +5,7 @@ int ci_hdrc_host_init(struct ci_hdrc *ci); void ci_hdrc_host_destroy(struct ci_hdrc *ci); +void ci_hdrc_host_driver_init(void); #else @@ -18,6 +19,11 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci) } +static void ci_hdrc_host_driver_init(void) +{ + +} + #endif #endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */ diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index f7f35a36c09a..6df9715a4bcd 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -699,6 +699,10 @@ static inline int hidg_get_minor(void) int ret; ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL); + if (ret >= HIDG_MINORS) { + 
ida_simple_remove(&hidg_ida, ret); + ret = -ENODEV; + } return ret; } diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index 44173df27273..357f63f47b42 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c @@ -1248,7 +1248,15 @@ static struct config_item_type printer_func_type = { static inline int gprinter_get_minor(void) { - return ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL); + int ret; + + ret = ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL); + if (ret >= PRINTER_MINORS) { + ida_simple_remove(&printer_ida, ret); + ret = -ENODEV; + } + + return ret; } static inline void gprinter_put_minor(int minor) diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 6d3eb8b00a48..531861547253 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -1162,14 +1162,14 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt) factor = 1000; } else { ep_desc = &hs_epin_desc; - factor = 125; + factor = 8000; } /* pre-compute some values for iso_complete() */ uac2->p_framesize = opts->p_ssize * num_channels(opts->p_chmask); rate = opts->p_srate * uac2->p_framesize; - uac2->p_interval = (1 << (ep_desc->bInterval - 1)) * factor; + uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1)); uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval, prm->max_psize); diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index b04980cf6dc4..1efa61265d8d 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c @@ -779,7 +779,7 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req) /* The current hw dequeue pointer */ tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0)); deq_ptr_64 = tmp_32; - tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(1)); + tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1(0)); deq_ptr_64 |= ((u64)tmp_32 << 32); /* we have the dma addr of next bd that will be fetched by hardware */ diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c index 362ee8af5fce..89ed5e71a199 100644 --- a/drivers/usb/gadget/udc/udc-core.c +++ b/drivers/usb/gadget/udc/udc-core.c @@ -323,6 +323,7 @@ err4: err3: put_device(&udc->dev); + device_del(&gadget->dev); err2: put_device(&gadget->dev); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 3e442f77a2b9..9a8c936cd42c 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1792,7 +1792,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) int size; int i, j, num_ports; - del_timer_sync(&xhci->cmd_timer); + if (timer_pending(&xhci->cmd_timer)) + del_timer_sync(&xhci->cmd_timer); /* Free the Event Ring Segment Table and the actual Event Ring */ size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 6a8fc52aed58..32f4d564494a 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -82,7 +82,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, return 0; /* offset in TRBs */ segment_offset = trb - seg->trbs; - if (segment_offset > TRBS_PER_SEGMENT) + if (segment_offset >= TRBS_PER_SEGMENT) return 0; return seg->dma + (segment_offset * sizeof(*trb)); } diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 19b85ee98a72..876423b8892c 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1099,6 
+1099,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff), .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */ + { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff), + .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 9c63897b3a56..d156545728c2 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -145,7 +145,6 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */ {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */ {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */ - {DEVICE_SWI(0x1199, 0x9041)}, /* Sierra Wireless MC7305/MC7355 */ {DEVICE_SWI(0x1199, 0x9051)}, /* Netgear AirCard 340U */ {DEVICE_SWI(0x1199, 0x9053)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9054)}, /* Sierra Wireless Modem */ @@ -158,6 +157,7 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ + {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ /* Huawei devices */ {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 46179a0828eb..07d1ecd564f7 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF), .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist }, + { USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */ /* AT&T Direct IP LTE modems */ { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF), .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 658c34bb9076..1aaf89300621 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -1306,10 +1306,11 @@ static void fbcon_cursor(struct vc_data *vc, int mode) int y; int c = scr_readw((u16 *) vc->vc_pos); + ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); + if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1) return; - ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); if (vc->vc_cursor_type & 0x10) fbcon_del_cursor_timer(info); else diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 2d98de535e0f..f888561568d9 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -298,7 +298,7 @@ config FB_ARMCLCD # Helper logic selected only by the ARM Versatile platform family. 
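
A worked example for the f_uac2 interval fix above, assuming a high-speed endpoint (125 us microframes, hence 8000 per second; the numbers are illustrative):

/* bInterval = 1: old (1 << 0) * 125 = 125,  new 8000 / (1 << 0) = 8000
 * bInterval = 4: old (1 << 3) * 125 = 1000, new 8000 / (1 << 3) = 1000,
 * the one case where the two formulas coincide. Only the new form is in
 * packets per second, which is what rate / p_interval needs for
 * p_pktsize to come out in bytes. */
static unsigned int hs_packets_per_sec(unsigned int bInterval)
{
	return 8000u >> (bInterval - 1);
}
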
config PLAT_VERSATILE_CLCD - def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS + def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR depends on ARM depends on FB_ARMCLCD && FB=y diff --git a/drivers/video/fbdev/omap2/dss/dss-of.c b/drivers/video/fbdev/omap2/dss/dss-of.c index 928ee639c0c1..bf407b6ba15c 100644 --- a/drivers/video/fbdev/omap2/dss/dss-of.c +++ b/drivers/video/fbdev/omap2/dss/dss-of.c @@ -60,6 +60,8 @@ omapdss_of_get_next_port(const struct device_node *parent, } prev = port; } while (of_node_cmp(port->name, "port") != 0); + + of_node_put(ports); } return port; @@ -94,7 +96,7 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port) if (!port) return NULL; - np = of_get_next_parent(port); + np = of_get_parent(port); for (i = 0; i < 2 && np; ++i) { struct property *prop; diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c index 86bd457d039d..50bce45e7f3d 100644 --- a/drivers/video/fbdev/pxa3xx-gcu.c +++ b/drivers/video/fbdev/pxa3xx-gcu.c @@ -653,7 +653,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev) goto err_free_dma; } - ret = clk_enable(priv->clk); + ret = clk_prepare_enable(priv->clk); if (ret < 0) { dev_err(dev, "failed to enable clock\n"); goto err_misc_deregister; @@ -685,7 +685,7 @@ err_misc_deregister: misc_deregister(&priv->misc_dev); err_disable_clk: - clk_disable(priv->clk); + clk_disable_unprepare(priv->clk); return ret; } diff --git a/drivers/video/of_videomode.c b/drivers/video/of_videomode.c index 111c2d1911d3..b5102aa6090d 100644 --- a/drivers/video/of_videomode.c +++ b/drivers/video/of_videomode.c @@ -44,11 +44,9 @@ int of_get_videomode(struct device_node *np, struct videomode *vm, index = disp->native_mode; ret = videomode_from_timings(disp, vm, index); - if (ret) - return ret; display_timings_release(disp); - return 0; + return ret; } EXPORT_SYMBOL_GPL(of_get_videomode); diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c index 60e2a1677563..c96944b59856 100644 --- a/drivers/virtio/virtio_input.c +++ b/drivers/virtio/virtio_input.c @@ -313,6 +313,7 @@ err_init_vq: static void virtinput_remove(struct virtio_device *vdev) { struct virtio_input *vi = vdev->priv; + void *buf; unsigned long flags; spin_lock_irqsave(&vi->lock, flags); @@ -320,6 +321,9 @@ static void virtinput_remove(struct virtio_device *vdev) spin_unlock_irqrestore(&vi->lock, flags); input_unregister_device(vi->idev); + vdev->config->reset(vdev); + while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL) + kfree(buf); vdev->config->del_vqs(vdev); kfree(vi); } diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index fd933695f232..bf4a23c7c591 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -472,7 +472,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) } /* - * We avoid multiple worker processes conflicting via the balloon mutex. + * As this is a work item it is guaranteed to run as a single instance only. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. 
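
The balloon_process() hunk just below trades one long critical section for per-iteration locking: take the mutex for a single pass, drop it, then cond_resched() so other lockers and the scheduler each get a turn. A minimal sketch of the pattern, with stand-in work functions:

#include <linux/mutex.h>
#include <linux/sched.h>

static long get_credit_stub(void)		{ return 0; }	/* stand-in */
static int do_balloon_pass_stub(long credit)	{ return 0; }	/* stand-in */

static void worker_loop_sketch(struct mutex *lock)
{
	long credit;
	int state;

	do {
		mutex_lock(lock);
		credit = get_credit_stub();
		state = do_balloon_pass_stub(credit);
		mutex_unlock(lock);

		cond_resched();	/* explicit preemption point between passes */
	} while (credit && state == 0 /* BP_DONE */);
}
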
@@ -482,9 +482,10 @@ static void balloon_process(struct work_struct *work) enum bp_state state = BP_DONE; long credit; - mutex_lock(&balloon_mutex); do { + mutex_lock(&balloon_mutex); + credit = current_credit(); if (credit > 0) { @@ -499,17 +500,15 @@ static void balloon_process(struct work_struct *work) state = update_schedule(state); -#ifndef CONFIG_PREEMPT - if (need_resched()) - schedule(); -#endif + mutex_unlock(&balloon_mutex); + + cond_resched(); + } while (credit && state == BP_DONE); /* Schedule more work if there is some still to be done. */ if (state == BP_EAGAIN) schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ); - - mutex_unlock(&balloon_mutex); } /* Resets the Xen limit, sets new target, and kicks off processing. */ diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 67b9163db718..0dbb222daaf1 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip) pr_debug("priv %p\n", priv); + mutex_lock(&priv->lock); while (!list_empty(&priv->maps)) { map = list_entry(priv->maps.next, struct grant_map, next); list_del(&map->next); gntdev_put_map(NULL /* already removed */, map); } WARN_ON(!list_empty(&priv->freeable_maps)); + mutex_unlock(&priv->lock); if (use_ptemod) mmu_notifier_unregister(&priv->mn, priv->mm); diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 9ad327238ba9..e30353575d5d 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -814,8 +814,10 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr) rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles, addrs); - if (!rv) + if (!rv) { vunmap(vaddr); + free_xenballooned_pages(node->nr_handles, node->hvm.pages); + } else WARN(1, "Leaking %p, size %u page(s)\n", vaddr, node->nr_handles); diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index e9ace099162c..8a8202956576 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1651,6 +1651,11 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info, /* Exclusive -> exclusive, nothing changed */ } } + + /* For exclusive extent, free its reserved bytes too */ + if (nr_old_roots == 0 && nr_new_roots == 1 && + cur_new_count == nr_new_roots) + qg->reserved -= num_bytes; if (dirty) qgroup_dirty(fs_info, qg); } diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index dc10c9dd36c1..ddd5e9471290 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1506,7 +1506,6 @@ static int __mark_caps_flushing(struct inode *inode, swap(cf, ci->i_prealloc_cap_flush); cf->caps = flushing; - cf->kick = false; spin_lock(&mdsc->cap_dirty_lock); list_del_init(&ci->i_dirty_item); @@ -2123,8 +2122,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc, static int __kick_flushing_caps(struct ceph_mds_client *mdsc, struct ceph_mds_session *session, - struct ceph_inode_info *ci, - bool kick_all) + struct ceph_inode_info *ci) { struct inode *inode = &ci->vfs_inode; struct ceph_cap *cap; @@ -2150,9 +2148,7 @@ static int __kick_flushing_caps(struct ceph_mds_client *mdsc, for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) { cf = rb_entry(n, struct ceph_cap_flush, i_node); - if (cf->tid < first_tid) - continue; - if (kick_all || cf->kick) + if (cf->tid >= first_tid) break; } if (!n) { @@ -2161,7 +2157,6 @@ static int __kick_flushing_caps(struct ceph_mds_client *mdsc, } cf = rb_entry(n, struct ceph_cap_flush, i_node); - cf->kick = false; first_tid = 
cf->tid + 1; @@ -2181,8 +2176,6 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc, { struct ceph_inode_info *ci; struct ceph_cap *cap; - struct ceph_cap_flush *cf; - struct rb_node *n; dout("early_kick_flushing_caps mds%d\n", session->s_mds); list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { @@ -2205,16 +2198,11 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc, if ((cap->issued & ci->i_flushing_caps) != ci->i_flushing_caps) { spin_unlock(&ci->i_ceph_lock); - if (!__kick_flushing_caps(mdsc, session, ci, true)) + if (!__kick_flushing_caps(mdsc, session, ci)) continue; spin_lock(&ci->i_ceph_lock); } - for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) { - cf = rb_entry(n, struct ceph_cap_flush, i_node); - cf->kick = true; - } - spin_unlock(&ci->i_ceph_lock); } } @@ -2228,7 +2216,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, dout("kick_flushing_caps mds%d\n", session->s_mds); list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { - int delayed = __kick_flushing_caps(mdsc, session, ci, false); + int delayed = __kick_flushing_caps(mdsc, session, ci); if (delayed) { spin_lock(&ci->i_ceph_lock); __cap_delay_requeue(mdsc, ci); @@ -2261,7 +2249,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc, spin_unlock(&ci->i_ceph_lock); - delayed = __kick_flushing_caps(mdsc, session, ci, true); + delayed = __kick_flushing_caps(mdsc, session, ci); if (delayed) { spin_lock(&ci->i_ceph_lock); __cap_delay_requeue(mdsc, ci); diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c index 4347039ecc18..6706bde9ad1b 100644 --- a/fs/ceph/locks.c +++ b/fs/ceph/locks.c @@ -287,7 +287,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode, return 0; spin_lock(&ctx->flc_lock); - list_for_each_entry(lock, &ctx->flc_flock, fl_list) { + list_for_each_entry(lock, &ctx->flc_posix, fl_list) { ++seen_fcntl; if (seen_fcntl > num_fcntl_locks) { err = -ENOSPC; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 860cc016e70d..2f2460d23a06 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -189,7 +189,6 @@ static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap) struct ceph_cap_flush { u64 tid; int caps; - bool kick; struct rb_node g_node; // global union { struct rb_node i_node; // inode diff --git a/fs/dcache.c b/fs/dcache.c index 5c8ea15e73a5..9b5fe503f6cb 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -3442,22 +3442,15 @@ void __init vfs_caches_init_early(void) inode_init_early(); } -void __init vfs_caches_init(unsigned long mempages) +void __init vfs_caches_init(void) { - unsigned long reserve; - - /* Base hash sizes on available memory, with a reserve equal to - 150% of current kernel size */ - - reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1); - mempages -= reserve; - names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); dcache_init(); inode_init(); - files_init(mempages); + files_init(); + files_maxfiles_init(); mnt_init(); bdev_cache_init(); chrdev_init(); diff --git a/fs/file_table.c b/fs/file_table.c index 7f9d407c7595..ad17e05ebf95 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -25,6 +25,7 @@ #include <linux/hardirq.h> #include <linux/task_work.h> #include <linux/ima.h> +#include <linux/swap.h> #include <linux/atomic.h> @@ -308,19 +309,24 @@ void put_filp(struct file *file) } } -void __init files_init(unsigned long mempages) +void __init files_init(void) { - unsigned long n; - filp_cachep = kmem_cache_create("filp", sizeof(struct file), 
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); + percpu_counter_init(&nr_files, 0, GFP_KERNEL); +} - /* - * One file with associated inode and dcache is very roughly 1K. - * Per default don't use more than 10% of our memory for files. - */ +/* + * One file with associated inode and dcache is very roughly 1K. Per default + * do not use more than 10% of our memory for files. + */ +void __init files_maxfiles_init(void) +{ + unsigned long n; + unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2; + + memreserve = min(memreserve, totalram_pages - 1); + n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10; - n = (mempages * (PAGE_SIZE / 1024)) / 10; files_stat.max_files = max_t(unsigned long, n, NR_FILE); - percpu_counter_init(&nr_files, 0, GFP_KERNEL); } diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 0cf74df68617..973c24ce59ad 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -1010,6 +1010,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size, inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0); if (!inode) goto out_dentry; + if (creat_flags == HUGETLB_SHMFS_INODE) + inode->i_flags |= S_PRIVATE; file = ERR_PTR(-ENOMEM); if (hugetlb_reserve_pages(inode, 0, diff --git a/fs/namei.c b/fs/namei.c index ae4e4c18b2ac..1c2105ed20c5 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -879,7 +879,7 @@ static inline int may_follow_link(struct nameidata *nd) return 0; /* Allowed if parent directory not sticky and world-writable. */ - parent = nd->path.dentry->d_inode; + parent = nd->inode; if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH)) return 0; @@ -1954,8 +1954,13 @@ OK: continue; } } - if (unlikely(!d_can_lookup(nd->path.dentry))) + if (unlikely(!d_can_lookup(nd->path.dentry))) { + if (nd->flags & LOOKUP_RCU) { + if (unlazy_walk(nd, NULL, 0)) + return -ECHILD; + } return -ENOTDIR; + } } } diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index 6904213a4363..ebf90e487c75 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -212,6 +212,7 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate, BUG_ON(!ls->ls_file); if (nfsd4_layout_setlease(ls)) { + fput(ls->ls_file); put_nfs4_file(fp); kmem_cache_free(nfs4_layout_stateid_cache, ls); return NULL; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 61dfb33f0559..95202719a1fd 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -4396,9 +4396,9 @@ laundromat_main(struct work_struct *laundry) queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ); } -static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp) +static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp) { - if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle)) + if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle)) return nfserr_bad_stateid; return nfs_ok; } @@ -4601,9 +4601,6 @@ nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags) { __be32 status; - status = nfs4_check_fh(fhp, ols); - if (status) - return status; status = nfsd4_check_openowner_confirmed(ols); if (status) return status; @@ -4690,6 +4687,9 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp, status = nfserr_bad_stateid; break; } + if (status) + goto out; + status = nfs4_check_fh(fhp, s); done: if (!status && filpp) @@ -4798,7 +4798,7 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_ status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, 
nfsd4_has_session(cstate)); if (status) return status; - return nfs4_check_fh(current_fh, stp); + return nfs4_check_fh(current_fh, &stp->st_stid); } /* diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 54633858733a..75e0563c09d1 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2143,6 +2143,7 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp, #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \ FATTR4_WORD0_RDATTR_ERROR) #define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID +#define WORD2_ABSENT_FS_ATTRS 0 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL static inline __be32 @@ -2171,7 +2172,7 @@ nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp, { return 0; } #endif -static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err) +static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32 *rdattr_err) { /* As per referral draft: */ if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS || @@ -2184,6 +2185,7 @@ static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err) } *bmval0 &= WORD0_ABSENT_FS_ATTRS; *bmval1 &= WORD1_ABSENT_FS_ATTRS; + *bmval2 &= WORD2_ABSENT_FS_ATTRS; return 0; } @@ -2246,8 +2248,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp, BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion)); if (exp->ex_fslocs.migrated) { - BUG_ON(bmval[2]); - status = fattr_handle_absent_fs(&bmval0, &bmval1, &rdattr_err); + status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err); if (status) goto out; } @@ -2286,8 +2287,8 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp, } #ifdef CONFIG_NFSD_V4_SECURITY_LABEL - if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) || - bmval[0] & FATTR4_WORD0_SUPPORTED_ATTRS) { + if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) || - bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) { err = security_inode_getsecctx(d_inode(dentry), &context, &contextlen); contextsupport = (err == 0); diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 92e48c70f0f0..39ddcaf0918f 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -412,16 +412,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags) { struct fsnotify_mark *lmark, *mark; + LIST_HEAD(to_free); + /* + * We have to be really careful here. Anytime we drop mark_mutex, e.g. + * fsnotify_clear_marks_by_inode() can come and free marks, even ones + * on our to_free list, so we have to use mark_mutex even when accessing + * that list. And freeing a mark requires us to drop mark_mutex. So we + * can reliably free only the first mark in the list. That's why we + * first move the marks to be freed onto the to_free list in one go and + * then free the marks on that list one by one.
+ */ mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) { - if (mark->flags & flags) { - fsnotify_get_mark(mark); - fsnotify_destroy_mark_locked(mark, group); - fsnotify_put_mark(mark); - } + if (mark->flags & flags) + list_move(&mark->g_list, &to_free); } mutex_unlock(&group->mark_mutex); + + while (1) { + mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); + if (list_empty(&to_free)) { + mutex_unlock(&group->mark_mutex); + break; + } + mark = list_first_entry(&to_free, struct fsnotify_mark, g_list); + fsnotify_get_mark(mark); + fsnotify_destroy_mark_locked(mark, group); + mutex_unlock(&group->mark_mutex); + fsnotify_put_mark(mark); + } } /* diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 1a35c6139656..0f5fd9db8194 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -685,7 +685,7 @@ static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb, if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) { u64 s = i_size_read(inode); - sector_t sector = (p_cpos << (osb->s_clustersize_bits - 9)) + + sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) + (do_div(s, osb->s_clustersize) >> 9); ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector, @@ -910,7 +910,7 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb, BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN)); ret = blkdev_issue_zeroout(osb->sb->s_bdev, - p_cpos << (osb->s_clustersize_bits - 9), + (u64)p_cpos << (osb->s_clustersize_bits - 9), zero_len_head >> 9, GFP_NOFS, false); if (ret < 0) mlog_errno(ret); diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 8b23aa2f52dd..23157e40dd74 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -4025,9 +4025,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb) osb->dc_work_sequence = osb->dc_wake_sequence; processed = osb->blocked_lock_count; - while (processed) { - BUG_ON(list_empty(&osb->blocked_lock_list)); - + /* + * blocked lock processing in this loop might call iput which can + * remove items off osb->blocked_lock_list. Downconvert up to + * 'processed' number of locks, but stop short if we had some + * removed in ocfs2_mark_lockres_freeing when downconverting. + */ + while (processed && !list_empty(&osb->blocked_lock_list)) { lockres = list_entry(osb->blocked_lock_list.next, struct ocfs2_lock_res, l_blocked_list); list_del_init(&lockres->l_blocked_list); diff --git a/fs/signalfd.c b/fs/signalfd.c index 7e412ad74836..270221fcef42 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo, * Other callers might not initialize the si_lsb field, * so check explicitly for the right codes here. 
*/ - if (kinfo->si_code == BUS_MCEERR_AR || - kinfo->si_code == BUS_MCEERR_AO) + if (kinfo->si_signo == SIGBUS && + (kinfo->si_code == BUS_MCEERR_AR || + kinfo->si_code == BUS_MCEERR_AO)) err |= __put_user((short) kinfo->si_addr_lsb, &uinfo->ssi_addr_lsb); #endif diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 48db6a56975f..5aa519711e0b 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -691,7 +691,7 @@ struct drm_vblank_crtc { struct timer_list disable_timer; /* delayed disable timer */ /* vblank counter, protected by dev->vblank_time_lock for writes */ - unsigned long count; + u32 count; /* vblank timestamps, protected by dev->vblank_time_lock for writes */ struct timeval time[DRM_VBLANKTIME_RBSIZE]; diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index c8fc187061de..918aa68b5199 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -168,6 +168,7 @@ struct drm_encoder_helper_funcs { * @get_modes: get mode list for this connector * @mode_valid: is this mode valid on the given connector? (optional) * @best_encoder: return the preferred encoder for this connector + * @atomic_best_encoder: atomic version of @best_encoder * * The helper operations are called by the mid-layer CRTC helper. */ @@ -176,6 +177,8 @@ struct drm_connector_helper_funcs { enum drm_mode_status (*mode_valid)(struct drm_connector *connector, struct drm_display_mode *mode); struct drm_encoder *(*best_encoder)(struct drm_connector *connector); + struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector, + struct drm_connector_state *connector_state); }; extern void drm_helper_disable_unused_functions(struct drm_device *dev); diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 45c39a37f924..8bc073d297db 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -172,6 +172,7 @@ {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ diff --git a/include/linux/fs.h b/include/linux/fs.h index cc008c338f5a..84b783f277f7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -55,7 +55,8 @@ struct vm_fault; extern void __init inode_init(void); extern void __init inode_init_early(void); -extern void __init files_init(unsigned long); +extern void __init files_init(void); +extern void __init files_maxfiles_init(void); extern struct files_stat_struct files_stat; extern unsigned long get_max_files(void); @@ -2245,7 +2246,7 @@ extern int ioctl_preallocate(struct file *filp, void __user *argp); /* fs/dcache.c */ extern void __init vfs_caches_init_early(void); -extern void __init vfs_caches_init(unsigned long); +extern void __init vfs_caches_init(void); extern struct kmem_cache *names_cachep; diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index f34e040b34e9..41c93844fb1d 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -631,15 +631,19 @@ static inline void ClearPageSlabPfmemalloc(struct page 
*page) 1 << PG_private | 1 << PG_private_2 | \ 1 << PG_writeback | 1 << PG_reserved | \ 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ - 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \ + 1 << PG_unevictable | __PG_MLOCKED | \ __PG_COMPOUND_LOCK) /* * Flags checked when a page is prepped for return by the page allocator. - * Pages being prepped should not have any flags set. If they are set, + * Pages being prepped should not have these flags set. If they are set, * there has been a kernel bug or struct page corruption. + * + * __PG_HWPOISON is exceptional because it needs to be kept beyond page's + * alloc-free cycle to prevent from reusing the page. */ -#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) +#define PAGE_FLAGS_CHECK_AT_PREP \ + (((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON) #define PAGE_FLAGS_PRIVATE \ (1 << PG_private | 1 << PG_private_2) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index df9fdf557689..065e10b81a77 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2913,11 +2913,11 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) * * PHY drivers may accept clones of transmitted packets for * timestamping via their phy_driver.txtstamp method. These drivers - * must call this function to return the skb back to the stack, with - * or without a timestamp. + * must call this function to return the skb back to the stack with a + * timestamp. * * @skb: clone of the original outgoing packet - * @hwtstamps: hardware time stamps, may be NULL if not available + * @hwtstamps: hardware time stamps * */ void skb_complete_tx_timestamp(struct sk_buff *skb, diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index efe3443572ba..413417f3707b 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -319,6 +319,7 @@ #define PCI_MSIX_PBA 8 /* Pending Bit Array offset */ #define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */ #define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */ +#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */ #define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */ /* MSI-X Table entry format */ diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h index 785c5ca0994b..51b8066a223b 100644 --- a/include/uapi/sound/asoc.h +++ b/include/uapi/sound/asoc.h @@ -77,7 +77,7 @@ #define SND_SOC_TPLG_NUM_TEXTS 16 /* ABI version */ -#define SND_SOC_TPLG_ABI_VERSION 0x2 +#define SND_SOC_TPLG_ABI_VERSION 0x3 /* Max size of TLV data */ #define SND_SOC_TPLG_TLV_SIZE 32 @@ -97,7 +97,8 @@ #define SND_SOC_TPLG_TYPE_PCM 7 #define SND_SOC_TPLG_TYPE_MANIFEST 8 #define SND_SOC_TPLG_TYPE_CODEC_LINK 9 -#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_CODEC_LINK +#define SND_SOC_TPLG_TYPE_PDATA 10 +#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_PDATA /* vendor block IDs - please add new vendor types to end */ #define SND_SOC_TPLG_TYPE_VENDOR_FW 1000 @@ -137,11 +138,19 @@ struct snd_soc_tplg_private { /* * Kcontrol TLV data.
*/ +struct snd_soc_tplg_tlv_dbscale { + __le32 min; + __le32 step; + __le32 mute; +} __attribute__((packed)); + struct snd_soc_tplg_ctl_tlv { - __le32 size; /* in bytes aligned to 4 */ - __le32 numid; /* control element numeric identification */ - __le32 count; /* number of elem in data array */ - __le32 data[SND_SOC_TPLG_TLV_SIZE]; + __le32 size; /* in bytes of this structure */ + __le32 type; /* SNDRV_CTL_TLVT_*, type of TLV */ + union { + __le32 data[SND_SOC_TPLG_TLV_SIZE]; + struct snd_soc_tplg_tlv_dbscale scale; + }; } __attribute__((packed)); /* @@ -155,9 +164,11 @@ struct snd_soc_tplg_channel { } __attribute__((packed)); /* - * Kcontrol Operations IDs + * Generic Operations IDs, for binding Kcontrol or Bytes ext ops + * Kcontrol ops need get/put/info. + * Bytes ext ops need get/put. */ -struct snd_soc_tplg_kcontrol_ops_id { +struct snd_soc_tplg_io_ops { __le32 get; __le32 put; __le32 info; @@ -171,8 +182,8 @@ struct snd_soc_tplg_ctl_hdr { __le32 type; char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; __le32 access; - struct snd_soc_tplg_kcontrol_ops_id ops; - __le32 tlv_size; /* non zero means control has TLV data */ + struct snd_soc_tplg_io_ops ops; + struct snd_soc_tplg_ctl_tlv tlv; } __attribute__((packed)); /* @@ -238,6 +249,7 @@ struct snd_soc_tplg_manifest { __le32 graph_elems; /* number of graph elements */ __le32 dai_elems; /* number of DAI elements */ __le32 dai_link_elems; /* number of DAI link elements */ + struct snd_soc_tplg_private priv; } __attribute__((packed)); /* @@ -259,7 +271,6 @@ struct snd_soc_tplg_mixer_control { __le32 invert; __le32 num_channels; struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN]; - struct snd_soc_tplg_ctl_tlv tlv; struct snd_soc_tplg_private priv; } __attribute__((packed)); @@ -303,6 +314,7 @@ struct snd_soc_tplg_bytes_control { __le32 mask; __le32 base; __le32 num_regs; + struct snd_soc_tplg_io_ops ext_ops; struct snd_soc_tplg_private priv; } __attribute__((packed)); @@ -347,6 +359,7 @@ struct snd_soc_tplg_dapm_widget { __le32 reg; /* negative reg = no direct dapm */ __le32 shift; /* bits to shift */ __le32 mask; /* non-shifted mask */ + __le32 subseq; /* sort within widget type */ __u32 invert; /* invert the power bit */ __u32 ignore_suspend; /* kept enabled over suspend */ __u16 event_flags; diff --git a/init/main.c b/init/main.c index c5d5626289ce..56506553d4d8 100644 --- a/init/main.c +++ b/init/main.c @@ -656,7 +656,7 @@ asmlinkage __visible void __init start_kernel(void) key_init(); security_init(); dbg_late_init(); - vfs_caches_init(totalram_pages); + vfs_caches_init(); signals_init(); /* rootfs populating might need page-writeback */ page_writeback_init(); diff --git a/ipc/mqueue.c b/ipc/mqueue.c index a24ba9fe5bb8..161a1807e6ef 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -142,7 +142,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info) if (!leaf) return -ENOMEM; INIT_LIST_HEAD(&leaf->msg_list); - info->qsize += sizeof(*leaf); } leaf->priority = msg->m_type; rb_link_node(&leaf->rb_node, parent, p); @@ -187,7 +186,6 @@ try_again: "lazy leaf delete!\n"); rb_erase(&leaf->rb_node, &info->msg_tree); if (info->node_cache) { - info->qsize -= sizeof(*leaf); kfree(leaf); } else { info->node_cache = leaf; @@ -200,7 +198,6 @@ try_again: if (list_empty(&leaf->msg_list)) { rb_erase(&leaf->rb_node, &info->msg_tree); if (info->node_cache) { - info->qsize -= sizeof(*leaf); kfree(leaf); } else { info->node_cache = leaf; @@ -1034,7 +1031,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
/* Save our speculative allocation into the cache */ INIT_LIST_HEAD(&new_leaf->msg_list); info->node_cache = new_leaf; - info->qsize += sizeof(*new_leaf); new_leaf = NULL; } else { kfree(new_leaf); @@ -1142,7 +1138,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, /* Save our speculative allocation into the cache */ INIT_LIST_HEAD(&new_leaf->msg_list); info->node_cache = new_leaf; - info->qsize += sizeof(*new_leaf); } else { kfree(new_leaf); } diff --git a/ipc/shm.c b/ipc/shm.c index 06e5cf2fe019..4aef24d91b63 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -545,7 +545,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) if ((shmflg & SHM_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) acctflag = VM_NORESERVE; - file = shmem_file_setup(name, size, acctflag); + file = shmem_kernel_file_setup(name, size, acctflag); } error = PTR_ERR(file); if (IS_ERR(file)) diff --git a/kernel/kthread.c b/kernel/kthread.c index 10e489c448fe..fdea0bee7b5a 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -97,6 +97,7 @@ bool kthread_should_park(void) { return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags); } +EXPORT_SYMBOL_GPL(kthread_should_park); /** * kthread_freezable_should_stop - should this freezable kthread return now? @@ -171,6 +172,7 @@ void kthread_parkme(void) { __kthread_parkme(to_kthread(current)); } +EXPORT_SYMBOL_GPL(kthread_parkme); static int kthread(void *_create) { @@ -411,6 +413,7 @@ void kthread_unpark(struct task_struct *k) if (kthread) __kthread_unpark(k, kthread); } +EXPORT_SYMBOL_GPL(kthread_unpark); /** * kthread_park - park a thread created by kthread_create(). @@ -441,6 +444,7 @@ int kthread_park(struct task_struct *k) } return ret; } +EXPORT_SYMBOL_GPL(kthread_park); /** * kthread_stop - stop a thread created by kthread_create(). diff --git a/kernel/module.c b/kernel/module.c index 4d2b82e610e2..b86b7bf1be38 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -602,13 +602,16 @@ const struct kernel_symbol *find_symbol(const char *name, } EXPORT_SYMBOL_GPL(find_symbol); -/* Search for module by name: must hold module_mutex. */ +/* + * Search for module by name: must hold module_mutex (or preempt disabled + * for read-only access). + */ static struct module *find_module_all(const char *name, size_t len, bool even_unformed) { struct module *mod; - module_assert_mutex(); + module_assert_mutex_or_preempt(); list_for_each_entry(mod, &modules, list) { if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) @@ -621,6 +624,7 @@ static struct module *find_module_all(const char *name, size_t len, struct module *find_module(const char *name) { + module_assert_mutex(); return find_module_all(name, strlen(name), false); } EXPORT_SYMBOL_GPL(find_module); diff --git a/kernel/signal.c b/kernel/signal.c index 836df8dac6cc..0f6bbbe77b46 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2748,12 +2748,15 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from) * Other callers might not initialize the si_lsb field, * so check explicitly for the right codes here. 
*/ - if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) + if (from->si_signo == SIGBUS && + (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)) err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); #endif #ifdef SEGV_BNDERR - err |= __put_user(from->si_lower, &to->si_lower); - err |= __put_user(from->si_upper, &to->si_upper); + if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) { + err |= __put_user(from->si_lower, &to->si_lower); + err |= __put_user(from->si_upper, &to->si_upper); + } #endif break; case __SI_CHLD: @@ -3017,7 +3020,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, int, sig, struct compat_siginfo __user *, uinfo) { - siginfo_t info; + siginfo_t info = {}; int ret = copy_siginfo_from_user32(&info, uinfo); if (unlikely(ret)) return ret; @@ -3061,7 +3064,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, int, sig, struct compat_siginfo __user *, uinfo) { - siginfo_t info; + siginfo_t info = {}; if (copy_siginfo_from_user32(&info, uinfo)) return -EFAULT; diff --git a/lib/iommu-common.c b/lib/iommu-common.c index df30632f0bef..ff19f66d3f7f 100644 --- a/lib/iommu-common.c +++ b/lib/iommu-common.c @@ -119,7 +119,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev, unsigned long align_mask = 0; if (align_order > 0) - align_mask = 0xffffffffffffffffl >> (64 - align_order); + align_mask = ~0ul >> (BITS_PER_LONG - align_order); /* Sanity check */ if (unlikely(npages == 0)) { diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c107094f79ba..097c7a4bfbd9 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1676,12 +1676,7 @@ static void __split_huge_page_refcount(struct page *page, /* after clearing PageTail the gup refcount can be released */ smp_mb__after_atomic(); - /* - * retain hwpoison flag of the poisoned tail page: - * fix for the unsuitable process killed on Guest Machine(KVM) - * by the memory-failure. - */ - page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON; + page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; page_tail->flags |= (page->flags & ((1L << PG_referenced) | (1L << PG_swapbacked) | diff --git a/mm/memory-failure.c b/mm/memory-failure.c index c53543d89282..ea5a93659488 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -909,6 +909,18 @@ int get_hwpoison_page(struct page *page) * directly for tail pages. */ if (PageTransHuge(head)) { + /* + * Non anonymous thp exists only in allocation/free time. We + * can't handle such a case correctly, so let's give it up. + * This should be better than triggering BUG_ON when kernel + * tries to touch the "partially handled" page. 
+ */ + if (!PageAnon(head)) { + pr_err("MCE: %#lx: non anonymous thp\n", + page_to_pfn(page)); + return 0; + } + if (get_page_unless_zero(head)) { if (PageTail(page)) get_page(page); @@ -1134,15 +1146,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags) } if (!PageHuge(p) && PageTransHuge(hpage)) { - if (!PageAnon(hpage)) { - pr_err("MCE: %#lx: non anonymous thp\n", pfn); - if (TestClearPageHWPoison(p)) - atomic_long_sub(nr_pages, &num_poisoned_pages); - put_page(p); - if (p != hpage) - put_page(hpage); - return -EBUSY; - } if (unlikely(split_huge_page(hpage))) { pr_err("MCE: %#lx: thp split failed\n", pfn); if (TestClearPageHWPoison(p)) @@ -1209,9 +1212,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags) if (!PageHWPoison(p)) { printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn); atomic_long_sub(nr_pages, &num_poisoned_pages); + unlock_page(hpage); put_page(hpage); - res = 0; - goto out; + return 0; } if (hwpoison_filter(p)) { if (TestClearPageHWPoison(p)) @@ -1656,6 +1659,8 @@ static int __soft_offline_page(struct page *page, int flags) inc_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page)); list_add(&page->lru, &pagelist); + if (!TestSetPageHWPoison(page)) + atomic_long_inc(&num_poisoned_pages); ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL, MIGRATE_SYNC, MR_MEMORY_FAILURE); if (ret) { @@ -1670,9 +1675,8 @@ static int __soft_offline_page(struct page *page, int flags) pfn, ret, page->flags); if (ret > 0) ret = -EIO; - } else { - SetPageHWPoison(page); - atomic_long_inc(&num_poisoned_pages); + if (TestClearPageHWPoison(page)) + atomic_long_dec(&num_poisoned_pages); } } else { pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n", diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 26fbba7d888f..003dbe4b060d 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -446,7 +446,7 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn) int nr_pages = PAGES_PER_SECTION; int nid = pgdat->node_id; int zone_type; - unsigned long flags; + unsigned long flags, pfn; int ret; zone_type = zone - pgdat->node_zones; @@ -461,6 +461,14 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn) pgdat_resize_unlock(zone->zone_pgdat, &flags); memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn, MEMMAP_HOTPLUG); + + /* online_page_range is called later and expects pages reserved */ + for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) { + if (!pfn_valid(pfn)) + continue; + + SetPageReserved(pfn_to_page(pfn)); + } return 0; } diff --git a/mm/migrate.c b/mm/migrate.c index ee401e4e5ef1..eb4267107d1f 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -880,7 +880,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage, /* Establish migration ptes or remove ptes */ if (page_mapped(page)) { try_to_unmap(page, - TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); + TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS| + TTU_IGNORE_HWPOISON); page_was_mapped = 1; } @@ -950,7 +951,10 @@ out: list_del(&page->lru); dec_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page)); - if (reason != MR_MEMORY_FAILURE) + /* Soft-offlined page shouldn't go through lru cache list */ + if (reason == MR_MEMORY_FAILURE) + put_page(page); + else putback_lru_page(page); } diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 22cddd3e5de8..5cccc127ef81 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2063,10 +2063,10 @@ static 
struct notifier_block ratelimit_nb = { */ void __init page_writeback_init(void) { + BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL)); + writeback_set_ratelimit(); register_cpu_notifier(&ratelimit_nb); - - BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL)); } /** diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ef19f22b2b7d..beda41710802 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -18,7 +18,6 @@ #include <linux/mm.h> #include <linux/swap.h> #include <linux/interrupt.h> -#include <linux/rwsem.h> #include <linux/pagemap.h> #include <linux/jiffies.h> #include <linux/bootmem.h> @@ -981,21 +980,21 @@ static void __init __free_pages_boot_core(struct page *page, #if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \ defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) -/* Only safe to use early in boot when initialisation is single-threaded */ + static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata; int __meminit early_pfn_to_nid(unsigned long pfn) { + static DEFINE_SPINLOCK(early_pfn_lock); int nid; - /* The system will behave unpredictably otherwise */ - BUG_ON(system_state != SYSTEM_BOOTING); - + spin_lock(&early_pfn_lock); nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache); - if (nid >= 0) - return nid; - /* just returns 0 */ - return 0; + if (nid < 0) + nid = 0; + spin_unlock(&early_pfn_lock); + + return nid; } #endif @@ -1060,7 +1059,15 @@ static void __init deferred_free_range(struct page *page, __free_pages_boot_core(page, pfn, 0); } -static __initdata DECLARE_RWSEM(pgdat_init_rwsem); +/* Completion tracking for deferred_init_memmap() threads */ +static atomic_t pgdat_init_n_undone __initdata; +static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp); + +static inline void __init pgdat_init_report_one_done(void) +{ + if (atomic_dec_and_test(&pgdat_init_n_undone)) + complete(&pgdat_init_all_done_comp); +} /* Initialise remaining memory on a node */ static int __init deferred_init_memmap(void *data) @@ -1077,7 +1084,7 @@ static int __init deferred_init_memmap(void *data) const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); if (first_init_pfn == ULONG_MAX) { - up_read(&pgdat_init_rwsem); + pgdat_init_report_one_done(); return 0; } @@ -1177,7 +1184,8 @@ free_range: pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages, jiffies_to_msecs(jiffies - start)); - up_read(&pgdat_init_rwsem); + + pgdat_init_report_one_done(); return 0; } @@ -1185,14 +1193,17 @@ void __init page_alloc_init_late(void) { int nid; + /* There will be num_node_state(N_MEMORY) threads */ + atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY)); for_each_node_state(nid, N_MEMORY) { - down_read(&pgdat_init_rwsem); kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); } /* Block until all are initialised */ - down_write(&pgdat_init_rwsem); - up_write(&pgdat_init_rwsem); + wait_for_completion(&pgdat_init_all_done_comp); + + /* Reinit limits that are based on free pages after the kernel is up */ + files_maxfiles_init(); } #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ @@ -1285,6 +1296,10 @@ static inline int check_new_page(struct page *page) bad_reason = "non-NULL mapping"; if (unlikely(atomic_read(&page->_count) != 0)) bad_reason = "nonzero _count"; + if (unlikely(page->flags & __PG_HWPOISON)) { + bad_reason = "HWPoisoned (hardware-corrupted)"; + bad_flags = __PG_HWPOISON; + } if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) { bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set"; bad_flags = PAGE_FLAGS_CHECK_AT_PREP; diff --git a/mm/shmem.c b/mm/shmem.c index 
4caf8ed24d65..dbe0c1e8349c 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3363,8 +3363,8 @@ put_path: * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be * kernel internal. There will be NO LSM permission checks against the * underlying inode. So users of this interface must do LSM checks at a - * higher layer. The one user is the big_key implementation. LSM checks - * are provided at the key level rather than the inode level. + * higher layer. The users are the big_key and shm implementations. LSM + * checks are provided at the key or shm level rather than the inode. * @name: name for dentry (to be seen in /proc/<pid>/maps * @size: size to be set for the file * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size diff --git a/mm/slab_common.c b/mm/slab_common.c index 3e5f8f29c286..86831105a09f 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -37,8 +37,7 @@ struct kmem_cache *kmem_cache; SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \ SLAB_FAILSLAB) -#define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ - SLAB_CACHE_DMA | SLAB_NOTRACK) +#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | SLAB_NOTRACK) /* * Merge control. If this is set then no merging of slab caches will occur. diff --git a/mm/vmscan.c b/mm/vmscan.c index e61445dce04e..8286938c70de 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -973,22 +973,18 @@ static unsigned long shrink_page_list(struct list_head *page_list, * caller can stall after page list has been processed. * * 2) Global or new memcg reclaim encounters a page that is - * not marked for immediate reclaim or the caller does not - * have __GFP_IO. In this case mark the page for immediate + * not marked for immediate reclaim, or the caller does not + * have __GFP_FS (or __GFP_IO if it's simply going to swap, + * not to fs). In this case mark the page for immediate * reclaim and continue scanning. * - * __GFP_IO is checked because a loop driver thread might + * Require may_enter_fs because we would wait on fs, which + * may not have submitted IO yet. And the loop driver might * enter reclaim, and deadlock if it waits on a page for * which it is needed to do the write (loop masks off * __GFP_IO|__GFP_FS for this reason); but more thought * would probably show more reasons. * - * Don't require __GFP_FS, since we're not going into the - * FS, just waiting on its writeback completion. Worryingly, - * ext4 gfs2 and xfs allocate pages with - * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing - * may_enter_fs here is liable to OOM on them. - * * 3) Legacy memcg encounters a page that is not already marked * PageReclaim. 
memcg does not have any dirty pages * throttling so we could easily OOM just because too many @@ -1005,7 +1001,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, /* Case 2 above */ } else if (sane_reclaim(sc) || - !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) { + !PageReclaim(page) || !may_enter_fs) { /* * This is slightly racy - end_page_writeback() * might have just cleared PageReclaim, then diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index fb54e6aed096..6d0b471eede8 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -1138,6 +1138,9 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv, * @bat_priv: the bat priv with all the soft interface information * @skb: packet to check * @hdr_size: size of the encapsulation header + * + * Returns true if the packet was snooped and consumed by DAT. False if the + * packet has to be delivered to the interface */ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, struct sk_buff *skb, int hdr_size) @@ -1145,7 +1148,7 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, uint16_t type; __be32 ip_src, ip_dst; uint8_t *hw_src, *hw_dst; - bool ret = false; + bool dropped = false; unsigned short vid; if (!atomic_read(&bat_priv->distributed_arp_table)) @@ -1174,12 +1177,17 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, /* if this REPLY is directed to a client of mine, let's deliver the * packet to the interface */ - ret = !batadv_is_my_client(bat_priv, hw_dst, vid); + dropped = !batadv_is_my_client(bat_priv, hw_dst, vid); + + /* if this REPLY is sent on behalf of a client of mine, let's drop the + * packet because the client will reply by itself + */ + dropped |= batadv_is_my_client(bat_priv, hw_src, vid); out: - if (ret) + if (dropped) kfree_skb(skb); - /* if ret == false -> packet has to be delivered to the interface */ - return ret; + /* if dropped == false -> deliver to the interface */ + return dropped; } /** diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index bb0158620628..cffa92dd9877 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -439,6 +439,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv, INIT_HLIST_NODE(&gw_node->list); gw_node->orig_node = orig_node; + gw_node->bandwidth_down = ntohl(gateway->bandwidth_down); + gw_node->bandwidth_up = ntohl(gateway->bandwidth_up); atomic_set(&gw_node->refcount, 1); spin_lock_bh(&bat_priv->gw.list_lock); diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index c002961da75d..a2fc843c2243 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -479,6 +479,9 @@ out: */ void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan) { + if (!vlan) + return; + if (atomic_dec_and_test(&vlan->refcount)) { spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock); hlist_del_rcu(&vlan->list); diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index b4824951010b..5e953297d3b2 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -594,6 +594,9 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, /* increase the refcounter of the related vlan */ vlan = batadv_softif_vlan_get(bat_priv, vid); + if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d", + addr, 
BATADV_PRINT_VID(vid))) + goto out; batadv_dbg(BATADV_DBG_TT, bat_priv, "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n", @@ -1034,6 +1037,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv, struct batadv_tt_local_entry *tt_local_entry; uint16_t flags, curr_flags = BATADV_NO_FLAGS; struct batadv_softif_vlan *vlan; + void *tt_entry_exists; tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); if (!tt_local_entry) @@ -1061,11 +1065,22 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv, * immediately purge it */ batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL); - hlist_del_rcu(&tt_local_entry->common.hash_entry); + + tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash, + batadv_compare_tt, + batadv_choose_tt, + &tt_local_entry->common); + if (!tt_entry_exists) + goto out; + + /* extra call to free the local tt entry */ batadv_tt_local_entry_free_ref(tt_local_entry); /* decrease the reference held for this vlan */ vlan = batadv_softif_vlan_get(bat_priv, vid); + if (!vlan) + goto out; + batadv_softif_vlan_free_ref(vlan); batadv_softif_vlan_free_ref(vlan); @@ -1166,8 +1181,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv) /* decrease the reference held for this vlan */ vlan = batadv_softif_vlan_get(bat_priv, tt_common_entry->vid); - batadv_softif_vlan_free_ref(vlan); - batadv_softif_vlan_free_ref(vlan); + if (vlan) { + batadv_softif_vlan_free_ref(vlan); + batadv_softif_vlan_free_ref(vlan); + } batadv_tt_local_entry_free_ref(tt_local); } @@ -3207,8 +3224,10 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) /* decrease the reference held for this vlan */ vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid); - batadv_softif_vlan_free_ref(vlan); - batadv_softif_vlan_free_ref(vlan); + if (vlan) { + batadv_softif_vlan_free_ref(vlan); + batadv_softif_vlan_free_ref(vlan); + } batadv_tt_local_entry_free_ref(tt_local); } diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 7ab191589541..e435438f95f0 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -7843,7 +7843,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent) /* Make sure we copy only the significant bytes based on the * encryption key size, and set the rest of the value to zeroes. 
*/ - memcpy(ev.key.val, key->val, sizeof(key->enc_size)); + memcpy(ev.key.val, key->val, key->enc_size); memset(ev.key.val + key->enc_size, 0, sizeof(ev.key.val) - key->enc_size); diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 6eb683d8e0c5..0f2408f6cdfe 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -112,6 +112,8 @@ static inline size_t br_port_info_size(void) + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */ + nla_total_size(1) /* IFLA_BRPORT_LEARNING */ + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */ + + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */ + + nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */ + 0; } @@ -504,6 +506,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = { [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 }, [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, + [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 }, + [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 }, }; /* Change the state of the port and notify spanning tree */ diff --git a/net/core/datagram.c b/net/core/datagram.c index 4967262b2707..617088aee21d 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -131,12 +131,12 @@ out_noerr: goto out; } -static int skb_set_peeked(struct sk_buff *skb) +static struct sk_buff *skb_set_peeked(struct sk_buff *skb) { struct sk_buff *nskb; if (skb->peeked) - return 0; + return skb; /* We have to unshare an skb before modifying it. */ if (!skb_shared(skb)) @@ -144,7 +144,7 @@ static int skb_set_peeked(struct sk_buff *skb) nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); skb->prev->next = nskb; skb->next->prev = nskb; @@ -157,7 +157,7 @@ done: skb->peeked = 1; - return 0; + return skb; } /** @@ -229,8 +229,9 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, continue; } - error = skb_set_peeked(skb); - if (error) + skb = skb_set_peeked(skb); + error = PTR_ERR(skb); + if (IS_ERR(skb)) goto unlock_err; atomic_inc(&skb->users); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 0e0fb30cbc04..de8d5cc5eb24 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3513,8 +3513,6 @@ static int pktgen_thread_worker(void *arg) set_freezable(); - __set_current_state(TASK_RUNNING); - while (!kthread_should_stop()) { pkt_dev = next_to_run(t); @@ -3559,7 +3557,6 @@ static int pktgen_thread_worker(void *arg) try_to_freeze(); } - set_current_state(TASK_INTERRUPTIBLE); pr_debug("%s stopping all device\n", t->tsk->comm); pktgen_stop(t); diff --git a/net/core/request_sock.c b/net/core/request_sock.c index 87b22c0bc08c..b42f0e26f89e 100644 --- a/net/core/request_sock.c +++ b/net/core/request_sock.c @@ -103,10 +103,16 @@ void reqsk_queue_destroy(struct request_sock_queue *queue) spin_lock_bh(&queue->syn_wait_lock); while ((req = lopt->syn_table[i]) != NULL) { lopt->syn_table[i] = req->dl_next; + /* Because of the following del_timer_sync(), + * we must release the spinlock here + * or risk a deadlock.
+ */ + spin_unlock_bh(&queue->syn_wait_lock); atomic_inc(&lopt->qlen_dec); - if (del_timer(&req->rsk_timer)) + if (del_timer_sync(&req->rsk_timer)) reqsk_put(req); reqsk_put(req); + spin_lock_bh(&queue->syn_wait_lock); } spin_unlock_bh(&queue->syn_wait_lock); } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 276758406065..aa0266f7d0ce 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -856,7 +856,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p, return -ENODEV; /* Use already configured phy mode */ - p->phy_interface = p->phy->interface; + if (p->phy_interface == PHY_INTERFACE_MODE_NA) + p->phy_interface = p->phy->interface; phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, p->phy_interface); diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 60021d0d9326..05e3145f7dc3 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -593,7 +593,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue, } spin_unlock(&queue->syn_wait_lock); - if (del_timer(&req->rsk_timer)) + if (del_timer_sync(&req->rsk_timer)) reqsk_put(req); return found; } diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index fe8cc183411e..95ea633e8356 100644 --- a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c @@ -226,7 +226,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet, synproxy_build_options(nth, opts); - synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); + synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, + niph, nth, tcp_hdr_size); } static bool diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index d27eb549ced6..93898e093d4e 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1348,7 +1348,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr); if (req) { nsk = tcp_check_req(sk, skb, req, false); - if (!nsk) + if (!nsk || nsk == sk) reqsk_put(req); return nsk; } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 83aa604f9273..1b8c5ba7d5f7 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1995,12 +1995,19 @@ void udp_v4_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_efree; - dst = sk->sk_rx_dst; + dst = READ_ONCE(sk->sk_rx_dst); if (dst) dst = dst_check(dst, 0); - if (dst) - skb_dst_set_noref(skb, dst); + if (dst) { + /* DST_NOCACHE can not be used without taking a reference */ + if (dst->flags & DST_NOCACHE) { + if (likely(atomic_inc_not_zero(&dst->__refcnt))) + skb_dst_set(skb, dst); + } else { + skb_dst_set_noref(skb, dst); + } + } } int udp_rcv(struct sk_buff *skb) diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index 6edb7b106de7..ebbb754c2111 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c @@ -37,12 +37,13 @@ synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr, } static void -synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb, +synproxy_send_tcp(const struct synproxy_net *snet, + const struct sk_buff *skb, struct sk_buff *nskb, struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo, struct ipv6hdr *niph, struct tcphdr *nth, unsigned int tcp_hdr_size) { - struct net *net = nf_ct_net((struct nf_conn *)nfct); + struct net *net = nf_ct_net(snet->tmpl); struct dst_entry *dst; struct flowi6 fl6; @@ -83,7 +84,8 @@ free_nskb: } static void -synproxy_send_client_synack(const 
struct sk_buff *skb, const struct tcphdr *th, +synproxy_send_client_synack(const struct synproxy_net *snet, + const struct sk_buff *skb, const struct tcphdr *th, const struct synproxy_options *opts) { struct sk_buff *nskb; @@ -119,7 +121,7 @@ synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th, synproxy_build_options(nth, opts); - synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, + synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); } @@ -163,7 +165,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet, synproxy_build_options(nth, opts); - synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, + synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, niph, nth, tcp_hdr_size); } @@ -203,7 +205,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet, synproxy_build_options(nth, opts); - synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); + synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); } static void @@ -241,7 +243,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet, synproxy_build_options(nth, opts); - synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); + synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, + niph, nth, tcp_hdr_size); } static bool @@ -301,7 +304,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par) XT_SYNPROXY_OPT_SACK_PERM | XT_SYNPROXY_OPT_ECN); - synproxy_send_client_synack(skb, th, &opts); + synproxy_send_client_synack(snet, skb, th, &opts); return NF_DROP; } else if (th->ack && !(th->fin || th->rst || th->syn)) { diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 54fccf0d705d..c0fa61eba8f2 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1844,6 +1844,7 @@ int ip6_route_add(struct fib6_config *cfg) int gwa_type; gw_addr = &cfg->fc_gateway; + gwa_type = ipv6_addr_type(gw_addr); /* if gw_addr is local we will fail to detect this in case * address is still TENTATIVE (DAD in progress). rt6_lookup() @@ -1851,11 +1852,12 @@ int ip6_route_add(struct fib6_config *cfg) * prefix route was assigned to, which might be non-loopback. */ err = -EINVAL; - if (ipv6_chk_addr_and_flags(net, gw_addr, NULL, 0, 0)) + if (ipv6_chk_addr_and_flags(net, gw_addr, + gwa_type & IPV6_ADDR_LINKLOCAL ? 
+ dev : NULL, 0, 0)) goto out; rt->rt6i_gateway = *gw_addr; - gwa_type = ipv6_addr_type(gw_addr); if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) { struct rt6_info *grt; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 52dd0d9974d6..97d9314ea361 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -943,7 +943,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb) &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb)); if (req) { nsk = tcp_check_req(sk, skb, req, false); - if (!nsk) + if (!nsk || nsk == sk) reqsk_put(req); return nsk; } diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 651039ad1681..3c20d02aee73 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -292,7 +292,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags) { struct nf_conn *tmpl; - tmpl = kzalloc(sizeof(struct nf_conn), GFP_KERNEL); + tmpl = kzalloc(sizeof(*tmpl), flags); if (tmpl == NULL) return NULL; @@ -303,7 +303,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags) if (zone) { struct nf_conntrack_zone *nf_ct_zone; - nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, GFP_ATOMIC); + nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, flags); if (!nf_ct_zone) goto out_free; nf_ct_zone->id = zone; @@ -1544,10 +1544,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) sz = nr_slots * sizeof(struct hlist_nulls_head); hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, get_order(sz)); - if (!hash) { - printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); + if (!hash) hash = vzalloc(sz); - } if (hash && nulls) for (i = 0; i < nr_slots; i++) diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c index 71f1e9fdfa18..d7f168527903 100644 --- a/net/netfilter/nf_synproxy_core.c +++ b/net/netfilter/nf_synproxy_core.c @@ -353,10 +353,8 @@ static int __net_init synproxy_net_init(struct net *net) int err = -ENOMEM; ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL); - if (IS_ERR(ct)) { - err = PTR_ERR(ct); + if (!ct) goto err1; - } if (!nfct_seqadj_ext_add(ct)) goto err2; diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index c6630030c912..43ddeee404e9 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -202,9 +202,10 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par, goto err1; ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL); - ret = PTR_ERR(ct); - if (IS_ERR(ct)) + if (!ct) { + ret = -ENOMEM; goto err2; + } ret = 0; if ((info->ct_events || info->exp_events) && diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index d8e2e3918ce2..67d210477863 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1096,6 +1096,11 @@ static int netlink_insert(struct sock *sk, u32 portid) err = __netlink_insert(table, sk); if (err) { + /* In case the hashtable backend returns with -EBUSY + * from here, it must not escape to the caller. 
+ */ + if (unlikely(err == -EBUSY)) + err = -EOVERFLOW; if (err == -EEXIST) err = -EADDRINUSE; nlk_sk(sk)->portid = 0; diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index a0ac410e9570..14da52ddd327 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -273,28 +273,36 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key, return 0; } -static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, - __be32 *addr, __be32 new_addr) +static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh, + __be32 addr, __be32 new_addr) { int transport_len = skb->len - skb_transport_offset(skb); + if (nh->frag_off & htons(IP_OFFSET)) + return; + if (nh->protocol == IPPROTO_TCP) { if (likely(transport_len >= sizeof(struct tcphdr))) inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb, - *addr, new_addr, 1); + addr, new_addr, 1); } else if (nh->protocol == IPPROTO_UDP) { if (likely(transport_len >= sizeof(struct udphdr))) { struct udphdr *uh = udp_hdr(skb); if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { inet_proto_csum_replace4(&uh->check, skb, - *addr, new_addr, 1); + addr, new_addr, 1); if (!uh->check) uh->check = CSUM_MANGLED_0; } } } +} +static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, + __be32 *addr, __be32 new_addr) +{ + update_ip_l4_checksum(skb, nh, *addr, new_addr); csum_replace4(&nh->check, *addr, new_addr); skb_clear_hash(skb); *addr = new_addr; diff --git a/net/rds/info.c b/net/rds/info.c index 9a6b4f66187c..140a44a5f7b7 100644 --- a/net/rds/info.c +++ b/net/rds/info.c @@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval, /* check for all kinds of wrapping and the like */ start = (unsigned long)optval; - if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) { + if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) { ret = -EINVAL; goto out; } diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 19cd8904efa0..2d1be4a760fd 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -101,6 +101,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, return ret; ret = ACT_P_CREATED; } else { + if (bind) + return 0; if (!ovr) { tcf_hash_release(a, bind); return -EEXIST; diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 21ca33c9f036..a9ba030435a2 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -288,10 +288,26 @@ begin: static void fq_codel_reset(struct Qdisc *sch) { - struct sk_buff *skb; + struct fq_codel_sched_data *q = qdisc_priv(sch); + int i; - while ((skb = fq_codel_dequeue(sch)) != NULL) - kfree_skb(skb); + INIT_LIST_HEAD(&q->new_flows); + INIT_LIST_HEAD(&q->old_flows); + for (i = 0; i < q->flows_cnt; i++) { + struct fq_codel_flow *flow = q->flows + i; + + while (flow->head) { + struct sk_buff *skb = dequeue_head(flow); + + qdisc_qstats_backlog_dec(sch, skb); + kfree_skb(skb); + } + + INIT_LIST_HEAD(&flow->flowchain); + codel_vars_init(&flow->cvars); + } + memset(q->backlogs, 0, q->flows_cnt * sizeof(u32)); + sch->q.qlen = 0; } static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = { diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl index 9cb8522d8d22..f3d3fb42b873 100755 --- a/scripts/kconfig/streamline_config.pl +++ b/scripts/kconfig/streamline_config.pl @@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? 
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 9cb8522d8d22..f3d3fb42b873 100755
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.');
 my $kconfig = $ARGV[1];
 my $lsmod_file = $ENV{'LSMOD'};
 
-my @makefiles = `find $ksource -name Makefile 2>/dev/null`;
+my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`;
 chomp @makefiles;
 
 my %depends;
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 9ed32502470e..5ebb89687936 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -406,6 +406,7 @@ static __init int yama_init(void)
 	 */
 	if (!security_module_enable("yama"))
 		return 0;
+	yama_add_hooks();
 #endif
 
 	pr_info("Yama: becoming mindful.\n");
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
index 7bb988fa6b6d..2a153d260836 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -740,8 +740,9 @@ static int handle_in_packet(struct amdtp_stream *s,
 	    s->data_block_counter != UINT_MAX)
 		data_block_counter = s->data_block_counter;
 
-	if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) && data_block_counter == 0) ||
-	    (s->data_block_counter == UINT_MAX)) {
+	if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
+	     data_block_counter == s->tx_first_dbc) ||
+	    s->data_block_counter == UINT_MAX) {
 		lost = false;
 	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
 		lost = data_block_counter != s->data_block_counter;
diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
index 26b909329e54..b2cf9e75693b 100644
--- a/sound/firewire/amdtp.h
+++ b/sound/firewire/amdtp.h
@@ -157,6 +157,8 @@ struct amdtp_stream {
 	/* quirk: fixed interval of dbc between previous/current packets. */
 	unsigned int tx_dbc_interval;
+	/* quirk: the value of the dbc field in the first packet. */
+	unsigned int tx_first_dbc;
 
 	bool callbacked;
 	wait_queue_head_t callback_wait;
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index c670db4eee70..c94a432f7cc6 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -248,10 +248,16 @@ efw_probe(struct fw_unit *unit,
 	err = get_hardware_info(efw);
 	if (err < 0)
 		goto error;
-	if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2)
-		efw->is_af2 = true;
+	/* AudioFire8 (since 2009) and AudioFirePre8 */
 	if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9)
 		efw->is_af9 = true;
+	/* These models use the same firmware. */
+	if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2 ||
+	    entry->model_id == MODEL_ECHO_AUDIOFIRE_4 ||
+	    entry->model_id == MODEL_ECHO_AUDIOFIRE_9 ||
+	    entry->model_id == MODEL_GIBSON_RIP ||
+	    entry->model_id == MODEL_GIBSON_GOLDTOP)
+		efw->is_fireworks3 = true;
 
 	snd_efw_proc_init(efw);
diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
index c33252b7bc84..084d414b228c 100644
--- a/sound/firewire/fireworks/fireworks.h
+++ b/sound/firewire/fireworks/fireworks.h
@@ -70,8 +70,8 @@ struct snd_efw {
 	bool resp_addr_changable;
 
 	/* for quirks */
-	bool is_af2;
 	bool is_af9;
+	bool is_fireworks3;
 	u32 firmware_version;
 
 	unsigned int midi_in_ports;
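The amdtp and fireworks changes above replace a boolean "skip the initial dbc check" quirk with a per-stream tx_first_dbc value: after a bus reset, the first packet's data block counter (dbc) is compared against a device-specific start value rather than against 0. A simplified, userspace model of that continuity check (struct and field names here are illustrative, not the driver's):

#include <limits.h>
#include <stdbool.h>

struct stream {
	unsigned int data_block_counter;	/* last dbc seen; UINT_MAX = none yet */
	unsigned int tx_first_dbc;		/* quirk: expected dbc of a first packet */
	bool skip_dbc_zero_check;		/* device restarts dbc at bus reset */
};

static bool dbc_lost(const struct stream *s, unsigned int dbc)
{
	/* A restart is fine if it begins at the device's known start value. */
	if (s->skip_dbc_zero_check && dbc == s->tx_first_dbc)
		return false;
	/* Nothing received yet: accept whatever arrives first. */
	if (s->data_block_counter == UINT_MAX)
		return false;
	/* Otherwise the counter must continue where the last packet left off. */
	return dbc != s->data_block_counter;
}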
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index a0762dd6231e..7e353f1f7bff 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -172,9 +172,15 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw)
 	efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT;
 	/* Fireworks reset dbc at bus reset. */
 	efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK;
-	/* AudioFire2 starts packets with non-zero dbc. */
-	if (efw->is_af2)
-		efw->tx_stream.flags |= CIP_SKIP_INIT_DBC_CHECK;
+	/*
+	 * But recent firmware starts packets with non-zero dbc.
+	 * Driver version 5.7.6 installs firmware version 5.7.3.
+	 */
+	if (efw->is_fireworks3 &&
+	    (efw->firmware_version == 0x5070000 ||
+	     efw->firmware_version == 0x5070300 ||
+	     efw->firmware_version == 0x5080000))
+		efw->tx_stream.tx_first_dbc = 0x02;
 	/* AudioFire9 always reports wrong dbs. */
 	if (efw->is_af9)
 		efw->tx_stream.flags |= CIP_WRONG_DBS;
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index b2da19b60f4e..358f16195483 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -44,16 +44,10 @@ int snd_hdac_ext_bus_parse_capabilities(struct hdac_ext_bus *ebus)
 
 	offset = snd_hdac_chip_readl(bus, LLCH);
 
-	if (offset < 0)
-		return -EIO;
-
 	/* Let's walk the linked capabilities list */
 	do {
 		cur_cap = _snd_hdac_chip_read(l, bus, offset);
 
-		if (cur_cap < 0)
-			return -EIO;
-
 		dev_dbg(bus->dev, "Capability version: 0x%x\n",
 			((cur_cap & AZX_CAP_HDR_VER_MASK) >> AZX_CAP_HDR_VER_OFF));
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index f8ffbdbb450d..3de47dd1a76d 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -299,7 +299,7 @@ hdac_ext_host_stream_assign(struct hdac_ext_bus *ebus,
 		if (stream->direction != substream->stream)
 			continue;
 
-		if (stream->opened) {
+		if (!stream->opened) {
 			if (!hstream->decoupled)
 				snd_hdac_ext_stream_decouple(ebus, hstream, true);
 			res = hstream;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index c456c04e0928..0b9847affbec 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5189,6 +5189,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index 6492bca8c70f..4ca12665ff73 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -88,7 +88,7 @@ static int dac_mute_put(struct snd_kcontrol *ctl,
 	int changed;
 
 	mutex_lock(&chip->mutex);
-	changed = !value->value.integer.value[0] != chip->dac_mute;
+	changed = (!value->value.integer.value[0]) != chip->dac_mute;
 	if (changed) {
 		chip->dac_mute = !value->value.integer.value[0];
 		chip->model.update_dac_mute(chip);
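The oxygen_mixer change above is purely about readability: '!' already binds tighter than '!=', so the old and new expressions are equivalent, but the unparenthesized form is exactly what a typo for "value != mute" would look like, and newer compilers warn about it. A two-line demonstration:

#include <stdio.h>

int main(void)
{
	long raw = 5;	/* control value from userspace */
	int mute = 1;	/* current mute state */

	int a = !raw != mute;	/* parsed as (!raw) != mute */
	int b = (!raw) != mute;	/* same result, intent made explicit */

	printf("%d %d\n", a, b);	/* prints: 1 1 */
	return 0;
}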
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index d7ec4756e45b..8e36198474d9 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -457,14 +457,14 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
 	case SND_SOC_DAIFMT_RIGHT_J:
 		if (params_width(params) == 16) {
 			snd_soc_update_bits(codec, CS4265_DAC_CTL,
-				CS4265_DAC_CTL_DIF, (1 << 5));
+				CS4265_DAC_CTL_DIF, (2 << 4));
 			snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
-				CS4265_SPDIF_CTL2_DIF, (1 << 7));
+				CS4265_SPDIF_CTL2_DIF, (2 << 6));
 		} else {
 			snd_soc_update_bits(codec, CS4265_DAC_CTL,
-				CS4265_DAC_CTL_DIF, (3 << 5));
+				CS4265_DAC_CTL_DIF, (3 << 4));
 			snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
-				CS4265_SPDIF_CTL2_DIF, (1 << 7));
+				CS4265_SPDIF_CTL2_DIF, (3 << 6));
 		}
 		break;
 	case SND_SOC_DAIFMT_LEFT_J:
@@ -473,7 +473,7 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
 			snd_soc_update_bits(codec, CS4265_ADC_CTL,
 				CS4265_ADC_DIF, 0);
 			snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
-				CS4265_SPDIF_CTL2_DIF, (1 << 6));
+				CS4265_SPDIF_CTL2_DIF, 0);
 			break;
 	default:
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index e9cc3aae5366..961bd7e5877e 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3341,6 +3341,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
 		break;
 
 	case RT5645_DMIC_DATA_GPIO5:
+		regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
+			RT5645_I2S2_DAC_PIN_MASK, RT5645_I2S2_DAC_PIN_GPIO);
 		regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1,
 			RT5645_DMIC_1_DP_MASK, RT5645_DMIC_1_DP_GPIO5);
 		regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index 0353a6a273ab..278bb9f464c4 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -1693,6 +1693,10 @@
 #define RT5645_GP6_PIN_SFT			6
 #define RT5645_GP6_PIN_GPIO6			(0x0 << 6)
 #define RT5645_GP6_PIN_DMIC2_SDA		(0x1 << 6)
+#define RT5645_I2S2_DAC_PIN_MASK		(0x1 << 4)
+#define RT5645_I2S2_DAC_PIN_SFT			4
+#define RT5645_I2S2_DAC_PIN_I2S			(0x0 << 4)
+#define RT5645_I2S2_DAC_PIN_GPIO		(0x1 << 4)
 #define RT5645_GP8_PIN_MASK			(0x1 << 3)
 #define RT5645_GP8_PIN_SFT			3
 #define RT5645_GP8_PIN_GPIO8			(0x0 << 3)
diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
index 4c01bb43928d..5bbaa667bec1 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
@@ -701,6 +701,8 @@ int sst_byt_dsp_init(struct device *dev, struct sst_pdata *pdata)
 	if (byt == NULL)
 		return -ENOMEM;
 
+	byt->dev = dev;
+
 	ipc = &byt->ipc;
 	ipc->dev = dev;
 	ipc->ops.tx_msg = byt_tx_msg;
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index f95f271aab0c..f6efa9d4acad 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -2119,6 +2119,8 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
 	if (hsw == NULL)
 		return -ENOMEM;
 
+	hsw->dev = dev;
+
 	ipc = &hsw->ipc;
 	ipc->dev = dev;
 	ipc->ops.tx_msg = hsw_tx_msg;
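The two Intel SST hunks above fix the same omission in both drivers: the context's own dev pointer was never set, so anything that later logged or allocated through byt->dev or hsw->dev ran against a NULL device. A minimal sketch of the pattern, under hypothetical names (struct dsp_ctx is not the driver's type):

struct device;

struct dsp_ctx {
	struct device *dev;	/* back-pointer used for dev_err(), DMA, ... */
};

static int dsp_ctx_init(struct dsp_ctx *ctx, struct device *dev)
{
	/* Set the back-pointer first: later init steps may log through it. */
	ctx->dev = dev;

	/* ... remaining initialization can now safely use ctx->dev ... */
	return 0;
}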
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 59ac211f8fe7..31068b8f3db0 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -33,6 +33,7 @@
 #include <sound/soc.h>
 #include <sound/soc-dapm.h>
 #include <sound/soc-topology.h>
+#include <sound/tlv.h>
 
 /*
  * We make several passes over the data (since it won't necessarily be ordered)
@@ -534,7 +535,7 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
 			k->put = bops[i].put;
 		if (k->get == NULL && bops[i].id == hdr->ops.get)
 			k->get = bops[i].get;
-		if (k->info == NULL && ops[i].id == hdr->ops.info)
+		if (k->info == NULL && bops[i].id == hdr->ops.info)
 			k->info = bops[i].info;
 	}
 
@@ -579,28 +580,51 @@ static int soc_tplg_init_kcontrol(struct soc_tplg *tplg,
 	return 0;
 }
 
+
+static int soc_tplg_create_tlv_db_scale(struct soc_tplg *tplg,
+	struct snd_kcontrol_new *kc, struct snd_soc_tplg_tlv_dbscale *scale)
+{
+	unsigned int item_len = 2 * sizeof(unsigned int);
+	unsigned int *p;
+
+	p = kzalloc(item_len + 2 * sizeof(unsigned int), GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+
+	p[0] = SNDRV_CTL_TLVT_DB_SCALE;
+	p[1] = item_len;
+	p[2] = scale->min;
+	p[3] = (scale->step & TLV_DB_SCALE_MASK)
+		| (scale->mute ? TLV_DB_SCALE_MUTE : 0);
+
+	kc->tlv.p = (void *)p;
+	return 0;
+}
+
 static int soc_tplg_create_tlv(struct soc_tplg *tplg,
-	struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_tlv *tplg_tlv)
+	struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_hdr *tc)
 {
-	struct snd_ctl_tlv *tlv;
-	int size;
+	struct snd_soc_tplg_ctl_tlv *tplg_tlv;
 
-	if (tplg_tlv->count == 0)
+	if (!(tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE))
 		return 0;
 
-	size = ((tplg_tlv->count + (sizeof(unsigned int) - 1)) &
-		~(sizeof(unsigned int) - 1));
-	tlv = kzalloc(sizeof(*tlv) + size, GFP_KERNEL);
-	if (tlv == NULL)
-		return -ENOMEM;
-
-	dev_dbg(tplg->dev, " created TLV type %d size %d bytes\n",
-		tplg_tlv->numid, size);
+	if (tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
+		kc->tlv.c = snd_soc_bytes_tlv_callback;
+	} else {
+		tplg_tlv = &tc->tlv;
+		switch (tplg_tlv->type) {
+		case SNDRV_CTL_TLVT_DB_SCALE:
+			return soc_tplg_create_tlv_db_scale(tplg, kc,
+					&tplg_tlv->scale);
-	tlv->numid = tplg_tlv->numid;
-	tlv->length = size;
-	memcpy(&tlv->tlv[0], tplg_tlv->data, size);
-	kc->tlv.p = (void *)tlv;
+		/* TODO: add support for other TLV types */
+		default:
+			dev_dbg(tplg->dev, "Unsupported TLV type %d\n",
+				tplg_tlv->type);
+			return -EINVAL;
+		}
+	}
 
 	return 0;
 }
@@ -772,7 +796,7 @@ static int soc_tplg_dmixer_create(struct soc_tplg *tplg, unsigned int count,
 	}
 
 	/* create any TLV data */
-	soc_tplg_create_tlv(tplg, &kc, &mc->tlv);
+	soc_tplg_create_tlv(tplg, &kc, &mc->hdr);
 
 	/* register control here */
 	err = soc_tplg_add_kcontrol(tplg, &kc,
@@ -1350,6 +1374,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
 	template.reg = w->reg;
 	template.shift = w->shift;
 	template.mask = w->mask;
+	template.subseq = w->subseq;
 	template.on_val = w->invert ? 0 : 1;
 	template.off_val = w->invert ? 1 : 0;
 	template.ignore_suspend = w->ignore_suspend;
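For reference, the four-word block that soc_tplg_create_tlv_db_scale() above hands to kc->tlv.p follows the standard ALSA dB-scale TLV layout: type, payload size in bytes, minimum volume in 0.01 dB units, then the step size with an optional mute flag. A userspace sketch with made-up values:

#include <stdio.h>

#define SNDRV_CTL_TLVT_DB_SCALE	1
#define TLV_DB_SCALE_MASK	0xffff
#define TLV_DB_SCALE_MUTE	0x10000

int main(void)
{
	unsigned int tlv[4];

	tlv[0] = SNDRV_CTL_TLVT_DB_SCALE;
	tlv[1] = 2 * sizeof(unsigned int);	/* payload: min + step words */
	tlv[2] = (unsigned int)-6400;		/* min: -64.00 dB */
	tlv[3] = (50 & TLV_DB_SCALE_MASK)	/* 0.50 dB per step */
		| TLV_DB_SCALE_MUTE;		/* lowest value means muted */

	printf("type=%u len=%u min=%d step=%u mute=%u\n",
	       tlv[0], tlv[1], (int)tlv[2],
	       tlv[3] & TLV_DB_SCALE_MASK, !!(tlv[3] & TLV_DB_SCALE_MUTE));
	return 0;
}

Note that p[1] counts only the payload after the two header words, which is why item_len is 2 * sizeof(unsigned int) even though four words are allocated.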