221 files changed, 1942 insertions, 925 deletions
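A recurring change in this series converts tlb_gather_mmu() across arm, arm64, ia64, s390, sh and um from a boolean full_mm_flush argument to an explicit (start, end) range, re-deriving the full-mm case from the range itself via tlb->fullmm = !(start | (end+1)). That expression is non-zero exactly when start == 0 and end == ~0UL, so a full-mm flush is requested as the pair (0, -1). A minimal standalone sketch of the encoding (not kernel code, just the arithmetic):

#include <assert.h>

/* Non-zero only when start == 0 and end == ~0UL, i.e. end + 1 == 0. */
static int is_fullmm(unsigned long start, unsigned long end)
{
	return !(start | (end + 1));
}

int main(void)
{
	assert(is_fullmm(0UL, ~0UL));		/* whole address space */
	assert(!is_fullmm(0UL, 0x1000UL));	/* partial range */
	assert(!is_fullmm(0x1000UL, ~0UL));	/* partial range */
	return 0;
}

Storing the range in the gather structure presumably lets the flush side limit itself to the span actually unmapped instead of assuming a full flush.
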
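The arch/arm/include/asm/spinlock.h hunks below rework arch_write_trylock() and arch_read_trylock() to retry when the strex store-exclusive fails for a reason other than contention, rather than misreporting the lock as busy (the same file also gains an earlyclobber on arch_spin_trylock's res output). A sketch of the same idea using C11 atomics, where compare_exchange_weak plays the role of strex since it too may fail spuriously — illustrative only, not the kernel's implementation:

#include <stdatomic.h>
#include <stdbool.h>

static bool write_trylock_sketch(atomic_ulong *lock)
{
	unsigned long expected = 0;

	/* Loop only on spurious CAS failures; real contention returns false. */
	while (!atomic_compare_exchange_weak(lock, &expected, 0x80000000UL)) {
		if (expected != 0)
			return false;	/* lock genuinely held for write */
		expected = 0;		/* spurious failure: retry */
	}
	return true;
}
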
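Two small fixes further down are classic macro hygiene: m68k's do_div() now evaluates its base argument exactly once by caching it in a local __base, and radeon's WREG32_OR() parenthesizes its argument before applying ~. A minimal sketch of the operator-precedence failure the radeon change guards against (the macro names here are illustrative, not kernel macros):

#include <stdio.h>

#define MASK_BAD(x)	(~x)	/* MASK_BAD(a | b) expands to (~a | b)  */
#define MASK_GOOD(x)	(~(x))	/* MASK_GOOD(a | b) yields ~(a | b)    */

int main(void)
{
	unsigned int a = 0x0f, b = 0xf0;

	printf("bad:  %#x\n", MASK_BAD(a | b));		/* 0xfffffff0 */
	printf("good: %#x\n", MASK_GOOD(a | b));	/* 0xffffff00 */
	return 0;
}
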
diff --git a/MAINTAINERS b/MAINTAINERS index 7cacc88dc79c..229c66b12cc2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5581,9 +5581,9 @@ S: Maintained F: drivers/media/tuners/mxl5007t.* MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE) -M: Andrew Gallatin <gallatin@myri.com> +M: Hyong-Youb Kim <hykim@myri.com> L: netdev@vger.kernel.org -W: http://www.myri.com/scs/download-Myri10GE.html +W: https://www.myricom.com/support/downloads/myri10ge.html S: Supported F: drivers/net/ethernet/myricom/myri10ge/ @@ -7366,7 +7366,6 @@ F: drivers/net/ethernet/sfc/ SGI GRU DRIVER M: Dimitri Sivanich <sivanich@sgi.com> -M: Robin Holt <holt@sgi.com> S: Maintained F: drivers/misc/sgi-gru/ @@ -7386,7 +7385,8 @@ S: Maintained for 2.6. F: Documentation/sgi-visws.txt SGI XP/XPC/XPNET DRIVER -M: Robin Holt <holt@sgi.com> +M: Cliff Whickman <cpw@sgi.com> +M: Robin Holt <robinmholt@gmail.com> S: Maintained F: drivers/misc/sgi-xp/ @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 11 SUBLEVEL = 0 -EXTRAVERSION = -rc5 +EXTRAVERSION = -rc6 NAME = Linux for Workgroups # *DOCUMENTATION* diff --git a/arch/Kconfig b/arch/Kconfig index 8d2ae24b9f4a..1feb169274fe 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -407,6 +407,12 @@ config CLONE_BACKWARDS2 help Architecture has the first two arguments of clone(2) swapped. +config CLONE_BACKWARDS3 + bool + help + Architecture has tls passed as the 3rd argument of clone(2), + not the 5th one. + config ODD_RT_SIGACTION bool help diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h index 6462a721ebd4..a252c0bfacf5 100644 --- a/arch/arm/include/asm/smp_plat.h +++ b/arch/arm/include/asm/smp_plat.h @@ -88,4 +88,7 @@ static inline u32 mpidr_hash_size(void) { return 1 << mpidr_hash.bits; } + +extern int platform_can_cpu_hotplug(void); + #endif diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index f8b8965666e9..b07c09e5a0ac 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -107,7 +107,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) " subs %1, %0, %0, ror #16\n" " addeq %0, %0, %4\n" " strexeq %2, %0, [%3]" - : "=&r" (slock), "=&r" (contended), "=r" (res) + : "=&r" (slock), "=&r" (contended), "=&r" (res) : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) : "cc"); } while (res); @@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw) static inline int arch_write_trylock(arch_rwlock_t *rw) { - unsigned long tmp; + unsigned long contended, res; - __asm__ __volatile__( -" ldrex %0, [%1]\n" -" teq %0, #0\n" -" strexeq %0, %2, [%1]" - : "=&r" (tmp) - : "r" (&rw->lock), "r" (0x80000000) - : "cc"); + do { + __asm__ __volatile__( + " ldrex %0, [%2]\n" + " mov %1, #0\n" + " teq %0, #0\n" + " strexeq %1, %3, [%2]" + : "=&r" (contended), "=&r" (res) + : "r" (&rw->lock), "r" (0x80000000) + : "cc"); + } while (res); - if (tmp == 0) { + if (!contended) { smp_mb(); return 1; } else { @@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) static inline int arch_read_trylock(arch_rwlock_t *rw) { - unsigned long tmp, tmp2 = 1; + unsigned long contended, res; - __asm__ __volatile__( -" ldrex %0, [%2]\n" -" adds %0, %0, #1\n" -" strexpl %1, %0, [%2]\n" - : "=&r" (tmp), "+r" (tmp2) - : "r" (&rw->lock) - : "cc"); + do { + __asm__ __volatile__( + " ldrex %0, [%2]\n" + " mov %1, #0\n" + " adds %0, %0, #1\n" + " strexpl %1, %0, [%2]" + : "=&r" (contended), "=&r" (res) + : "r" (&rw->lock) + : "cc"); + } while (res); - smp_mb(); - return tmp2 == 0; + /* If the lock is negative, then it is 
already held for write. */ + if (contended < 0x80000000) { + smp_mb(); + return 1; + } else { + return 0; + } } /* read_can_lock - would read_trylock() succeed? */ diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index 46e7cfb3e721..0baf7f0d9394 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -43,6 +43,7 @@ struct mmu_gather { struct mm_struct *mm; unsigned int fullmm; struct vm_area_struct *vma; + unsigned long start, end; unsigned long range_start; unsigned long range_end; unsigned int nr; @@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb) } static inline void -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm) +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) { tlb->mm = mm; - tlb->fullmm = fullmm; + tlb->fullmm = !(start | (end+1)); + tlb->start = start; + tlb->end = end; tlb->vma = NULL; tlb->max = ARRAY_SIZE(tlb->local); tlb->pages = tlb->local; diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index d40d0ef389db..9cbe70c8b0ef 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -357,7 +357,8 @@ ENDPROC(__pabt_svc) .endm .macro kuser_cmpxchg_check -#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) +#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \ + !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) #ifndef CONFIG_MMU #warning "NPTL on non MMU needs fixing" #else diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c index 25442f451148..fc7920288a3d 100644 --- a/arch/arm/kernel/fiq.c +++ b/arch/arm/kernel/fiq.c @@ -84,17 +84,13 @@ int show_fiq_list(struct seq_file *p, int prec) void set_fiq_handler(void *start, unsigned int length) { -#if defined(CONFIG_CPU_USE_DOMAINS) - void *base = (void *)0xffff0000; -#else void *base = vectors_page; -#endif unsigned offset = FIQ_OFFSET; memcpy(base + offset, start, length); + if (!cache_is_vipt_nonaliasing()) + flush_icache_range(base + offset, offset + length); flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length); - if (!vectors_high()) - flush_icache_range(offset, offset + length); } int claim_fiq(struct fiq_handler *f) diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 4fb074c446bf..d7c82df69243 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -15,6 +15,7 @@ #include <asm/mmu_context.h> #include <asm/cacheflush.h> #include <asm/mach-types.h> +#include <asm/smp_plat.h> #include <asm/system_misc.h> extern const unsigned char relocate_new_kernel[]; @@ -39,6 +40,14 @@ int machine_kexec_prepare(struct kimage *image) int i, err; /* + * Validate that if the current HW supports SMP, then the SW supports + * and implements CPU hotplug for the current HW. If not, we won't be + * able to kexec reliably, so fail the prepare operation. + */ + if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug()) + return -EINVAL; + + /* * No segment at default ATAGs address. try to locate * a dtb using magic. */ @@ -134,10 +143,13 @@ void machine_kexec(struct kimage *image) unsigned long reboot_code_buffer_phys; void *reboot_code_buffer; - if (num_online_cpus() > 1) { - pr_err("kexec: error: multiple CPUs still online\n"); - return; - } + /* + * This can only happen if machine_shutdown() failed to disable some + * CPU, and that can only happen if the checks in + * machine_kexec_prepare() were not correct. 
If this fails, we can't + * reliably kexec anyway, so BUG_ON is appropriate. + */ + BUG_ON(num_online_cpus() > 1); page_list = image->head & PAGE_MASK; diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index d9f5cd4e533f..e186ee1e63f6 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map) static int armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) { - int mapping = (*event_map)[config]; + int mapping; + + if (config >= PERF_COUNT_HW_MAX) + return -EINVAL; + + mapping = (*event_map)[config]; return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; } @@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events, struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct pmu *leader_pmu = event->group_leader->pmu; + if (is_software_event(event)) + return 1; + if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) return 1; diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 536c85fe72a8..94f6b05f9e24 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -462,7 +462,7 @@ int in_gate_area_no_mm(unsigned long addr) { return in_gate_area(NULL, addr); } -#define is_gate_vma(vma) ((vma) = &gate_vma) +#define is_gate_vma(vma) ((vma) == &gate_vma) #else #define is_gate_vma(vma) 0 #endif diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index c2b4f8f0be9a..2dc19349eb19 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -145,6 +145,16 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle) return -ENOSYS; } +int platform_can_cpu_hotplug(void) +{ +#ifdef CONFIG_HOTPLUG_CPU + if (smp_ops.cpu_kill) + return 1; +#endif + + return 0; +} + #ifdef CONFIG_HOTPLUG_CPU static void percpu_timer_stop(void); diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 4a5199070430..db9cf692d4dd 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -146,7 +146,11 @@ static bool pm_fake(struct kvm_vcpu *vcpu, #define access_pmintenclr pm_fake /* Architected CP15 registers. - * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 + * CRn denotes the primary register number, but is copied to the CRm in the + * user space API for 64-bit register access in line with the terminology used + * in the ARM ARM. + * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit + * registers preceding 32-bit ones. */ static const struct coproc_reg cp15_regs[] = { /* CSSELR: swapped by interrupt.S. */ @@ -154,8 +158,8 @@ static const struct coproc_reg cp15_regs[] = { NULL, reset_unknown, c0_CSSELR }, /* TTBR0/TTBR1: swapped by interrupt.S. */ - { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, - { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, + { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, + { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, /* TTBCR: swapped by interrupt.S. 
*/ { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, @@ -182,7 +186,7 @@ static const struct coproc_reg cp15_regs[] = { NULL, reset_unknown, c6_IFAR }, /* PAR swapped by interrupt.S */ - { CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, + { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, /* * DC{C,I,CI}SW operations: @@ -399,12 +403,13 @@ static bool index_to_params(u64 id, struct coproc_params *params) | KVM_REG_ARM_OPC1_MASK)) return false; params->is_64bit = true; - params->CRm = ((id & KVM_REG_ARM_CRM_MASK) + /* CRm to CRn: see cp15_to_index for details */ + params->CRn = ((id & KVM_REG_ARM_CRM_MASK) >> KVM_REG_ARM_CRM_SHIFT); params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) >> KVM_REG_ARM_OPC1_SHIFT); params->Op2 = 0; - params->CRn = 0; + params->CRm = 0; return true; default: return false; @@ -898,7 +903,14 @@ static u64 cp15_to_index(const struct coproc_reg *reg) if (reg->is_64) { val |= KVM_REG_SIZE_U64; val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); - val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); + /* + * CRn always denotes the primary coproc. reg. nr. for the + * in-kernel representation, but the user space API uses the + * CRm for the encoding, because it is modelled after the + * MRRC/MCRR instructions: see the ARM ARM rev. c page + * B3-1445 + */ + val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT); } else { val |= KVM_REG_SIZE_U32; val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h index b7301d3e4799..0461d5c8d3de 100644 --- a/arch/arm/kvm/coproc.h +++ b/arch/arm/kvm/coproc.h @@ -135,6 +135,8 @@ static inline int cmp_reg(const struct coproc_reg *i1, return -1; if (i1->CRn != i2->CRn) return i1->CRn - i2->CRn; + if (i1->is_64 != i2->is_64) + return i2->is_64 - i1->is_64; if (i1->CRm != i2->CRm) return i1->CRm - i2->CRm; if (i1->Op1 != i2->Op1) @@ -145,6 +147,7 @@ static inline int cmp_reg(const struct coproc_reg *i1, #define CRn(_x) .CRn = _x #define CRm(_x) .CRm = _x +#define CRm64(_x) .CRn = _x, .CRm = 0 #define Op1(_x) .Op1 = _x #define Op2(_x) .Op2 = _x #define is64 .is_64 = true diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c index 685063a6d0cf..cf93472b9dd6 100644 --- a/arch/arm/kvm/coproc_a15.c +++ b/arch/arm/kvm/coproc_a15.c @@ -114,7 +114,11 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu, /* * A15-specific CP15 registers. - * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 + * CRn denotes the primary register number, but is copied to the CRm in the + * user space API for 64-bit register access in line with the terminology used + * in the ARM ARM. + * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit + * registers preceding 32-bit ones. */ static const struct coproc_reg a15_regs[] = { /* MPIDR: we use VMPIDR for guest access. 
*/ diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c index b8e06b7a2833..0c25d9487d53 100644 --- a/arch/arm/kvm/mmio.c +++ b/arch/arm/kvm/mmio.c @@ -63,7 +63,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct kvm_exit_mmio *mmio) { - unsigned long rt, len; + unsigned long rt; + int len; bool is_write, sign_extend; if (kvm_vcpu_dabt_isextabt(vcpu)) { diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index ca6bea4859b4..0988d9e04dd4 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -85,6 +85,12 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) return p; } +static bool page_empty(void *ptr) +{ + struct page *ptr_page = virt_to_page(ptr); + return page_count(ptr_page) == 1; +} + static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) { pmd_t *pmd_table = pmd_offset(pud, 0); @@ -103,12 +109,6 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) put_page(virt_to_page(pmd)); } -static bool pmd_empty(pmd_t *pmd) -{ - struct page *pmd_page = virt_to_page(pmd); - return page_count(pmd_page) == 1; -} - static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) { if (pte_present(*pte)) { @@ -118,12 +118,6 @@ static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) } } -static bool pte_empty(pte_t *pte) -{ - struct page *pte_page = virt_to_page(pte); - return page_count(pte_page) == 1; -} - static void unmap_range(struct kvm *kvm, pgd_t *pgdp, unsigned long long start, u64 size) { @@ -132,37 +126,37 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, pmd_t *pmd; pte_t *pte; unsigned long long addr = start, end = start + size; - u64 range; + u64 next; while (addr < end) { pgd = pgdp + pgd_index(addr); pud = pud_offset(pgd, addr); if (pud_none(*pud)) { - addr += PUD_SIZE; + addr = pud_addr_end(addr, end); continue; } pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) { - addr += PMD_SIZE; + addr = pmd_addr_end(addr, end); continue; } pte = pte_offset_kernel(pmd, addr); clear_pte_entry(kvm, pte, addr); - range = PAGE_SIZE; + next = addr + PAGE_SIZE; /* If we emptied the pte, walk back up the ladder */ - if (pte_empty(pte)) { + if (page_empty(pte)) { clear_pmd_entry(kvm, pmd, addr); - range = PMD_SIZE; - if (pmd_empty(pmd)) { + next = pmd_addr_end(addr, end); + if (page_empty(pmd) && !page_empty(pud)) { clear_pud_entry(kvm, pud, addr); - range = PUD_SIZE; + next = pud_addr_end(addr, end); } } - addr += range; + addr = next; } } diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index c9770ba5c7df..8a6295c86209 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -170,6 +170,7 @@ static void __init xen_percpu_init(void *unused) per_cpu(xen_vcpu, cpu) = vcpup; enable_percpu_irq(xen_events_irq, 0); + put_cpu(); } static void xen_restart(enum reboot_mode reboot_mode, const char *cmd) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index c92de4163eba..b25763bc0ec4 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -42,14 +42,15 @@ #define TPIDR_EL1 18 /* Thread ID, Privileged */ #define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */ #define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */ +#define PAR_EL1 21 /* Physical Address Register */ /* 32bit specific registers. 
Keep them at the end of the range */ -#define DACR32_EL2 21 /* Domain Access Control Register */ -#define IFSR32_EL2 22 /* Instruction Fault Status Register */ -#define FPEXC32_EL2 23 /* Floating-Point Exception Control Register */ -#define DBGVCR32_EL2 24 /* Debug Vector Catch Register */ -#define TEECR32_EL1 25 /* ThumbEE Configuration Register */ -#define TEEHBR32_EL1 26 /* ThumbEE Handler Base Register */ -#define NR_SYS_REGS 27 +#define DACR32_EL2 22 /* Domain Access Control Register */ +#define IFSR32_EL2 23 /* Instruction Fault Status Register */ +#define FPEXC32_EL2 24 /* Floating-Point Exception Control Register */ +#define DBGVCR32_EL2 25 /* Debug Vector Catch Register */ +#define TEECR32_EL1 26 /* ThumbEE Configuration Register */ +#define TEEHBR32_EL1 27 /* ThumbEE Handler Base Register */ +#define NR_SYS_REGS 28 /* 32bit mapping */ #define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ @@ -69,6 +70,8 @@ #define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */ #define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */ #define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */ +#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */ +#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */ #define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */ #define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */ #define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 644d73956864..0859a4ddd1e7 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -129,7 +129,7 @@ struct kvm_vcpu_arch { struct kvm_mmu_memory_cache mmu_page_cache; /* Target CPU and feature flags */ - u32 target; + int target; DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); /* Detect first run of a vcpu */ diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 46b3beb4b773..717031a762c2 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -35,6 +35,7 @@ struct mmu_gather { struct mm_struct *mm; unsigned int fullmm; struct vm_area_struct *vma; + unsigned long start, end; unsigned long range_start; unsigned long range_end; unsigned int nr; @@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb) } static inline void -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm) +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) { tlb->mm = mm; - tlb->fullmm = fullmm; + tlb->fullmm = !(start | (end+1)); + tlb->start = start; + tlb->end = end; tlb->vma = NULL; tlb->max = ARRAY_SIZE(tlb->local); tlb->pages = tlb->local; diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 9ba33c40cdf8..12e6ccb88691 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -107,7 +107,12 @@ armpmu_map_cache_event(const unsigned (*cache_map) static int armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) { - int mapping = (*event_map)[config]; + int mapping; + + if (config >= PERF_COUNT_HW_MAX) + return -EINVAL; + + mapping = (*event_map)[config]; return mapping == HW_OP_UNSUPPORTED ? 
-ENOENT : mapping; } @@ -317,6 +322,9 @@ validate_event(struct pmu_hw_events *hw_events, struct hw_perf_event fake_event = event->hw; struct pmu *leader_pmu = event->group_leader->pmu; + if (is_software_event(event)) + return 1; + if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) return 1; diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index ff985e3d8b72..1ac0bbbdddb2 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -214,6 +214,7 @@ __kvm_hyp_code_start: mrs x21, tpidr_el1 mrs x22, amair_el1 mrs x23, cntkctl_el1 + mrs x24, par_el1 stp x4, x5, [x3] stp x6, x7, [x3, #16] @@ -225,6 +226,7 @@ __kvm_hyp_code_start: stp x18, x19, [x3, #112] stp x20, x21, [x3, #128] stp x22, x23, [x3, #144] + str x24, [x3, #160] .endm .macro restore_sysregs @@ -243,6 +245,7 @@ __kvm_hyp_code_start: ldp x18, x19, [x3, #112] ldp x20, x21, [x3, #128] ldp x22, x23, [x3, #144] + ldr x24, [x3, #160] msr vmpidr_el2, x4 msr csselr_el1, x5 @@ -264,6 +267,7 @@ __kvm_hyp_code_start: msr tpidr_el1, x21 msr amair_el1, x22 msr cntkctl_el1, x23 + msr par_el1, x24 .endm .macro skip_32bit_state tmp, target @@ -600,6 +604,8 @@ END(__kvm_vcpu_run) // void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); ENTRY(__kvm_tlb_flush_vmid_ipa) + dsb ishst + kern_hyp_va x0 ldr x2, [x0, #KVM_VTTBR] msr vttbr_el2, x2 @@ -621,6 +627,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa) ENDPROC(__kvm_tlb_flush_vmid_ipa) ENTRY(__kvm_flush_vm_context) + dsb ishst tlbi alle1is ic ialluis dsb sy @@ -753,6 +760,10 @@ el1_trap: */ tbnz x1, #7, 1f // S1PTW is set + /* Preserve PAR_EL1 */ + mrs x3, par_el1 + push x3, xzr + /* * Permission fault, HPFAR_EL2 is invalid. * Resolve the IPA the hard way using the guest VA. @@ -766,6 +777,8 @@ el1_trap: /* Read result */ mrs x3, par_el1 + pop x0, xzr // Restore PAR_EL1 from the stack + msr par_el1, x0 tbnz x3, #0, 3f // Bail out if we failed the translation ubfx x3, x3, #12, #36 // Extract IPA lsl x3, x3, #4 // and present it like HPFAR diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 94923609753b..02e9d09e1d80 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -211,6 +211,9 @@ static const struct sys_reg_desc sys_reg_descs[] = { /* FAR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), NULL, reset_unknown, FAR_EL1 }, + /* PAR_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000), + NULL, reset_unknown, PAR_EL1 }, /* PMINTENSET_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 33a97929d055..77d442ab28c8 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -158,6 +158,7 @@ source "kernel/Kconfig.hz" endmenu source "init/Kconfig" +source "kernel/Kconfig.freezer" source "drivers/Kconfig" source "fs/Kconfig" diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h index ef3a9de01954..bc5efc7c3f3f 100644 --- a/arch/ia64/include/asm/tlb.h +++ b/arch/ia64/include/asm/tlb.h @@ -22,7 +22,7 @@ * unmapping a portion of the virtual address space, these hooks are called according to * the following template: * - * tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM + * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM * { * for each vma that needs a shootdown do { * tlb_start_vma(tlb, vma); @@ -58,6 +58,7 @@ struct mmu_gather { unsigned int max; unsigned char fullmm; /* non-zero means full mm flush */ unsigned char need_flush; /* really unmapped 
some PTEs? */ + unsigned long start, end; unsigned long start_addr; unsigned long end_addr; struct page **pages; @@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb) static inline void -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) { tlb->mm = mm; tlb->max = ARRAY_SIZE(tlb->local); tlb->pages = tlb->local; tlb->nr = 0; - tlb->fullmm = full_mm_flush; + tlb->fullmm = !(start | (end+1)); + tlb->start = start; + tlb->end = end; tlb->start_addr = ~0UL; } diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c index 2291a7d69d49..fa277aecfb78 100644 --- a/arch/m68k/emu/natfeat.c +++ b/arch/m68k/emu/natfeat.c @@ -18,9 +18,11 @@ #include <asm/machdep.h> #include <asm/natfeat.h> +extern long nf_get_id2(const char *feature_name); + asm("\n" -" .global nf_get_id,nf_call\n" -"nf_get_id:\n" +" .global nf_get_id2,nf_call\n" +"nf_get_id2:\n" " .short 0x7300\n" " rts\n" "nf_call:\n" @@ -29,12 +31,25 @@ asm("\n" "1: moveq.l #0,%d0\n" " rts\n" " .section __ex_table,\"a\"\n" -" .long nf_get_id,1b\n" +" .long nf_get_id2,1b\n" " .long nf_call,1b\n" " .previous"); -EXPORT_SYMBOL_GPL(nf_get_id); EXPORT_SYMBOL_GPL(nf_call); +long nf_get_id(const char *feature_name) +{ + /* feature_name may be in vmalloc()ed memory, so make a copy */ + char name_copy[32]; + size_t n; + + n = strlcpy(name_copy, feature_name, sizeof(name_copy)); + if (n >= sizeof(name_copy)) + return 0; + + return nf_get_id2(name_copy); +} +EXPORT_SYMBOL_GPL(nf_get_id); + void nfprint(const char *fmt, ...) { static char buf[256]; diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h index 444ea8a09e9f..ef881cfbbca9 100644 --- a/arch/m68k/include/asm/div64.h +++ b/arch/m68k/include/asm/div64.h @@ -15,16 +15,17 @@ unsigned long long n64; \ } __n; \ unsigned long __rem, __upper; \ + unsigned long __base = (base); \ \ __n.n64 = (n); \ if ((__upper = __n.n32[0])) { \ asm ("divul.l %2,%1:%0" \ - : "=d" (__n.n32[0]), "=d" (__upper) \ - : "d" (base), "0" (__n.n32[0])); \ + : "=d" (__n.n32[0]), "=d" (__upper) \ + : "d" (__base), "0" (__n.n32[0])); \ } \ asm ("divu.l %2,%1:%0" \ - : "=d" (__n.n32[1]), "=d" (__rem) \ - : "d" (base), "1" (__upper), "0" (__n.n32[1])); \ + : "=d" (__n.n32[1]), "=d" (__rem) \ + : "d" (__base), "1" (__upper), "0" (__n.n32[1])); \ (n) = __n.n64; \ __rem; \ }) diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index d22a4ecffff4..4fab52294d98 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -28,7 +28,7 @@ config MICROBLAZE select GENERIC_CLOCKEVENTS select GENERIC_IDLE_POLL_SETUP select MODULES_USE_ELF_RELA - select CLONE_BACKWARDS + select CLONE_BACKWARDS3 config SWAP def_bool n diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index e773659ccf9f..46048d24328c 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -803,6 +803,32 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.next_pc_inc; return 1; break; +#ifdef CONFIG_CPU_CAVIUM_OCTEON + case lwc2_op: /* This is bbit0 on Octeon */ + if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0) + *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + 8; + return 1; + case ldc2_op: /* This is bbit032 on Octeon */ + if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0) + *contpc = regs->cp0_epc + 4 
+ (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + 8; + return 1; + case swc2_op: /* This is bbit1 on Octeon */ + if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) + *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + 8; + return 1; + case sdc2_op: /* This is bbit132 on Octeon */ + if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) + *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + 8; + return 1; +#endif case cop0_op: case cop1_op: case cop2_op: diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index 99dbab1c59ac..d60bf98fa5cf 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -55,6 +55,7 @@ config GENERIC_CSUM source "init/Kconfig" +source "kernel/Kconfig.freezer" menu "Processor type and features" diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index b75d7d686684..6d6d92b4ea11 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -32,6 +32,7 @@ struct mmu_gather { struct mm_struct *mm; struct mmu_table_batch *batch; unsigned int fullmm; + unsigned long start, end; }; struct mmu_table_batch { @@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table); static inline void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, - unsigned int full_mm_flush) + unsigned long start, + unsigned long end) { tlb->mm = mm; - tlb->fullmm = full_mm_flush; + tlb->start = start; + tlb->end = end; + tlb->fullmm = !(start | (end+1)); tlb->batch = NULL; if (tlb->fullmm) __tlb_flush_mm(mm); diff --git a/arch/score/Kconfig b/arch/score/Kconfig index c8def8bc9020..5fc237581caf 100644 --- a/arch/score/Kconfig +++ b/arch/score/Kconfig @@ -87,6 +87,8 @@ config STACKTRACE_SUPPORT source "init/Kconfig" +source "kernel/Kconfig.freezer" + config MMU def_bool y diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h index e61d43d9f689..362192ed12fe 100644 --- a/arch/sh/include/asm/tlb.h +++ b/arch/sh/include/asm/tlb.h @@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb) } static inline void -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) { tlb->mm = mm; - tlb->fullmm = full_mm_flush; + tlb->start = start; + tlb->end = end; + tlb->fullmm = !(start | (end+1)); init_tlb_gather(tlb); } diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h index 4febacd1a8a1..29b0301c18aa 100644 --- a/arch/um/include/asm/tlb.h +++ b/arch/um/include/asm/tlb.h @@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb) } static inline void -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) { tlb->mm = mm; - tlb->fullmm = full_mm_flush; + tlb->start = start; + tlb->end = end; + tlb->fullmm = !(start | (end+1)); init_tlb_gather(tlb); } diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h index 653668d140f9..4a8cb8d7cbd5 100644 --- a/arch/x86/include/asm/bootparam_utils.h +++ b/arch/x86/include/asm/bootparam_utils.h @@ -35,9 +35,9 @@ static void sanitize_boot_params(struct boot_params *boot_params) */ if (boot_params->sentinel) { /* fields in boot_params are left uninitialized, clear them */ - 
memset(&boot_params->olpc_ofw_header, 0, + memset(&boot_params->ext_ramdisk_image, 0, (char *)&boot_params->efi_info - - (char *)&boot_params->olpc_ofw_header); + (char *)&boot_params->ext_ramdisk_image); memset(&boot_params->kbd_status, 0, (char *)&boot_params->hdr - (char *)&boot_params->kbd_status); diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h index 50e5c58ced23..4c019179a57d 100644 --- a/arch/x86/include/asm/microcode_amd.h +++ b/arch/x86/include/asm/microcode_amd.h @@ -59,7 +59,7 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table, extern int __apply_microcode_amd(struct microcode_amd *mc_amd); extern int apply_microcode_amd(int cpu); -extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size); +extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); #ifdef CONFIG_MICROCODE_AMD_EARLY #ifdef CONFIG_X86_32 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index f2b489cf1602..3bf2dd0cf61f 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h @@ -55,9 +55,53 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) #endif +#ifdef CONFIG_MEM_SOFT_DIRTY + +/* + * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_SOFT_DIRTY and + * _PAGE_BIT_PROTNONE are taken, split up the 28 bits of offset + * into this range. + */ +#define PTE_FILE_MAX_BITS 28 +#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) +#define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1) +#define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1) +#define PTE_FILE_SHIFT4 (_PAGE_BIT_SOFT_DIRTY + 1) +#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1) +#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) +#define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1) + +#define pte_to_pgoff(pte) \ + ((((pte).pte_low >> (PTE_FILE_SHIFT1)) \ + & ((1U << PTE_FILE_BITS1) - 1))) \ + + ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \ + & ((1U << PTE_FILE_BITS2) - 1)) \ + << (PTE_FILE_BITS1)) \ + + ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \ + & ((1U << PTE_FILE_BITS3) - 1)) \ + << (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ + + ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \ + << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3)) + +#define pgoff_to_pte(off) \ + ((pte_t) { .pte_low = \ + ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \ + + ((((off) >> PTE_FILE_BITS1) \ + & ((1U << PTE_FILE_BITS2) - 1)) \ + << PTE_FILE_SHIFT2) \ + + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ + & ((1U << PTE_FILE_BITS3) - 1)) \ + << PTE_FILE_SHIFT3) \ + + ((((off) >> \ + (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \ + << PTE_FILE_SHIFT4) \ + + _PAGE_FILE }) + +#else /* CONFIG_MEM_SOFT_DIRTY */ + /* * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken, - * split up the 29 bits of offset into this range: + * split up the 29 bits of offset into this range. 
*/ #define PTE_FILE_MAX_BITS 29 #define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) @@ -88,6 +132,8 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) << PTE_FILE_SHIFT3) \ + _PAGE_FILE }) +#endif /* CONFIG_MEM_SOFT_DIRTY */ + /* Encode and de-code a swap entry */ #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 4cc9f2b7cdc3..81bb91b49a88 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -179,6 +179,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp) /* * Bits 0, 6 and 7 are taken in the low part of the pte, * put the 32 bits of offset into the high part. + * + * For soft-dirty tracking 11 bit is taken from + * the low part of pte as well. */ #define pte_to_pgoff(pte) ((pte).pte_high) #define pgoff_to_pte(off) \ diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 7dc305a46058..1c00631164c2 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -314,6 +314,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); } +static inline pte_t pte_swp_mksoft_dirty(pte_t pte) +{ + return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY); +} + +static inline int pte_swp_soft_dirty(pte_t pte) +{ + return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY; +} + +static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) +{ + return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY); +} + +static inline pte_t pte_file_clear_soft_dirty(pte_t pte) +{ + return pte_clear_flags(pte, _PAGE_SOFT_DIRTY); +} + +static inline pte_t pte_file_mksoft_dirty(pte_t pte) +{ + return pte_set_flags(pte, _PAGE_SOFT_DIRTY); +} + +static inline int pte_file_soft_dirty(pte_t pte) +{ + return pte_flags(pte) & _PAGE_SOFT_DIRTY; +} + /* * Mask out unsupported bits in a present pgprot. Non-present pgprots * can use those bits for other purposes, so leave them be. diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index c98ac63aae48..f4843e031131 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -61,12 +61,27 @@ * they do not conflict with each other. */ +#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_HIDDEN + #ifdef CONFIG_MEM_SOFT_DIRTY -#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) +#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY) #else #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0)) #endif +/* + * Tracking soft dirty bit when a page goes to a swap is tricky. + * We need a bit which can be stored in pte _and_ not conflict + * with swap entry format. On x86 bits 6 and 7 are *not* involved + * into swap entry computation, but bit 6 is used for nonlinear + * file mapping, so we borrow bit 7 for soft dirty tracking. 
+ */ +#ifdef CONFIG_MEM_SOFT_DIRTY +#define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE +#else +#define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0)) +#endif + #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) #else diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 33692eaabab5..e3ddd7db723f 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -233,8 +233,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) #define arch_read_relax(lock) cpu_relax() #define arch_write_relax(lock) cpu_relax() -/* The {read|write|spin}_lock() on x86 are full memory barriers. */ -static inline void smp_mb__after_lock(void) { } -#define ARCH_HAS_SMP_MB_AFTER_LOCK - #endif /* _ASM_X86_SPINLOCK_H */ diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index f654ecefea5b..08a089043ccf 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -512,7 +512,7 @@ static void early_init_amd(struct cpuinfo_x86 *c) static const int amd_erratum_383[]; static const int amd_erratum_400[]; -static bool cpu_has_amd_erratum(const int *erratum); +static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); static void init_amd(struct cpuinfo_x86 *c) { @@ -729,11 +729,11 @@ static void init_amd(struct cpuinfo_x86 *c) value &= ~(1ULL << 24); wrmsrl_safe(MSR_AMD64_BU_CFG2, value); - if (cpu_has_amd_erratum(amd_erratum_383)) + if (cpu_has_amd_erratum(c, amd_erratum_383)) set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); } - if (cpu_has_amd_erratum(amd_erratum_400)) + if (cpu_has_amd_erratum(c, amd_erratum_400)) set_cpu_bug(c, X86_BUG_AMD_APIC_C1E); rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); @@ -878,23 +878,13 @@ static const int amd_erratum_400[] = static const int amd_erratum_383[] = AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); -static bool cpu_has_amd_erratum(const int *erratum) + +static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) { - struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info); int osvw_id = *erratum++; u32 range; u32 ms; - /* - * If called early enough that current_cpu_data hasn't been initialized - * yet, fall back to boot_cpu_data. 
- */ - if (cpu->x86 == 0) - cpu = &boot_cpu_data; - - if (cpu->x86_vendor != X86_VENDOR_AMD) - return false; - if (osvw_id >= 0 && osvw_id < 65536 && cpu_has(cpu, X86_FEATURE_OSVW)) { u64 osvw_len; diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index fbc9210b45bc..a45d8d4ace10 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -2270,6 +2270,7 @@ __init int intel_pmu_init(void) case 70: case 71: case 63: + case 69: x86_pmu.late_ack = true; memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index cad791dbde95..1fb6c72717bd 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -314,8 +314,8 @@ static struct uncore_event_desc snbep_uncore_imc_events[] = { static struct uncore_event_desc snbep_uncore_qpi_events[] = { INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), - INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"), - INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"), + INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"), + INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"), { /* end: all zeroes */ }, }; diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 7a0adb7ee433..7123b5df479d 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c @@ -145,10 +145,9 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) return 0; } -static unsigned int verify_patch_size(int cpu, u32 patch_size, +static unsigned int verify_patch_size(u8 family, u32 patch_size, unsigned int size) { - struct cpuinfo_x86 *c = &cpu_data(cpu); u32 max_size; #define F1XH_MPB_MAX_SIZE 2048 @@ -156,7 +155,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size, #define F15H_MPB_MAX_SIZE 4096 #define F16H_MPB_MAX_SIZE 3458 - switch (c->x86) { + switch (family) { case 0x14: max_size = F14H_MPB_MAX_SIZE; break; @@ -277,9 +276,8 @@ static void cleanup(void) * driver cannot continue functioning normally. In such cases, we tear * down everything we've used up so far and exit. 
*/ -static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) +static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover) { - struct cpuinfo_x86 *c = &cpu_data(cpu); struct microcode_header_amd *mc_hdr; struct ucode_patch *patch; unsigned int patch_size, crnt_size, ret; @@ -299,7 +297,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) /* check if patch is for the current family */ proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff); - if (proc_fam != c->x86) + if (proc_fam != family) return crnt_size; if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { @@ -308,7 +306,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) return crnt_size; } - ret = verify_patch_size(cpu, patch_size, leftover); + ret = verify_patch_size(family, patch_size, leftover); if (!ret) { pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id); return crnt_size; @@ -339,7 +337,8 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) return crnt_size; } -static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t size) +static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, + size_t size) { enum ucode_state ret = UCODE_ERROR; unsigned int leftover; @@ -362,7 +361,7 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz } while (leftover) { - crnt_size = verify_and_add_patch(cpu, fw, leftover); + crnt_size = verify_and_add_patch(family, fw, leftover); if (crnt_size < 0) return ret; @@ -373,22 +372,22 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz return UCODE_OK; } -enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size) +enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size) { enum ucode_state ret; /* free old equiv table */ free_equiv_cpu_table(); - ret = __load_microcode_amd(cpu, data, size); + ret = __load_microcode_amd(family, data, size); if (ret != UCODE_OK) cleanup(); #if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32) /* save BSP's matching patch for early load */ - if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) { - struct ucode_patch *p = find_patch(cpu); + if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) { + struct ucode_patch *p = find_patch(smp_processor_id()); if (p) { memset(amd_bsp_mpb, 0, MPB_MAX_SIZE); memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data), @@ -441,7 +440,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, goto fw_release; } - ret = load_microcode_amd(cpu, fw->data, fw->size); + ret = load_microcode_amd(c->x86, fw->data, fw->size); fw_release: release_firmware(fw); diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/microcode_amd_early.c index 1d14ffee5749..6073104ccaa3 100644 --- a/arch/x86/kernel/microcode_amd_early.c +++ b/arch/x86/kernel/microcode_amd_early.c @@ -238,25 +238,17 @@ static void __init collect_cpu_sig_on_bsp(void *arg) uci->cpu_sig.sig = cpuid_eax(0x00000001); } #else -static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c, - struct ucode_cpu_info *uci) +void load_ucode_amd_ap(void) { + unsigned int cpu = smp_processor_id(); + struct ucode_cpu_info *uci = ucode_cpu_info + cpu; u32 rev, eax; rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); eax = cpuid_eax(0x00000001); - uci->cpu_sig.sig = eax; uci->cpu_sig.rev = rev; - c->microcode = rev; - c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); -} - -void 
load_ucode_amd_ap(void) -{ - unsigned int cpu = smp_processor_id(); - - collect_cpu_info_amd_early(&cpu_data(cpu), ucode_cpu_info + cpu); + uci->cpu_sig.sig = eax; if (cpu && !ucode_loaded) { void *ucode; @@ -265,8 +257,10 @@ void load_ucode_amd_ap(void) return; ucode = (void *)(initrd_start + ucode_offset); - if (load_microcode_amd(0, ucode, ucode_size) != UCODE_OK) + eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); + if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK) return; + ucode_loaded = true; } @@ -278,6 +272,8 @@ int __init save_microcode_in_initrd_amd(void) { enum ucode_state ret; void *ucode; + u32 eax; + #ifdef CONFIG_X86_32 unsigned int bsp = boot_cpu_data.cpu_index; struct ucode_cpu_info *uci = ucode_cpu_info + bsp; @@ -293,7 +289,10 @@ int __init save_microcode_in_initrd_amd(void) return 0; ucode = (void *)(initrd_start + ucode_offset); - ret = load_microcode_amd(0, ucode, ucode_size); + eax = cpuid_eax(0x00000001); + eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); + + ret = load_microcode_amd(eax, ucode, ucode_size); if (ret != UCODE_OK) return -EINVAL; diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index dbded5aedb81..30277e27431a 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin, *begin = new_begin; } } else { - *begin = TASK_UNMAPPED_BASE; + *begin = current->mm->mmap_legacy_base; *end = TASK_SIZE; } } diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 62c29a5bfe26..25e7e1372bb2 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -112,11 +112,13 @@ static unsigned long mmap_legacy_base(void) */ void arch_pick_mmap_layout(struct mm_struct *mm) { + mm->mmap_legacy_base = mmap_legacy_base(); + mm->mmap_base = mmap_base(); + if (mmap_is_legacy()) { - mm->mmap_base = mmap_legacy_base(); + mm->mmap_base = mm->mmap_legacy_base; mm->get_unmapped_area = arch_get_unmapped_area; } else { - mm->mmap_base = mmap_base(); mm->get_unmapped_area = arch_get_unmapped_area_topdown; } } diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 056d11faef21..8f3eea6b80c5 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -313,6 +313,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type) e820_add_region(start, end - start, type); } +void xen_ignore_unusable(struct e820entry *list, size_t map_size) +{ + struct e820entry *entry; + unsigned int i; + + for (i = 0, entry = list; i < map_size; i++, entry++) { + if (entry->type == E820_UNUSABLE) + entry->type = E820_RAM; + } +} + /** * machine_specific_memory_setup - Hook for machine specific memory setup. **/ @@ -353,6 +364,17 @@ char * __init xen_memory_setup(void) } BUG_ON(rc); + /* + * Xen won't allow a 1:1 mapping to be created to UNUSABLE + * regions, so if we're using the machine memory map leave the + * region as RAM as it is in the pseudo-physical map. + * + * UNUSABLE regions in domUs are not handled and will need + * a patch in the future. + */ + if (xen_initial_domain()) + xen_ignore_unusable(map, memmap.nr_entries); + /* Make sure the Xen-supplied memory map is well-ordered. 
*/ sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries); diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index ca92754eb846..b81c88e51daa 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -694,8 +694,15 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus) static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) { int rc; - rc = native_cpu_up(cpu, tidle); - WARN_ON (xen_smp_intr_init(cpu)); + /* + * xen_smp_intr_init() needs to run before native_cpu_up() + * so that IPI vectors are set up on the booting CPU before + * it is marked online in native_cpu_up(). + */ + rc = xen_smp_intr_init(cpu); + WARN_ON(rc); + if (!rc) + rc = native_cpu_up(cpu, tidle); return rc; } diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 99cb944a002d..4d45dba7fb8f 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -906,16 +906,10 @@ bio_pageinc(struct bio *bio) int i; bio_for_each_segment(bv, bio, i) { - page = bv->bv_page; /* Non-zero page count for non-head members of - * compound pages is no longer allowed by the kernel, - * but this has never been seen here. + * compound pages is no longer allowed by the kernel. */ - if (unlikely(PageCompound(page))) - if (compound_trans_head(page) != page) { - pr_crit("page tail used for block I/O\n"); - BUG(); - } + page = compound_trans_head(bv->bv_page); atomic_inc(&page->_count); } } @@ -924,10 +918,13 @@ static void bio_pagedec(struct bio *bio) { struct bio_vec *bv; + struct page *page; int i; - bio_for_each_segment(bv, bio, i) - atomic_dec(&bv->bv_page->_count); + bio_for_each_segment(bv, bio, i) { + page = compound_trans_head(bv->bv_page); + atomic_dec(&page->_count); + } } static void diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 1bdb882c845b..4e5739773c33 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -581,11 +581,15 @@ struct samsung_div_clock exynos4x12_div_clks[] __initdata = { DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4), DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8), DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4), - DIV(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3), - DIV(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3), + DIV_F(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3, + CLK_GET_RATE_NOCACHE, 0), + DIV_F(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3, + CLK_GET_RATE_NOCACHE, 0), DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3), - DIV(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, 4, 3), - DIV(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, 8, 3), + DIV_F(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, + 4, 3, CLK_GET_RATE_NOCACHE, 0), + DIV_F(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, + 8, 3, CLK_GET_RATE_NOCACHE, 0), DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4), }; @@ -863,57 +867,57 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = { GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100", E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"), GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 
0), GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0), }; diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c index 5c205b60a82a..089d3e30e221 100644 --- a/drivers/clk/zynq/clkc.c +++ b/drivers/clk/zynq/clkc.c @@ -71,6 +71,7 @@ static DEFINE_SPINLOCK(armpll_lock); static DEFINE_SPINLOCK(ddrpll_lock); static DEFINE_SPINLOCK(iopll_lock); static DEFINE_SPINLOCK(armclk_lock); +static DEFINE_SPINLOCK(swdtclk_lock); static DEFINE_SPINLOCK(ddrclk_lock); static DEFINE_SPINLOCK(dciclk_lock); static 
DEFINE_SPINLOCK(gem0clk_lock); @@ -293,7 +294,7 @@ static void __init zynq_clk_setup(struct device_node *np) } clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt], swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT, - SLCR_SWDT_CLK_SEL, 0, 1, 0, &gem0clk_lock); + SLCR_SWDT_CLK_SEL, 0, 1, 0, &swdtclk_lock); /* DDR clocks */ clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0, @@ -364,8 +365,9 @@ static void __init zynq_clk_setup(struct device_node *np) CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6, CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, &gem0clk_lock); - clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2, 0, - SLCR_GEM0_CLK_CTRL, 6, 1, 0, &gem0clk_lock); + clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2, + CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 6, 1, 0, + &gem0clk_lock); clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0], "gem0_emio_mux", CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 0, 0, &gem0clk_lock); @@ -386,8 +388,9 @@ static void __init zynq_clk_setup(struct device_node *np) CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6, CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, &gem1clk_lock); - clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2, 0, - SLCR_GEM1_CLK_CTRL, 6, 1, 0, &gem1clk_lock); + clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2, + CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 6, 1, 0, + &gem1clk_lock); clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1], "gem1_emio_mux", CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock); diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index dc53a527126b..9e6578330801 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -85,9 +85,17 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *sg, enum dma_data_direction dir) { + struct drm_i915_gem_object *obj = attachment->dmabuf->priv; + + mutex_lock(&obj->base.dev->struct_mutex); + dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); sg_free_table(sg); kfree(sg); + + i915_gem_object_unpin_pages(obj); + + mutex_unlock(&obj->base.dev->struct_mutex); } static void i915_gem_dmabuf_release(struct dma_buf *dma_buf) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e38b45786653..be79f477a38f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -10042,6 +10042,8 @@ struct intel_display_error_state { u32 power_well_driver; + int num_transcoders; + struct intel_cursor_error_state { u32 control; u32 position; @@ -10050,16 +10052,7 @@ struct intel_display_error_state { } cursor[I915_MAX_PIPES]; struct intel_pipe_error_state { - enum transcoder cpu_transcoder; - u32 conf; u32 source; - - u32 htotal; - u32 hblank; - u32 hsync; - u32 vtotal; - u32 vblank; - u32 vsync; } pipe[I915_MAX_PIPES]; struct intel_plane_error_state { @@ -10071,6 +10064,19 @@ struct intel_display_error_state { u32 surface; u32 tile_offset; } plane[I915_MAX_PIPES]; + + struct intel_transcoder_error_state { + enum transcoder cpu_transcoder; + + u32 conf; + + u32 htotal; + u32 hblank; + u32 hsync; + u32 vtotal; + u32 vblank; + u32 vsync; + } transcoder[4]; }; struct intel_display_error_state * @@ -10078,9 +10084,17 @@ intel_display_capture_error_state(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_display_error_state *error; - enum transcoder cpu_transcoder; + int transcoders[] = 
{ + TRANSCODER_A, + TRANSCODER_B, + TRANSCODER_C, + TRANSCODER_EDP, + }; int i; + if (INTEL_INFO(dev)->num_pipes == 0) + return NULL; + error = kmalloc(sizeof(*error), GFP_ATOMIC); if (error == NULL) return NULL; @@ -10089,9 +10103,6 @@ intel_display_capture_error_state(struct drm_device *dev) error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); for_each_pipe(i) { - cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i); - error->pipe[i].cpu_transcoder = cpu_transcoder; - if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) { error->cursor[i].control = I915_READ(CURCNTR(i)); error->cursor[i].position = I915_READ(CURPOS(i)); @@ -10115,14 +10126,25 @@ intel_display_capture_error_state(struct drm_device *dev) error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); } - error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder)); error->pipe[i].source = I915_READ(PIPESRC(i)); - error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); - error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder)); - error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder)); - error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); - error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder)); - error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder)); + } + + error->num_transcoders = INTEL_INFO(dev)->num_pipes; + if (HAS_DDI(dev_priv->dev)) + error->num_transcoders++; /* Account for eDP. */ + + for (i = 0; i < error->num_transcoders; i++) { + enum transcoder cpu_transcoder = transcoders[i]; + + error->transcoder[i].cpu_transcoder = cpu_transcoder; + + error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); + error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); + error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder)); + error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder)); + error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); + error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder)); + error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); } /* In the code above we read the registers without checking if the power @@ -10144,22 +10166,16 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, { int i; + if (!error) + return; + err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes); if (HAS_POWER_WELL(dev)) err_printf(m, "PWR_WELL_CTL2: %08x\n", error->power_well_driver); for_each_pipe(i) { err_printf(m, "Pipe [%d]:\n", i); - err_printf(m, " CPU transcoder: %c\n", - transcoder_name(error->pipe[i].cpu_transcoder)); - err_printf(m, " CONF: %08x\n", error->pipe[i].conf); err_printf(m, " SRC: %08x\n", error->pipe[i].source); - err_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); - err_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); - err_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); - err_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); - err_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); - err_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); err_printf(m, "Plane [%d]:\n", i); err_printf(m, " CNTR: %08x\n", error->plane[i].control); @@ -10180,5 +10196,17 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, err_printf(m, " POS: %08x\n", error->cursor[i].position); err_printf(m, " BASE: %08x\n", error->cursor[i].base); } + + for (i = 0; i < error->num_transcoders; i++) { + err_printf(m, " CPU transcoder: %c\n", + transcoder_name(error->transcoder[i].cpu_transcoder)); + err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); + err_printf(m, " HTOTAL: %08x\n", 
error->transcoder[i].htotal); + err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); + err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync); + err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal); + err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank); + err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); + } } #endif diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 274b8e1b889f..9f19259667df 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -2163,7 +2163,7 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v); WREG32(reg, tmp_); \ } while (0) #define WREG32_AND(reg, and) WREG32_P(reg, 0, and) -#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or) +#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or)) #define WREG32_PLL_P(reg, val, mask) \ do { \ uint32_t tmp_ = RREG32_PLL(reg); \ diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index f1c15754e73c..b79f4f5cdd62 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -356,6 +356,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, return -EINVAL; } + if (bo->tbo.sync_obj) { + r = radeon_fence_wait(bo->tbo.sync_obj, false); + if (r) { + DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); + return r; + } + } + r = radeon_bo_kmap(bo, &ptr); if (r) { DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index bcc68ec204ad..f5e92cfcc140 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -744,10 +744,10 @@ static void rv770_init_golden_registers(struct radeon_device *rdev) (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); radeon_program_register_sequence(rdev, rv730_golden_registers, - (const u32)ARRAY_SIZE(rv770_golden_registers)); + (const u32)ARRAY_SIZE(rv730_golden_registers)); radeon_program_register_sequence(rdev, rv730_mgcg_init, - (const u32)ARRAY_SIZE(rv770_mgcg_init)); + (const u32)ARRAY_SIZE(rv730_mgcg_init)); break; case CHIP_RV710: radeon_program_register_sequence(rdev, @@ -758,18 +758,18 @@ static void rv770_init_golden_registers(struct radeon_device *rdev) (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); radeon_program_register_sequence(rdev, rv710_golden_registers, - (const u32)ARRAY_SIZE(rv770_golden_registers)); + (const u32)ARRAY_SIZE(rv710_golden_registers)); radeon_program_register_sequence(rdev, rv710_mgcg_init, - (const u32)ARRAY_SIZE(rv770_mgcg_init)); + (const u32)ARRAY_SIZE(rv710_mgcg_init)); break; case CHIP_RV740: radeon_program_register_sequence(rdev, rv740_golden_registers, - (const u32)ARRAY_SIZE(rv770_golden_registers)); + (const u32)ARRAY_SIZE(rv740_golden_registers)); radeon_program_register_sequence(rdev, rv740_mgcg_init, - (const u32)ARRAY_SIZE(rv770_mgcg_init)); + (const u32)ARRAY_SIZE(rv740_mgcg_init)); break; default: break; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 07f257d44a1e..e48cb339c0c6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3714,11 +3714,17 @@ static int bond_neigh_init(struct neighbour *n) * The bonding ndo_neigh_setup is called at init time beofre any * slave exists. So we must declare proxy setup function which will * be used at run time to resolve the actual slave neigh param setup. 
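The guard added just below is the whole bonding fix: a bond's ndo_neigh_setup can be invoked for neigh_parms that belong to an upper device (for example a VLAN stacked on the bond), and installing the proxy on those would hijack the upper device's setup path. A minimal kernel-style sketch of the ownership check, mirroring the hunk (not a standalone program; surrounding context abbreviated):

static int bond_neigh_setup_sketch(struct net_device *dev,
				   struct neigh_parms *parms)
{
	/* parms->dev is the device that owns these parms; dev is the
	 * bond whose callback is running. Touch only our own parms,
	 * leave a master device's parms alone. */
	if (parms->dev == dev)
		parms->neigh_setup = bond_neigh_init;
	return 0;
}
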
+ * + * It's also called by master devices (such as vlans) to setup their + * underlying devices. In that case - do nothing, we're already set up from + * our init. */ static int bond_neigh_setup(struct net_device *dev, struct neigh_parms *parms) { - parms->neigh_setup = bond_neigh_init; + /* modify only our neigh_parms */ + if (parms->dev == dev) + parms->neigh_setup = bond_neigh_init; return 0; } diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 25723d8ee201..925ab8ec9329 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) if ((mc->ptr + rec_len) > mc->end) goto decode_failed; - memcpy(cf->data, mc->ptr, rec_len); + memcpy(cf->data, mc->ptr, cf->can_dlc); mc->ptr += rec_len; } diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index f1b121ee5525..55d79cb53a79 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -199,7 +199,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget) struct arc_emac_priv *priv = netdev_priv(ndev); unsigned int work_done; - for (work_done = 0; work_done <= budget; work_done++) { + for (work_done = 0; work_done < budget; work_done++) { unsigned int *last_rx_bd = &priv->last_rx_bd; struct net_device_stats *stats = &priv->stats; struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index d80e34b8285f..ce9b387b5a19 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1502,6 +1502,7 @@ struct bnx2x { #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) #define IS_VF_FLAG (1 << 22) #define INTERRUPTS_ENABLED_FLAG (1 << 23) +#define BC_SUPPORTS_RMMOD_CMD (1 << 24) #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) @@ -1830,6 +1831,8 @@ struct bnx2x { int fp_array_size; u32 dump_preset_idx; + bool stats_started; + struct semaphore stats_sema; }; /* Tx queues may be less or equal to Rx queues */ @@ -2451,4 +2454,6 @@ enum bnx2x_pci_bus_speed { BNX2X_PCI_LINK_SPEED_5000 = 5000, BNX2X_PCI_LINK_SPEED_8000 = 8000 }; + +void bnx2x_set_local_cmng(struct bnx2x *bp); #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 0c94df47e0e8..f9122f2d6b65 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -753,6 +753,10 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) bnx2x_pfc_set_pfc(bp); bnx2x_dcbx_update_ets_params(bp); + + /* ets may affect cmng configuration: reinit it in hw */ + bnx2x_set_local_cmng(bp); + bnx2x_dcbx_resume_hw_tx(bp); return; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 5018e52ae2ad..32767f6aa33f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1300,6 +1300,9 @@ struct drv_func_mb { #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000 + #define DRV_MSG_CODE_RMMOD 0xdb000000 + #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f + #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 #define REQ_BC_VER_4_SET_MF_BW 0x00060202 #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 @@ -1372,6 +1375,8 @@ struct drv_func_mb { #define FW_MSG_CODE_EEE_RESULS_ACK 
0xda100000 + #define FW_MSG_CODE_RMMOD_ACK 0xdb100000 + #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e06186c305d8..955d6cfd9cb7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -2476,7 +2476,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) input.port_rate = bp->link_vars.line_speed; - if (cmng_type == CMNG_FNS_MINMAX) { + if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) { int vn; /* read mf conf from shmem */ @@ -2533,6 +2533,21 @@ static void storm_memset_cmng(struct bnx2x *bp, } } +/* init cmng mode in HW according to local configuration */ +void bnx2x_set_local_cmng(struct bnx2x *bp) +{ + int cmng_fns = bnx2x_get_cmng_fns_mode(bp); + + if (cmng_fns != CMNG_FNS_NONE) { + bnx2x_cmng_fns_init(bp, false, cmng_fns); + storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); + } else { + /* rate shaping and fairness are disabled */ + DP(NETIF_MSG_IFUP, + "single function mode without fairness\n"); + } +} + /* This function is called upon link interrupt */ static void bnx2x_link_attn(struct bnx2x *bp) { @@ -2568,17 +2583,8 @@ static void bnx2x_link_attn(struct bnx2x *bp) bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); } - if (bp->link_vars.link_up && bp->link_vars.line_speed) { - int cmng_fns = bnx2x_get_cmng_fns_mode(bp); - - if (cmng_fns != CMNG_FNS_NONE) { - bnx2x_cmng_fns_init(bp, false, cmng_fns); - storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); - } else - /* rate shaping and fairness are disabled */ - DP(NETIF_MSG_IFUP, - "single function mode without fairness\n"); - } + if (bp->link_vars.link_up && bp->link_vars.line_speed) + bnx2x_set_local_cmng(bp); __bnx2x_link_report(bp); @@ -10362,6 +10368,10 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp) bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; + + bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? 
+ BC_SUPPORTS_RMMOD_CMD : 0; + boot_mode = SHMEM_RD(bp, dev_info.port_feature_config[BP_PORT(bp)].mba_config) & PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; @@ -11524,6 +11534,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) mutex_init(&bp->port.phy_mutex); mutex_init(&bp->fw_mb_mutex); spin_lock_init(&bp->stats_lock); + sema_init(&bp->stats_sema, 1); INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); @@ -12817,13 +12828,17 @@ static void __bnx2x_remove(struct pci_dev *pdev, bnx2x_dcbnl_update_applist(bp, true); #endif + if (IS_PF(bp) && + !BP_NOMCP(bp) && + (bp->flags & BC_SUPPORTS_RMMOD_CMD)) + bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0); + /* Close the interface - either directly or implicitly */ if (remove_netdev) { unregister_netdev(dev); } else { rtnl_lock(); - if (netif_running(dev)) - bnx2x_close(dev); + dev_close(dev); rtnl_unlock(); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 95861efb5051..44104fb27947 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -3463,7 +3463,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp) alloc_mem_err: BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, sizeof(struct bnx2x_vf_mbx_msg)); - BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, + BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, sizeof(union pf_vf_bulletin)); return -ENOMEM; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 98366abd02bd..d63d1327b051 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -221,7 +221,8 @@ static int bnx2x_stats_comp(struct bnx2x *bp) * Statistics service functions */ -static void bnx2x_stats_pmf_update(struct bnx2x *bp) +/* should be called under stats_sema */ +static void __bnx2x_stats_pmf_update(struct bnx2x *bp) { struct dmae_command *dmae; u32 opcode; @@ -518,7 +519,8 @@ static void bnx2x_func_stats_init(struct bnx2x *bp) *stats_comp = 0; } -static void bnx2x_stats_start(struct bnx2x *bp) +/* should be called under stats_sema */ +static void __bnx2x_stats_start(struct bnx2x *bp) { /* vfs travel through here as part of the statistics FSM, but no action * is required @@ -534,13 +536,34 @@ static void bnx2x_stats_start(struct bnx2x *bp) bnx2x_hw_stats_post(bp); bnx2x_storm_stats_post(bp); + + bp->stats_started = true; +} + +static void bnx2x_stats_start(struct bnx2x *bp) +{ + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); + __bnx2x_stats_start(bp); + up(&bp->stats_sema); } static void bnx2x_stats_pmf_start(struct bnx2x *bp) { + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); bnx2x_stats_comp(bp); - bnx2x_stats_pmf_update(bp); - bnx2x_stats_start(bp); + __bnx2x_stats_pmf_update(bp); + __bnx2x_stats_start(bp); + up(&bp->stats_sema); +} + +static void bnx2x_stats_pmf_update(struct bnx2x *bp) +{ + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); + __bnx2x_stats_pmf_update(bp); + up(&bp->stats_sema); } static void bnx2x_stats_restart(struct bnx2x *bp) @@ -550,8 +573,11 @@ static void bnx2x_stats_restart(struct bnx2x *bp) */ if (IS_VF(bp)) return; + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); bnx2x_stats_comp(bp); - bnx2x_stats_start(bp); + 
__bnx2x_stats_start(bp); + up(&bp->stats_sema); } static void bnx2x_bmac_stats_update(struct bnx2x *bp) @@ -888,9 +914,7 @@ static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp) /* Make sure we use the value of the counter * used for sending the last stats ramrod. */ - spin_lock_bh(&bp->stats_lock); cur_stats_counter = bp->stats_counter - 1; - spin_unlock_bh(&bp->stats_lock); /* are storm stats valid? */ if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { @@ -1227,12 +1251,18 @@ static void bnx2x_stats_update(struct bnx2x *bp) { u32 *stats_comp = bnx2x_sp(bp, stats_comp); - if (bnx2x_edebug_stats_stopped(bp)) + /* we run update from timer context, so give up + * if somebody is in the middle of transition + */ + if (down_trylock(&bp->stats_sema)) return; + if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started) + goto out; + if (IS_PF(bp)) { if (*stats_comp != DMAE_COMP_VAL) - return; + goto out; if (bp->port.pmf) bnx2x_hw_stats_update(bp); @@ -1242,7 +1272,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) BNX2X_ERR("storm stats were not updated for 3 times\n"); bnx2x_panic(); } - return; + goto out; } } else { /* vf doesn't collect HW statistics, and doesn't get completions @@ -1256,7 +1286,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) /* vf is done */ if (IS_VF(bp)) - return; + goto out; if (netif_msg_timer(bp)) { struct bnx2x_eth_stats *estats = &bp->eth_stats; @@ -1267,6 +1297,9 @@ static void bnx2x_stats_update(struct bnx2x *bp) bnx2x_hw_stats_post(bp); bnx2x_storm_stats_post(bp); + +out: + up(&bp->stats_sema); } static void bnx2x_port_stats_stop(struct bnx2x *bp) @@ -1332,6 +1365,11 @@ static void bnx2x_stats_stop(struct bnx2x *bp) { int update = 0; + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); + + bp->stats_started = false; + bnx2x_stats_comp(bp); if (bp->port.pmf) @@ -1348,6 +1386,8 @@ static void bnx2x_stats_stop(struct bnx2x *bp) bnx2x_hw_stats_post(bp); bnx2x_stats_comp(bp); } + + up(&bp->stats_sema); } static void bnx2x_stats_do_nothing(struct bnx2x *bp) @@ -1376,15 +1416,17 @@ static const struct { void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) { enum bnx2x_stats_state state; + void (*action)(struct bnx2x *bp); if (unlikely(bp->panic)) return; spin_lock_bh(&bp->stats_lock); state = bp->stats_state; bp->stats_state = bnx2x_stats_stm[state][event].next_state; + action = bnx2x_stats_stm[state][event].action; spin_unlock_bh(&bp->stats_lock); - bnx2x_stats_stm[state][event].action(bp); + action(bp); if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ddebc7a5dda0..0da2214ef1b9 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -17796,8 +17796,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, done: if (state == pci_channel_io_perm_failure) { - tg3_napi_enable(tp); - dev_close(netdev); + if (netdev) { + tg3_napi_enable(tp); + dev_close(netdev); + } err = PCI_ERS_RESULT_DISCONNECT; } else { pci_disable_device(pdev); @@ -17827,7 +17829,8 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) rtnl_lock(); if (pci_enable_device(pdev)) { - netdev_err(netdev, "Cannot re-enable PCI device after reset.\n"); + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); goto done; } @@ -17835,7 +17838,7 @@ static pci_ers_result_t 
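The bnx2x_stats_handle() rework above closes a race by latching both the next state and the action pointer inside stats_lock, then invoking the action after the lock is dropped: the action always corresponds to the transition actually taken, and no callback runs under a spinlock. A minimal standalone analogue of that latch-then-run pattern, with illustrative state/event names (compile with -pthread):

#include <pthread.h>
#include <stdio.h>

typedef void (*action_t)(void);
struct stm_entry { int next_state; action_t action; };

static void do_nothing(void) { }
static void do_start(void)   { puts("start"); }

/* Two states x two events, purely illustrative. */
static const struct stm_entry stm[2][2] = {
	{ {0, do_nothing}, {1, do_start}   },
	{ {1, do_nothing}, {0, do_nothing} },
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int state;

static void handle(int event)
{
	action_t action;

	/* Latch the transition and its action atomically, then run
	 * the action outside the critical section. */
	pthread_mutex_lock(&lock);
	action = stm[state][event].action;
	state  = stm[state][event].next_state;
	pthread_mutex_unlock(&lock);

	action();
}

int main(void)
{
	handle(1);
	handle(0);
	return 0;
}
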
tg3_io_slot_reset(struct pci_dev *pdev) pci_restore_state(pdev); pci_save_state(pdev); - if (!netif_running(netdev)) { + if (!netdev || !netif_running(netdev)) { rc = PCI_ERS_RESULT_RECOVERED; goto done; } @@ -17847,7 +17850,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) rc = PCI_ERS_RESULT_RECOVERED; done: - if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) { + if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) { tg3_napi_enable(tp); dev_close(netdev); } diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 687ec4a8bb48..9c89dc8fe105 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -455,11 +455,6 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, q->pg_chunk.offset = 0; mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, 0, q->alloc_size, PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) { - __free_pages(q->pg_chunk.page, order); - q->pg_chunk.page = NULL; - return -EIO; - } q->pg_chunk.mapping = mapping; } sd->pg_chunk = q->pg_chunk; @@ -954,75 +949,40 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb) return flits_to_desc(flits); } - -/* map_skb - map a packet main body and its page fragments - * @pdev: the PCI device - * @skb: the packet - * @addr: placeholder to save the mapped addresses - * - * map the main body of an sk_buff and its page fragments, if any. - */ -static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb, - dma_addr_t *addr) -{ - const skb_frag_t *fp, *end; - const struct skb_shared_info *si; - - *addr = pci_map_single(pdev, skb->data, skb_headlen(skb), - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, *addr)) - goto out_err; - - si = skb_shinfo(skb); - end = &si->frags[si->nr_frags]; - - for (fp = si->frags; fp < end; fp++) { - *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp), - DMA_TO_DEVICE); - if (pci_dma_mapping_error(pdev, *addr)) - goto unwind; - } - return 0; - -unwind: - while (fp-- > si->frags) - dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp), - DMA_TO_DEVICE); - - pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE); -out_err: - return -ENOMEM; -} - /** - * write_sgl - populate a scatter/gather list for a packet + * make_sgl - populate a scatter/gather list for a packet * @skb: the packet * @sgp: the SGL to populate * @start: start address of skb main body data to include in the SGL * @len: length of skb main body data to include in the SGL - * @addr: the list of the mapped addresses + * @pdev: the PCI device * - * Copies the scatter/gather list for the buffers that make up a packet + * Generates a scatter/gather list for the buffers that make up a packet * and returns the SGL size in 8-byte words. The caller must size the SGL * appropriately. 
*/ -static inline unsigned int write_sgl(const struct sk_buff *skb, +static inline unsigned int make_sgl(const struct sk_buff *skb, struct sg_ent *sgp, unsigned char *start, - unsigned int len, const dma_addr_t *addr) + unsigned int len, struct pci_dev *pdev) { - unsigned int i, j = 0, k = 0, nfrags; + dma_addr_t mapping; + unsigned int i, j = 0, nfrags; if (len) { + mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE); sgp->len[0] = cpu_to_be32(len); - sgp->addr[j++] = cpu_to_be64(addr[k++]); + sgp->addr[0] = cpu_to_be64(mapping); + j = 1; } nfrags = skb_shinfo(skb)->nr_frags; for (i = 0; i < nfrags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); - sgp->addr[j] = cpu_to_be64(addr[k++]); + sgp->addr[j] = cpu_to_be64(mapping); j ^= 1; if (j == 0) ++sgp; @@ -1178,7 +1138,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, const struct port_info *pi, unsigned int pidx, unsigned int gen, struct sge_txq *q, unsigned int ndesc, - unsigned int compl, const dma_addr_t *addr) + unsigned int compl) { unsigned int flits, sgl_flits, cntrl, tso_info; struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; @@ -1236,7 +1196,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, } sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; - sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr); + sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), @@ -1267,7 +1227,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) struct netdev_queue *txq; struct sge_qset *qs; struct sge_txq *q; - dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* * The chip min packet length is 9 octets but play safe and reject @@ -1296,11 +1255,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) { - dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - q->in_use += ndesc; if (unlikely(credits - ndesc < q->stop_thres)) { t3_stop_tx_queue(txq, qs, q); @@ -1358,7 +1312,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) if (likely(!skb_shared(skb))) skb_orphan(skb); - write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr); + write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); check_ring_tx_db(adap, q); return NETDEV_TX_OK; } @@ -1623,8 +1577,7 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev, */ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, struct sge_txq *q, unsigned int pidx, - unsigned int gen, unsigned int ndesc, - const dma_addr_t *addr) + unsigned int gen, unsigned int ndesc) { unsigned int sgl_flits, flits; struct work_request_hdr *from; @@ -1645,9 +1598,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, flits = skb_transport_offset(skb) / 8; sgp = ndesc == 1 ? 
(struct sg_ent *)&d->flit[flits] : sgl; - sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb), - skb_tail_pointer(skb) - - skb_transport_header(skb), addr); + sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), + skb->tail - skb->transport_header, + adap->pdev); if (need_skb_unmap()) { setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); skb->destructor = deferred_unmap_destructor; @@ -1705,11 +1658,6 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); goto again; } - if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) { - spin_unlock(&q->lock); - return NET_XMIT_SUCCESS; - } - gen = q->gen; q->in_use += ndesc; pidx = q->pidx; @@ -1720,7 +1668,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); } spin_unlock(&q->lock); - write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head); + write_ofld_wr(adap, skb, q, pidx, gen, ndesc); check_ring_tx_db(adap, q); return NET_XMIT_SUCCESS; } @@ -1738,7 +1686,6 @@ static void restart_offloadq(unsigned long data) struct sge_txq *q = &qs->txq[TXQ_OFLD]; const struct port_info *pi = netdev_priv(qs->netdev); struct adapter *adap = pi->adapter; - unsigned int written = 0; spin_lock(&q->lock); again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); @@ -1758,14 +1705,10 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); break; } - if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) - break; - gen = q->gen; q->in_use += ndesc; pidx = q->pidx; q->pidx += ndesc; - written += ndesc; if (q->pidx >= q->size) { q->pidx -= q->size; q->gen ^= 1; @@ -1773,8 +1716,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); __skb_unlink(skb, &q->sendq); spin_unlock(&q->lock); - write_ofld_wr(adap, skb, q, pidx, gen, ndesc, - (dma_addr_t *)skb->head); + write_ofld_wr(adap, skb, q, pidx, gen, ndesc); spin_lock(&q->lock); } spin_unlock(&q->lock); @@ -1784,9 +1726,8 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); set_bit(TXQ_LAST_PKT_DB, &q->flags); #endif wmb(); - if (likely(written)) - t3_write_reg(adap, A_SG_KDOORBELL, - F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); + t3_write_reg(adap, A_SG_KDOORBELL, + F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); } /** diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 6e6e0a117ee2..8ec5d74ad44d 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -3048,6 +3048,9 @@ int be_cmd_get_func_config(struct be_adapter *adapter) adapter->max_event_queues = le16_to_cpu(desc->eq_count); adapter->if_cap_flags = le32_to_cpu(desc->cap_flags); + + /* Clear flags that driver is not interested in */ + adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT; } err: mutex_unlock(&adapter->mbox_lock); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 5228d88c5a02..1b3b9e886412 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -563,6 +563,12 @@ enum be_if_flags { BE_IF_FLAGS_MULTICAST = 0x1000 }; +#define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\ + BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\ + BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\ + BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\ + BE_IF_FLAGS_UNTAGGED) + /* An RX interface is an object with one or more MAC addresses and * filtering capabilities. 
*/ struct be_cmd_req_if_create { diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index c896079728e1..ef94a591f9e5 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) } /* Allocate and setup a new buffer for receiving */ -static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, - struct sk_buff *skb, unsigned int bufsize) +static int skge_rx_setup(struct skge_port *skge, struct skge_element *e, + struct sk_buff *skb, unsigned int bufsize) { struct skge_rx_desc *rd = e->desc; - u64 map; + dma_addr_t map; map = pci_map_single(skge->hw->pdev, skb->data, bufsize, PCI_DMA_FROMDEVICE); - rd->dma_lo = map; - rd->dma_hi = map >> 32; + if (pci_dma_mapping_error(skge->hw->pdev, map)) + return -1; + + rd->dma_lo = lower_32_bits(map); + rd->dma_hi = upper_32_bits(map); e->skb = skb; rd->csum1_start = ETH_HLEN; rd->csum2_start = ETH_HLEN; @@ -953,6 +956,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; dma_unmap_addr_set(e, mapaddr, map); dma_unmap_len_set(e, maplen, bufsize); + return 0; } /* Resume receiving using existing skb, @@ -1014,7 +1018,10 @@ static int skge_rx_fill(struct net_device *dev) return -ENOMEM; skb_reserve(skb, NET_IP_ALIGN); - skge_rx_setup(skge, e, skb, skge->rx_buf_size); + if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) { + dev_kfree_skb(skb); + return -EIO; + } } while ((e = e->next) != ring->start); ring->to_clean = ring->start; @@ -2544,7 +2551,7 @@ static int skge_up(struct net_device *dev) BUG_ON(skge->dma & 7); - if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) { + if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) { dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); err = -EINVAL; goto free_pci_mem; @@ -2729,7 +2736,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, struct skge_tx_desc *td; int i; u32 control, len; - u64 map; + dma_addr_t map; if (skb_padto(skb, ETH_ZLEN)) return NETDEV_TX_OK; @@ -2743,11 +2750,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, e->skb = skb; len = skb_headlen(skb); map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(hw->pdev, map)) + goto mapping_error; + dma_unmap_addr_set(e, mapaddr, map); dma_unmap_len_set(e, maplen, len); - td->dma_lo = map; - td->dma_hi = map >> 32; + td->dma_lo = lower_32_bits(map); + td->dma_hi = upper_32_bits(map); if (skb->ip_summed == CHECKSUM_PARTIAL) { const int offset = skb_checksum_start_offset(skb); @@ -2778,14 +2788,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); + if (dma_mapping_error(&hw->pdev->dev, map)) + goto mapping_unwind; e = e->next; e->skb = skb; tf = e->desc; BUG_ON(tf->control & BMU_OWN); - tf->dma_lo = map; - tf->dma_hi = (u64) map >> 32; + tf->dma_lo = lower_32_bits(map); + tf->dma_hi = upper_32_bits(map); dma_unmap_addr_set(e, mapaddr, map); dma_unmap_len_set(e, maplen, skb_frag_size(frag)); @@ -2815,6 +2827,26 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, } return NETDEV_TX_OK; + +mapping_unwind: + e = skge->tx_ring.to_use; + pci_unmap_single(hw->pdev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_TODEVICE); + while (i-- > 0) { + 
e = e->next; + pci_unmap_page(hw->pdev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_TODEVICE); + } + +mapping_error: + if (net_ratelimit()) + dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); + dev_kfree_skb(skb); + return NETDEV_TX_OK; } @@ -3045,11 +3077,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, pci_dma_sync_single_for_cpu(skge->hw->pdev, dma_unmap_addr(e, mapaddr), - len, PCI_DMA_FROMDEVICE); + dma_unmap_len(e, maplen), + PCI_DMA_FROMDEVICE); skb_copy_from_linear_data(e->skb, skb->data, len); pci_dma_sync_single_for_device(skge->hw->pdev, dma_unmap_addr(e, mapaddr), - len, PCI_DMA_FROMDEVICE); + dma_unmap_len(e, maplen), + PCI_DMA_FROMDEVICE); skge_rx_reuse(e, skge->rx_buf_size); } else { struct sk_buff *nskb; @@ -3058,13 +3092,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, if (!nskb) goto resubmit; + if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { + dev_kfree_skb(nskb); + goto resubmit; + } + pci_unmap_single(skge->hw->pdev, dma_unmap_addr(e, mapaddr), dma_unmap_len(e, maplen), PCI_DMA_FROMDEVICE); skb = e->skb; prefetch(skb->data); - skge_rx_setup(skge, e, nskb, skge->rx_buf_size); } skb_put(skb, len); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index c571de85d0f9..5472cbd34028 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -46,7 +46,7 @@ #include "mlx5_core.h" enum { - CMD_IF_REV = 4, + CMD_IF_REV = 5, }; enum { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index c02cbcfd0fb8..443cc4d7b024 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) case MLX5_EVENT_TYPE_PAGE_REQUEST: { u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); - s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages); + s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages); mlx5_core_req_pages_handler(dev, func_id, npages); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 72a5222447f5..f012658b6a92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; caps->log_max_mcg = out->hca_cap.log_max_mcg; - caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg); + caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff; caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 748f10a155c4..3e6670c4a7cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -55,33 +55,9 @@ enum { }; static DEFINE_SPINLOCK(health_lock); - static LIST_HEAD(health_list); static struct work_struct health_work; -static health_handler_t reg_handler; -int mlx5_register_health_report_handler(health_handler_t handler) -{ 
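Several of the mlx5 hunks above widen command fields from be16 to be32; in fw.c, max_qp_mcg is a 24-bit quantity carried in the low three bytes of a big-endian 32-bit word, so reading it with be16_to_cpu silently dropped part of the value. A standalone sketch of the corrected read; read_be24_low is a hypothetical helper name:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Read a 24-bit field stored in the low three bytes of a
 * big-endian 32-bit word: convert the whole word, then mask. */
static uint32_t read_be24_low(const void *word)
{
	uint32_t be;

	memcpy(&be, word, sizeof(be));
	return ntohl(be) & 0xffffff;
}

int main(void)
{
	/* Top wire byte (0xab) belongs to another field and is masked. */
	const uint8_t wire[4] = { 0xab, 0x12, 0x34, 0x56 };

	printf("max_qp_mcg = 0x%06x\n", (unsigned)read_be24_low(wire));
	return 0;
}
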
- spin_lock_irq(&health_lock); - if (reg_handler) { - spin_unlock_irq(&health_lock); - return -EEXIST; - } - reg_handler = handler; - spin_unlock_irq(&health_lock); - - return 0; -} -EXPORT_SYMBOL(mlx5_register_health_report_handler); - -void mlx5_unregister_health_report_handler(void) -{ - spin_lock_irq(&health_lock); - reg_handler = NULL; - spin_unlock_irq(&health_lock); -} -EXPORT_SYMBOL(mlx5_unregister_health_report_handler); - static void health_care(struct work_struct *work) { struct mlx5_core_health *health, *n; @@ -98,11 +74,8 @@ static void health_care(struct work_struct *work) priv = container_of(health, struct mlx5_priv, health); dev = container_of(priv, struct mlx5_core_dev, priv); mlx5_core_warn(dev, "handling bad device here\n"); + /* nothing yet */ spin_lock_irq(&health_lock); - if (reg_handler) - reg_handler(dev->pdev, health->health, - sizeof(health->health)); - list_del_init(&health->list); spin_unlock_irq(&health_lock); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 4a3e137931a3..3a2408d44820 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -43,10 +43,16 @@ enum { MLX5_PAGES_TAKE = 2 }; +enum { + MLX5_BOOT_PAGES = 1, + MLX5_INIT_PAGES = 2, + MLX5_POST_INIT_PAGES = 3 +}; + struct mlx5_pages_req { struct mlx5_core_dev *dev; u32 func_id; - s16 npages; + s32 npages; struct work_struct work; }; @@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox { struct mlx5_query_pages_outbox { struct mlx5_outbox_hdr hdr; - __be16 num_boot_pages; + __be16 rsvd; __be16 func_id; - __be16 init_pages; - __be16 num_pages; + __be32 num_pages; }; struct mlx5_manage_pages_inbox { struct mlx5_inbox_hdr hdr; - __be16 rsvd0; + __be16 rsvd; __be16 func_id; - __be16 rsvd1; - __be16 num_entries; - u8 rsvd2[16]; + __be32 num_entries; __be64 pas[0]; }; struct mlx5_manage_pages_outbox { struct mlx5_outbox_hdr hdr; - u8 rsvd0[2]; - __be16 num_entries; - u8 rsvd1[20]; + __be32 num_entries; + u8 rsvd[4]; __be64 pas[0]; }; @@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr) } static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, - s16 *pages, s16 *init_pages, u16 *boot_pages) + s32 *npages, int boot) { struct mlx5_query_pages_inbox in; struct mlx5_query_pages_outbox out; @@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, memset(&in, 0, sizeof(in)); memset(&out, 0, sizeof(out)); in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); + in.hdr.opmod = boot ? 
cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); if (err) return err; @@ -162,15 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, if (out.hdr.status) return mlx5_cmd_status_to_err(&out.hdr); - if (pages) - *pages = be16_to_cpu(out.num_pages); - - if (init_pages) - *init_pages = be16_to_cpu(out.init_pages); - - if (boot_pages) - *boot_pages = be16_to_cpu(out.num_boot_pages); - + *npages = be32_to_cpu(out.num_pages); *func_id = be16_to_cpu(out.func_id); return err; @@ -224,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); in->func_id = cpu_to_be16(func_id); - in->num_entries = cpu_to_be16(npages); + in->num_entries = cpu_to_be32(npages); err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); mlx5_core_dbg(dev, "err %d\n", err); if (err) { @@ -292,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); in.func_id = cpu_to_be16(func_id); - in.num_entries = cpu_to_be16(npages); + in.num_entries = cpu_to_be32(npages); mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); if (err) { @@ -306,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, goto out_free; } - num_claimed = be16_to_cpu(out->num_entries); + num_claimed = be32_to_cpu(out->num_entries); if (nclaimed) *nclaimed = num_claimed; @@ -345,7 +341,7 @@ static void pages_work_handler(struct work_struct *work) } void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, - s16 npages) + s32 npages) { struct mlx5_pages_req *req; @@ -364,20 +360,18 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) { - u16 uninitialized_var(boot_pages); - s16 uninitialized_var(init_pages); u16 uninitialized_var(func_id); + s32 uninitialized_var(npages); int err; - err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages, - &boot_pages); + err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot); if (err) return err; + mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n", + npages, boot ? "boot" : "init", func_id); - mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n", - init_pages, boot_pages, func_id); - return give_pages(dev, func_id, boot ? 
boot_pages : init_pages, 0); + return give_pages(dev, func_id, npages, 0); } static int optimal_reclaimed_pages(void) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 92da9980a0a0..9d4bb7f83904 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -3266,6 +3266,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) u8 val; int ret, max_sds_rings = adapter->max_sds_rings; + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { + netdev_info(netdev, "Device is resetting\n"); + return -EBUSY; + } + if (qlcnic_get_diag_lock(adapter)) { netdev_info(netdev, "Device in diagnostics mode\n"); return -EBUSY; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 9f4b8d5f0865..345d987aede4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -629,7 +629,8 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) return -EIO; } - qlcnic_set_drv_version(adapter); + if (adapter->portnum == 0) + qlcnic_set_drv_version(adapter); qlcnic_83xx_idc_attach_driver(adapter); return 0; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index ee013fcc3322..bc05d016c859 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2165,7 +2165,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_out_disable_mbx_intr; - qlcnic_set_drv_version(adapter); + if (adapter->portnum == 0) + qlcnic_set_drv_version(adapter); pci_set_drvdata(pdev, adapter); @@ -3085,7 +3086,8 @@ done: adapter->fw_fail_cnt = 0; adapter->flags &= ~QLCNIC_FW_HANG; clear_bit(__QLCNIC_RESETTING, &adapter->state); - qlcnic_set_drv_version(adapter); + if (adapter->portnum == 0) + qlcnic_set_drv_version(adapter); if (!qlcnic_clr_drv_state(adapter)) qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 10ed82b3baca..660c3f5b2237 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -170,9 +170,9 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter, if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { err = qlcnic_get_beacon_state(adapter, &h_beacon_state); - if (!err) { - dev_info(&adapter->pdev->dev, - "Failed to get current beacon state\n"); + if (err) { + netdev_err(adapter->netdev, + "Failed to get current beacon state\n"); } else { if (h_beacon_state == QLCNIC_BEACON_DISABLE) ahw->beacon_state = 0; diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 6f35f8404d68..d2e591955bdd 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -524,6 +524,7 @@ rx_status_loop: PCI_DMA_FROMDEVICE); if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { dev->stats.rx_dropped++; + kfree_skb(new_skb); goto rx_next; } diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index c9d942a5c335..1ef9d8a555aa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -33,10 +33,15 @@ static 
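The one-line 8139cp change above plugs a leak in the rx-refill path: when the freshly allocated replacement skb cannot be DMA-mapped, the code bailed out to rx_next without freeing it, leaking one buffer per mapping failure. A standalone analogue of the corrected error path; map_fails() stands in for dma_mapping_error() and is purely illustrative:

#include <stdio.h>
#include <stdlib.h>

static int map_fails(void) { return 1; }	/* pretend the DMA map failed */

static int refill(void)
{
	void *skb = malloc(1536);

	if (!skb)
		return -1;
	if (map_fails()) {
		free(skb);	/* the fix: without this, skb leaks */
		return -1;	/* caller keeps reusing the old buffer */
	}
	return 0;
}

int main(void)
{
	printf("refill: %d\n", refill());
	return 0;
}
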
unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) struct stmmac_priv *priv = (struct stmmac_priv *)p; unsigned int txsize = priv->dma_tx_size; unsigned int entry = priv->cur_tx % txsize; - struct dma_desc *desc = priv->dma_tx + entry; + struct dma_desc *desc; unsigned int nopaged_len = skb_headlen(skb); unsigned int bmax, len; + if (priv->extend_desc) + desc = (struct dma_desc *)(priv->dma_etx + entry); + else + desc = priv->dma_tx + entry; + if (priv->plat->enh_desc) bmax = BUF_SIZE_8KiB; else @@ -54,7 +59,11 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) STMMAC_RING_MODE); wmb(); entry = (++priv->cur_tx) % txsize; - desc = priv->dma_tx + entry; + + if (priv->extend_desc) + desc = (struct dma_desc *)(priv->dma_etx + entry); + else + desc = priv->dma_tx + entry; desc->des2 = dma_map_single(priv->device, skb->data + bmax, len, DMA_TO_DEVICE); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f2ccb36e8685..0a9bb9d30c3f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -939,15 +939,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, GFP_KERNEL); - if (unlikely(skb == NULL)) { + if (!skb) { pr_err("%s: Rx init fails; skb is NULL\n", __func__); - return 1; + return -ENOMEM; } skb_reserve(skb, NET_IP_ALIGN); priv->rx_skbuff[i] = skb; priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, priv->dma_buf_sz, DMA_FROM_DEVICE); + if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) { + pr_err("%s: DMA mapping error\n", __func__); + dev_kfree_skb_any(skb); + return -EINVAL; + } p->des2 = priv->rx_skbuff_dma[i]; @@ -958,6 +963,16 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, return 0; } +static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i) +{ + if (priv->rx_skbuff[i]) { + dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], + priv->dma_buf_sz, DMA_FROM_DEVICE); + dev_kfree_skb_any(priv->rx_skbuff[i]); + } + priv->rx_skbuff[i] = NULL; +} + /** * init_dma_desc_rings - init the RX/TX descriptor rings * @dev: net device structure @@ -965,13 +980,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, * and allocates the socket buffers. It suppors the chained and ring * modes. */ -static void init_dma_desc_rings(struct net_device *dev) +static int init_dma_desc_rings(struct net_device *dev) { int i; struct stmmac_priv *priv = netdev_priv(dev); unsigned int txsize = priv->dma_tx_size; unsigned int rxsize = priv->dma_rx_size; unsigned int bfsize = 0; + int ret = -ENOMEM; /* Set the max buffer size according to the DESC mode * and the MTU. Note that RING mode allows 16KiB bsize. 
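The init_dma_desc_rings() rework in the hunks that follow converts a function that silently returned on allocation failure into one that unwinds and reports an error, using the kernel's goto-ladder idiom: every allocation gets a label, and a failure at step N jumps to the label that releases steps N-1 down to 1 in reverse order. A minimal standalone sketch of the idiom:

#include <stdlib.h>

static int setup(void **a, void **b, void **c)
{
	*a = malloc(64);
	if (!*a)
		goto err_a;
	*b = malloc(64);
	if (!*b)
		goto err_b;
	*c = malloc(64);
	if (!*c)
		goto err_c;
	return 0;

err_c:
	free(*b);
err_b:
	free(*a);
err_a:
	return -1;	/* -ENOMEM in the kernel version */
}

int main(void)
{
	void *a, *b, *c;

	if (setup(&a, &b, &c) == 0) {
		free(c);
		free(b);
		free(a);
	}
	return 0;
}
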
@@ -992,34 +1008,60 @@ static void init_dma_desc_rings(struct net_device *dev) dma_extended_desc), &priv->dma_rx_phy, GFP_KERNEL); + if (!priv->dma_erx) + goto err_dma; + priv->dma_etx = dma_alloc_coherent(priv->device, txsize * sizeof(struct dma_extended_desc), &priv->dma_tx_phy, GFP_KERNEL); - if ((!priv->dma_erx) || (!priv->dma_etx)) - return; + if (!priv->dma_etx) { + dma_free_coherent(priv->device, priv->dma_rx_size * + sizeof(struct dma_extended_desc), + priv->dma_erx, priv->dma_rx_phy); + goto err_dma; + } } else { priv->dma_rx = dma_alloc_coherent(priv->device, rxsize * sizeof(struct dma_desc), &priv->dma_rx_phy, GFP_KERNEL); + if (!priv->dma_rx) + goto err_dma; + priv->dma_tx = dma_alloc_coherent(priv->device, txsize * sizeof(struct dma_desc), &priv->dma_tx_phy, GFP_KERNEL); - if ((!priv->dma_rx) || (!priv->dma_tx)) - return; + if (!priv->dma_tx) { + dma_free_coherent(priv->device, priv->dma_rx_size * + sizeof(struct dma_desc), + priv->dma_rx, priv->dma_rx_phy); + goto err_dma; + } } priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), GFP_KERNEL); + if (!priv->rx_skbuff_dma) + goto err_rx_skbuff_dma; + priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), GFP_KERNEL); + if (!priv->rx_skbuff) + goto err_rx_skbuff; + priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), GFP_KERNEL); + if (!priv->tx_skbuff_dma) + goto err_tx_skbuff_dma; + priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), GFP_KERNEL); + if (!priv->tx_skbuff) + goto err_tx_skbuff; + if (netif_msg_probe(priv)) { pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); @@ -1034,8 +1076,9 @@ static void init_dma_desc_rings(struct net_device *dev) else p = priv->dma_rx + i; - if (stmmac_init_rx_buffers(priv, p, i)) - break; + ret = stmmac_init_rx_buffers(priv, p, i); + if (ret) + goto err_init_rx_buffers; if (netif_msg_probe(priv)) pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], @@ -1081,20 +1124,44 @@ static void init_dma_desc_rings(struct net_device *dev) if (netif_msg_hw(priv)) stmmac_display_rings(priv); + + return 0; +err_init_rx_buffers: + while (--i >= 0) + stmmac_free_rx_buffers(priv, i); + kfree(priv->tx_skbuff); +err_tx_skbuff: + kfree(priv->tx_skbuff_dma); +err_tx_skbuff_dma: + kfree(priv->rx_skbuff); +err_rx_skbuff: + kfree(priv->rx_skbuff_dma); +err_rx_skbuff_dma: + if (priv->extend_desc) { + dma_free_coherent(priv->device, priv->dma_tx_size * + sizeof(struct dma_extended_desc), + priv->dma_etx, priv->dma_tx_phy); + dma_free_coherent(priv->device, priv->dma_rx_size * + sizeof(struct dma_extended_desc), + priv->dma_erx, priv->dma_rx_phy); + } else { + dma_free_coherent(priv->device, + priv->dma_tx_size * sizeof(struct dma_desc), + priv->dma_tx, priv->dma_tx_phy); + dma_free_coherent(priv->device, + priv->dma_rx_size * sizeof(struct dma_desc), + priv->dma_rx, priv->dma_rx_phy); + } +err_dma: + return ret; } static void dma_free_rx_skbufs(struct stmmac_priv *priv) { int i; - for (i = 0; i < priv->dma_rx_size; i++) { - if (priv->rx_skbuff[i]) { - dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], - priv->dma_buf_sz, DMA_FROM_DEVICE); - dev_kfree_skb_any(priv->rx_skbuff[i]); - } - priv->rx_skbuff[i] = NULL; - } + for (i = 0; i < priv->dma_rx_size; i++) + stmmac_free_rx_buffers(priv, i); } static void dma_free_tx_skbufs(struct stmmac_priv *priv) @@ -1560,12 +1627,17 @@ static int stmmac_open(struct net_device *dev) priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); priv->dma_rx_size = 
STMMAC_ALIGN(dma_rxsize); priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); - init_dma_desc_rings(dev); + + ret = init_dma_desc_rings(dev); + if (ret < 0) { + pr_err("%s: DMA descriptors initialization failed\n", __func__); + goto dma_desc_error; + } /* DMA initialization and SW reset */ ret = stmmac_init_dma_engine(priv); if (ret < 0) { - pr_err("%s: DMA initialization failed\n", __func__); + pr_err("%s: DMA engine initialization failed\n", __func__); goto init_error; } @@ -1672,6 +1744,7 @@ wolirq_error: init_error: free_dma_desc_resources(priv); +dma_desc_error: if (priv->phydev) phy_disconnect(priv->phydev); phy_error: diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 1d6dc41f755d..d01cacf8a7c2 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2100,7 +2100,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } - netif_rx(skb); + netif_receive_skb(skb); stats->rx_bytes += pkt_len; stats->rx_packets++; @@ -2884,6 +2884,7 @@ out: return ret; err_iounmap: + netif_napi_del(&vptr->napi); iounmap(regs); err_free_dev: free_netdev(netdev); @@ -2904,6 +2905,7 @@ static int velocity_remove(struct device *dev) struct velocity_info *vptr = netdev_priv(netdev); unregister_netdev(netdev); + netif_napi_del(&vptr->napi); iounmap(vptr->mac_regs); free_netdev(netdev); velocity_nics--; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index d0f9c2fd1d4f..16b43bf544b7 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -739,6 +739,10 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[]) return -EADDRNOTAVAIL; } + if (data && data[IFLA_MACVLAN_FLAGS] && + nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC) + return -EINVAL; + if (data && data[IFLA_MACVLAN_MODE]) { switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) { case MACVLAN_MODE_PRIVATE: diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index a98fb0ed6aef..b51db2abfe44 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -818,10 +818,13 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; } - if (vlan) + if (vlan) { + local_bh_disable(); macvlan_start_xmit(skb, vlan->dev); - else + local_bh_enable(); + } else { kfree_skb(skb); + } rcu_read_unlock(); return total_len; @@ -912,8 +915,11 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, done: rcu_read_lock(); vlan = rcu_dereference(q->vlan); - if (vlan) + if (vlan) { + preempt_disable(); macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0); + preempt_enable(); + } rcu_read_unlock(); return ret ? 
ret : copied; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index db690a372260..71af122edf2d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1074,8 +1074,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, u32 rxhash; if (!(tun->flags & TUN_NO_PI)) { - if ((len -= sizeof(pi)) > total_len) + if (len < sizeof(pi)) return -EINVAL; + len -= sizeof(pi); if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) return -EFAULT; @@ -1083,8 +1084,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, } if (tun->flags & TUN_VNET_HDR) { - if ((len -= tun->vnet_hdr_sz) > total_len) + if (len < tun->vnet_hdr_sz) return -EINVAL; + len -= tun->vnet_hdr_sz; if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) return -EFAULT; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index f4c6db419ddb..767f7af3bd40 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1386,7 +1386,7 @@ static int vxlan_open(struct net_device *dev) return -ENOTCONN; if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) && - ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { + vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { vxlan_sock_hold(vs); dev_hold(dev); queue_work(vxlan_wq, &vxlan->igmp_join); @@ -1793,8 +1793,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head) struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); struct vxlan_dev *vxlan = netdev_priv(dev); - flush_workqueue(vxlan_wq); - spin_lock(&vn->sock_lock); hlist_del_rcu(&vxlan->hlist); spin_unlock(&vn->sock_lock); diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c index 7365674366f4..010b252be584 100644 --- a/drivers/net/wireless/cw1200/sta.c +++ b/drivers/net/wireless/cw1200/sta.c @@ -1406,11 +1406,8 @@ static void cw1200_do_unjoin(struct cw1200_common *priv) if (!priv->join_status) goto done; - if (priv->join_status > CW1200_JOIN_STATUS_IBSS) { - wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n", - priv->join_status); - BUG_ON(1); - } + if (priv->join_status == CW1200_JOIN_STATUS_AP) + goto done; cancel_work_sync(&priv->update_filtering_work); cancel_work_sync(&priv->set_beacon_wakeup_period_work); diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c index b9b2bb51e605..f2ed62e37340 100644 --- a/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/iwlegacy/4965-mac.c @@ -4460,12 +4460,12 @@ il4965_irq_tasklet(struct il_priv *il) * is killed. Hence update the killswitch state here. The * rfkill handler will care about restarting if needed. 
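The tun_get_user() hunks above replace subtract-then-compare with an explicit bound check before the subtraction. len is unsigned, so computing len -= sizeof(pi) first can wrap around, and correctness then hinges on a fragile comparison against total_len; checking len < sizeof(pi) up front rejects short input directly. A standalone demonstration of the idiom:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t len = 2;		/* short input */
	size_t hdr = 4;		/* stand-in for sizeof(pi) */

	/* Wrong order: unsigned subtraction wraps to a huge value. */
	printf("len - hdr wraps to %zu\n", len - hdr);

	/* Right order: validate before subtracting. */
	if (len < hdr) {
		puts("reject: buffer too short");
		return 1;
	}
	len -= hdr;
	printf("payload length %zu\n", len);
	return 0;
}
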
*/ - if (!test_bit(S_ALIVE, &il->status)) { - if (hw_rf_kill) - set_bit(S_RFKILL, &il->status); - else - clear_bit(S_RFKILL, &il->status); + if (hw_rf_kill) { + set_bit(S_RFKILL, &il->status); + } else { + clear_bit(S_RFKILL, &il->status); wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); + il_force_reset(il, true); } handled |= CSR_INT_BIT_RF_KILL; @@ -5334,6 +5334,9 @@ il4965_alive_start(struct il_priv *il) il->active_rate = RATES_MASK; + il_power_update_mode(il, true); + D_INFO("Updated power mode\n"); + if (il_is_associated(il)) { struct il_rxon_cmd *active_rxon = (struct il_rxon_cmd *)&il->active; @@ -5364,9 +5367,6 @@ il4965_alive_start(struct il_priv *il) D_INFO("ALIVE processing complete.\n"); wake_up(&il->wait_command_queue); - il_power_update_mode(il, true); - D_INFO("Updated power mode\n"); - return; restart: diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c index 3195aad440dd..b03e22ef5462 100644 --- a/drivers/net/wireless/iwlegacy/common.c +++ b/drivers/net/wireless/iwlegacy/common.c @@ -4660,6 +4660,7 @@ il_force_reset(struct il_priv *il, bool external) return 0; } +EXPORT_SYMBOL(il_force_reset); int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c index c47fd1e5450b..94716c779800 100644 --- a/drivers/pinctrl/pinctrl-sunxi.c +++ b/drivers/pinctrl/pinctrl-sunxi.c @@ -278,6 +278,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev, { struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); struct sunxi_pinctrl_group *g = &pctl->groups[group]; + unsigned long flags; u32 val, mask; u16 strength; u8 dlevel; @@ -295,22 +296,35 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev, * 3: 40mA */ dlevel = strength / 10 - 1; + + spin_lock_irqsave(&pctl->lock, flags); + val = readl(pctl->membase + sunxi_dlevel_reg(g->pin)); mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin); writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin), pctl->membase + sunxi_dlevel_reg(g->pin)); + + spin_unlock_irqrestore(&pctl->lock, flags); break; case PIN_CONFIG_BIAS_PULL_UP: + spin_lock_irqsave(&pctl->lock, flags); + val = readl(pctl->membase + sunxi_pull_reg(g->pin)); mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin), pctl->membase + sunxi_pull_reg(g->pin)); + + spin_unlock_irqrestore(&pctl->lock, flags); break; case PIN_CONFIG_BIAS_PULL_DOWN: + spin_lock_irqsave(&pctl->lock, flags); + val = readl(pctl->membase + sunxi_pull_reg(g->pin)); mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin), pctl->membase + sunxi_pull_reg(g->pin)); + + spin_unlock_irqrestore(&pctl->lock, flags); break; default: break; @@ -360,11 +374,17 @@ static void sunxi_pmx_set(struct pinctrl_dev *pctldev, u8 config) { struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + unsigned long flags; + u32 val, mask; + + spin_lock_irqsave(&pctl->lock, flags); - u32 val = readl(pctl->membase + sunxi_mux_reg(pin)); - u32 mask = MUX_PINS_MASK << sunxi_mux_offset(pin); + val = readl(pctl->membase + sunxi_mux_reg(pin)); + mask = MUX_PINS_MASK << sunxi_mux_offset(pin); writel((val & ~mask) | config << sunxi_mux_offset(pin), pctl->membase + sunxi_mux_reg(pin)); + + spin_unlock_irqrestore(&pctl->lock, flags); } static int sunxi_pmx_enable(struct pinctrl_dev *pctldev, @@ -464,8 +484,21 @@ static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip, 
struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); u32 reg = sunxi_data_reg(offset); u8 index = sunxi_data_offset(offset); + unsigned long flags; + u32 regval; + + spin_lock_irqsave(&pctl->lock, flags); + + regval = readl(pctl->membase + reg); - writel((value & DATA_PINS_MASK) << index, pctl->membase + reg); + if (value) + regval |= BIT(index); + else + regval &= ~(BIT(index)); + + writel(regval, pctl->membase + reg); + + spin_unlock_irqrestore(&pctl->lock, flags); } static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc, @@ -526,6 +559,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); u32 reg = sunxi_irq_cfg_reg(d->hwirq); u8 index = sunxi_irq_cfg_offset(d->hwirq); + unsigned long flags; + u32 regval; u8 mode; switch (type) { @@ -548,7 +583,13 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, return -EINVAL; } - writel((mode & IRQ_CFG_IRQ_MASK) << index, pctl->membase + reg); + spin_lock_irqsave(&pctl->lock, flags); + + regval = readl(pctl->membase + reg); + regval &= ~IRQ_CFG_IRQ_MASK; + writel(regval | (mode << index), pctl->membase + reg); + + spin_unlock_irqrestore(&pctl->lock, flags); return 0; } @@ -560,14 +601,19 @@ static void sunxi_pinctrl_irq_mask_ack(struct irq_data *d) u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq); u32 status_reg = sunxi_irq_status_reg(d->hwirq); u8 status_idx = sunxi_irq_status_offset(d->hwirq); + unsigned long flags; u32 val; + spin_lock_irqsave(&pctl->lock, flags); + /* Mask the IRQ */ val = readl(pctl->membase + ctrl_reg); writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg); /* Clear the IRQ */ writel(1 << status_idx, pctl->membase + status_reg); + + spin_unlock_irqrestore(&pctl->lock, flags); } static void sunxi_pinctrl_irq_mask(struct irq_data *d) @@ -575,11 +621,16 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d) struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); u32 reg = sunxi_irq_ctrl_reg(d->hwirq); u8 idx = sunxi_irq_ctrl_offset(d->hwirq); + unsigned long flags; u32 val; + spin_lock_irqsave(&pctl->lock, flags); + /* Mask the IRQ */ val = readl(pctl->membase + reg); writel(val & ~(1 << idx), pctl->membase + reg); + + spin_unlock_irqrestore(&pctl->lock, flags); } static void sunxi_pinctrl_irq_unmask(struct irq_data *d) @@ -588,6 +639,7 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d) struct sunxi_desc_function *func; u32 reg = sunxi_irq_ctrl_reg(d->hwirq); u8 idx = sunxi_irq_ctrl_offset(d->hwirq); + unsigned long flags; u32 val; func = sunxi_pinctrl_desc_find_function_by_pin(pctl, @@ -597,9 +649,13 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d) /* Change muxing to INT mode */ sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval); + spin_lock_irqsave(&pctl->lock, flags); + /* Unmask the IRQ */ val = readl(pctl->membase + reg); writel(val | (1 << idx), pctl->membase + reg); + + spin_unlock_irqrestore(&pctl->lock, flags); } static struct irq_chip sunxi_pinctrl_irq_chip = { @@ -752,6 +808,8 @@ static int sunxi_pinctrl_probe(struct platform_device *pdev) return -ENOMEM; platform_set_drvdata(pdev, pctl); + spin_lock_init(&pctl->lock); + pctl->membase = of_iomap(node, 0); if (!pctl->membase) return -ENOMEM; diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h index d68047d8f699..01c494f8a14f 100644 --- a/drivers/pinctrl/pinctrl-sunxi.h +++ b/drivers/pinctrl/pinctrl-sunxi.h @@ -14,6 +14,7 @@ #define __PINCTRL_SUNXI_H #include <linux/kernel.h> +#include <linux/spinlock.h> 
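
The pinctrl-sunxi hunks above all apply one pattern: every read-modify-write of a shared register now happens under the new pctl->lock with interrupts masked, so the pin-config, pmx and irq_chip paths can no longer interleave their readl/writel pairs and lose updates. A stripped-down sketch of that pattern (struct foo_chip and foo_update_bits() are illustrative, not from the driver):

    #include <linux/io.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct foo_chip {
        void __iomem *membase;
        spinlock_t lock;	/* spin_lock_init() in probe, as the patch adds */
    };

    /* Update only the bits in 'mask'; the lock makes the read-modify-write
     * atomic with respect to other CPUs and to the irq_chip callbacks. */
    static void foo_update_bits(struct foo_chip *chip, unsigned long reg,
                                u32 mask, u32 bits)
    {
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&chip->lock, flags);
        val = readl(chip->membase + reg);
        val &= ~mask;
        val |= bits & mask;
        writel(val, chip->membase + reg);
        spin_unlock_irqrestore(&chip->lock, flags);
    }

The gpio_set() change is the same idea plus a second fix: instead of blindly writing a one-bit mask, it reads the data register and updates only BIT(index), so the other pins sharing the register are no longer clobbered.
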
#define PA_BASE 0 #define PB_BASE 32 @@ -407,6 +408,7 @@ struct sunxi_pinctrl { unsigned ngroups; int irq; int irq_array[SUNXI_IRQ_NUMBER]; + spinlock_t lock; struct pinctrl_dev *pctl_dev; }; diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c index 767fee2ab340..26019531db15 100644 --- a/drivers/rtc/rtc-stmp3xxx.c +++ b/drivers/rtc/rtc-stmp3xxx.c @@ -23,6 +23,7 @@ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/interrupt.h> +#include <linux/delay.h> #include <linux/rtc.h> #include <linux/slab.h> #include <linux/of_device.h> @@ -119,24 +120,39 @@ static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev) } #endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */ -static void stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data) +static int stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data) { + int timeout = 5000; /* 3ms according to i.MX28 Ref Manual */ /* - * The datasheet doesn't say which way round the - * NEW_REGS/STALE_REGS bitfields go. In fact it's 0x1=P0, - * 0x2=P1, .., 0x20=P5, 0x40=ALARM, 0x80=SECONDS + * The i.MX28 Applications Processor Reference Manual, Rev. 1, 2010 + * states: + * | The order in which registers are updated is + * | Persistent 0, 1, 2, 3, 4, 5, Alarm, Seconds. + * | (This list is in bitfield order, from LSB to MSB, as they would + * | appear in the STALE_REGS and NEW_REGS bitfields of the HW_RTC_STAT + * | register. For example, the Seconds register corresponds to + * | STALE_REGS or NEW_REGS containing 0x80.) */ - while (readl(rtc_data->io + STMP3XXX_RTC_STAT) & - (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) - cpu_relax(); + do { + if (!(readl(rtc_data->io + STMP3XXX_RTC_STAT) & + (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT))) + return 0; + udelay(1); + } while (--timeout > 0); + return (readl(rtc_data->io + STMP3XXX_RTC_STAT) & + (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) ? 
-ETIME : 0; } /* Time read/write */ static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) { + int ret; struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); - stmp3xxx_wait_time(rtc_data); + ret = stmp3xxx_wait_time(rtc_data); + if (ret) + return ret; + rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm); return 0; } @@ -146,8 +162,7 @@ static int stmp3xxx_rtc_set_mmss(struct device *dev, unsigned long t) struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS); - stmp3xxx_wait_time(rtc_data); - return 0; + return stmp3xxx_wait_time(rtc_data); } /* interrupt(s) handler */ diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 609dbc2f7151..83b4ef4dfcf8 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -1119,11 +1119,11 @@ static int usbtmc_probe(struct usb_interface *intf, /* Determine if it is a Rigol or not */ data->rigol_quirk = 0; dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n", - data->usb_dev->descriptor.idVendor, - data->usb_dev->descriptor.idProduct); + le16_to_cpu(data->usb_dev->descriptor.idVendor), + le16_to_cpu(data->usb_dev->descriptor.idProduct)); for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) { - if ((usbtmc_id_quirk[n].idVendor == data->usb_dev->descriptor.idVendor) && - (usbtmc_id_quirk[n].idProduct == data->usb_dev->descriptor.idProduct)) { + if ((usbtmc_id_quirk[n].idVendor == le16_to_cpu(data->usb_dev->descriptor.idVendor)) && + (usbtmc_id_quirk[n].idProduct == le16_to_cpu(data->usb_dev->descriptor.idProduct))) { dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n"); data->rigol_quirk = 1; break; diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index a63598895077..5b44cd47da5b 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -78,6 +78,12 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x04d8, 0x000c), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* CarrolTouch 4000U */ + { USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME }, + + /* CarrolTouch 4500U */ + { USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Samsung Android phone modem - ID conflict with SPH-I500 */ { USB_DEVICE(0x04e8, 0x6601), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index f80d0330d548..8e3c878f38cf 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -1391,21 +1391,20 @@ iso_stream_schedule ( /* Behind the scheduling threshold? */ if (unlikely(start < next)) { + unsigned now2 = (now - base) & (mod - 1); /* USB_ISO_ASAP: Round up to the first available slot */ if (urb->transfer_flags & URB_ISO_ASAP) start += (next - start + period - 1) & -period; /* - * Not ASAP: Use the next slot in the stream. If - * the entire URB falls before the threshold, fail. + * Not ASAP: Use the next slot in the stream, + * no matter what. 
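
The rtc-stmp3xxx change above converts an unbounded cpu_relax() spin on the STALE_REGS bit into a bounded poll that gives the hardware a few milliseconds and then fails with -ETIME, so a wedged RTC can no longer hang the caller. The general shape, as a hedged sketch (poll_bits_clear() is illustrative, not a driver function):

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/types.h>

    /* Spin for at most timeout_us microseconds waiting for the bits in
     * 'mask' to clear, instead of looping forever. */
    static int poll_bits_clear(void __iomem *reg, u32 mask, int timeout_us)
    {
        do {
            if (!(readl(reg) & mask))
                return 0;
            udelay(1);
        } while (--timeout_us > 0);

        /* re-read once more, as the driver does, in case we raced */
        return (readl(reg) & mask) ? -ETIME : 0;
    }
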
*/ - else if (start + span - period < next) { - ehci_dbg(ehci, "iso urb late %p (%u+%u < %u)\n", + else if (start + span - period < now2) { + ehci_dbg(ehci, "iso underrun %p (%u+%u < %u)\n", urb, start + base, - span - period, next + base); - status = -EXDEV; - goto fail; + span - period, now2 + base); } } diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index eb3c8c142fa9..eeb27208c0d1 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c @@ -830,7 +830,7 @@ static int adu_probe(struct usb_interface *interface, /* let the user know what node this device is now attached to */ dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n", - udev->descriptor.idProduct, dev->serial_number, + le16_to_cpu(udev->descriptor.idProduct), dev->serial_number, (dev->minor - ADU_MINOR_BASE)); exit: dbg(2, " %s : leave, return value %p (dev)", __func__, dev); diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index 5a979729f8ec..58c17fdc85eb 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -2303,7 +2303,7 @@ static int keyspan_startup(struct usb_serial *serial) if (d_details == NULL) { dev_err(&serial->dev->dev, "%s - unknown product id %x\n", __func__, le16_to_cpu(serial->dev->descriptor.idProduct)); - return 1; + return -ENODEV; } /* Setup private data for serial driver */ diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 51da424327b0..b01300164fc0 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -90,6 +90,7 @@ struct urbtracker { struct list_head urblist_entry; struct kref ref_count; struct urb *urb; + struct usb_ctrlrequest *setup; }; enum mos7715_pp_modes { @@ -271,6 +272,7 @@ static void destroy_urbtracker(struct kref *kref) struct mos7715_parport *mos_parport = urbtrack->mos_parport; usb_free_urb(urbtrack->urb); + kfree(urbtrack->setup); kfree(urbtrack); kref_put(&mos_parport->ref_count, destroy_mos_parport); } @@ -355,7 +357,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, struct urbtracker *urbtrack; int ret_val; unsigned long flags; - struct usb_ctrlrequest setup; struct usb_serial *serial = mos_parport->serial; struct usb_device *usbdev = serial->dev; @@ -373,14 +374,20 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, kfree(urbtrack); return -ENOMEM; } - setup.bRequestType = (__u8)0x40; - setup.bRequest = (__u8)0x0e; - setup.wValue = get_reg_value(reg, dummy); - setup.wIndex = get_reg_index(reg); - setup.wLength = 0; + urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL); + if (!urbtrack->setup) { + usb_free_urb(urbtrack->urb); + kfree(urbtrack); + return -ENOMEM; + } + urbtrack->setup->bRequestType = (__u8)0x40; + urbtrack->setup->bRequest = (__u8)0x0e; + urbtrack->setup->wValue = get_reg_value(reg, dummy); + urbtrack->setup->wIndex = get_reg_index(reg); + urbtrack->setup->wLength = 0; usb_fill_control_urb(urbtrack->urb, usbdev, usb_sndctrlpipe(usbdev, 0), - (unsigned char *)&setup, + (unsigned char *)urbtrack->setup, NULL, 0, async_complete, urbtrack); kref_init(&urbtrack->ref_count); INIT_LIST_HEAD(&urbtrack->urblist_entry); diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index d953d674f222..3bac4693c038 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -2193,7 +2193,7 @@ static int mos7810_check(struct usb_serial *serial) static int mos7840_probe(struct usb_serial *serial, const struct usb_device_id *id) { - 
u16 product = serial->dev->descriptor.idProduct; + u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); u8 *buf; int device_type; diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 375b5a400b6f..5c9f9b1d7736 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -1536,14 +1536,15 @@ static int ti_download_firmware(struct ti_device *tdev) char buf[32]; /* try ID specific firmware first, then try generic firmware */ - sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor, - dev->descriptor.idProduct); + sprintf(buf, "ti_usb-v%04x-p%04x.fw", + le16_to_cpu(dev->descriptor.idVendor), + le16_to_cpu(dev->descriptor.idProduct)); status = request_firmware(&fw_p, buf, &dev->dev); if (status != 0) { buf[0] = '\0'; - if (dev->descriptor.idVendor == MTS_VENDOR_ID) { - switch (dev->descriptor.idProduct) { + if (le16_to_cpu(dev->descriptor.idVendor) == MTS_VENDOR_ID) { + switch (le16_to_cpu(dev->descriptor.idProduct)) { case MTS_CDMA_PRODUCT_ID: strcpy(buf, "mts_cdma.fw"); break; diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index 8257d30c4072..85365784040b 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c @@ -291,18 +291,18 @@ static void usb_wwan_indat_callback(struct urb *urb) tty_flip_buffer_push(&port->port); } else dev_dbg(dev, "%s: empty read urb received\n", __func__); - - /* Resubmit urb so we continue receiving */ - err = usb_submit_urb(urb, GFP_ATOMIC); - if (err) { - if (err != -EPERM) { - dev_err(dev, "%s: resubmit read urb failed. (%d)\n", __func__, err); - /* busy also in error unless we are killed */ - usb_mark_last_busy(port->serial->dev); - } - } else { + } + /* Resubmit urb so we continue receiving */ + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err) { + if (err != -EPERM) { + dev_err(dev, "%s: resubmit read urb failed. (%d)\n", + __func__, err); + /* busy also in error unless we are killed */ usb_mark_last_busy(port->serial->dev); } + } else { + usb_mark_last_busy(port->serial->dev); } } diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index 16968c899493..d3493ca0525d 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c @@ -1226,6 +1226,12 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb) } spin_lock_irqsave(&xfer->lock, flags); rpipe = xfer->ep->hcpriv; + if (rpipe == NULL) { + pr_debug("%s: xfer id 0x%08X has no RPIPE. %s", + __func__, wa_xfer_id(xfer), + "Probably already aborted.\n" ); + goto out_unlock; + } /* Check the delayed list -> if there, release and complete */ spin_lock_irqsave(&wa->xfer_list_lock, flags2); if (!list_empty(&xfer->list_node) && xfer->seg == NULL) @@ -1644,8 +1650,7 @@ static void wa_xfer_result_cb(struct urb *urb) break; } usb_status = xfer_result->bTransferStatus & 0x3f; - if (usb_status == WA_XFER_STATUS_ABORTED - || usb_status == WA_XFER_STATUS_NOT_FOUND) + if (usb_status == WA_XFER_STATUS_NOT_FOUND) /* taken care of already */ break; xfer_id = xfer_result->dwTransferID; diff --git a/drivers/xen/events.c b/drivers/xen/events.c index a58ac435a9a4..5e8be462aed5 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -348,7 +348,7 @@ static void init_evtchn_cpu_bindings(void) for_each_possible_cpu(i) memset(per_cpu(cpu_evtchn_mask, i), - (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i))); + (i == 0) ? 
~0 : 0, NR_EVENT_CHANNELS/8); } static inline void clear_evtchn(int port) @@ -1493,8 +1493,10 @@ void rebind_evtchn_irq(int evtchn, int irq) /* Rebind an evtchn so that it gets delivered to a specific cpu */ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) { + struct shared_info *s = HYPERVISOR_shared_info; struct evtchn_bind_vcpu bind_vcpu; int evtchn = evtchn_from_irq(irq); + int masked; if (!VALID_EVTCHN(evtchn)) return -1; @@ -1511,6 +1513,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) bind_vcpu.vcpu = tcpu; /* + * Mask the event while changing the VCPU binding to prevent + * it being delivered on an unexpected VCPU. + */ + masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask)); + + /* * If this fails, it usually just indicates that we're dealing with a * virq or IPI channel, which don't actually need to be rebound. Ignore * it, but don't do the xenlinux-level rebind in that case. @@ -1518,6 +1526,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) bind_evtchn_to_cpu(evtchn, tcpu); + if (!masked) + unmask_evtchn(evtchn); + return 0; } diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 45e57cc38200..fc6f4f3a1a9d 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -43,17 +43,18 @@ cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server) server->secmech.md5 = crypto_alloc_shash("md5", 0, 0); if (IS_ERR(server->secmech.md5)) { cifs_dbg(VFS, "could not allocate crypto md5\n"); - return PTR_ERR(server->secmech.md5); + rc = PTR_ERR(server->secmech.md5); + server->secmech.md5 = NULL; + return rc; } size = sizeof(struct shash_desc) + crypto_shash_descsize(server->secmech.md5); server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL); if (!server->secmech.sdescmd5) { - rc = -ENOMEM; crypto_free_shash(server->secmech.md5); server->secmech.md5 = NULL; - return rc; + return -ENOMEM; } server->secmech.sdescmd5->shash.tfm = server->secmech.md5; server->secmech.sdescmd5->shash.flags = 0x0; @@ -421,7 +422,7 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp) if (blobptr + attrsize > blobend) break; if (type == NTLMSSP_AV_NB_DOMAIN_NAME) { - if (!attrsize) + if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN) break; if (!ses->domainName) { ses->domainName = @@ -591,6 +592,7 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash) static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server) { + int rc; unsigned int size; /* check if already allocated */ @@ -600,7 +602,9 @@ static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server) server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0); if (IS_ERR(server->secmech.hmacmd5)) { cifs_dbg(VFS, "could not allocate crypto hmacmd5\n"); - return PTR_ERR(server->secmech.hmacmd5); + rc = PTR_ERR(server->secmech.hmacmd5); + server->secmech.hmacmd5 = NULL; + return rc; } size = sizeof(struct shash_desc) + diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 4bdd547dbf6f..85ea98d139fc 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -147,18 +147,17 @@ cifs_read_super(struct super_block *sb) goto out_no_root; } + if (cifs_sb_master_tcon(cifs_sb)->nocase) + sb->s_d_op = &cifs_ci_dentry_ops; + else + sb->s_d_op = &cifs_dentry_ops; + sb->s_root = d_make_root(inode); if (!sb->s_root) { rc = -ENOMEM; goto out_no_root; } - /* do that *after* d_make_root() - we want NULL ->d_op for root here */ - if (cifs_sb_master_tcon(cifs_sb)->nocase) - sb->s_d_op = &cifs_ci_dentry_ops; - 
else - sb->s_d_op = &cifs_dentry_ops; - #ifdef CONFIG_CIFS_NFSD_EXPORT if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { cifs_dbg(FYI, "export ops supported\n"); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 1fdc37041057..52ca861ed35e 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -44,6 +44,7 @@ #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1) #define MAX_SERVER_SIZE 15 #define MAX_SHARE_SIZE 80 +#define CIFS_MAX_DOMAINNAME_LEN 256 /* max domain name length */ #define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */ #define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */ @@ -369,6 +370,9 @@ struct smb_version_operations { void (*generate_signingkey)(struct TCP_Server_Info *server); int (*calc_signature)(struct smb_rqst *rqst, struct TCP_Server_Info *server); + int (*query_mf_symlink)(const unsigned char *path, char *pbuf, + unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb, + unsigned int xid); }; struct smb_version_values { diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index f7e584d047e2..b29a012bed33 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -497,5 +497,7 @@ void cifs_writev_complete(struct work_struct *work); struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete); void cifs_writedata_release(struct kref *refcount); - +int open_query_close_cifs_symlink(const unsigned char *path, char *pbuf, + unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb, + unsigned int xid); #endif /* _CIFSPROTO_H */ diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index fa68813396b5..d67c550c4980 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1675,7 +1675,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, if (string == NULL) goto out_nomem; - if (strnlen(string, 256) == 256) { + if (strnlen(string, CIFS_MAX_DOMAINNAME_LEN) + == CIFS_MAX_DOMAINNAME_LEN) { printk(KERN_WARNING "CIFS: domain name too" " long\n"); goto cifs_parse_mount_err; @@ -2276,8 +2277,8 @@ cifs_put_smb_ses(struct cifs_ses *ses) #ifdef CONFIG_KEYS -/* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */ -#define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1) +/* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */ +#define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1) /* Populate username and pw fields from keyring if possible */ static int diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 1e57f36ea1b2..7e36ae34e947 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -647,6 +647,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush) oflags, &oplock, &cfile->fid.netfid, xid); if (rc == 0) { cifs_dbg(FYI, "posix reopen succeeded\n"); + oparms.reconnect = true; goto reopen_success; } /* diff --git a/fs/cifs/link.c b/fs/cifs/link.c index b83c3f5646bd..562044f700e5 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -305,67 +305,89 @@ CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr) } int -CIFSCheckMFSymlink(struct cifs_fattr *fattr, - const unsigned char *path, - struct cifs_sb_info *cifs_sb, unsigned int xid) +open_query_close_cifs_symlink(const unsigned char *path, char *pbuf, + unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb, + unsigned int xid) { int rc; int oplock = 0; __u16 netfid = 0; struct tcon_link *tlink; - struct cifs_tcon *pTcon; + struct cifs_tcon *ptcon; struct cifs_io_parms io_parms; - u8 *buf; - char *pbuf; - unsigned int bytes_read = 0; int buf_type = CIFS_NO_BUFFER; - unsigned int link_len = 0; FILE_ALL_INFO 
file_info; - if (!CIFSCouldBeMFSymlink(fattr)) - /* it's not a symlink */ - return 0; - tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); - pTcon = tlink_tcon(tlink); + ptcon = tlink_tcon(tlink); - rc = CIFSSMBOpen(xid, pTcon, path, FILE_OPEN, GENERIC_READ, + rc = CIFSSMBOpen(xid, ptcon, path, FILE_OPEN, GENERIC_READ, CREATE_NOT_DIR, &netfid, &oplock, &file_info, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); - if (rc != 0) - goto out; + if (rc != 0) { + cifs_put_tlink(tlink); + return rc; + } if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { - CIFSSMBClose(xid, pTcon, netfid); + CIFSSMBClose(xid, ptcon, netfid); + cifs_put_tlink(tlink); /* it's not a symlink */ - goto out; + return rc; } - buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); - if (!buf) { - rc = -ENOMEM; - goto out; - } - pbuf = buf; io_parms.netfid = netfid; io_parms.pid = current->tgid; - io_parms.tcon = pTcon; + io_parms.tcon = ptcon; io_parms.offset = 0; io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE; - rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type); - CIFSSMBClose(xid, pTcon, netfid); - if (rc != 0) { - kfree(buf); + rc = CIFSSMBRead(xid, &io_parms, pbytes_read, &pbuf, &buf_type); + CIFSSMBClose(xid, ptcon, netfid); + cifs_put_tlink(tlink); + return rc; +} + + +int +CIFSCheckMFSymlink(struct cifs_fattr *fattr, + const unsigned char *path, + struct cifs_sb_info *cifs_sb, unsigned int xid) +{ + int rc = 0; + u8 *buf = NULL; + unsigned int link_len = 0; + unsigned int bytes_read = 0; + struct cifs_tcon *ptcon; + + if (!CIFSCouldBeMFSymlink(fattr)) + /* it's not a symlink */ + return 0; + + buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); + if (!buf) { + rc = -ENOMEM; goto out; } + ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb)); + if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink)) + rc = ptcon->ses->server->ops->query_mf_symlink(path, buf, + &bytes_read, cifs_sb, xid); + else + goto out; + + if (rc != 0) + goto out; + + if (bytes_read == 0) /* not a symlink */ + goto out; + rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, NULL); - kfree(buf); if (rc == -EINVAL) { /* it's not a symlink */ rc = 0; @@ -381,7 +403,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr, fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO; fattr->cf_dtype = DT_LNK; out: - cifs_put_tlink(tlink); + kfree(buf); return rc; } diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index ab8778469394..69d2c826a23b 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -111,6 +111,14 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, return; } + /* + * If we know that the inode will need to be revalidated immediately, + * then don't create a new dentry for it. We'll end up doing an on + * the wire call either way and this spares us an invalidation. 
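
The link.c rework above splits the open/read/close sequence into open_query_close_cifs_symlink() and reaches it through the new query_mf_symlink slot in smb_version_operations (wired up for SMB1 in the smb1ops.c hunk below), with the caller probing the pointer first so dialects without the op simply skip the check. A userspace sketch of that dispatch shape (all names here are illustrative):

    #include <stdio.h>
    #include <stddef.h>

    struct proto_ops {
        int (*query_mf_symlink)(const char *path, char *buf,
                                unsigned int *bytes_read);
    };

    static int smb1_query_mf_symlink(const char *path, char *buf,
                                     unsigned int *bytes_read)
    {
        *bytes_read = 0;	/* stub: the real op opens, reads, closes */
        return 0;
    }

    static const struct proto_ops smb1_ops = {
        .query_mf_symlink = smb1_query_mf_symlink,
    };

    static const struct proto_ops smb2_ops = { 0 };	/* op not implemented */

    static int check_symlink(const struct proto_ops *ops, const char *path)
    {
        unsigned int n;
        char buf[64];

        if (!ops->query_mf_symlink)
            return 0;	/* no op for this dialect: not a symlink */
        return ops->query_mf_symlink(path, buf, &n);
    }

    int main(void)
    {
        printf("%d %d\n", check_symlink(&smb1_ops, "a"),
               check_symlink(&smb2_ops, "a"));
        return 0;
    }
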
+ */ + if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL) + return; + dentry = d_alloc(parent, name); if (!dentry) return; diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 79358e341fd2..08dd37bb23aa 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -197,7 +197,7 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses, bytes_ret = 0; } else bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName, - 256, nls_cp); + CIFS_MAX_DOMAINNAME_LEN, nls_cp); bcc_ptr += 2 * bytes_ret; bcc_ptr += 2; /* account for null terminator */ @@ -255,8 +255,8 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses, /* copy domain */ if (ses->domainName != NULL) { - strncpy(bcc_ptr, ses->domainName, 256); - bcc_ptr += strnlen(ses->domainName, 256); + strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN); + bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN); } /* else we will send a null domain name so the server will default to its own domain */ *bcc_ptr = 0; diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 6457690731a2..60943978aec3 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -944,6 +944,7 @@ struct smb_version_operations smb1_operations = { .mand_lock = cifs_mand_lock, .mand_unlock_range = cifs_unlock_range, .push_mand_locks = cifs_push_mandatory_locks, + .query_mf_symlink = open_query_close_cifs_symlink, }; struct smb_version_values smb1_values = { diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c index 301b191270b9..4f2300d020c7 100644 --- a/fs/cifs/smb2transport.c +++ b/fs/cifs/smb2transport.c @@ -42,6 +42,7 @@ static int smb2_crypto_shash_allocate(struct TCP_Server_Info *server) { + int rc; unsigned int size; if (server->secmech.sdeschmacsha256 != NULL) @@ -50,7 +51,9 @@ smb2_crypto_shash_allocate(struct TCP_Server_Info *server) server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0); if (IS_ERR(server->secmech.hmacsha256)) { cifs_dbg(VFS, "could not allocate crypto hmacsha256\n"); - return PTR_ERR(server->secmech.hmacsha256); + rc = PTR_ERR(server->secmech.hmacsha256); + server->secmech.hmacsha256 = NULL; + return rc; } size = sizeof(struct shash_desc) + @@ -87,7 +90,9 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server) server->secmech.sdeschmacsha256 = NULL; crypto_free_shash(server->secmech.hmacsha256); server->secmech.hmacsha256 = NULL; - return PTR_ERR(server->secmech.cmacaes); + rc = PTR_ERR(server->secmech.cmacaes); + server->secmech.cmacaes = NULL; + return rc; } size = sizeof(struct shash_desc) + diff --git a/fs/exec.c b/fs/exec.c index 9c73def87642..fd774c7cb483 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -608,7 +608,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) return -ENOMEM; lru_add_drain(); - tlb_gather_mmu(&tlb, mm, 0); + tlb_gather_mmu(&tlb, mm, old_start, old_end); if (new_end > old_start) { /* * when the old and new regions overlap clear from new_end. @@ -625,7 +625,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) free_pgd_range(&tlb, old_start, old_end, new_end, vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING); } - tlb_finish_mmu(&tlb, new_end, old_end); + tlb_finish_mmu(&tlb, old_start, old_end); /* * Shrink the vma to just the new range. Always succeeds. 
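
The CIFS_MAX_DOMAINNAME_LEN changes scattered through the cifs hunks above replace hard-coded 256 limits with one named constant and bound every probe and copy with it; strnlen() never scans past the limit, so an over-long or unterminated name is rejected before anything is copied. A small sketch of the pattern (set_domain() is illustrative):

    #include <stdio.h>
    #include <string.h>

    #define CIFS_MAX_DOMAINNAME_LEN 256	/* the named limit the patch adds */

    /* Reject an over-long name up front, then copy with a matching bound. */
    static int set_domain(char *dst, size_t dstlen, const char *src)
    {
        if (strnlen(src, CIFS_MAX_DOMAINNAME_LEN) == CIFS_MAX_DOMAINNAME_LEN)
            return -1;	/* too long, or not NUL-terminated in range */
        strncpy(dst, src, dstlen - 1);
        dst[dstlen - 1] = '\0';
        return 0;
    }

    int main(void)
    {
        char buf[CIFS_MAX_DOMAINNAME_LEN];

        printf("%d\n", set_domain(buf, sizeof(buf), "EXAMPLE.COM"));
        return 0;
    }
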
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index b577e45425b0..0ab26fbf3380 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -2086,6 +2086,7 @@ extern int ext4_sync_inode(handle_t *, struct inode *); extern void ext4_dirty_inode(struct inode *, int); extern int ext4_change_inode_journal_flag(struct inode *, int); extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *); +extern int ext4_inode_attach_jinode(struct inode *inode); extern int ext4_can_truncate(struct inode *inode); extern void ext4_truncate(struct inode *); extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length); diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index 72a3600aedbd..17ac112ab101 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c @@ -255,10 +255,10 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line, set_buffer_prio(bh); if (ext4_handle_valid(handle)) { err = jbd2_journal_dirty_metadata(handle, bh); - if (err) { - /* Errors can only happen if there is a bug */ - handle->h_err = err; - __ext4_journal_stop(where, line, handle); + /* Errors can only happen if there is a bug */ + if (WARN_ON_ONCE(err)) { + ext4_journal_abort_handle(where, line, __func__, bh, + handle, err); } } else { if (inode) diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 6f4cc567c382..319c9d26279a 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -219,7 +219,6 @@ static int ext4_file_open(struct inode * inode, struct file * filp) { struct super_block *sb = inode->i_sb; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); - struct ext4_inode_info *ei = EXT4_I(inode); struct vfsmount *mnt = filp->f_path.mnt; struct path path; char buf[64], *cp; @@ -259,22 +258,10 @@ static int ext4_file_open(struct inode * inode, struct file * filp) * Set up the jbd2_inode if we are opening the inode for * writing and the journal is present */ - if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) { - struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL); - - spin_lock(&inode->i_lock); - if (!ei->jinode) { - if (!jinode) { - spin_unlock(&inode->i_lock); - return -ENOMEM; - } - ei->jinode = jinode; - jbd2_journal_init_jbd_inode(ei->jinode, inode); - jinode = NULL; - } - spin_unlock(&inode->i_lock); - if (unlikely(jinode != NULL)) - jbd2_free_inode(jinode); + if (filp->f_mode & FMODE_WRITE) { + int ret = ext4_inode_attach_jinode(inode); + if (ret < 0) + return ret; } return dquot_file_open(inode, filp); } diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index dd32a2eacd0d..c2ca04e67a4f 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3533,6 +3533,18 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) offset; } + if (offset & (sb->s_blocksize - 1) || + (offset + length) & (sb->s_blocksize - 1)) { + /* + * Attach jinode to inode for jbd2 if we do any zeroing of + * partial block + */ + ret = ext4_inode_attach_jinode(inode); + if (ret < 0) + goto out_mutex; + + } + first_block_offset = round_up(offset, sb->s_blocksize); last_block_offset = round_down((offset + length), sb->s_blocksize) - 1; @@ -3601,6 +3613,31 @@ out_mutex: return ret; } +int ext4_inode_attach_jinode(struct inode *inode) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + struct jbd2_inode *jinode; + + if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal) + return 0; + + jinode = jbd2_alloc_inode(GFP_KERNEL); + spin_lock(&inode->i_lock); + if (!ei->jinode) { + if (!jinode) { + spin_unlock(&inode->i_lock); + return -ENOMEM; + } + ei->jinode = jinode; + jbd2_journal_init_jbd_inode(ei->jinode, 
inode); + jinode = NULL; + } + spin_unlock(&inode->i_lock); + if (unlikely(jinode != NULL)) + jbd2_free_inode(jinode); + return 0; +} + /* * ext4_truncate() * @@ -3661,6 +3698,12 @@ void ext4_truncate(struct inode *inode) return; } + /* If we zero-out tail of the page, we have to create jinode for jbd2 */ + if (inode->i_size & (inode->i_sb->s_blocksize - 1)) { + if (ext4_inode_attach_jinode(inode) < 0) + return; + } + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) credits = ext4_writepage_trans_blocks(inode); else diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 9491ac0590f7..c0427e2f6648 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -77,8 +77,10 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2) memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data)); memswap(&ei1->i_flags, &ei2->i_flags, sizeof(ei1->i_flags)); memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize)); - memswap(&ei1->i_es_tree, &ei2->i_es_tree, sizeof(ei1->i_es_tree)); - memswap(&ei1->i_es_lru_nr, &ei2->i_es_lru_nr, sizeof(ei1->i_es_lru_nr)); + ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS); + ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS); + ext4_es_lru_del(inode1); + ext4_es_lru_del(inode2); isize = i_size_read(inode1); i_size_write(inode1, i_size_read(inode2)); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 36b141e420b7..b59373b625e9 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1359,7 +1359,7 @@ static const struct mount_opts { {Opt_delalloc, EXT4_MOUNT_DELALLOC, MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, - MOPT_EXT4_ONLY | MOPT_CLEAR | MOPT_EXPLICIT}, + MOPT_EXT4_ONLY | MOPT_CLEAR}, {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, MOPT_EXT4_ONLY | MOPT_SET}, {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT | @@ -3483,7 +3483,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) } if (test_opt(sb, DIOREAD_NOLOCK)) { ext4_msg(sb, KERN_ERR, "can't mount with " - "both data=journal and delalloc"); + "both data=journal and dioread_nolock"); goto failed_mount; } if (test_opt(sb, DELALLOC)) @@ -4727,6 +4727,21 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) goto restore_opts; } + if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { + if (test_opt2(sb, EXPLICIT_DELALLOC)) { + ext4_msg(sb, KERN_ERR, "can't mount with " + "both data=journal and delalloc"); + err = -EINVAL; + goto restore_opts; + } + if (test_opt(sb, DIOREAD_NOLOCK)) { + ext4_msg(sb, KERN_ERR, "can't mount with " + "both data=journal and dioread_nolock"); + err = -EINVAL; + goto restore_opts; + } + } + if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) ext4_abort(sb, "Abort forced by user"); diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 9435384562a2..544a809819c3 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1838,14 +1838,14 @@ int __init gfs2_glock_init(void) glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 0); - if (IS_ERR(glock_workqueue)) - return PTR_ERR(glock_workqueue); + if (!glock_workqueue) + return -ENOMEM; gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", WQ_MEM_RECLAIM | WQ_FREEZABLE, 0); - if (IS_ERR(gfs2_delete_workqueue)) { + if (!gfs2_delete_workqueue) { destroy_workqueue(glock_workqueue); - return PTR_ERR(gfs2_delete_workqueue); + return -ENOMEM; } register_shrinker(&glock_shrinker); diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 5f2e5224c51c..e2e0a90396e7 100644 --- a/fs/gfs2/glops.c +++ 
b/fs/gfs2/glops.c @@ -47,7 +47,8 @@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh) * None of the buffers should be dirty, locked, or pinned. */ -static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) +static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync, + unsigned int nr_revokes) { struct gfs2_sbd *sdp = gl->gl_sbd; struct list_head *head = &gl->gl_ail_list; @@ -57,7 +58,9 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) gfs2_log_lock(sdp); spin_lock(&sdp->sd_ail_lock); - list_for_each_entry_safe(bd, tmp, head, bd_ail_gl_list) { + list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) { + if (nr_revokes == 0) + break; bh = bd->bd_bh; if (bh->b_state & b_state) { if (fsync) @@ -65,6 +68,7 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) gfs2_ail_error(gl, bh); } gfs2_trans_add_revoke(sdp, bd); + nr_revokes--; } GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count)); spin_unlock(&sdp->sd_ail_lock); @@ -91,7 +95,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl) WARN_ON_ONCE(current->journal_info); current->journal_info = &tr; - __gfs2_ail_flush(gl, 0); + __gfs2_ail_flush(gl, 0, tr.tr_revokes); gfs2_trans_end(sdp); gfs2_log_flush(sdp, NULL); @@ -101,15 +105,19 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) { struct gfs2_sbd *sdp = gl->gl_sbd; unsigned int revokes = atomic_read(&gl->gl_ail_count); + unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64); int ret; if (!revokes) return; - ret = gfs2_trans_begin(sdp, 0, revokes); + while (revokes > max_revokes) + max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64); + + ret = gfs2_trans_begin(sdp, 0, max_revokes); if (ret) return; - __gfs2_ail_flush(gl, fsync); + __gfs2_ail_flush(gl, fsync, max_revokes); gfs2_trans_end(sdp); gfs2_log_flush(sdp, NULL); } diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index bbb2715171cd..64915eeae5a7 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -594,7 +594,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, } gfs2_glock_dq_uninit(ghs); if (IS_ERR(d)) - return PTR_RET(d); + return PTR_ERR(d); return error; } else if (error != -ENOENT) { goto fail_gunlock; @@ -1750,6 +1750,10 @@ static ssize_t gfs2_getxattr(struct dentry *dentry, const char *name, struct gfs2_holder gh; int ret; + /* For selinux during lookup */ + if (gfs2_glock_is_locked_by_me(ip->i_gl)) + return generic_getxattr(dentry, name, data, size); + gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); ret = gfs2_glock_nq(&gh); if (ret == 0) { diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index e04d0e09ee7b..7b0f5043cf24 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -155,7 +155,7 @@ static int __init init_gfs2_fs(void) goto fail_wq; gfs2_control_wq = alloc_workqueue("gfs2_control", - WQ_NON_REENTRANT | WQ_UNBOUND | WQ_FREEZABLE, 0); + WQ_UNBOUND | WQ_FREEZABLE, 0); if (!gfs2_control_wq) goto fail_recovery; diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index a3f868ae3fd4..34423978b170 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -463,6 +463,14 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb, return inode; } +/* + * Hugetlbfs is not reclaimable; therefore its i_mmap_mutex will never + * be taken from reclaim -- unlike regular filesystems. This needs an + * annotation because huge_pmd_share() does an allocation under + * i_mmap_mutex. 
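
The hugetlbfs comment above, together with the lockdep_set_class() call that follows it, is a pure lockdep annotation: all address_space i_mmap_mutex instances normally share one lock class, so hugetlbfs inherits reclaim-related reports that cannot apply to it, and giving its instances their own key separates the classes. A minimal sketch of the mechanism (names are illustrative):

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    /* One static key per distinct locking rule: lockdep groups locks
     * by class, not by instance. */
    static struct lock_class_key demo_mutex_key;

    static void demo_init(struct mutex *m)
    {
        mutex_init(m);
        /* reclassify: "this mutex is never taken from reclaim" */
        lockdep_set_class(m, &demo_mutex_key);
    }
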
+ */ +struct lock_class_key hugetlbfs_i_mmap_mutex_key; + static struct inode *hugetlbfs_get_inode(struct super_block *sb, struct inode *dir, umode_t mode, dev_t dev) @@ -474,6 +482,8 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, struct hugetlbfs_inode_info *info; inode->i_ino = get_next_ino(); inode_init_owner(inode, dir, mode); + lockdep_set_class(&inode->i_mapping->i_mmap_mutex, + &hugetlbfs_i_mmap_mutex_key); inode->i_mapping->a_ops = &hugetlbfs_aops; inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 79736a28d84f..2abf97b2a592 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -1757,7 +1757,7 @@ try_again: goto out; } else if (ret == 1) { clusters_need = wc->w_clen; - ret = ocfs2_refcount_cow(inode, filp, di_bh, + ret = ocfs2_refcount_cow(inode, di_bh, wc->w_cpos, wc->w_clen, UINT_MAX); if (ret) { mlog_errno(ret); diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index eb760d8acd50..30544ce8e9f7 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -2153,11 +2153,9 @@ int ocfs2_empty_dir(struct inode *inode) { int ret; struct ocfs2_empty_dir_priv priv = { - .ctx.actor = ocfs2_empty_dir_filldir + .ctx.actor = ocfs2_empty_dir_filldir, }; - memset(&priv, 0, sizeof(priv)); - if (ocfs2_dir_indexed(inode)) { ret = ocfs2_empty_dir_dx(inode, &priv); if (ret) diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 41000f223ca4..3261d71319ee 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -370,7 +370,7 @@ static int ocfs2_cow_file_pos(struct inode *inode, if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) goto out; - return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1); + return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1); out: return status; @@ -899,7 +899,7 @@ static int ocfs2_zero_extend_get_range(struct inode *inode, zero_clusters = last_cpos - zero_cpos; if (needs_cow) { - rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos, + rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos, zero_clusters, UINT_MAX); if (rc) { mlog_errno(rc); @@ -2078,7 +2078,7 @@ static int ocfs2_prepare_inode_for_refcount(struct inode *inode, *meta_level = 1; - ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX); + ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX); if (ret) mlog_errno(ret); out: diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 96f9ac237e86..0a992737dcaf 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h @@ -537,7 +537,7 @@ static inline int ocfs2_calc_extend_credits(struct super_block *sb, extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth); return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks + - ocfs2_quota_trans_credits(sb) + bits_wanted; + ocfs2_quota_trans_credits(sb); } static inline int ocfs2_calc_symlink_credits(struct super_block *sb) diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c index f1fc172175b6..452068b45749 100644 --- a/fs/ocfs2/move_extents.c +++ b/fs/ocfs2/move_extents.c @@ -69,7 +69,7 @@ static int __ocfs2_move_extent(handle_t *handle, u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci); u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos); - ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos, + ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos, p_cpos, new_p_cpos, len); if (ret) { mlog_errno(ret); diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 9f6b96a09615..a70d604593b6 100644 --- 
a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -49,7 +49,6 @@ struct ocfs2_cow_context { struct inode *inode; - struct file *file; u32 cow_start; u32 cow_len; struct ocfs2_extent_tree data_et; @@ -66,7 +65,7 @@ struct ocfs2_cow_context { u32 *num_clusters, unsigned int *extent_flags); int (*cow_duplicate_clusters)(handle_t *handle, - struct file *file, + struct inode *inode, u32 cpos, u32 old_cluster, u32 new_cluster, u32 new_len); }; @@ -2922,14 +2921,12 @@ static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh) } int ocfs2_duplicate_clusters_by_page(handle_t *handle, - struct file *file, + struct inode *inode, u32 cpos, u32 old_cluster, u32 new_cluster, u32 new_len) { int ret = 0, partial; - struct inode *inode = file_inode(file); - struct ocfs2_caching_info *ci = INODE_CACHE(inode); - struct super_block *sb = ocfs2_metadata_cache_get_super(ci); + struct super_block *sb = inode->i_sb; u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); struct page *page; pgoff_t page_index; @@ -2978,13 +2975,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize) BUG_ON(PageDirty(page)); - if (PageReadahead(page)) { - page_cache_async_readahead(mapping, - &file->f_ra, file, - page, page_index, - readahead_pages); - } - if (!PageUptodate(page)) { ret = block_read_full_page(page, ocfs2_get_block); if (ret) { @@ -3004,7 +2994,8 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, } } - ocfs2_map_and_dirty_page(inode, handle, from, to, + ocfs2_map_and_dirty_page(inode, + handle, from, to, page, 0, &new_block); mark_page_accessed(page); unlock: @@ -3020,12 +3011,11 @@ unlock: } int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, - struct file *file, + struct inode *inode, u32 cpos, u32 old_cluster, u32 new_cluster, u32 new_len) { int ret = 0; - struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; struct ocfs2_caching_info *ci = INODE_CACHE(inode); int i, blocks = ocfs2_clusters_to_blocks(sb, new_len); @@ -3150,7 +3140,7 @@ static int ocfs2_replace_clusters(handle_t *handle, /*If the old clusters is unwritten, no need to duplicate. */ if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) { - ret = context->cow_duplicate_clusters(handle, context->file, + ret = context->cow_duplicate_clusters(handle, context->inode, cpos, old, new, len); if (ret) { mlog_errno(ret); @@ -3428,35 +3418,12 @@ static int ocfs2_replace_cow(struct ocfs2_cow_context *context) return ret; } -static void ocfs2_readahead_for_cow(struct inode *inode, - struct file *file, - u32 start, u32 len) -{ - struct address_space *mapping; - pgoff_t index; - unsigned long num_pages; - int cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits; - - if (!file) - return; - - mapping = file->f_mapping; - num_pages = (len << cs_bits) >> PAGE_CACHE_SHIFT; - if (!num_pages) - num_pages = 1; - - index = ((loff_t)start << cs_bits) >> PAGE_CACHE_SHIFT; - page_cache_sync_readahead(mapping, &file->f_ra, file, - index, num_pages); -} - /* * Starting at cpos, try to CoW write_len clusters. Don't CoW * past max_cpos. This will stop when it runs into a hole or an * unrefcounted extent. 
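
One small fix worth pausing on is the ocfs2_empty_dir() hunk in the dir.c diff above: a C designated initializer already zeroes every member it does not name, so the memset() that followed it was not redundant but actively harmful, wiping the .ctx.actor that had just been set. A runnable illustration (struct and names are hypothetical):

    #include <stdio.h>
    #include <string.h>

    struct dir_ctx { int (*actor)(int); long pos; };

    static int count_entry(int n) { return n + 1; }

    int main(void)
    {
        /* Fully initialised: the unnamed 'pos' member is zeroed for us. */
        struct dir_ctx ok = { .actor = count_entry };

        /* The removed ocfs2 bug: memset *after* the initializer wipes
         * the one field that mattered. */
        struct dir_ctx broken = { .actor = count_entry };
        memset(&broken, 0, sizeof(broken));

        printf("ok: %d, broken actor is %s\n",
               ok.actor(41), broken.actor ? "set" : "NULL");
        return 0;
    }
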
*/ static int ocfs2_refcount_cow_hunk(struct inode *inode, - struct file *file, struct buffer_head *di_bh, u32 cpos, u32 write_len, u32 max_cpos) { @@ -3485,8 +3452,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode, BUG_ON(cow_len == 0); - ocfs2_readahead_for_cow(inode, file, cow_start, cow_len); - context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS); if (!context) { ret = -ENOMEM; @@ -3508,7 +3473,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode, context->ref_root_bh = ref_root_bh; context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page; context->get_clusters = ocfs2_di_get_clusters; - context->file = file; ocfs2_init_dinode_extent_tree(&context->data_et, INODE_CACHE(inode), di_bh); @@ -3537,7 +3501,6 @@ out: * clusters between cpos and cpos+write_len are safe to modify. */ int ocfs2_refcount_cow(struct inode *inode, - struct file *file, struct buffer_head *di_bh, u32 cpos, u32 write_len, u32 max_cpos) { @@ -3557,7 +3520,7 @@ int ocfs2_refcount_cow(struct inode *inode, num_clusters = write_len; if (ext_flags & OCFS2_EXT_REFCOUNTED) { - ret = ocfs2_refcount_cow_hunk(inode, file, di_bh, cpos, + ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos, num_clusters, max_cpos); if (ret) { mlog_errno(ret); diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h index 7754608c83a4..6422bbcdb525 100644 --- a/fs/ocfs2/refcounttree.h +++ b/fs/ocfs2/refcounttree.h @@ -53,7 +53,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode, int *credits, int *ref_blocks); int ocfs2_refcount_cow(struct inode *inode, - struct file *filep, struct buffer_head *di_bh, + struct buffer_head *di_bh, u32 cpos, u32 write_len, u32 max_cpos); typedef int (ocfs2_post_refcount_func)(struct inode *inode, @@ -85,11 +85,11 @@ int ocfs2_refcount_cow_xattr(struct inode *inode, u32 cpos, u32 write_len, struct ocfs2_post_refcount *post); int ocfs2_duplicate_clusters_by_page(handle_t *handle, - struct file *file, + struct inode *inode, u32 cpos, u32 old_cluster, u32 new_cluster, u32 new_len); int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, - struct file *file, + struct inode *inode, u32 cpos, u32 old_cluster, u32 new_cluster, u32 new_len); int ocfs2_cow_sync_writeback(struct super_block *sb, diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 94441a407337..737e15615b04 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -271,7 +271,7 @@ int proc_readdir_de(struct proc_dir_entry *de, struct file *file, de = next; } while (de); spin_unlock(&proc_subdir_lock); - return 0; + return 1; } int proc_readdir(struct file *file, struct dir_context *ctx) diff --git a/fs/proc/root.c b/fs/proc/root.c index 229e366598da..e0a790da726d 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -205,7 +205,9 @@ static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentr static int proc_root_readdir(struct file *file, struct dir_context *ctx) { if (ctx->pos < FIRST_PROCESS_ENTRY) { - proc_readdir(file, ctx); + int error = proc_readdir(file, ctx); + if (unlikely(error <= 0)) + return error; ctx->pos = FIRST_PROCESS_ENTRY; } diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index dbf61f6174f0..107d026f5d6e 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -730,8 +730,16 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma, * of how soft-dirty works. 
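
The proc changes above give proc_readdir_de() a three-way result so proc_root_readdir() can tell the cases apart: negative for an error, 0 when the consumer stopped the walk early, 1 once all static entries were emitted, and only the last case advances ctx->pos into the PID range. A userspace sketch of that contract (names are illustrative):

    #include <stdio.h>

    /* <0 = error, 0 = consumer full (come back later), 1 = walked all. */
    static int emit_entries(int *cursor, int nentries, int room)
    {
        while (*cursor < nentries) {
            if (room-- <= 0)
                return 0;	/* stopped early: don't advance phase */
            (*cursor)++;	/* "emitted" one entry */
        }
        return 1;		/* all static entries done */
    }

    int main(void)
    {
        int cursor = 0, rc;

        rc = emit_entries(&cursor, 5, 3);
        printf("first pass: rc=%d cursor=%d\n", rc, cursor);	/* 0, 3 */
        rc = emit_entries(&cursor, 5, 3);
        printf("second pass: rc=%d cursor=%d\n", rc, cursor);	/* 1, 5 */
        return 0;
    }
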
*/ pte_t ptent = *pte; - ptent = pte_wrprotect(ptent); - ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY); + + if (pte_present(ptent)) { + ptent = pte_wrprotect(ptent); + ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY); + } else if (is_swap_pte(ptent)) { + ptent = pte_swp_clear_soft_dirty(ptent); + } else if (pte_file(ptent)) { + ptent = pte_file_clear_soft_dirty(ptent); + } + set_pte_at(vma->vm_mm, addr, pte, ptent); #endif } @@ -752,14 +760,15 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); for (; addr != end; pte++, addr += PAGE_SIZE) { ptent = *pte; - if (!pte_present(ptent)) - continue; if (cp->type == CLEAR_REFS_SOFT_DIRTY) { clear_soft_dirty(vma, addr, pte); continue; } + if (!pte_present(ptent)) + continue; + page = vm_normal_page(vma, addr, ptent); if (!page) continue; @@ -859,7 +868,7 @@ typedef struct { } pagemap_entry_t; struct pagemapread { - int pos, len; + int pos, len; /* units: PM_ENTRY_BYTES, not bytes */ pagemap_entry_t *buffer; bool v2; }; @@ -867,7 +876,7 @@ struct pagemapread { #define PAGEMAP_WALK_SIZE (PMD_SIZE) #define PAGEMAP_WALK_MASK (PMD_MASK) -#define PM_ENTRY_BYTES sizeof(u64) +#define PM_ENTRY_BYTES sizeof(pagemap_entry_t) #define PM_STATUS_BITS 3 #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS) #define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET) @@ -930,8 +939,10 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, flags = PM_PRESENT; page = vm_normal_page(vma, addr, pte); } else if (is_swap_pte(pte)) { - swp_entry_t entry = pte_to_swp_entry(pte); - + swp_entry_t entry; + if (pte_swp_soft_dirty(pte)) + flags2 |= __PM_SOFT_DIRTY; + entry = pte_to_swp_entry(pte); frame = swp_type(entry) | (swp_offset(entry) << MAX_SWAPFILES_SHIFT); flags = PM_SWAP; @@ -1116,8 +1127,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, goto out_task; pm.v2 = soft_dirty_cleared; - pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); - pm.buffer = kmalloc(pm.len, GFP_TEMPORARY); + pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); + pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY); ret = -ENOMEM; if (!pm.buffer) goto out_task; diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 2f47ade1b567..0807ddf97b05 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -417,6 +417,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) { return pmd; } + +static inline pte_t pte_swp_mksoft_dirty(pte_t pte) +{ + return pte; +} + +static inline int pte_swp_soft_dirty(pte_t pte) +{ + return 0; +} + +static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) +{ + return pte; +} + +static inline pte_t pte_file_clear_soft_dirty(pte_t pte) +{ + return pte; +} + +static inline pte_t pte_file_mksoft_dirty(pte_t pte) +{ + return pte; +} + +static inline int pte_file_soft_dirty(pte_t pte) +{ + return 0; +} #endif #ifndef __HAVE_PFNMAP_TRACKING diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 13821c339a41..5672d7ea1fa0 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -112,7 +112,7 @@ struct mmu_gather { #define HAVE_GENERIC_MMU_GATHER -void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm); +void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end); void tlb_flush_mmu(struct mmu_gather *tlb); void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end); diff 
--git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 737685e9e852..68029b30c3dc 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -309,21 +309,20 @@ struct mlx5_hca_cap { __be16 max_desc_sz_rq; u8 rsvd21[2]; __be16 max_desc_sz_sq_dc; - u8 rsvd22[4]; - __be16 max_qp_mcg; - u8 rsvd23; + __be32 max_qp_mcg; + u8 rsvd22[3]; u8 log_max_mcg; - u8 rsvd24; + u8 rsvd23; u8 log_max_pd; - u8 rsvd25; + u8 rsvd24; u8 log_max_xrcd; - u8 rsvd26[42]; + u8 rsvd25[42]; __be16 log_uar_page_sz; - u8 rsvd27[28]; + u8 rsvd26[28]; u8 log_msx_atomic_size_qp; - u8 rsvd28[2]; + u8 rsvd27[2]; u8 log_msx_atomic_size_dc; - u8 rsvd29[76]; + u8 rsvd28[76]; }; @@ -472,9 +471,8 @@ struct mlx5_eqe_cmd { struct mlx5_eqe_page_req { u8 rsvd0[2]; __be16 func_id; - u8 rsvd1[2]; - __be16 num_pages; - __be32 rsvd2[5]; + __be32 num_pages; + __be32 rsvd1[5]; }; union ev_data { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 2aa258b0ced1..8888381fc150 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -358,7 +358,7 @@ struct mlx5_caps { u32 reserved_lkey; u8 local_ca_ack_delay; u8 log_max_mcg; - u16 max_qp_mcg; + u32 max_qp_mcg; int min_page_sz; }; @@ -691,7 +691,7 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); int mlx5_pagealloc_start(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, - s16 npages); + s32 npages); int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); void mlx5_register_debugfs(void); @@ -731,9 +731,6 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); -typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size); -int mlx5_register_health_report_handler(health_handler_t handler); -void mlx5_unregister_health_report_handler(void); const char *mlx5_command_str(int command); int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index fb425aa16c01..faf4b7c1ad12 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -332,6 +332,7 @@ struct mm_struct { unsigned long pgoff, unsigned long flags); #endif unsigned long mmap_base; /* base of mmap area */ + unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */ unsigned long task_size; /* size of task vm space */ unsigned long highest_vm_end; /* highest vma end address */ pgd_t * pgd; diff --git a/include/linux/sched.h b/include/linux/sched.h index d722490da030..078066daffd4 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1532,6 +1532,8 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk) * Test if a process is not yet dead (at most zombie state) * If pid_alive fails, then pointers within the task structure * can be stale and must not be dereferenced. + * + * Return: 1 if the process is alive. 0 otherwise. */ static inline int pid_alive(struct task_struct *p) { @@ -1543,6 +1545,8 @@ static inline int pid_alive(struct task_struct *p) * @tsk: Task structure to be checked. * * Check if a task structure is the first user space task the kernel created. + * + * Return: 1 if the task structure is init. 0 otherwise. 
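
The mlx5 hunks above show the discipline for firmware-shared structs: when num_pages and max_qp_mcg grow from __be16 to __be32, the adjacent reserved arrays shrink by the same number of bytes so every later offset, and the total size, still match what the device writes. A hedged sketch of the page-request event shape (field names and offsets here are illustrative, not the exact ABI):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct demo_page_req {
        u8     rsvd0[2];
        __be16 func_id;
        __be32 num_pages;	/* was: u8 rsvd[2] + __be16 num_pages */
        __be32 rsvd1[5];
    };	/* total size unchanged: 28 bytes */

    static s32 demo_num_pages(const struct demo_page_req *req)
    {
        /* sign matters: a negative count means firmware reclaiming pages,
         * which is why the driver-side type widens to s32 as well */
        return (s32)be32_to_cpu(req->num_pages);
    }
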
*/ static inline int is_global_init(struct task_struct *tsk) { @@ -1894,6 +1898,8 @@ extern struct task_struct *idle_task(int cpu); /** * is_idle_task - is the specified task an idle task? * @p: the task in question. + * + * Return: 1 if @p is an idle task. 0 otherwise. */ static inline bool is_idle_task(const struct task_struct *p) { diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 7d537ced949a..75f34949d9ab 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -117,9 +117,17 @@ do { \ #endif /*arch_spin_is_contended*/ #endif -/* The lock does not imply full memory barrier. */ -#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK -static inline void smp_mb__after_lock(void) { smp_mb(); } +/* + * Despite its name it doesn't necessarily have to be a full barrier. + * It should only guarantee that a STORE before the critical section + * can not be reordered with a LOAD inside this section. + * spin_lock() is the one-way barrier, this LOAD can not escape out + * of the region. So the default implementation simply ensures that + * a STORE can not move into the critical section, smp_wmb() should + * serialize it with another STORE done by spin_lock(). + */ +#ifndef smp_mb__before_spinlock +#define smp_mb__before_spinlock() smp_wmb() #endif /** diff --git a/include/linux/swapops.h b/include/linux/swapops.h index c5fd30d2a415..8d4fa82bfb91 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -67,6 +67,8 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte) swp_entry_t arch_entry; BUG_ON(pte_file(pte)); + if (pte_swp_soft_dirty(pte)) + pte = pte_swp_clear_soft_dirty(pte); arch_entry = __pte_to_swp_entry(pte); return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); } diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 4147d700a293..84662ecc7b51 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -802,9 +802,14 @@ asmlinkage long sys_vfork(void); asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int, int __user *); #else +#ifdef CONFIG_CLONE_BACKWARDS3 +asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *, + int __user *, int); +#else asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int __user *, int); #endif +#endif asmlinkage long sys_execve(const char __user *filename, const char __user *const __user *argv, diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index f18b91966d3d..8a358a2c97e6 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -122,7 +122,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock) if (rc > 0) /* local bh are disabled so it is ok to use _BH */ NET_ADD_STATS_BH(sock_net(sk), - LINUX_MIB_LOWLATENCYRXPACKETS, rc); + LINUX_MIB_BUSYPOLLRXPACKETS, rc); } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && !need_resched() && !busy_loop_timeout(end_time)); @@ -162,11 +162,6 @@ static inline bool sk_can_busy_loop(struct sock *sk) return false; } -static inline bool sk_busy_poll(struct sock *sk, int nonblock) -{ - return false; -} - static inline void skb_mark_napi_id(struct sk_buff *skb, struct napi_struct *napi) { diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 781b3cf86a2f..a354db5b7662 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -145,20 +145,6 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph, return INET_ECN_encapsulate(tos, inner); } -static inline void tunnel_ip_select_ident(struct sk_buff *skb, - 
const struct iphdr *old_iph, - struct dst_entry *dst) -{ - struct iphdr *iph = ip_hdr(skb); - - /* Use inner packet iph-id if possible. */ - if (skb->protocol == htons(ETH_P_IP) && old_iph->id) - iph->id = old_iph->id; - else - __ip_select_ident(iph, dst, - (skb_shinfo(skb)->gso_segs ?: 1) - 1); -} - int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); int iptunnel_xmit(struct net *net, struct rtable *rt, struct sk_buff *skb, diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 6eab63363e59..e5ae0c50fa9c 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -683,13 +683,19 @@ struct psched_ratecfg { u64 rate_bytes_ps; /* bytes per second */ u32 mult; u16 overhead; + u8 linklayer; u8 shift; }; static inline u64 psched_l2t_ns(const struct psched_ratecfg *r, unsigned int len) { - return ((u64)(len + r->overhead) * r->mult) >> r->shift; + len += r->overhead; + + if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) + return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift; + + return ((u64)len * r->mult) >> r->shift; } extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf); @@ -700,6 +706,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res, memset(res, 0, sizeof(*res)); res->rate = r->rate_bytes_ps; res->overhead = r->overhead; + res->linklayer = (r->linklayer & TC_LINKLAYER_MASK); } #endif diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index dbd71b0c7d8c..09d62b9228ff 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -73,9 +73,17 @@ struct tc_estimator { #define TC_H_ROOT (0xFFFFFFFFU) #define TC_H_INGRESS (0xFFFFFFF1U) +/* Need to correspond to iproute2 tc/tc_core.h "enum link_layer" */ +enum tc_link_layer { + TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */ + TC_LINKLAYER_ETHERNET, + TC_LINKLAYER_ATM, +}; +#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */ + struct tc_ratespec { unsigned char cell_log; - unsigned char __reserved; + __u8 linklayer; /* lower 4 bits */ unsigned short overhead; short cell_align; unsigned short mpu; diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index af0a674cc677..a1356d3b54df 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -253,7 +253,7 @@ enum LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */ LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */ LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */ - LINUX_MIB_LOWLATENCYRXPACKETS, /* LowLatencyRxPackets */ + LINUX_MIB_BUSYPOLLRXPACKETS, /* BusyPollRxPackets */ __LINUX_MIB_MAX }; diff --git a/kernel/cpuset.c b/kernel/cpuset.c index e5657788fedd..010a0083c0ae 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1608,11 +1608,13 @@ static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val) { struct cpuset *cs = cgroup_cs(cgrp); cpuset_filetype_t type = cft->private; - int retval = -ENODEV; + int retval = 0; mutex_lock(&cpuset_mutex); - if (!is_cpuset_online(cs)) + if (!is_cpuset_online(cs)) { + retval = -ENODEV; goto out_unlock; + } switch (type) { case FILE_CPU_EXCLUSIVE: diff --git a/kernel/fork.c b/kernel/fork.c index 403d2bb8a968..e23bb19e2a3e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1679,6 +1679,12 @@ SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, int, tls_val) +#elif 
defined(CONFIG_CLONE_BACKWARDS3) +SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp, + int, stack_size, + int __user *, parent_tidptr, + int __user *, child_tidptr, + int, tls_val) #else SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, int __user *, parent_tidptr, diff --git a/kernel/mutex.c b/kernel/mutex.c index ff05f4bd86eb..a52ee7bb830d 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -686,7 +686,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) might_sleep(); ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0, &ctx->dep_map, _RET_IP_, ctx); - if (!ret && ctx->acquired > 0) + if (!ret && ctx->acquired > 1) return ww_mutex_deadlock_injection(lock, ctx); return ret; @@ -702,7 +702,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0, &ctx->dep_map, _RET_IP_, ctx); - if (!ret && ctx->acquired > 0) + if (!ret && ctx->acquired > 1) return ww_mutex_deadlock_injection(lock, ctx); return ret; diff --git a/kernel/power/qos.c b/kernel/power/qos.c index 06fe28589e9c..a394297f8b2f 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c @@ -296,6 +296,17 @@ int pm_qos_request_active(struct pm_qos_request *req) } EXPORT_SYMBOL_GPL(pm_qos_request_active); +static void __pm_qos_update_request(struct pm_qos_request *req, + s32 new_value) +{ + trace_pm_qos_update_request(req->pm_qos_class, new_value); + + if (new_value != req->node.prio) + pm_qos_update_target( + pm_qos_array[req->pm_qos_class]->constraints, + &req->node, PM_QOS_UPDATE_REQ, new_value); +} + /** * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout * @work: work struct for the delayed work (timeout) @@ -308,7 +319,7 @@ static void pm_qos_work_fn(struct work_struct *work) struct pm_qos_request, work); - pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE); + __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE); } /** @@ -364,12 +375,7 @@ void pm_qos_update_request(struct pm_qos_request *req, } cancel_delayed_work_sync(&req->work); - - trace_pm_qos_update_request(req->pm_qos_class, new_value); - if (new_value != req->node.prio) - pm_qos_update_target( - pm_qos_array[req->pm_qos_class]->constraints, - &req->node, PM_QOS_UPDATE_REQ, new_value); + __pm_qos_update_request(req, new_value); } EXPORT_SYMBOL_GPL(pm_qos_update_request); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b7c32cb7bfeb..05c39f030314 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -933,6 +933,8 @@ static int effective_prio(struct task_struct *p) /** * task_curr - is this task currently executing on a CPU? * @p: the task in question. + * + * Return: 1 if the task is currently executing. 0 otherwise. */ inline int task_curr(const struct task_struct *p) { @@ -1482,7 +1484,7 @@ static void ttwu_queue(struct task_struct *p, int cpu) * the simpler "current->state = TASK_RUNNING" to mark yourself * runnable without the overhead of this. * - * Returns %true if @p was woken up, %false if it was already running + * Return: %true if @p was woken up, %false if it was already running * or @state didn't match @p's state. */ static int @@ -1491,7 +1493,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) { unsigned long flags; int cpu, success = 0; - smp_wmb(); + /* + * If we are going to wake up a thread waiting for CONDITION we + * need to ensure that CONDITION=1 done by the caller can not be + * reordered with p->state check below. 
This pairs with mb() in + * set_current_state() the waiting thread does. + */ + smp_mb__before_spinlock(); raw_spin_lock_irqsave(&p->pi_lock, flags); if (!(p->state & state)) goto out; @@ -1577,8 +1585,9 @@ out: * @p: The process to be woken up. * * Attempt to wake up the nominated process and move it to the set of runnable - * processes. Returns 1 if the process was woken up, 0 if it was already - * running. + * processes. + * + * Return: 1 if the process was woken up, 0 if it was already running. * * It may be assumed that this function implies a write memory barrier before * changing the task state if and only if any tasks are woken up. @@ -2191,6 +2200,8 @@ void scheduler_tick(void) * This makes sure that uptime, CFS vruntime, load * balancing, etc... continue to move forward, even * with a very low granularity. + * + * Return: Maximum deferment in nanoseconds. */ u64 scheduler_tick_max_deferment(void) { @@ -2394,6 +2405,12 @@ need_resched: if (sched_feat(HRTICK)) hrtick_clear(rq); + /* + * Make sure that signal_pending_state()->signal_pending() below + * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) + * done by the caller to avoid the race with signal_wake_up(). + */ + smp_mb__before_spinlock(); raw_spin_lock_irq(&rq->lock); switch_count = &prev->nivcsw; @@ -2796,8 +2813,8 @@ EXPORT_SYMBOL(wait_for_completion); * specified timeout to expire. The timeout is in jiffies. It is not * interruptible. * - * The return value is 0 if timed out, and positive (at least 1, or number of - * jiffies left till timeout) if completed. + * Return: 0 if timed out, and positive (at least 1, or number of jiffies left + * till timeout) if completed. */ unsigned long __sched wait_for_completion_timeout(struct completion *x, unsigned long timeout) @@ -2829,8 +2846,8 @@ EXPORT_SYMBOL(wait_for_completion_io); * specified timeout to expire. The timeout is in jiffies. It is not * interruptible. The caller is accounted as waiting for IO. * - * The return value is 0 if timed out, and positive (at least 1, or number of - * jiffies left till timeout) if completed. + * Return: 0 if timed out, and positive (at least 1, or number of jiffies left + * till timeout) if completed. */ unsigned long __sched wait_for_completion_io_timeout(struct completion *x, unsigned long timeout) @@ -2846,7 +2863,7 @@ EXPORT_SYMBOL(wait_for_completion_io_timeout); * This waits for completion of a specific task to be signaled. It is * interruptible. * - * The return value is -ERESTARTSYS if interrupted, 0 if completed. + * Return: -ERESTARTSYS if interrupted, 0 if completed. */ int __sched wait_for_completion_interruptible(struct completion *x) { @@ -2865,8 +2882,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible); * This waits for either a completion of a specific task to be signaled or for a * specified timeout to expire. It is interruptible. The timeout is in jiffies. * - * The return value is -ERESTARTSYS if interrupted, 0 if timed out, - * positive (at least 1, or number of jiffies left till timeout) if completed. + * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1, + * or number of jiffies left till timeout) if completed. */ long __sched wait_for_completion_interruptible_timeout(struct completion *x, @@ -2883,7 +2900,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); * This waits to be signaled for completion of a specific task. It can be * interrupted by a kill signal. * - * The return value is -ERESTARTSYS if interrupted, 0 if completed. 
+ * Return: -ERESTARTSYS if interrupted, 0 if completed. */ int __sched wait_for_completion_killable(struct completion *x) { @@ -2903,8 +2920,8 @@ EXPORT_SYMBOL(wait_for_completion_killable); * signaled or for a specified timeout to expire. It can be * interrupted by a kill signal. The timeout is in jiffies. * - * The return value is -ERESTARTSYS if interrupted, 0 if timed out, - * positive (at least 1, or number of jiffies left till timeout) if completed. + * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1, + * or number of jiffies left till timeout) if completed. */ long __sched wait_for_completion_killable_timeout(struct completion *x, @@ -2918,7 +2935,7 @@ EXPORT_SYMBOL(wait_for_completion_killable_timeout); * try_wait_for_completion - try to decrement a completion without blocking * @x: completion structure * - * Returns: 0 if a decrement cannot be done without blocking + * Return: 0 if a decrement cannot be done without blocking * 1 if a decrement succeeded. * * If a completion is being used as a counting completion, @@ -2945,7 +2962,7 @@ EXPORT_SYMBOL(try_wait_for_completion); * completion_done - Test to see if a completion has any waiters * @x: completion structure * - * Returns: 0 if there are waiters (wait_for_completion() in progress) + * Return: 0 if there are waiters (wait_for_completion() in progress) * 1 if there are no waiters. * */ @@ -3182,7 +3199,7 @@ SYSCALL_DEFINE1(nice, int, increment) * task_prio - return the priority value of a given task. * @p: the task in question. * - * This is the priority value as seen by users in /proc. + * Return: The priority value as seen by users in /proc. * RT tasks are offset by -200. Normal tasks are centered * around 0, value goes from -16 to +15. */ @@ -3194,6 +3211,8 @@ int task_prio(const struct task_struct *p) /** * task_nice - return the nice value of a given task. * @p: the task in question. + * + * Return: The nice value [ -20 ... 0 ... 19 ]. */ int task_nice(const struct task_struct *p) { @@ -3204,6 +3223,8 @@ EXPORT_SYMBOL(task_nice); /** * idle_cpu - is a given cpu idle currently? * @cpu: the processor in question. + * + * Return: 1 if the CPU is currently idle. 0 otherwise. */ int idle_cpu(int cpu) { @@ -3226,6 +3247,8 @@ int idle_cpu(int cpu) /** * idle_task - return the idle task for a given cpu. * @cpu: the processor in question. + * + * Return: The idle task for the cpu @cpu. */ struct task_struct *idle_task(int cpu) { @@ -3235,6 +3258,8 @@ struct task_struct *idle_task(int cpu) /** * find_process_by_pid - find a process with a matching PID value. * @pid: the pid in question. + * + * Return: The task of @pid, if found. %NULL otherwise. */ static struct task_struct *find_process_by_pid(pid_t pid) { @@ -3432,6 +3457,8 @@ recheck: * @policy: new policy. * @param: structure containing the new RT priority. * + * Return: 0 on success. An error code otherwise. + * * NOTE that the task may be already dead. */ int sched_setscheduler(struct task_struct *p, int policy, @@ -3451,6 +3478,8 @@ EXPORT_SYMBOL_GPL(sched_setscheduler); * current context has permission. For example, this is needed in * stop_machine(): we create temporary high priority worker threads, * but our caller might not have that capability. + * + * Return: 0 on success. An error code otherwise. */ int sched_setscheduler_nocheck(struct task_struct *p, int policy, const struct sched_param *param) @@ -3485,6 +3514,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) * @pid: the pid in question. * @policy: new policy. 
* @param: structure containing the new RT priority. + * + * Return: 0 on success. An error code otherwise. */ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param) @@ -3500,6 +3531,8 @@ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, * sys_sched_setparam - set/change the RT priority of a thread * @pid: the pid in question. * @param: structure containing the new RT priority. + * + * Return: 0 on success. An error code otherwise. */ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) { @@ -3509,6 +3542,9 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) /** * sys_sched_getscheduler - get the policy (scheduling class) of a thread * @pid: the pid in question. + * + * Return: On success, the policy of the thread. Otherwise, a negative error + * code. */ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) { @@ -3535,6 +3571,9 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) * sys_sched_getparam - get the RT priority of a thread * @pid: the pid in question. * @param: structure containing the RT priority. + * + * Return: On success, 0 and the RT priority is in @param. Otherwise, an error + * code. */ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) { @@ -3659,6 +3698,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, * @pid: pid of the process * @len: length in bytes of the bitmask pointed to by user_mask_ptr * @user_mask_ptr: user-space pointer to the new cpu mask + * + * Return: 0 on success. An error code otherwise. */ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, unsigned long __user *, user_mask_ptr) @@ -3710,6 +3751,8 @@ out_unlock: * @pid: pid of the process * @len: length in bytes of the bitmask pointed to by user_mask_ptr * @user_mask_ptr: user-space pointer to hold the current cpu mask + * + * Return: 0 on success. An error code otherwise. */ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, unsigned long __user *, user_mask_ptr) @@ -3744,6 +3787,8 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, * * This function yields the current CPU to other tasks. If there are no * other threads running on this CPU then this function will return. + * + * Return: 0. */ SYSCALL_DEFINE0(sched_yield) { @@ -3869,7 +3914,7 @@ EXPORT_SYMBOL(yield); * It's the caller's job to ensure that the target task struct * can't go away on us before we can do any checks. * - * Returns: + * Return: * true (>0) if we indeed boosted the target task. * false (0) if we failed to boost the target. * -ESRCH if there's no task to yield to. @@ -3972,8 +4017,9 @@ long __sched io_schedule_timeout(long timeout) * sys_sched_get_priority_max - return maximum RT priority. * @policy: scheduling class. * - * this syscall returns the maximum rt_priority that can be used - * by a given scheduling class. + * Return: On success, this syscall returns the maximum + * rt_priority that can be used by a given scheduling class. + * On failure, a negative error code is returned. */ SYSCALL_DEFINE1(sched_get_priority_max, int, policy) { @@ -3997,8 +4043,9 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy) * sys_sched_get_priority_min - return minimum RT priority. * @policy: scheduling class. * - * this syscall returns the minimum rt_priority that can be used - * by a given scheduling class. + * Return: On success, this syscall returns the minimum + * rt_priority that can be used by a given scheduling class. 
+ * On failure, a negative error code is returned. */ SYSCALL_DEFINE1(sched_get_priority_min, int, policy) { @@ -4024,6 +4071,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy) * * this syscall writes the default timeslice value of a given process * into the user-space timespec buffer. A value of '0' means infinity. + * + * Return: On success, 0 and the timeslice is in @interval. Otherwise, + * an error code. */ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, struct timespec __user *, interval) @@ -6632,6 +6682,8 @@ void normalize_rt_tasks(void) * @cpu: the processor in question. * * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! + * + * Return: The current task for @cpu. */ struct task_struct *curr_task(int cpu) { diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c index 1095e878a46f..8b836b376d91 100644 --- a/kernel/sched/cpupri.c +++ b/kernel/sched/cpupri.c @@ -62,7 +62,7 @@ static int convert_prio(int prio) * any discrepancies created by racing against the uncertainty of the current * priority configuration. * - * Returns: (int)bool - CPUs were found + * Return: (int)bool - CPUs were found */ int cpupri_find(struct cpupri *cp, struct task_struct *p, struct cpumask *lowest_mask) @@ -203,7 +203,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) * cpupri_init - initialize the cpupri structure * @cp: The cpupri context * - * Returns: -ENOMEM if memory fails. + * Return: -ENOMEM on memory allocation failure. */ int cpupri_init(struct cpupri *cp) { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 9565645e3202..68f1609ca149 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2032,6 +2032,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) */ update_entity_load_avg(curr, 1); update_cfs_rq_blocked_load(cfs_rq, 1); + update_cfs_shares(cfs_rq); #ifdef CONFIG_SCHED_HRTICK /* @@ -4280,6 +4281,8 @@ struct sg_lb_stats { * get_sd_load_idx - Obtain the load index for a given sched domain. * @sd: The sched_domain whose load_idx is to be obtained. * @idle: The Idle status of the CPU for whose sd load_idx is obtained. + * + * Return: The load index. */ static inline int get_sd_load_idx(struct sched_domain *sd, enum cpu_idle_type idle) @@ -4574,6 +4577,9 @@ static inline void update_sg_lb_stats(struct lb_env *env, * * Determine if @sg is a busier group than the previously selected * busiest group. + * + * Return: %true if @sg is a busier group than the previously selected + * busiest group. %false otherwise. */ static bool update_sd_pick_busiest(struct lb_env *env, struct sd_lb_stats *sds, @@ -4691,7 +4697,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, * assuming lower CPU number will be equivalent to lower a SMT thread * number. * - * Returns 1 when packing is required and a task should be moved to + * Return: 1 when packing is required and a task should be moved to * this CPU. The amount of the imbalance is returned in *imbalance. * * @env: The load balancing environment. @@ -4869,7 +4875,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s * @balance: Pointer to a variable indicating if this_cpu * is the appropriate cpu to perform load balancing at this_level. * - * Returns: - the busiest group if imbalance exists. + * Return: - The busiest group if imbalance exists. * - If no imbalance and user has opted for power-savings balance, * return the least loaded group whose CPUs can be * put to idle by rebalancing its tasks onto our group. 
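The scheduler hunks above repeatedly rewrite free-form "Returns ..." prose into a structured "Return:" section, which scripts/kernel-doc recognizes as a dedicated heading and renders as a separate return-value section; the older variants are simply folded into the description. A minimal sketch of the convention these hunks converge on (the function name, register layout, and ready bit here are illustrative assumptions, not code from this patch):

/**
 * example_dev_ready - test whether an example device is ready
 * @status: raw status register value
 *
 * Checks the ready bit in @status. The device and bit layout are made up
 * for illustration; only the kernel-doc comment format mirrors the patch.
 *
 * Return: 1 if the device is ready. 0 otherwise.
 */
static inline int example_dev_ready(unsigned int status)
{
	/* Bit 0 is assumed to be the ready flag in this sketch. */
	return (status & 0x1) ? 1 : 0;
}

The structure is otherwise identical to a normal kernel-doc block: a one-line summary, @parameter descriptions, free-form body text, and the literal "Return:" heading last.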
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c index a326f27d7f09..0b479a6a22bb 100644 --- a/kernel/time/sched_clock.c +++ b/kernel/time/sched_clock.c @@ -121,7 +121,7 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate) BUG_ON(bits > 32); WARN_ON(!irqs_disabled()); read_sched_clock = read; - sched_clock_mask = (1 << bits) - 1; + sched_clock_mask = (1ULL << bits) - 1; cd.rate = rate; /* calculate the mult/shift to convert counter ticks to ns. */ diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index e77edc97e036..e8a1516cc0a3 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -182,7 +182,8 @@ static bool can_stop_full_tick(void) * Don't allow the user to think they can get * full NO_HZ with this machine. */ - WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock"); + WARN_ONCE(have_nohz_full_mask, + "NO_HZ FULL will not work with unstable sched clock"); return false; } #endif @@ -343,8 +344,6 @@ static int tick_nohz_init_all(void) void __init tick_nohz_init(void) { - int cpu; - if (!have_nohz_full_mask) { if (tick_nohz_init_all() < 0) return; diff --git a/kernel/wait.c b/kernel/wait.c index dec68bd4e9d8..d550920e040c 100644 --- a/kernel/wait.c +++ b/kernel/wait.c @@ -363,8 +363,7 @@ EXPORT_SYMBOL(out_of_line_wait_on_atomic_t); /** * wake_up_atomic_t - Wake up a waiter on an atomic_t - * @word: The word being waited on, a kernel virtual address - * @bit: The bit of the word being waited on + * @p: The atomic_t being waited on, a kernel virtual address * * Wake up anyone waiting for the atomic_t to go to zero. * diff --git a/mm/fremap.c b/mm/fremap.c index 87da3590c61e..5bff08147768 100644 --- a/mm/fremap.c +++ b/mm/fremap.c @@ -57,17 +57,22 @@ static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot) { int err = -ENOMEM; - pte_t *pte; + pte_t *pte, ptfile; spinlock_t *ptl; pte = get_locked_pte(mm, addr, &ptl); if (!pte) goto out; - if (!pte_none(*pte)) + ptfile = pgoff_to_pte(pgoff); + + if (!pte_none(*pte)) { + if (pte_present(*pte) && pte_soft_dirty(*pte)) + pte_file_mksoft_dirty(ptfile); zap_pte(mm, vma, addr, pte); + } - set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff)); + set_pte_at(mm, addr, pte, ptfile); /* * We don't need to run update_mmu_cache() here because the "file pte" * being installed by install_file_pte() is not a real pte - it's a diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 83aff0a4d093..b60f33080a28 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2490,7 +2490,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, mm = vma->vm_mm; - tlb_gather_mmu(&tlb, mm, 0); + tlb_gather_mmu(&tlb, mm, start, end); __unmap_hugepage_range(&tlb, vma, start, end, ref_page); tlb_finish_mmu(&tlb, start, end); } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c290a1cf3862..c5792a5d87ce 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3195,11 +3195,11 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, if (!s->memcg_params) return -ENOMEM; - INIT_WORK(&s->memcg_params->destroy, - kmem_cache_destroy_work_func); if (memcg) { s->memcg_params->memcg = memcg; s->memcg_params->root_cache = root_cache; + INIT_WORK(&s->memcg_params->destroy, + kmem_cache_destroy_work_func); } else s->memcg_params->is_root_cache = true; diff --git a/mm/memory.c b/mm/memory.c index 1ce2e2a734fc..af84bc0ec17c 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -209,14 +209,15 @@ static int 
tlb_next_batch(struct mmu_gather *tlb) * tear-down from @mm. The @fullmm argument is used when @mm is without * users and we're going to destroy the full address space (exit/execve). */ -void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm) +void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) { tlb->mm = mm; - tlb->fullmm = fullmm; + /* Is it from 0 to ~0? */ + tlb->fullmm = !(start | (end+1)); tlb->need_flush_all = 0; - tlb->start = -1UL; - tlb->end = 0; + tlb->start = start; + tlb->end = end; tlb->need_flush = 0; tlb->local.next = NULL; tlb->local.nr = 0; @@ -256,8 +257,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e { struct mmu_gather_batch *batch, *next; - tlb->start = start; - tlb->end = end; tlb_flush_mmu(tlb); /* keep the page table cache within bounds */ @@ -1099,7 +1098,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, spinlock_t *ptl; pte_t *start_pte; pte_t *pte; - unsigned long range_start = addr; again: init_rss_vec(rss); @@ -1141,9 +1139,12 @@ again: continue; if (unlikely(details) && details->nonlinear_vma && linear_page_index(details->nonlinear_vma, - addr) != page->index) - set_pte_at(mm, addr, pte, - pgoff_to_pte(page->index)); + addr) != page->index) { + pte_t ptfile = pgoff_to_pte(page->index); + if (pte_soft_dirty(ptent)) + pte_file_mksoft_dirty(ptfile); + set_pte_at(mm, addr, pte, ptfile); + } if (PageAnon(page)) rss[MM_ANONPAGES]--; else { @@ -1202,17 +1203,25 @@ again: * and page-free while holding it. */ if (force_flush) { + unsigned long old_end; + force_flush = 0; -#ifdef HAVE_GENERIC_MMU_GATHER - tlb->start = range_start; + /* + * Flush the TLB just for the previous segment, + * then update the range to be the remaining + * TLB range. 
+ */ + old_end = tlb->end; tlb->end = addr; -#endif + tlb_flush_mmu(tlb); - if (addr != end) { - range_start = addr; + + tlb->start = addr; + tlb->end = old_end; + + if (addr != end) goto again; - } } return addr; @@ -1397,7 +1406,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long end = start + size; lru_add_drain(); - tlb_gather_mmu(&tlb, mm, 0); + tlb_gather_mmu(&tlb, mm, start, end); update_hiwater_rss(mm); mmu_notifier_invalidate_range_start(mm, start, end); for ( ; vma && vma->vm_start < end; vma = vma->vm_next) @@ -1423,7 +1432,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr unsigned long end = address + size; lru_add_drain(); - tlb_gather_mmu(&tlb, mm, 0); + tlb_gather_mmu(&tlb, mm, address, end); update_hiwater_rss(mm); mmu_notifier_invalidate_range_start(mm, address, end); unmap_single_vma(&tlb, vma, address, end, details); @@ -3115,6 +3124,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, exclusive = 1; } flush_icache_page(vma, page); + if (pte_swp_soft_dirty(orig_pte)) + pte = pte_mksoft_dirty(pte); set_pte_at(mm, address, page_table, pte); if (page == swapcache) do_page_add_anon_rmap(page, vma, address, exclusive); @@ -3408,6 +3419,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, entry = mk_pte(page, vma->vm_page_prot); if (flags & FAULT_FLAG_WRITE) entry = maybe_mkwrite(pte_mkdirty(entry), vma); + else if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte)) + pte_mksoft_dirty(entry); if (anon) { inc_mm_counter_fast(mm, MM_ANONPAGES); page_add_new_anon_rmap(page, vma, address); diff --git a/mm/mmap.c b/mm/mmap.c index 1edbaa3136c3..f9c97d10b873 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2336,7 +2336,7 @@ static void unmap_region(struct mm_struct *mm, struct mmu_gather tlb; lru_add_drain(); - tlb_gather_mmu(&tlb, mm, 0); + tlb_gather_mmu(&tlb, mm, start, end); update_hiwater_rss(mm); unmap_vmas(&tlb, vma, start, end); free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, @@ -2709,7 +2709,7 @@ void exit_mmap(struct mm_struct *mm) lru_add_drain(); flush_cache_mm(mm); - tlb_gather_mmu(&tlb, mm, 1); + tlb_gather_mmu(&tlb, mm, 0, -1); /* update_hiwater_rss(mm) here? but nobody should be looking */ /* Use -1 here to ensure all VMAs in the mm are unmapped */ unmap_vmas(&tlb, vma, 0, -1); diff --git a/mm/rmap.c b/mm/rmap.c index cd356df4f71a..b2e29acd7e3d 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1236,6 +1236,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, swp_entry_to_pte(make_hwpoison_entry(page))); } else if (PageAnon(page)) { swp_entry_t entry = { .val = page_private(page) }; + pte_t swp_pte; if (PageSwapCache(page)) { /* @@ -1264,7 +1265,10 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION); entry = make_migration_entry(page, pte_write(pteval)); } - set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); + swp_pte = swp_entry_to_pte(entry); + if (pte_soft_dirty(pteval)) + swp_pte = pte_swp_mksoft_dirty(swp_pte); + set_pte_at(mm, address, pte, swp_pte); BUG_ON(pte_file(*pte)); } else if (IS_ENABLED(CONFIG_MIGRATION) && (TTU_ACTION(flags) == TTU_MIGRATION)) { @@ -1401,8 +1405,12 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount, pteval = ptep_clear_flush(vma, address, pte); /* If nonlinear, store the file page offset in the pte. 
*/ - if (page->index != linear_page_index(vma, address)) - set_pte_at(mm, address, pte, pgoff_to_pte(page->index)); + if (page->index != linear_page_index(vma, address)) { + pte_t ptfile = pgoff_to_pte(page->index); + if (pte_soft_dirty(pteval)) + pte_file_mksoft_dirty(ptfile); + set_pte_at(mm, address, pte, ptfile); + } /* Move the dirty bit to the physical page now the pte is gone. */ if (pte_dirty(pteval)) diff --git a/mm/swapfile.c b/mm/swapfile.c index 36af6eeaa67e..6cf2e60983b7 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -866,6 +866,21 @@ unsigned int count_swap_pages(int type, int free) } #endif /* CONFIG_HIBERNATION */ +static inline int maybe_same_pte(pte_t pte, pte_t swp_pte) +{ +#ifdef CONFIG_MEM_SOFT_DIRTY + /* + * When the pte keeps the soft dirty bit, the pte generated + * from the swap entry does not have it; still it's the same + * pte from a logical point of view. + */ + pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte); + return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty); +#else + return pte_same(pte, swp_pte); +#endif +} + /* * No need to decide whether this PTE shares the swap entry with others, * just let do_wp_page work it out if a write is requested later - to @@ -892,7 +907,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, } pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); - if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) { + if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) { mem_cgroup_cancel_charge_swapin(memcg); ret = 0; goto out; @@ -947,7 +962,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, * swapoff spends a _lot_ of time in this loop! * Test inline before going to call unuse_pte. */ - if (unlikely(pte_same(*pte, swp_pte))) { + if (unlikely(maybe_same_pte(*pte, swp_pte))) { pte_unmap(pte); ret = unuse_pte(vma, pmd, addr, entry, page); if (ret) diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 4a78c4de9f20..6ee48aac776f 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -91,7 +91,12 @@ EXPORT_SYMBOL(__vlan_find_dev_deep); struct net_device *vlan_dev_real_dev(const struct net_device *dev) { - return vlan_dev_priv(dev)->real_dev; + struct net_device *ret = vlan_dev_priv(dev)->real_dev; + + while (is_vlan_dev(ret)) + ret = vlan_dev_priv(ret)->real_dev; + + return ret; } EXPORT_SYMBOL(vlan_dev_real_dev); diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index e14531f1ce1c..264de88db320 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -1529,6 +1529,8 @@ out: * in these cases, the skb is further handled by this function and * returns 1, otherwise it returns 0 and the caller shall further * process the skb. + * + * This call might reallocate skb data. 
*/ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid) diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index f105219f4a4b..7614af31daff 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -508,6 +508,7 @@ out: return 0; } +/* this call might reallocate skb data */ static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len) { int ret = false; @@ -568,6 +569,7 @@ out: return ret; } +/* this call might reallocate skb data */ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) { struct ethhdr *ethhdr; @@ -619,6 +621,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr))) return false; + + /* skb->data might have been reallocated by pskb_may_pull() */ + ethhdr = (struct ethhdr *)skb->data; + if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) + ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN); + udphdr = (struct udphdr *)(skb->data + *header_len); *header_len += sizeof(*udphdr); @@ -634,12 +642,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) return true; } +/* this call might reallocate skb data */ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, - struct sk_buff *skb, struct ethhdr *ethhdr) + struct sk_buff *skb) { struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL; struct batadv_orig_node *orig_dst_node = NULL; struct batadv_gw_node *curr_gw = NULL; + struct ethhdr *ethhdr; bool ret, out_of_range = false; unsigned int header_len = 0; uint8_t curr_tq_avg; @@ -648,6 +658,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, if (!ret) goto out; + ethhdr = (struct ethhdr *)skb->data; orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, ethhdr->h_dest); if (!orig_dst_node) diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h index 039902dca4a6..1037d75da51f 100644 --- a/net/batman-adv/gateway_client.h +++ b/net/batman-adv/gateway_client.h @@ -34,7 +34,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv, void batadv_gw_node_purge(struct batadv_priv *bat_priv); int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset); bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len); -bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, - struct sk_buff *skb, struct ethhdr *ethhdr); +bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb); #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 700d0b49742d..0f04e1c302b4 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb, if (batadv_bla_tx(bat_priv, skb, vid)) goto dropped; + /* skb->data might have been reallocated by batadv_bla_tx() */ + ethhdr = (struct ethhdr *)skb->data; + /* Register the client MAC in the transtable */ if (!is_multicast_ether_addr(ethhdr->h_source)) batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); @@ -220,6 +223,10 @@ static int batadv_interface_tx(struct sk_buff *skb, default: break; } + + /* reminder: ethhdr might have become unusable from here on + * (batadv_gw_is_dhcp_target() might have reallocated skb data) + */ } /* ethernet packet should be broadcasted */ @@ -266,7 +273,7 @@ static int batadv_interface_tx(struct sk_buff *skb, /* unicast packet */ 
} else { if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) { - ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr); + ret = batadv_gw_out_of_range(bat_priv, skb); if (ret) goto dropped; } diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c index dc8b5d4dd636..688a0419756b 100644 --- a/net/batman-adv/unicast.c +++ b/net/batman-adv/unicast.c @@ -326,7 +326,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size, * @skb: the skb containing the payload to encapsulate * @orig_node: the destination node * - * Returns false if the payload could not be encapsulated or true otherwise + * Returns false if the payload could not be encapsulated or true otherwise. + * + * This call might reallocate skb data. */ static bool batadv_unicast_prepare_skb(struct sk_buff *skb, struct batadv_orig_node *orig_node) @@ -343,7 +345,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb, * @orig_node: the destination node * @packet_subtype: the batman 4addr packet subtype to use * - * Returns false if the payload could not be encapsulated or true otherwise + * Returns false if the payload could not be encapsulated or true otherwise. + * + * This call might reallocate skb data. */ bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv, struct sk_buff *skb, @@ -401,7 +405,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv, struct batadv_neigh_node *neigh_node; int data_len = skb->len; int ret = NET_RX_DROP; - unsigned int dev_mtu; + unsigned int dev_mtu, header_len; /* get routing information */ if (is_multicast_ether_addr(ethhdr->h_dest)) { @@ -429,10 +433,12 @@ find_router: switch (packet_type) { case BATADV_UNICAST: batadv_unicast_prepare_skb(skb, orig_node); + header_len = sizeof(struct batadv_unicast_packet); break; case BATADV_UNICAST_4ADDR: batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node, packet_subtype); + header_len = sizeof(struct batadv_unicast_4addr_packet); break; default: /* this function supports UNICAST and UNICAST_4ADDR only. 
It @@ -441,6 +447,7 @@ find_router: goto out; } + ethhdr = (struct ethhdr *)(skb->data + header_len); unicast_packet = (struct batadv_unicast_packet *)skb->data; /* inform the destination node that we are still missing a correct route diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 61c5e819380e..08e576ada0b2 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1195,7 +1195,7 @@ static int br_ip6_multicast_query(struct net_bridge *br, max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay)); if (max_delay) group = &mld->mld_mca; - } else if (skb->len >= sizeof(*mld2q)) { + } else { if (!pskb_may_pull(skb, sizeof(*mld2q))) { err = -EINVAL; goto out; diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index 394bb96b6087..3b9637fb7939 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -1,5 +1,5 @@ /* - * Sysfs attributes of bridge ports + * Sysfs attributes of bridge * Linux ethernet bridge * * Authors: diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 00ee068efc1c..b84a1b155bc1 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -65,6 +65,7 @@ ipv6: nhoff += sizeof(struct ipv6hdr); break; } + case __constant_htons(ETH_P_8021AD): case __constant_htons(ETH_P_8021Q): { const struct vlan_hdr *vlan; struct vlan_hdr _vlan; diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 9232c68941ab..60533db8b72d 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1441,16 +1441,18 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev, atomic_set(&p->refcnt, 1); p->reachable_time = neigh_rand_reach_time(p->base_reachable_time); + dev_hold(dev); + p->dev = dev; + write_pnet(&p->net, hold_net(net)); + p->sysctl_table = NULL; if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) { + release_net(net); + dev_put(dev); kfree(p); return NULL; } - dev_hold(dev); - p->dev = dev; - write_pnet(&p->net, hold_net(net)); - p->sysctl_table = NULL; write_lock_bh(&tbl->lock); p->next = tbl->parms.next; tbl->parms.next = p; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 3de740834d1f..ca198c1d1d30 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2156,7 +2156,7 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm, /* If aging addresses are supported device will need to * implement its own handler for this. 
*/ - if (ndm->ndm_state & NUD_PERMANENT) { + if (!(ndm->ndm_state & NUD_PERMANENT)) { pr_info("%s: FDB only supports static addresses\n", dev->name); return -EINVAL; } @@ -2384,7 +2384,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) struct nlattr *extfilt; u32 filter_mask = 0; - extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg), + extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg), IFLA_EXT_MASK); if (extfilt) filter_mask = nla_get_u32(extfilt); diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index ab3d814bc80a..109ee89f123e 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu) } return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - - net_adj) & ~(align - 1)) + (net_adj - 2); + net_adj) & ~(align - 1)) + net_adj - 2; } static void esp4_err(struct sk_buff *skb, u32 info) diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 108a1e9c9eac..3df6d3edb2a1 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -71,7 +71,6 @@ #include <linux/init.h> #include <linux/list.h> #include <linux/slab.h> -#include <linux/prefetch.h> #include <linux/export.h> #include <net/net_namespace.h> #include <net/ip.h> @@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c) if (!c) continue; - if (IS_LEAF(c)) { - prefetch(rcu_dereference_rtnl(p->child[idx])); + if (IS_LEAF(c)) return (struct leaf *) c; - } /* Rescan start scanning in new node */ p = (struct tnode *) c; diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 1f6eab66f7ce..8d6939eeb492 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -383,7 +383,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, if (daddr) memcpy(&iph->daddr, daddr, 4); if (iph->daddr) - return t->hlen; + return t->hlen + sizeof(*iph); return -(t->hlen + sizeof(*iph)); } diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 7167b08977df..850525b34899 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -76,9 +76,7 @@ int iptunnel_xmit(struct net *net, struct rtable *rt, iph->daddr = dst; iph->saddr = src; iph->ttl = ttl; - tunnel_ip_select_ident(skb, - (const struct iphdr *)skb_inner_network_header(skb), - &rt->dst); + __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1); err = ip_local_out(skb); if (unlikely(net_xmit_eval(err))) diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 6577a1149a47..463bd1273346 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -273,7 +273,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW), SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD), SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES), - SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS), + SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS), SNMP_MIB_SENTINEL }; diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index a9077f441cb2..b6ae92a51f58 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -206,8 +206,8 @@ static u32 cubic_root(u64 a) */ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) { - u64 offs; - u32 delta, t, bic_target, max_cnt; + u32 delta, bic_target, max_cnt; + u64 offs, t; ca->ack_cnt++; /* count the number of ACKs */ @@ -250,9 +250,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) * 
if the cwnd < 1 million packets !!! */ + t = (s32)(tcp_time_stamp - ca->epoch_start); + t += msecs_to_jiffies(ca->delay_min >> 3); /* change the unit from HZ to bictcp_HZ */ - t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3) - - ca->epoch_start) << BICTCP_HZ) / HZ; + t <<= BICTCP_HZ; + do_div(t, HZ); if (t < ca->bic_K) /* t - K */ offs = ca->bic_K - t; @@ -414,7 +416,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) return; /* Discard delay samples right after fast recovery */ - if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ) + if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ) return; delay = (rtt_us << 3) / USEC_PER_MSEC; diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 40ffd72243a4..aeac0dc3635d 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu) net_adj = 0; return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - - net_adj) & ~(align - 1)) + (net_adj - 2); + net_adj) & ~(align - 1)) + net_adj - 2; } static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index bff3d821c7eb..c4ff5bbb45c4 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -993,14 +993,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root, if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) { #ifdef CONFIG_IPV6_SUBTREES - if (fn->subtree) - fn = fib6_lookup_1(fn->subtree, args + 1); + if (fn->subtree) { + struct fib6_node *sfn; + sfn = fib6_lookup_1(fn->subtree, + args + 1); + if (!sfn) + goto backtrack; + fn = sfn; + } #endif - if (!fn || fn->fn_flags & RTN_RTINFO) + if (fn->fn_flags & RTN_RTINFO) return fn; } } - +#ifdef CONFIG_IPV6_SUBTREES +backtrack: +#endif if (fn->fn_flags & RTN_ROOT) break; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index ae31968d42d3..cc9e02d79b55 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -31,10 +31,12 @@ #include "led.h" #define IEEE80211_AUTH_TIMEOUT (HZ / 5) +#define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2) #define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10) #define IEEE80211_AUTH_MAX_TRIES 3 #define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5) #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) +#define IEEE80211_ASSOC_TIMEOUT_LONG (HZ / 2) #define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10) #define IEEE80211_ASSOC_MAX_TRIES 3 @@ -209,8 +211,9 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *channel, const struct ieee80211_ht_operation *ht_oper, const struct ieee80211_vht_operation *vht_oper, - struct cfg80211_chan_def *chandef, bool verbose) + struct cfg80211_chan_def *chandef, bool tracking) { + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct cfg80211_chan_def vht_chandef; u32 ht_cfreq, ret; @@ -229,7 +232,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan, channel->band); /* check that channel matches the right operating channel */ - if (channel->center_freq != ht_cfreq) { + if (!tracking && channel->center_freq != ht_cfreq) { /* * It's possible that some APs are confused here; * Netgear WNDR3700 sometimes reports 4 higher than @@ -237,11 +240,10 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, * since we look at probe response/beacon data here * it should be OK. 
*/ - if (verbose) - sdata_info(sdata, - "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n", - channel->center_freq, ht_cfreq, - ht_oper->primary_chan, channel->band); + sdata_info(sdata, + "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n", + channel->center_freq, ht_cfreq, + ht_oper->primary_chan, channel->band); ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; goto out; } @@ -295,7 +297,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, channel->band); break; default: - if (verbose) + if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) sdata_info(sdata, "AP VHT operation IE has invalid channel width (%d), disable VHT\n", vht_oper->chan_width); @@ -304,7 +306,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, } if (!cfg80211_chandef_valid(&vht_chandef)) { - if (verbose) + if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) sdata_info(sdata, "AP VHT information is invalid, disable VHT\n"); ret = IEEE80211_STA_DISABLE_VHT; @@ -317,7 +319,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, } if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) { - if (verbose) + if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) sdata_info(sdata, "AP VHT information doesn't match HT, disable VHT\n"); ret = IEEE80211_STA_DISABLE_VHT; @@ -333,18 +335,27 @@ out: if (ret & IEEE80211_STA_DISABLE_VHT) vht_chandef = *chandef; + /* + * Ignore the DISABLED flag when we're already connected and only + * tracking the APs beacon for bandwidth changes - otherwise we + * might get disconnected here if we connect to an AP, update our + * regulatory information based on the AP's country IE and the + * information we have is wrong/outdated and disables the channel + * that we're actually using for the connection to the AP. + */ while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, - IEEE80211_CHAN_DISABLED)) { + tracking ? 
0 : + IEEE80211_CHAN_DISABLED)) { if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) { ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; - goto out; + break; } ret |= chandef_downgrade(chandef); } - if (chandef->width != vht_chandef.width && verbose) + if (chandef->width != vht_chandef.width && !tracking) sdata_info(sdata, "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n"); @@ -384,7 +395,7 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata, /* calculate new channel (type) based on HT/VHT operation IEs */ flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper, - vht_oper, &chandef, false); + vht_oper, &chandef, true); /* * Downgrade the new channel if we associated with restricted @@ -3394,10 +3405,13 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) if (tx_flags == 0) { auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; - ifmgd->auth_data->timeout_started = true; + auth_data->timeout_started = true; run_again(sdata, auth_data->timeout); } else { - auth_data->timeout_started = false; + auth_data->timeout = + round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG); + auth_data->timeout_started = true; + run_again(sdata, auth_data->timeout); } return 0; @@ -3434,7 +3448,11 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata) assoc_data->timeout_started = true; run_again(sdata, assoc_data->timeout); } else { - assoc_data->timeout_started = false; + assoc_data->timeout = + round_jiffies_up(jiffies + + IEEE80211_ASSOC_TIMEOUT_LONG); + assoc_data->timeout_started = true; + run_again(sdata, assoc_data->timeout); } return 0; @@ -3829,7 +3847,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, ifmgd->flags |= ieee80211_determine_chantype(sdata, sband, cbss->channel, ht_oper, vht_oper, - &chandef, true); + &chandef, false); sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss), local->rx_chains); diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 7dcc376eea5f..2f8010707d01 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -526,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct, const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; __u32 seq, ack, sack, end, win, swin; s16 receiver_offset; - bool res; + bool res, in_recv_win; /* * Get the required data from the packet. @@ -649,14 +649,18 @@ static bool tcp_in_window(const struct nf_conn *ct, receiver->td_end, receiver->td_maxend, receiver->td_maxwin, receiver->td_scale); + /* Is the ending sequence in the receive window (if available)? */ + in_recv_win = !receiver->td_maxwin || + after(end, sender->td_end - receiver->td_maxwin - 1); + pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n", before(seq, sender->td_maxend + 1), - after(end, sender->td_end - receiver->td_maxwin - 1), + (in_recv_win ? 1 : 0), before(sack, receiver->td_end + 1), after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)); if (before(seq, sender->td_maxend + 1) && - after(end, sender->td_end - receiver->td_maxwin - 1) && + in_recv_win && before(sack, receiver->td_end + 1) && after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) { /* @@ -725,7 +729,7 @@ static bool tcp_in_window(const struct nf_conn *ct, nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "nf_ct_tcp: %s ", before(seq, sender->td_maxend + 1) ? - after(end, sender->td_end - receiver->td_maxwin - 1) ? + in_recv_win ? 
before(sack, receiver->td_end + 1) ? after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG" : "ACK is under the lower bound (possible overly delayed ACK)" diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 962e9792e317..d92cc317bf8b 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log, nfmsg->version = NFNETLINK_V0; nfmsg->res_id = htons(inst->group_num); + memset(&pmsg, 0, sizeof(pmsg)); pmsg.hw_protocol = skb->protocol; pmsg.hook = hooknum; @@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log, if (indev && skb->dev && skb->mac_header != skb->network_header) { struct nfulnl_msg_packet_hw phw; - int len = dev_parse_header(skb, phw.hw_addr); + int len; + + memset(&phw, 0, sizeof(phw)); + len = dev_parse_header(skb, phw.hw_addr); if (len > 0) { phw.hw_addrlen = htons(len); if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw)) diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 971ea145ab3e..8a703c3dd318 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c @@ -463,7 +463,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, if (indev && entskb->dev && entskb->mac_header != entskb->network_header) { struct nfqnl_msg_packet_hw phw; - int len = dev_parse_header(entskb, phw.hw_addr); + int len; + + memset(&phw, 0, sizeof(phw)); + len = dev_parse_header(entskb, phw.hw_addr); if (len) { phw.hw_addrlen = htons(len); if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
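Both nfnetlink hunks above fix the same kind of kernel-to-userspace infoleak: a struct is filled in on the stack and then copied out whole via nla_put(), so any padding bytes and any hw_addr tail bytes beyond the parsed hardware-address length would carry stale stack data. A minimal user-space sketch of the pattern follows; the struct layout is illustrative, not the real nfulnl_msg_packet_hw definition.

#include <string.h>

/* Illustrative wire struct: 2 + 1 bytes of fields, an 8-byte address
 * array, and alignment rounds the size up, so both the trailing
 * padding and hw_addr[len..7] stay unwritten unless zeroed first. */
struct pkt_hw_sketch {
	unsigned short	hw_addrlen;
	unsigned char	flags;
	unsigned char	hw_addr[8];
};

static void fill_pkt_hw(struct pkt_hw_sketch *phw,
			const unsigned char *mac, int len)
{
	/* The fix from the hunks above: zero everything before use,
	 * so copying sizeof(*phw) bytes out cannot leak stack data.
	 * len is assumed to be <= sizeof(phw->hw_addr). */
	memset(phw, 0, sizeof(*phw));

	memcpy(phw->hw_addr, mac, len);
	phw->hw_addrlen = (unsigned short)len;
}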
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 7011c71646f0..6113cc7efffc 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c @@ -52,7 +52,8 @@ tcpmss_mangle_packet(struct sk_buff *skb, { const struct xt_tcpmss_info *info = par->targinfo; struct tcphdr *tcph; - unsigned int tcplen, i; + int len, tcp_hdrlen; + unsigned int i; __be16 oldval; u16 newmss; u8 *opt; @@ -64,11 +65,14 @@ tcpmss_mangle_packet(struct sk_buff *skb, if (!skb_make_writable(skb, skb->len)) return -1; - tcplen = skb->len - tcphoff; + len = skb->len - tcphoff; + if (len < (int)sizeof(struct tcphdr)) + return -1; + tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); + tcp_hdrlen = tcph->doff * 4; - /* Header cannot be larger than the packet */ - if (tcplen < tcph->doff*4) + if (len < tcp_hdrlen) return -1; if (info->mss == XT_TCPMSS_CLAMP_PMTU) { @@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb, newmss = info->mss; opt = (u_int8_t *)tcph; - for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) { - if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS && - opt[i+1] == TCPOLEN_MSS) { + for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) { + if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) { u_int16_t oldmss; oldmss = (opt[i+2] << 8) | opt[i+3]; @@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb, } /* There is data after the header so the option can't be added - without moving it, and doing so may make the SYN packet - itself too large. Accept the packet unmodified instead. */ + * without moving it, and doing so may make the SYN packet + * itself too large. Accept the packet unmodified instead. + */ + if (len > tcp_hdrlen) return 0; /* @@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb, newmss = min(newmss, (u16)1220); opt = (u_int8_t *)tcph + sizeof(struct tcphdr); - memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr)); + memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr)); inet_proto_csum_replace2(&tcph->check, skb, - htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1); + htons(len), htons(len + TCPOLEN_MSS), 1); opt[0] = TCPOPT_MSS; opt[1] = TCPOLEN_MSS; opt[2] = (newmss & 0xff00) >> 8; diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c index b68fa191710f..625fa1d636a0 100644 --- a/net/netfilter/xt_TCPOPTSTRIP.c +++ b/net/netfilter/xt_TCPOPTSTRIP.c @@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb, struct tcphdr *tcph; u_int16_t n, o; u_int8_t *opt; - int len; + int len, tcp_hdrlen; /* This is a fragment, no TCP header is available */ if (par->fragoff != 0) @@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb, return NF_DROP; tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); - if (tcph->doff * 4 > len) + tcp_hdrlen = tcph->doff * 4; + + if (len < tcp_hdrlen) return NF_DROP; opt = (u_int8_t *)tcph; @@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb, * Walk through all TCP options - if we find some option to remove, * set all octets to %TCPOPT_NOP and adjust checksum. */ - for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) { + for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) { optl = optlen(opt, i); - if (i + optl > tcp_hdrlen(skb)) + if (i + optl > tcp_hdrlen) break; if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i])) diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 512718adb0d5..f85f8a2ad6cf 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -789,6 +789,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb) struct net *net = sock_net(skb->sk); int chains_to_skip = cb->args[0]; int fams_to_skip = cb->args[1]; + bool need_locking = chains_to_skip || fams_to_skip; + + if (need_locking) + genl_lock(); for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) { n = 0; @@ -810,6 +814,9 @@ errout: cb->args[0] = i; cb->args[1] = n; + if (need_locking) + genl_unlock(); + return skb->len; } diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 22c5f399f1cf..ab101f715447 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -535,6 +535,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb) { struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); + OVS_CB(skb)->tun_key = NULL; return do_execute_actions(dp, skb, acts->actions, acts->actions_len, false); } diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index f7e3a0d84c40..f2ed7600084e 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -2076,9 +2076,6 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info) ovs_notify(reply, info, &ovs_dp_vport_multicast_group); return 0; - rtnl_unlock(); - return 0; - exit_free: kfree_skb(reply); exit_unlock: diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 5c519b121e1b..1aa84dc58777 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -240,7 +240,7 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets) struct flex_array *buckets; int i, err; - buckets =
flex_array_alloc(sizeof(struct hlist_head), n_buckets, GFP_KERNEL); if (!buckets) return NULL; diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 281c1bded1f6..51b968d3febb 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -285,6 +285,45 @@ static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind) return q; } +/* The linklayer settings were not transferred from iproute2, in older + * versions, and the rate table lookup system has been dropped from + * the kernel. To stay backward compatible with older iproute2 tc + * utils, we detect the linklayer setting by checking whether the + * rate table was modified. + * + * For linklayer ATM table entries, the rate table will be aligned to + * 48 bytes, thus some table entries will contain the same value. The + * mpu (min packet unit) is also encoded into the old rate table, thus + * starting from the mpu, we find low and high table entries for + * mapping this cell. If these entries contain the same value, then + * the rate table has been modified for linklayer ATM. + * + * This is done by rounding the mpu up to the nearest 48-byte + * cell/entry, then rounding up to the next cell, calculating the + * table entry one below, and comparing the two. + */ +static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab) +{ + int low = roundup(r->mpu, 48); + int high = roundup(low+1, 48); + int cell_low = low >> r->cell_log; + int cell_high = (high >> r->cell_log) - 1; + + /* rtab is too inaccurate at rates > 100Mbit/s */ + if ((r->rate > (100000000/8)) || (rtab[0] == 0)) { + pr_debug("TC linklayer: Giving up ATM detection\n"); + return TC_LINKLAYER_ETHERNET; + } + + if ((cell_high > cell_low) && (cell_high < 256) + && (rtab[cell_low] == rtab[cell_high])) { + pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n", + cell_low, cell_high, rtab[cell_high]); + return TC_LINKLAYER_ATM; + } + return TC_LINKLAYER_ETHERNET; +} + static struct qdisc_rate_table *qdisc_rtab_list; struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab) @@ -308,6 +347,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta rtab->rate = *r; rtab->refcnt = 1; memcpy(rtab->data, nla_data(tab), 1024); + if (r->linklayer == TC_LINKLAYER_UNAWARE) + r->linklayer = __detect_linklayer(r, rtab->data); rtab->next = qdisc_rtab_list; qdisc_rtab_list = rtab; } diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 4626cef4b76e..48be3d5c0d92 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -25,6 +25,7 @@ #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/slab.h> +#include <linux/if_vlan.h> #include <net/sch_generic.h> #include <net/pkt_sched.h> #include <net/dst.h> @@ -207,15 +208,19 @@ void __qdisc_run(struct Qdisc *q) unsigned long dev_trans_start(struct net_device *dev) { - unsigned long val, res = dev->trans_start; + unsigned long val, res; unsigned int i; + if (is_vlan_dev(dev)) + dev = vlan_dev_real_dev(dev); + res = dev->trans_start; for (i = 0; i < dev->num_tx_queues; i++) { val = netdev_get_tx_queue(dev, i)->trans_start; if (val && time_after(val, res)) res = val; } dev->trans_start = res; + return res; } EXPORT_SYMBOL(dev_trans_start); @@ -904,6 +909,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, memset(r, 0, sizeof(*r)); r->overhead = conf->overhead; r->rate_bytes_ps = conf->rate; + r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK); r->mult = 1; /* * The deal here is to replace a divide by a reciprocal one
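The __detect_linklayer() heuristic above is easy to sanity-check outside the kernel: ATM carries every packet in 48-byte cells, so an ATM-modified rate table assigns the same cost to all packet sizes that fit in one cell, which is exactly what the rtab[cell_low] == rtab[cell_high] comparison probes. A stand-alone sketch with made-up table values; cell_log = 3 and the *10 cost factor are arbitrary, and the kernel version additionally gives up at rates above 100Mbit/s:

#include <stdio.h>

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

/* Same arithmetic as __detect_linklayer() above, minus the bail-outs. */
static int looks_like_atm(unsigned int mpu, int cell_log,
			  const unsigned int *rtab)
{
	int low = ROUNDUP(mpu, 48);
	int high = ROUNDUP(low + 1, 48);
	int cell_low = low >> cell_log;
	int cell_high = (high >> cell_log) - 1;

	return cell_high > cell_low && cell_high < 256 &&
	       rtab[cell_low] == rtab[cell_high];
}

int main(void)
{
	unsigned int eth_rtab[256], atm_rtab[256];
	int i;

	for (i = 0; i < 256; i++) {
		/* slot i covers a packet of size (i + 1) << cell_log */
		unsigned int bytes = (i + 1) << 3;

		eth_rtab[i] = bytes * 10;		/* linear cost */
		atm_rtab[i] = ROUNDUP(bytes, 48) * 10;	/* 48-byte cells */
	}
	/* with mpu = 0: cell_low = 0, cell_high = 5, so slots 0 and 5
	 * are compared - equal only in the cell-aligned table */
	printf("eth rtab: %s\n", looks_like_atm(0, 3, eth_rtab) ? "ATM" : "Ethernet");
	printf("atm rtab: %s\n", looks_like_atm(0, 3, atm_rtab) ? "ATM" : "Ethernet");
	return 0;
}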
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 45e751527dfc..c2178b15ca6e 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -1329,6 +1329,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, struct htb_sched *q = qdisc_priv(sch); struct htb_class *cl = (struct htb_class *)*arg, *parent; struct nlattr *opt = tca[TCA_OPTIONS]; + struct qdisc_rate_table *rtab = NULL, *ctab = NULL; struct nlattr *tb[TCA_HTB_MAX + 1]; struct tc_htb_opt *hopt; @@ -1350,6 +1351,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, if (!hopt->rate.rate || !hopt->ceil.rate) goto failure; + /* Keep backward compatibility with rate_table-based iproute2 tc */ + if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) { + rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]); + if (rtab) + qdisc_put_rtab(rtab); + } + if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) { + ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]); + if (ctab) + qdisc_put_rtab(ctab); + } + if (!cl) { /* new class */ struct Qdisc *new_q; int prio; diff --git a/net/sctp/associola.c b/net/sctp/associola.c index bce5b79662a6..ab67efc64b24 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -846,12 +846,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc, else spc_state = SCTP_ADDR_AVAILABLE; /* Don't inform ULP about transition from PF to - * active state and set cwnd to 1, see SCTP + * active state and set cwnd to 1 MTU, see SCTP * Quick failover draft section 5.1, point 5 */ if (transport->state == SCTP_PF) { ulp_notify = false; - transport->cwnd = 1; + transport->cwnd = asoc->pathmtu; } transport->state = SCTP_ACTIVE; break; diff --git a/net/sctp/transport.c b/net/sctp/transport.c index bdbbc3fd7c14..8fdd16046d66 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -181,12 +181,12 @@ static void sctp_transport_destroy(struct sctp_transport *transport) return; } - call_rcu(&transport->rcu, sctp_transport_destroy_rcu); - sctp_packet_free(&transport->packet); if (transport->asoc) sctp_association_put(transport->asoc); + + call_rcu(&transport->rcu, sctp_transport_destroy_rcu); } /* Start T3_rtx timer if it is not already running and update the heartbeat diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index cb29ef7ba2f0..609c30c80816 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -460,6 +460,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr) { struct tipc_link *l_ptr; struct tipc_link *temp_l_ptr; + struct tipc_link_req *temp_req; pr_info("Disabling bearer <%s>\n", b_ptr->name); spin_lock_bh(&b_ptr->lock); @@ -468,9 +469,13 @@ static void bearer_disable(struct tipc_bearer *b_ptr) list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { tipc_link_delete(l_ptr); } - if (b_ptr->link_req) - tipc_disc_delete(b_ptr->link_req); + temp_req = b_ptr->link_req; + b_ptr->link_req = NULL; spin_unlock_bh(&b_ptr->lock); + + if (temp_req) + tipc_disc_delete(temp_req); + memset(b_ptr, 0, sizeof(struct tipc_bearer)); } diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 593071dabd1c..4d9334683f84 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -347,7 +347,7 @@ void vsock_for_each_connected_socket(void (*fn)(struct sock *sk)) for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) { struct vsock_sock *vsk; list_for_each_entry(vsk, &vsock_connected_table[i], - connected_table); + connected_table) fn(sk_vsock(vsk)); } diff --git a/net/wireless/core.c b/net/wireless/core.c index 4f9f216665e9..a8c29fa4f1b3 100644 --- a/net/wireless/core.c +++
b/net/wireless/core.c @@ -765,6 +765,7 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev, cfg80211_leave_mesh(rdev, dev); break; case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: cfg80211_stop_ap(rdev, dev); break; default: diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 25d217d90807..3fcba69817e5 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -441,10 +441,12 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, goto out_unlock; } *rdev = wiphy_to_dev((*wdev)->wiphy); - cb->args[0] = (*rdev)->wiphy_idx; + /* 0 is the first index - add 1 to parse only once */ + cb->args[0] = (*rdev)->wiphy_idx + 1; cb->args[1] = (*wdev)->identifier; } else { - struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0]); + /* subtract the 1 again here */ + struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); struct wireless_dev *tmp; if (!wiphy) { diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 8e77cbbad871..e3c7ba8d7582 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -522,7 +522,7 @@ static bool same_amp_caps(struct hda_codec *codec, hda_nid_t nid1, } #define nid_has_mute(codec, nid, dir) \ - check_amp_caps(codec, nid, dir, AC_AMPCAP_MUTE) + check_amp_caps(codec, nid, dir, (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) #define nid_has_volume(codec, nid, dir) \ check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS) @@ -624,7 +624,7 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid, if (enable) val = (caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT; } - if (caps & AC_AMPCAP_MUTE) { + if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) { if (!enable) val |= HDA_AMP_MUTE; } @@ -648,7 +648,7 @@ static unsigned int get_amp_mask_to_modify(struct hda_codec *codec, { unsigned int mask = 0xff; - if (caps & AC_AMPCAP_MUTE) { + if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) { if (is_ctl_associated(codec, nid, dir, idx, NID_PATH_MUTE_CTL)) mask &= ~0x80; } diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 8bd226149868..f303cd898515 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -1031,6 +1031,7 @@ enum { ALC880_FIXUP_GPIO2, ALC880_FIXUP_MEDION_RIM, ALC880_FIXUP_LG, + ALC880_FIXUP_LG_LW25, ALC880_FIXUP_W810, ALC880_FIXUP_EAPD_COEF, ALC880_FIXUP_TCL_S700, @@ -1089,6 +1090,14 @@ static const struct hda_fixup alc880_fixups[] = { { } } }, + [ALC880_FIXUP_LG_LW25] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x1a, 0x0181344f }, /* line-in */ + { 0x1b, 0x0321403f }, /* headphone */ + { } + } + }, [ALC880_FIXUP_W810] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -1341,6 +1350,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = { SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_FIXUP_LG), SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_FIXUP_LG), SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_FIXUP_LG), + SND_PCI_QUIRK(0x1854, 0x0077, "LG LW25", ALC880_FIXUP_LG_LW25), SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_FIXUP_TCL_S700), /* Below is the copied entries from alc880_quirks.c. 
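The nl80211_prepare_wdev_dump() hunk above is the classic zero-as-sentinel problem: netlink dump callbacks get cb->args[] zero-initialized, and wiphy index 0 is a perfectly valid index, so storing the index as-is would make the first wiphy indistinguishable from "attributes not parsed yet". Biasing the stored value by one keeps 0 free as the sentinel. A small sketch of the pattern; the names are illustrative, not the kernel's:

/* cb->args[] in a netlink dump starts out as all zeroes. */
struct dump_state { unsigned long args[2]; };

static void remember_idx(struct dump_state *cb, unsigned int wiphy_idx)
{
	cb->args[0] = wiphy_idx + 1;	/* 0 stays "unset" */
}

/* Returns the saved index, or -1 when parsing still has to happen. */
static int recall_idx(const struct dump_state *cb)
{
	return cb->args[0] ? (int)(cb->args[0] - 1) : -1;
}

The subtraction on the read side is why the patch comments both stores: anyone touching one of them has to keep the other in sync.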
@@ -4329,6 +4339,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC), + SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c index 987f728718c5..be2ba1b6fe4a 100644 --- a/sound/soc/codecs/cs42l52.c +++ b/sound/soc/codecs/cs42l52.c @@ -195,6 +195,8 @@ static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0); static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0); +static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0); + static const unsigned int limiter_tlv[] = { TLV_DB_RANGE_HEAD(2), 0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0), @@ -451,7 +453,8 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = { SOC_ENUM("Beep Pitch", beep_pitch_enum), SOC_ENUM("Beep on Time", beep_ontime_enum), SOC_ENUM("Beep off Time", beep_offtime_enum), - SOC_SINGLE_TLV("Beep Volume", CS42L52_BEEP_VOL, 0, 0x1f, 0x07, hl_tlv), + SOC_SINGLE_SX_TLV("Beep Volume", CS42L52_BEEP_VOL, + 0, 0x07, 0x1f, beep_tlv), SOC_SINGLE("Beep Mixer Switch", CS42L52_BEEP_TONE_CTL, 5, 1, 1), SOC_ENUM("Beep Treble Corner Freq", beep_treble_enum), SOC_ENUM("Beep Bass Corner Freq", beep_bass_enum), diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index 6c8a9e7bee25..760e8bfeacaa 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -153,6 +153,8 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w, static int power_vag_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { + const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP; + switch (event) { case SND_SOC_DAPM_POST_PMU: snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER, @@ -160,9 +162,17 @@ static int power_vag_event(struct snd_soc_dapm_widget *w, break; case SND_SOC_DAPM_PRE_PMD: - snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER, - SGTL5000_VAG_POWERUP, 0); - msleep(400); + /* + * Don't clear VAG_POWERUP when both the DAC and the ADC are + * operational, to avoid inadvertently starving the other + * one of them.
+ */ + if ((snd_soc_read(w->codec, SGTL5000_CHIP_ANA_POWER) & + mask) != mask) { + snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER, + SGTL5000_VAG_POWERUP, 0); + msleep(400); + } break; default: break; @@ -388,7 +398,7 @@ static const struct snd_kcontrol_new sgtl5000_snd_controls[] = { SOC_DOUBLE("Capture Volume", SGTL5000_CHIP_ANA_ADC_CTRL, 0, 4, 0xf, 0), SOC_SINGLE_TLV("Capture Attenuate Switch (-6dB)", SGTL5000_CHIP_ANA_ADC_CTRL, - 8, 2, 0, capture_6db_attenuate), + 8, 1, 0, capture_6db_attenuate), SOC_SINGLE("Capture ZC Switch", SGTL5000_CHIP_ANA_CTRL, 1, 1, 0), SOC_DOUBLE_TLV("Headphone Playback Volume", diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index bd16010441cc..4375c9f2b791 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -679,13 +679,14 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w) return -EINVAL; } - path = list_first_entry(&w->sources, struct snd_soc_dapm_path, - list_sink); - if (!path) { + if (list_empty(&w->sources)) { dev_err(dapm->dev, "ASoC: mux %s has no paths\n", w->name); return -EINVAL; } + path = list_first_entry(&w->sources, struct snd_soc_dapm_path, + list_sink); + ret = dapm_create_or_share_mixmux_kcontrol(w, 0, path); if (ret < 0) return ret; diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c index d04146cad61f..47565fd04505 100644 --- a/sound/soc/tegra/tegra30_i2s.c +++ b/sound/soc/tegra/tegra30_i2s.c @@ -228,7 +228,7 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream, reg = TEGRA30_I2S_CIF_RX_CTRL; } else { val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX; - reg = TEGRA30_I2S_CIF_RX_CTRL; + reg = TEGRA30_I2S_CIF_TX_CTRL; } regmap_write(i2s->regmap, reg, val); diff --git a/sound/usb/6fire/midi.c b/sound/usb/6fire/midi.c index 26722423330d..f3dd7266c391 100644 --- a/sound/usb/6fire/midi.c +++ b/sound/usb/6fire/midi.c @@ -19,6 +19,10 @@ #include "chip.h" #include "comm.h" +enum { + MIDI_BUFSIZE = 64 +}; + static void usb6fire_midi_out_handler(struct urb *urb) { struct midi_runtime *rt = urb->context; @@ -156,6 +160,12 @@ int usb6fire_midi_init(struct sfire_chip *chip) if (!rt) return -ENOMEM; + rt->out_buffer = kzalloc(MIDI_BUFSIZE, GFP_KERNEL); + if (!rt->out_buffer) { + kfree(rt); + return -ENOMEM; + } + rt->chip = chip; rt->in_received = usb6fire_midi_in_received; rt->out_buffer[0] = 0x80; /* 'send midi' command */ @@ -169,6 +179,7 @@ int usb6fire_midi_init(struct sfire_chip *chip) ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance); if (ret < 0) { + kfree(rt->out_buffer); kfree(rt); snd_printk(KERN_ERR PREFIX "unable to create midi.\n"); return ret; @@ -197,6 +208,9 @@ void usb6fire_midi_abort(struct sfire_chip *chip) void usb6fire_midi_destroy(struct sfire_chip *chip) { - kfree(chip->midi); + struct midi_runtime *rt = chip->midi; + + kfree(rt->out_buffer); + kfree(rt); chip->midi = NULL; } diff --git a/sound/usb/6fire/midi.h b/sound/usb/6fire/midi.h index c321006e5430..84851b9f5559 100644 --- a/sound/usb/6fire/midi.h +++ b/sound/usb/6fire/midi.h @@ -16,10 +16,6 @@ #include "common.h" -enum { - MIDI_BUFSIZE = 64 -}; - struct midi_runtime { struct sfire_chip *chip; struct snd_rawmidi *instance; @@ -32,7 +28,7 @@ struct midi_runtime { struct snd_rawmidi_substream *out; struct urb out_urb; u8 out_serial; /* serial number of out packet */ - u8 out_buffer[MIDI_BUFSIZE]; + u8 *out_buffer; int buffer_offset; void (*in_received)(struct midi_runtime *rt, u8 *data, int length); diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c index 3d2551cc10f2..b5eb97fdc842 
100644 --- a/sound/usb/6fire/pcm.c +++ b/sound/usb/6fire/pcm.c @@ -582,6 +582,33 @@ static void usb6fire_pcm_init_urb(struct pcm_urb *urb, urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB; } +static int usb6fire_pcm_buffers_init(struct pcm_runtime *rt) +{ + int i; + + for (i = 0; i < PCM_N_URBS; i++) { + rt->out_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB + * PCM_MAX_PACKET_SIZE, GFP_KERNEL); + if (!rt->out_urbs[i].buffer) + return -ENOMEM; + rt->in_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB + * PCM_MAX_PACKET_SIZE, GFP_KERNEL); + if (!rt->in_urbs[i].buffer) + return -ENOMEM; + } + return 0; +} + +static void usb6fire_pcm_buffers_destroy(struct pcm_runtime *rt) +{ + int i; + + for (i = 0; i < PCM_N_URBS; i++) { + kfree(rt->out_urbs[i].buffer); + kfree(rt->in_urbs[i].buffer); + } +} + int usb6fire_pcm_init(struct sfire_chip *chip) { int i; @@ -593,6 +620,13 @@ int usb6fire_pcm_init(struct sfire_chip *chip) if (!rt) return -ENOMEM; + ret = usb6fire_pcm_buffers_init(rt); + if (ret) { + usb6fire_pcm_buffers_destroy(rt); + kfree(rt); + return ret; + } + rt->chip = chip; rt->stream_state = STREAM_DISABLED; rt->rate = ARRAY_SIZE(rates); @@ -614,6 +648,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip) ret = snd_pcm_new(chip->card, "DMX6FireUSB", 0, 1, 1, &pcm); if (ret < 0) { + usb6fire_pcm_buffers_destroy(rt); kfree(rt); snd_printk(KERN_ERR PREFIX "cannot create pcm instance.\n"); return ret; @@ -625,6 +660,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_ops); if (ret) { + usb6fire_pcm_buffers_destroy(rt); kfree(rt); snd_printk(KERN_ERR PREFIX "error preallocating pcm buffers.\n"); @@ -669,6 +705,9 @@ void usb6fire_pcm_abort(struct sfire_chip *chip) void usb6fire_pcm_destroy(struct sfire_chip *chip) { - kfree(chip->pcm); + struct pcm_runtime *rt = chip->pcm; + + usb6fire_pcm_buffers_destroy(rt); + kfree(rt); chip->pcm = NULL; } diff --git a/sound/usb/6fire/pcm.h b/sound/usb/6fire/pcm.h index 9b01133ee3fe..f5779d6182c6 100644 --- a/sound/usb/6fire/pcm.h +++ b/sound/usb/6fire/pcm.h @@ -32,7 +32,7 @@ struct pcm_urb { struct urb instance; struct usb_iso_packet_descriptor packets[PCM_N_PACKETS_PER_URB]; /* END DO NOT SEPARATE */ - u8 buffer[PCM_N_PACKETS_PER_URB * PCM_MAX_PACKET_SIZE]; + u8 *buffer; struct pcm_urb *peer; }; diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index d5438083fd6a..95558ef4a7a0 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -888,6 +888,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */ case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */ case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */ + case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */ case USB_ID(0x046d, 0x0991): /* Most audio usb devices lie about volume resolution. * Most Logitech webcams have res = 384. 
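The usb6fire hunks above move the URB transfer buffers out of the runtime structs into separate kzalloc() allocations (URB transfer buffers generally need to be their own DMA-able allocations rather than arrays embedded in a larger struct), and every error path learns to free them. The cleanup helper can safely run after a partially failed init because the runtime struct itself comes from kzalloc(): untouched pointers are NULL, and kfree(NULL) is a no-op. A user-space sketch of that pairing, with invented sizes and names:

#include <stdlib.h>

#define N_URBS	8
#define BUF_SZ	(6 * 1024)

struct urb_sketch { unsigned char *buffer; };

/* Safe even after a partial init, provided the array started out
 * zeroed (the kernel analogue: the runtime struct is kzalloc'd). */
static void buffers_destroy(struct urb_sketch *urbs)
{
	int i;

	for (i = 0; i < N_URBS; i++)
		free(urbs[i].buffer);	/* free(NULL) is a no-op */
}

static int buffers_init(struct urb_sketch *urbs)
{
	int i;

	for (i = 0; i < N_URBS; i++) {
		urbs[i].buffer = calloc(1, BUF_SZ);
		if (!urbs[i].buffer)
			return -1;	/* caller runs buffers_destroy() */
	}
	return 0;
}

This is why usb6fire_pcm_init() above can call usb6fire_pcm_buffers_destroy() unconditionally on the failure path without tracking how far the init loop got.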
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 1bc45e71f1fe..0df9ede99dfd 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -319,19 +319,19 @@ static int create_auto_midi_quirk(struct snd_usb_audio *chip, if (altsd->bNumEndpoints < 1) return -ENODEV; epd = get_endpoint(alts, 0); - if (!usb_endpoint_xfer_bulk(epd) || + if (!usb_endpoint_xfer_bulk(epd) && !usb_endpoint_xfer_int(epd)) return -ENODEV; switch (USB_ID_VENDOR(chip->usb_id)) { case 0x0499: /* Yamaha */ err = create_yamaha_midi_quirk(chip, iface, driver, alts); - if (err < 0 && err != -ENODEV) + if (err != -ENODEV) return err; break; case 0x0582: /* Roland */ err = create_roland_midi_quirk(chip, iface, driver, alts); - if (err < 0 && err != -ENODEV) + if (err != -ENODEV) return err; break; }
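The final quirks.c hunk is a pure boolean-logic fix: an endpoint is bulk or interrupt, never both, so the old "!bulk || !int" test was true for every endpoint that create_auto_midi_quirk() could see, and all of them were rejected with -ENODEV. The corrected "!bulk && !int" rejects only endpoints that are neither. A two-variable truth table makes it obvious:

#include <stdio.h>

int main(void)
{
	int bulk, intr;

	for (bulk = 0; bulk <= 1; bulk++)
		for (intr = 0; intr <= 1; intr++)
			printf("bulk=%d int=%d  old: %d  new: %d\n",
			       bulk, intr,
			       !bulk || !intr,	/* 1 for every real endpoint */
			       !bulk && !intr);	/* 1 only for "neither" */
	return 0;
}

The realistic rows are (1,0) and (0,1): the old expression evaluates to 1 (reject) for both, the new one to 0 (accept). The accompanying "err != -ENODEV" change also lets a successful vendor quirk (err == 0) return immediately instead of falling through.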