author		Arnaldo Carvalho de Melo <acme@redhat.com>	2018-07-31 15:55:45 +0300
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2018-07-31 15:55:45 +0300
commit		c2586cfbb905939b79b49a9121fb0a59a5668fd6 (patch)
tree		42bf9a711c59549294b2f2125978f5a7095e16c2
parent		25a00ac7dc92912f0b1e5e533bf077255c828b02 (diff)
parent		ce03b6d2b610b70bb527d14d82c2394adb235e5d (diff)
download	linux-c2586cfbb905939b79b49a9121fb0a59a5668fd6.tar.xz
Merge remote-tracking branch 'tip/perf/urgent' into perf/core
To pick up fixes.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
231 files changed, 2290 insertions(+), 1094 deletions(-)
diff --git a/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt b/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
index 252a05c5d976..c8c4b00ecb94 100644
--- a/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
@@ -16,7 +16,8 @@ A child node must exist to represent the core DWC3 IP block. The name of
 the node is not important. The content of the node is defined in dwc3.txt.
 
 Phy documentation is provided in the following places:
-Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt
+Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt - USB2.0 PHY
+Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt - Type-C PHY
 
 Example device nodes:
diff --git a/MAINTAINERS b/MAINTAINERS
index 42a884c1b0f7..f6a9b0842319 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7095,6 +7095,7 @@ F:	include/uapi/linux/input.h
 F:	include/uapi/linux/input-event-codes.h
 F:	include/linux/input/
 F:	Documentation/devicetree/bindings/input/
+F:	Documentation/devicetree/bindings/serio/
 F:	Documentation/input/
 
 INPUT MULTITOUCH (MT) PROTOCOL
diff --git a/Makefile b/Makefile
@@ -2,7 +2,7 @@ VERSION = 4
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 225d1c58d2de..d9c299133111 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -338,6 +338,7 @@ static struct vm_area_struct gate_vma = {
 static int __init gate_vma_init(void)
 {
+	vma_init(&gate_vma, NULL);
 	gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
 	return 0;
 }
diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c
index 39aef4876ed4..8db62cc54a6a 100644
--- a/arch/arm/mach-rpc/ecard.c
+++ b/arch/arm/mach-rpc/ecard.c
@@ -237,8 +237,8 @@ static void ecard_init_pgtables(struct mm_struct *mm)
 	memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
 
+	vma_init(&vma, mm);
 	vma.vm_flags = VM_EXEC;
-	vma.vm_mm = mm;
 
 	flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
 	flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE);
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index ffdaea7954bb..d87f2d646caa 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -37,7 +37,9 @@ static inline void __tlb_remove_table(void *_table)
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-	struct vm_area_struct vma = { .vm_mm = tlb->mm, };
+	struct vm_area_struct vma;
+
+	vma_init(&vma, tlb->mm);
 
 	/*
 	 * The ASID allocator will either invalidate the ASID or mark
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f24892a40d2c..c6d80743f4ed 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1351,9 +1351,9 @@ static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 
 static void update_cpu_capabilities(u16 scope_mask)
 {
-	__update_cpu_capabilities(arm64_features, scope_mask, "detected:");
 	__update_cpu_capabilities(arm64_errata, scope_mask,
 				  "enabling workaround for");
+	__update_cpu_capabilities(arm64_features, scope_mask, "detected:");
 }
 
 static int __enable_cpu_capability(void *arg)
@@ -1408,8 +1408,8 @@ __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 
 static void __init enable_cpu_capabilities(u16 scope_mask)
 {
-	__enable_cpu_capabilities(arm64_features, scope_mask);
 	__enable_cpu_capabilities(arm64_errata, scope_mask);
+	__enable_cpu_capabilities(arm64_features, scope_mask);
 }
 
 /*
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index ecc6818191df..1854e49aa18a 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -108,11 +108,13 @@ static pte_t get_clear_flush(struct mm_struct *mm,
 			     unsigned long pgsize,
 			     unsigned long ncontig)
 {
-	struct vm_area_struct vma = { .vm_mm = mm };
+	struct vm_area_struct vma;
 	pte_t orig_pte = huge_ptep_get(ptep);
 	bool valid = pte_valid(orig_pte);
 	unsigned long i, saddr = addr;
 
+	vma_init(&vma, mm);
+
 	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
 		pte_t pte = ptep_get_and_clear(mm, addr, ptep);
@@ -145,9 +147,10 @@ static void clear_flush(struct mm_struct *mm,
 			unsigned long pgsize,
 			unsigned long ncontig)
 {
-	struct vm_area_struct vma = { .vm_mm = mm };
+	struct vm_area_struct vma;
 	unsigned long i, saddr = addr;
 
+	vma_init(&vma, mm);
 	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
 		pte_clear(mm, addr, ptep);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 325cfb3b858a..9abf8a1e7b25 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -611,11 +611,13 @@ void __init mem_init(void)
 	BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
 #endif
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
 	/*
 	 * Make sure we chose the upper bound of sizeof(struct page)
-	 * correctly.
+	 * correctly when sizing the VMEMMAP array.
 	 */
 	BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
+#endif
 
 	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
 		extern int sysctl_overcommit_memory;
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 44f0ac0df308..db89e7306081 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -120,7 +120,7 @@ ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned
 		 */
 		struct vm_area_struct vma;
 
-		vma.vm_mm = tlb->mm;
+		vma_init(&vma, tlb->mm);
 		/* flush the address range from the tlb: */
 		flush_tlb_range(&vma, start, end);
 		/* now flush the virt. page-table area mapping the address range: */
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index bdb14a369137..e6c6dfd98de2 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -273,7 +273,7 @@ static struct vm_area_struct gate_vma;
 
 static int __init gate_vma_init(void)
 {
-	gate_vma.vm_mm = NULL;
+	vma_init(&gate_vma, NULL);
 	gate_vma.vm_start = FIXADDR_USER_START;
 	gate_vma.vm_end = FIXADDR_USER_END;
 	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
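A pattern worth noting across the hunks above: every open-coded, partially initialized on-stack struct vm_area_struct (a designated initializer or a bare vm_mm assignment) is funneled through vma_init(), so a single constructor owns the invariants. A minimal sketch of why that matters, using illustrative stand-in types rather than the kernel's real definitions:

#include <string.h>

/* Illustrative stand-ins, not the kernel's types. */
struct mock_mm;
struct mock_vm_ops { int unused; };

struct mock_vma {
	struct mock_mm *vm_mm;
	const struct mock_vm_ops *vm_ops;	/* must never stay NULL */
	unsigned long vm_flags;
};

static const struct mock_vm_ops dummy_vm_ops;

/* One construction point: zero the object, then establish invariants.
 * "struct mock_vma vma = { .vm_mm = mm };" also zeroes the rest, but
 * silently misses any invariant (like a non-NULL vm_ops) added later. */
static inline void mock_vma_init(struct mock_vma *vma, struct mock_mm *mm)
{
	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &dummy_vm_ops;
}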
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index 10a405d593df..c782b10ddf50 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
 
 void ath79_ddr_wb_flush(u32 reg)
 {
-	void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg;
+	void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);
 
 	/* Flush the DDR write buffer. */
 	__raw_writel(0x1, flush_reg);
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 8c9cbf13d32a..6054d49e608e 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -212,12 +212,6 @@ static int __init bcm47xx_cpu_fixes(void)
 		 */
 		if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
 			cpu_wait = NULL;
-
-		/*
-		 * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
-		 * Enable ExternalSync for sync instruction to take effect
-		 */
-		set_c0_config7(MIPS_CONF7_ES);
 		break;
 #endif
 	}
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 0bc270806ec5..ae461d91cd1f 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -681,8 +681,6 @@
 #define MIPS_CONF7_WII		(_ULCAST_(1) << 31)
 
 #define MIPS_CONF7_RPS		(_ULCAST_(1) << 2)
 
-/* ExternalSync */
-#define MIPS_CONF7_ES		(_ULCAST_(1) << 8)
 
 #define MIPS_CONF7_IAR		(_ULCAST_(1) << 10)
 #define MIPS_CONF7_AR		(_ULCAST_(1) << 16)
@@ -2767,7 +2765,6 @@ __BUILD_SET_C0(status)
 __BUILD_SET_C0(cause)
 __BUILD_SET_C0(config)
 __BUILD_SET_C0(config5)
-__BUILD_SET_C0(config7)
 __BUILD_SET_C0(intcontrol)
 __BUILD_SET_C0(intctl)
 __BUILD_SET_C0(srsmap)
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 9632436d74d7..c2e94cf5ecda 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -54,5 +54,5 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
 	phys_addr_t size = resource_size(rsrc);
 
 	*start = fixup_bigphys_addr(rsrc->start, size);
-	*end = rsrc->start + size;
+	*end = rsrc->start + size - 1;
 }
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 380cbf9a40d9..c0a9bcd28356 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -286,6 +286,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 	u64 imm64;
 	u8 *func;
 	u32 true_cond;
+	u32 tmp_idx;
 
 	/*
	 * addrs[] maps a BPF bytecode address into a real offset from
@@ -637,11 +638,7 @@ emit_clear:
 		case BPF_STX | BPF_XADD | BPF_W:
 			/* Get EA into TMP_REG_1 */
 			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
-			/* error if EA is not word-aligned */
-			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_JMP(exit_addr);
+			tmp_idx = ctx->idx * 4;
 			/* load value from memory into TMP_REG_2 */
 			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
 			/* add value from src_reg into this */
@@ -649,32 +646,16 @@ emit_clear:
 			/* store result back */
 			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
 			/* we're done if this succeeded */
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
-			/* otherwise, let's try once more */
-			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
-			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
-			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-			/* exit if the store was not successful */
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_BCC(COND_NE, exit_addr);
+			PPC_BCC_SHORT(COND_NE, tmp_idx);
 			break;
 		/* *(u64 *)(dst + off) += src */
 		case BPF_STX | BPF_XADD | BPF_DW:
 			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
-			/* error if EA is not doubleword-aligned */
-			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_JMP(exit_addr);
-			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
-			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
-			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
+			tmp_idx = ctx->idx * 4;
 			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
 			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
 			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_BCC(COND_NE, exit_addr);
+			PPC_BCC_SHORT(COND_NE, tmp_idx);
 			break;
 
 		/*
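The BPF JIT hunks above replace an unrolled try-twice sequence with a genuine load-reserved/store-conditional retry loop: tmp_idx records the loop head, and the conditional branch after stwcx./stdcx. jumps back until the store succeeds. The same shape in portable C11 atomics, as a sketch of the pattern rather than the JIT's emitted code:

#include <stdatomic.h>
#include <stdint.h>

/* Atomic "*dst += src" as an LL/SC-style retry loop: reload, compute,
 * retry until no other CPU raced with us between load and store. */
static void atomic_xadd64(_Atomic uint64_t *dst, uint64_t src)
{
	uint64_t old = atomic_load_explicit(dst, memory_order_relaxed);

	/* compare_exchange_weak may fail spuriously, so loop, just as
	 * the JIT branches back to tmp_idx when stdcx. loses the race. */
	while (!atomic_compare_exchange_weak_explicit(dst, &old, old + src,
						      memory_order_relaxed,
						      memory_order_relaxed))
		;	/* on failure, "old" is refreshed automatically */
}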
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index e44bb2b2873e..8a1863d9ed53 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -140,7 +140,7 @@ config S390
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUTEX_CMPXCHG if FUTEX
-	select HAVE_GCC_PLUGINS
+	select HAVE_GCC_PLUGINS if BROKEN
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZ4
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index fa42f895fdde..169c2feda14a 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -106,9 +106,13 @@ define cmd_check_data_rel
 	done
 endef
 
+# We need to run two commands under "if_changed", so merge them into a
+# single invocation.
+quiet_cmd_check-and-link-vmlinux = LD      $@
+      cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
+
 $(obj)/vmlinux: $(vmlinux-objs-y) FORCE
-	$(call if_changed,check_data_rel)
-	$(call if_changed,ld)
+	$(call if_changed,check-and-link-vmlinux)
 
 OBJCOPYFLAGS_vmlinux.bin :=  -R .comment -S
 $(obj)/vmlinux.bin: vmlinux FORCE
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 73a522d53b53..8ae7ffda8f98 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -981,7 +981,7 @@ ENTRY(\sym)
 
 	call	\do_sym
 
-	jmp	error_exit			/* %ebx: no swapgs flag */
+	jmp	error_exit
 	.endif
 END(\sym)
 .endm
@@ -1222,7 +1222,6 @@ END(paranoid_exit)
 
 /*
  * Save all registers in pt_regs, and switch GS if needed.
- * Return: EBX=0: came from user mode; EBX=1: otherwise
 */
 ENTRY(error_entry)
 	UNWIND_HINT_FUNC
@@ -1269,7 +1268,6 @@ ENTRY(error_entry)
 	 * for these here too.
	 */
 .Lerror_kernelspace:
-	incl	%ebx
 	leaq	native_irq_return_iret(%rip), %rcx
 	cmpq	%rcx, RIP+8(%rsp)
 	je	.Lerror_bad_iret
@@ -1303,28 +1301,20 @@ ENTRY(error_entry)
 	/*
 	 * Pretend that the exception came from user mode: set up pt_regs
-	 * as if we faulted immediately after IRET and clear EBX so that
-	 * error_exit knows that we will be returning to user mode.
+	 * as if we faulted immediately after IRET.
	 */
 	mov	%rsp, %rdi
 	call	fixup_bad_iret
 	mov	%rax, %rsp
-	decl	%ebx
 	jmp	.Lerror_entry_from_usermode_after_swapgs
 END(error_entry)
 
-
-/*
- * On entry, EBX is a "return to kernel mode" flag:
- *	1: already in kernel mode, don't need SWAPGS
- *	0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
- */
 ENTRY(error_exit)
 	UNWIND_HINT_REGS
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
-	testl	%ebx, %ebx
-	jnz	retint_kernel
+	testb	$3, CS(%rsp)
+	jz	retint_kernel
 	jmp	retint_user
 END(error_exit)
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index c9e1e0bef3c3..e17ab885b1e9 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -28,7 +28,7 @@
 #define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
 #define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)
 #define UNCORE_EXTRA_PCI_DEV		0xff
-#define UNCORE_EXTRA_PCI_DEV_MAX	3
+#define UNCORE_EXTRA_PCI_DEV_MAX	4
 
 #define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 87dc0263a2e1..51d7c117e3c7 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1029,6 +1029,7 @@ void snbep_uncore_cpu_init(void)
 enum {
 	SNBEP_PCI_QPI_PORT0_FILTER,
 	SNBEP_PCI_QPI_PORT1_FILTER,
+	BDX_PCI_QPI_PORT2_FILTER,
 	HSWEP_PCI_PCU_3,
 };
@@ -3286,15 +3287,18 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
 	},
 	{ /* QPI Port 0 filter */
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
-		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 0),
+		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+						   SNBEP_PCI_QPI_PORT0_FILTER),
 	},
 	{ /* QPI Port 1 filter */
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
-		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1),
+		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+						   SNBEP_PCI_QPI_PORT1_FILTER),
 	},
 	{ /* QPI Port 2 filter */
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
-		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
+		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+						   BDX_PCI_QPI_PORT2_FILTER),
 	},
 	{ /* PCU.3 (for Capability registers) */
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
index 9ef5ee03d2d7..159622ee0674 100644
--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -43,7 +43,7 @@ asm    (".pushsection .text;"
 	"push  %rdx;"
 	"mov   $0x1,%eax;"
 	"xor   %edx,%edx;"
-	"lock cmpxchg %dl,(%rdi);"
+	LOCK_PREFIX "cmpxchg %dl,(%rdi);"
 	"cmp   $0x1,%al;"
 	"jne   .slowpath;"
 	"pop   %rdx;"
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 2aabd4cb0e3f..adbda5847b14 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -573,6 +573,9 @@ static u32 skx_deadline_rev(void)
 	case 0x04: return 0x02000014;
 	}
 
+	if (boot_cpu_data.x86_stepping > 4)
+		return 0;
+
 	return ~0U;
 }
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d594690d8b95..6b8f11521c41 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -890,7 +890,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
 	if (cache->nobjs >= min)
 		return 0;
 	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-		page = (void *)__get_free_page(GFP_KERNEL);
+		page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
 		if (!page)
 			return -ENOMEM;
 		cache->objects[cache->nobjs++] = page;
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 77873ce700ae..5f2eb3231607 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -417,7 +417,7 @@ static void __init __map_region(efi_memory_desc_t *md, u64 va)
 	if (!(md->attribute & EFI_MEMORY_WB))
 		flags |= _PAGE_PCD;
 
-	if (sev_active())
+	if (sev_active() && md->type != EFI_MEMORY_MAPPED_IO)
 		flags |= _PAGE_ENC;
 
 	pfn = md->phys_addr >> PAGE_SHIFT;
diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
index 744afdc18cf3..56c44d865f7b 100644
--- a/arch/x86/um/mem_32.c
+++ b/arch/x86/um/mem_32.c
@@ -16,7 +16,7 @@ static int __init gate_vma_init(void)
 	if (!FIXADDR_USER_START)
 		return 0;
 
-	gate_vma.vm_mm = NULL;
+	vma_init(&gate_vma, NULL);
 	gate_vma.vm_start = FIXADDR_USER_START;
 	gate_vma.vm_end = FIXADDR_USER_END;
 	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
diff --git a/block/bio.c b/block/bio.c
index 67eff5eddc49..047c5dca6d90 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -903,25 +903,27 @@ int bio_add_page(struct bio *bio, struct page *page,
 EXPORT_SYMBOL(bio_add_page);
 
 /**
- * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
  * @bio: bio to add pages to
  * @iter: iov iterator describing the region to be mapped
  *
- * Pins as many pages from *iter and appends them to @bio's bvec array. The
+ * Pins pages from *iter and appends them to @bio's bvec array. The
  * pages will have to be released using put_page() when done.
+ * For multi-segment *iter, this function only adds pages from the
+ * the next non-empty segment of the iov iterator.
 */
-int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 {
-	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
+	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
 	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
 	struct page **pages = (struct page **)bv;
-	size_t offset, diff;
+	size_t offset;
 	ssize_t size;
 
 	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
 	if (unlikely(size <= 0))
 		return size ? size : -EFAULT;
-	nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
+	idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
 
 	/*
	 * Deep magic below:  We need to walk the pinned pages backwards
@@ -934,21 +936,46 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	bio->bi_iter.bi_size += size;
 	bio->bi_vcnt += nr_pages;
 
-	diff = (nr_pages * PAGE_SIZE - offset) - size;
-	while (nr_pages--) {
-		bv[nr_pages].bv_page = pages[nr_pages];
-		bv[nr_pages].bv_len = PAGE_SIZE;
-		bv[nr_pages].bv_offset = 0;
+	while (idx--) {
+		bv[idx].bv_page = pages[idx];
+		bv[idx].bv_len = PAGE_SIZE;
+		bv[idx].bv_offset = 0;
 	}
 
 	bv[0].bv_offset += offset;
 	bv[0].bv_len -= offset;
-	if (diff)
-		bv[bio->bi_vcnt - 1].bv_len -= diff;
+	bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size;
 
 	iov_iter_advance(iter, size);
 	return 0;
 }
+
+/**
+ * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * @bio: bio to add pages to
+ * @iter: iov iterator describing the region to be mapped
+ *
+ * Pins pages from *iter and appends them to @bio's bvec array. The
+ * pages will have to be released using put_page() when done.
+ * The function tries, but does not guarantee, to pin as many pages as
+ * fit into the bio, or are requested in *iter, whatever is smaller.
+ * If MM encounters an error pinning the requested pages, it stops.
+ * Error is returned only if 0 pages could be pinned.
+ */
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+{
+	unsigned short orig_vcnt = bio->bi_vcnt;
+
+	do {
+		int ret = __bio_iov_iter_get_pages(bio, iter);
+
+		if (unlikely(ret))
+			return bio->bi_vcnt > orig_vcnt ? 0 : ret;
+
+	} while (iov_iter_count(iter) && !bio_full(bio));
+
+	return 0;
+}
 EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
 
 static void submit_bio_wait_endio(struct bio *bio)
@@ -1866,6 +1893,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
 		bio_integrity_trim(split);
 
 	bio_advance(bio, split->bi_iter.bi_size);
+	bio->bi_iter.bi_done = 0;
 
 	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
 		bio_set_flag(split, BIO_TRACE_COMPLETION);
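The new bio_iov_iter_get_pages() wrapper above encodes a partial-progress convention: loop over the single-segment helper until the bio is full or the iterator is drained, and surface an error only when nothing at all was added. The shape in isolation, with hypothetical sink/source types standing in for bio and iov_iter:

struct sink { unsigned count, capacity; };
struct source;

int fill_one(struct sink *s, struct source *src);	/* hypothetical single-shot helper */
unsigned source_remaining(const struct source *src);	/* hypothetical */

/* An error after some progress is reported as success (0): the caller
 * submits what was gathered and comes back for the rest. Only a failure
 * with zero progress propagates the error code. */
int fill_until_full(struct sink *s, struct source *src)
{
	unsigned orig_count = s->count;

	do {
		int ret = fill_one(s, src);

		if (ret)
			return s->count > orig_count ? 0 : ret;
	} while (source_remaining(src) && s->count < s->capacity);

	return 0;
}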
+ */ + status = AE_OK; } } diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 1435d7281c66..6ebcd65d64b6 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -434,14 +434,6 @@ re_probe: goto probe_failed; } - /* - * Ensure devices are listed in devices_kset in correct order - * It's important to move Dev to the end of devices_kset before - * calling .probe, because it could be recursive and parent Dev - * should always go first - */ - devices_kset_move_last(dev); - if (dev->bus->probe) { ret = dev->bus->probe(dev); if (ret) diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 74a05561b620..3fb95c8d9fd8 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -112,12 +112,16 @@ struct nbd_device { struct task_struct *task_setup; }; +#define NBD_CMD_REQUEUED 1 + struct nbd_cmd { struct nbd_device *nbd; + struct mutex lock; int index; int cookie; - struct completion send_complete; blk_status_t status; + unsigned long flags; + u32 cmd_cookie; }; #if IS_ENABLED(CONFIG_DEBUG_FS) @@ -146,6 +150,35 @@ static inline struct device *nbd_to_dev(struct nbd_device *nbd) return disk_to_dev(nbd->disk); } +static void nbd_requeue_cmd(struct nbd_cmd *cmd) +{ + struct request *req = blk_mq_rq_from_pdu(cmd); + + if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags)) + blk_mq_requeue_request(req, true); +} + +#define NBD_COOKIE_BITS 32 + +static u64 nbd_cmd_handle(struct nbd_cmd *cmd) +{ + struct request *req = blk_mq_rq_from_pdu(cmd); + u32 tag = blk_mq_unique_tag(req); + u64 cookie = cmd->cmd_cookie; + + return (cookie << NBD_COOKIE_BITS) | tag; +} + +static u32 nbd_handle_to_tag(u64 handle) +{ + return (u32)handle; +} + +static u32 nbd_handle_to_cookie(u64 handle) +{ + return (u32)(handle >> NBD_COOKIE_BITS); +} + static const char *nbdcmd_to_ascii(int cmd) { switch (cmd) { @@ -319,6 +352,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, } config = nbd->config; + if (!mutex_trylock(&cmd->lock)) + return BLK_EH_RESET_TIMER; + if (config->num_connections > 1) { dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out, retrying (%d/%d alive)\n", @@ -343,7 +379,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, nbd_mark_nsock_dead(nbd, nsock, 1); mutex_unlock(&nsock->tx_lock); } - blk_mq_requeue_request(req, true); + mutex_unlock(&cmd->lock); + nbd_requeue_cmd(cmd); nbd_config_put(nbd); return BLK_EH_DONE; } @@ -353,6 +390,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, } set_bit(NBD_TIMEDOUT, &config->runtime_flags); cmd->status = BLK_STS_IOERR; + mutex_unlock(&cmd->lock); sock_shutdown(nbd); nbd_config_put(nbd); done: @@ -430,9 +468,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) struct iov_iter from; unsigned long size = blk_rq_bytes(req); struct bio *bio; + u64 handle; u32 type; u32 nbd_cmd_flags = 0; - u32 tag = blk_mq_unique_tag(req); int sent = nsock->sent, skip = 0; iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); @@ -474,6 +512,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) goto send_pages; } iov_iter_advance(&from, sent); + } else { + cmd->cmd_cookie++; } cmd->index = index; cmd->cookie = nsock->cookie; @@ -482,7 +522,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); request.len = htonl(size); } - memcpy(request.handle, &tag, sizeof(tag)); + handle = nbd_cmd_handle(cmd); + memcpy(request.handle, &handle, sizeof(handle)); 
dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", req, nbdcmd_to_ascii(type), @@ -500,6 +541,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) nsock->pending = req; nsock->sent = sent; } + set_bit(NBD_CMD_REQUEUED, &cmd->flags); return BLK_STS_RESOURCE; } dev_err_ratelimited(disk_to_dev(nbd->disk), @@ -541,6 +583,7 @@ send_pages: */ nsock->pending = req; nsock->sent = sent; + set_bit(NBD_CMD_REQUEUED, &cmd->flags); return BLK_STS_RESOURCE; } dev_err(disk_to_dev(nbd->disk), @@ -573,10 +616,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) struct nbd_reply reply; struct nbd_cmd *cmd; struct request *req = NULL; + u64 handle; u16 hwq; u32 tag; struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)}; struct iov_iter to; + int ret = 0; reply.magic = 0; iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); @@ -594,8 +639,8 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) return ERR_PTR(-EPROTO); } - memcpy(&tag, reply.handle, sizeof(u32)); - + memcpy(&handle, reply.handle, sizeof(handle)); + tag = nbd_handle_to_tag(handle); hwq = blk_mq_unique_tag_to_hwq(tag); if (hwq < nbd->tag_set.nr_hw_queues) req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], @@ -606,11 +651,25 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) return ERR_PTR(-ENOENT); } cmd = blk_mq_rq_to_pdu(req); + + mutex_lock(&cmd->lock); + if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) { + dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n", + req, cmd->cmd_cookie, nbd_handle_to_cookie(handle)); + ret = -ENOENT; + goto out; + } + if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) { + dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n", + req); + ret = -ENOENT; + goto out; + } if (ntohl(reply.error)) { dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", ntohl(reply.error)); cmd->status = BLK_STS_IOERR; - return cmd; + goto out; } dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req); @@ -635,18 +694,18 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) if (nbd_disconnected(config) || config->num_connections <= 1) { cmd->status = BLK_STS_IOERR; - return cmd; + goto out; } - return ERR_PTR(-EIO); + ret = -EIO; + goto out; } dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", req, bvec.bv_len); } - } else { - /* See the comment in nbd_queue_rq. */ - wait_for_completion(&cmd->send_complete); } - return cmd; +out: + mutex_unlock(&cmd->lock); + return ret ? ERR_PTR(ret) : cmd; } static void recv_work(struct work_struct *work) @@ -805,7 +864,7 @@ again: */ blk_mq_start_request(req); if (unlikely(nsock->pending && nsock->pending != req)) { - blk_mq_requeue_request(req, true); + nbd_requeue_cmd(cmd); ret = 0; goto out; } @@ -818,7 +877,7 @@ again: dev_err_ratelimited(disk_to_dev(nbd->disk), "Request send failed, requeueing\n"); nbd_mark_nsock_dead(nbd, nsock, 1); - blk_mq_requeue_request(req, true); + nbd_requeue_cmd(cmd); ret = 0; } out: @@ -842,7 +901,8 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, * that the server is misbehaving (or there was an error) before we're * done sending everything over the wire. */ - init_completion(&cmd->send_complete); + mutex_lock(&cmd->lock); + clear_bit(NBD_CMD_REQUEUED, &cmd->flags); /* We can be called directly from the user space process, which means we * could possibly have signals pending so our sendmsg will fail. 
In @@ -854,7 +914,7 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, ret = BLK_STS_IOERR; else if (!ret) ret = BLK_STS_OK; - complete(&cmd->send_complete); + mutex_unlock(&cmd->lock); return ret; } @@ -1460,6 +1520,8 @@ static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq, { struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq); cmd->nbd = set->driver_data; + cmd->flags = 0; + mutex_init(&cmd->lock); return 0; } diff --git a/drivers/char/mem.c b/drivers/char/mem.c index ffeb60d3434c..df66a9dd0aae 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -708,6 +708,7 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma) #endif if (vma->vm_flags & VM_SHARED) return shmem_zero_setup(vma); + vma_set_anonymous(vma); return 0; } diff --git a/drivers/char/random.c b/drivers/char/random.c index cd888d4ee605..bd449ad52442 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1895,14 +1895,22 @@ static int write_pool(struct entropy_store *r, const char __user *buffer, size_t count) { size_t bytes; - __u32 buf[16]; + __u32 t, buf[16]; const char __user *p = buffer; while (count > 0) { + int b, i = 0; + bytes = min(count, sizeof(buf)); if (copy_from_user(&buf, p, bytes)) return -EFAULT; + for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) { + if (!arch_get_random_int(&t)) + break; + buf[i] ^= t; + } + count -= bytes; p += bytes; diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c index 38b366b00c57..7b70a074095d 100644 --- a/drivers/clk/clk-aspeed.c +++ b/drivers/clk/clk-aspeed.c @@ -24,7 +24,7 @@ #define ASPEED_MPLL_PARAM 0x20 #define ASPEED_HPLL_PARAM 0x24 #define AST2500_HPLL_BYPASS_EN BIT(20) -#define AST2400_HPLL_STRAPPED BIT(18) +#define AST2400_HPLL_PROGRAMMED BIT(18) #define AST2400_HPLL_BYPASS_EN BIT(17) #define ASPEED_MISC_CTRL 0x2c #define UART_DIV13_EN BIT(12) @@ -91,8 +91,8 @@ static const struct aspeed_gate_data aspeed_gates[] = { [ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */ [ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */ [ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */ - [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */ - [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, 0 }, /* DAC */ + [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */ + [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */ [ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL }, [ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */ [ASPEED_CLK_GATE_LCLK] = { 8, 5, "lclk-gate", NULL, 0 }, /* LPC */ @@ -212,9 +212,22 @@ static int aspeed_clk_is_enabled(struct clk_hw *hw) { struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw); u32 clk = BIT(gate->clock_idx); + u32 rst = BIT(gate->reset_idx); u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk; u32 reg; + /* + * If the IP is in reset, treat the clock as not enabled, + * this happens with some clocks such as the USB one when + * coming from cold reset. Without this, aspeed_clk_enable() + * will fail to lift the reset. + */ + if (gate->reset_idx >= 0) { + regmap_read(gate->map, ASPEED_RESET_CTRL, ®); + if (reg & rst) + return 0; + } + regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, ®); return ((reg & clk) == enval) ? 
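The nbd rework above exists to catch stale replies: each send bumps a per-command generation cookie, and the cookie travels next to the blk-mq tag inside the 64-bit wire handle, so a reply that races with a timeout/requeue is detected instead of completing the wrong request. The packing itself, reduced to a standalone sketch with illustrative names:

#include <stdint.h>

#define COOKIE_BITS 32

/* tag identifies the request slot; cookie identifies which incarnation
 * of the command the server is answering. */
static uint64_t make_handle(uint32_t tag, uint32_t cookie)
{
	return ((uint64_t)cookie << COOKIE_BITS) | tag;
}

static uint32_t handle_to_tag(uint64_t handle)
{
	return (uint32_t)handle;
}

static uint32_t handle_to_cookie(uint64_t handle)
{
	return (uint32_t)(handle >> COOKIE_BITS);
}

/* On reply: look the command up by handle_to_tag(), then require
 * handle_to_cookie() to match the command's current cookie before
 * trusting the reply; a mismatch means a stale answer. */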
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index ffeb60d3434c..df66a9dd0aae 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -708,6 +708,7 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma)
 #endif
 	if (vma->vm_flags & VM_SHARED)
 		return shmem_zero_setup(vma);
+	vma_set_anonymous(vma);
 	return 0;
 }
diff --git a/drivers/char/random.c b/drivers/char/random.c
index cd888d4ee605..bd449ad52442 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1895,14 +1895,22 @@ static int
 write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
 {
 	size_t bytes;
-	__u32 buf[16];
+	__u32 t, buf[16];
 	const char __user *p = buffer;
 
 	while (count > 0) {
+		int b, i = 0;
+
 		bytes = min(count, sizeof(buf));
 		if (copy_from_user(&buf, p, bytes))
 			return -EFAULT;
 
+		for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
+			if (!arch_get_random_int(&t))
+				break;
+			buf[i] ^= t;
+		}
+
 		count -= bytes;
 		p += bytes;
diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
index 38b366b00c57..7b70a074095d 100644
--- a/drivers/clk/clk-aspeed.c
+++ b/drivers/clk/clk-aspeed.c
@@ -24,7 +24,7 @@
 #define ASPEED_MPLL_PARAM	0x20
 #define ASPEED_HPLL_PARAM	0x24
 #define  AST2500_HPLL_BYPASS_EN	BIT(20)
-#define  AST2400_HPLL_STRAPPED	BIT(18)
+#define  AST2400_HPLL_PROGRAMMED BIT(18)
 #define  AST2400_HPLL_BYPASS_EN	BIT(17)
 #define ASPEED_MISC_CTRL	0x2c
 #define  UART_DIV13_EN		BIT(12)
@@ -91,8 +91,8 @@ static const struct aspeed_gate_data aspeed_gates[] = {
 	[ASPEED_CLK_GATE_GCLK] =	{  1,  7, "gclk-gate",		NULL,	0 }, /* 2D engine */
 	[ASPEED_CLK_GATE_MCLK] =	{  2, -1, "mclk-gate",		"mpll",	CLK_IS_CRITICAL }, /* SDRAM */
 	[ASPEED_CLK_GATE_VCLK] =	{  3,  6, "vclk-gate",		NULL,	0 }, /* Video Capture */
-	[ASPEED_CLK_GATE_BCLK] =	{  4,  8, "bclk-gate",		"bclk",	0 }, /* PCIe/PCI */
-	[ASPEED_CLK_GATE_DCLK] =	{  5, -1, "dclk-gate",		NULL,	0 }, /* DAC */
+	[ASPEED_CLK_GATE_BCLK] =	{  4,  8, "bclk-gate",		"bclk",	CLK_IS_CRITICAL }, /* PCIe/PCI */
+	[ASPEED_CLK_GATE_DCLK] =	{  5, -1, "dclk-gate",		NULL,	CLK_IS_CRITICAL }, /* DAC */
 	[ASPEED_CLK_GATE_REFCLK] =	{  6, -1, "refclk-gate",	"clkin", CLK_IS_CRITICAL },
 	[ASPEED_CLK_GATE_USBPORT2CLK] =	{  7,  3, "usb-port2-gate",	NULL,	0 }, /* USB2.0 Host port 2 */
 	[ASPEED_CLK_GATE_LCLK] =	{  8,  5, "lclk-gate",		NULL,	0 }, /* LPC */
@@ -212,9 +212,22 @@ static int aspeed_clk_is_enabled(struct clk_hw *hw)
 {
 	struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
 	u32 clk = BIT(gate->clock_idx);
+	u32 rst = BIT(gate->reset_idx);
 	u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
 	u32 reg;
 
+	/*
+	 * If the IP is in reset, treat the clock as not enabled,
+	 * this happens with some clocks such as the USB one when
+	 * coming from cold reset. Without this, aspeed_clk_enable()
+	 * will fail to lift the reset.
+	 */
+	if (gate->reset_idx >= 0) {
+		regmap_read(gate->map, ASPEED_RESET_CTRL, &reg);
+		if (reg & rst)
+			return 0;
+	}
+
 	regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
 
 	return ((reg & clk) == enval) ? 1 : 0;
@@ -565,29 +578,45 @@ builtin_platform_driver(aspeed_clk_driver);
 static void __init aspeed_ast2400_cc(struct regmap *map)
 {
 	struct clk_hw *hw;
-	u32 val, freq, div;
+	u32 val, div, clkin, hpll;
+	const u16 hpll_rates[][4] = {
+		{384, 360, 336, 408},
+		{400, 375, 350, 425},
+	};
+	int rate;
 
 	/*
	 * CLKIN is the crystal oscillator, 24, 48 or 25MHz selected by
	 * strapping
	 */
 	regmap_read(map, ASPEED_STRAP, &val);
-	if (val & CLKIN_25MHZ_EN)
-		freq = 25000000;
-	else if (val & AST2400_CLK_SOURCE_SEL)
-		freq = 48000000;
-	else
-		freq = 24000000;
-	hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, freq);
-	pr_debug("clkin @%u MHz\n", freq / 1000000);
+	rate = (val >> 8) & 3;
+	if (val & CLKIN_25MHZ_EN) {
+		clkin = 25000000;
+		hpll = hpll_rates[1][rate];
+	} else if (val & AST2400_CLK_SOURCE_SEL) {
+		clkin = 48000000;
+		hpll = hpll_rates[0][rate];
+	} else {
+		clkin = 24000000;
+		hpll = hpll_rates[0][rate];
+	}
+	hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, clkin);
+	pr_debug("clkin @%u MHz\n", clkin / 1000000);
 
 	/*
	 * High-speed PLL clock derived from the crystal. This the CPU clock,
-	 * and we assume that it is enabled
+	 * and we assume that it is enabled. It can be configured through the
+	 * HPLL_PARAM register, or set to a specified frequency by strapping.
	 */
 	regmap_read(map, ASPEED_HPLL_PARAM, &val);
-	WARN(val & AST2400_HPLL_STRAPPED, "hpll is strapped not configured");
-	aspeed_clk_data->hws[ASPEED_CLK_HPLL] = aspeed_ast2400_calc_pll("hpll", val);
+	if (val & AST2400_HPLL_PROGRAMMED)
+		hw = aspeed_ast2400_calc_pll("hpll", val);
+	else
+		hw = clk_hw_register_fixed_rate(NULL, "hpll", "clkin", 0,
+				hpll * 1000000);
+
+	aspeed_clk_data->hws[ASPEED_CLK_HPLL] = hw;
 
 	/*
	 * Strap bits 11:10 define the CPU/AHB clock frequency ratio (aka HCLK)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 9760b526ca31..e2ed078abd90 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -24,7 +24,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/sched.h>
 #include <linux/clkdev.h>
-#include <linux/stringify.h>
 
 #include "clk.h"
 
@@ -2559,7 +2558,7 @@ static const struct {
 	unsigned long flag;
 	const char *name;
 } clk_flags[] = {
-#define ENTRY(f) { f, __stringify(f) }
+#define ENTRY(f) { f, #f }
 	ENTRY(CLK_SET_RATE_GATE),
 	ENTRY(CLK_SET_PARENT_GATE),
 	ENTRY(CLK_SET_RATE_PARENT),
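The clk.c hunk swaps __stringify() for the preprocessor's # operator applied directly in the macro; either way, one ENTRY() keeps a flag and its printable name in lockstep. A self-contained illustration of the flag-name-table idiom:

#include <stdio.h>

#define FLAG_GATE	(1UL << 0)
#define FLAG_PARENT	(1UL << 1)

/* "#f" stringizes the argument as written, which is exactly what a
 * debugfs-style dump wants: the symbolic name, not the numeric value. */
#define ENTRY(f) { f, #f }

static const struct {
	unsigned long flag;
	const char *name;
} flag_names[] = {
	ENTRY(FLAG_GATE),
	ENTRY(FLAG_PARENT),
};

int main(void)
{
	unsigned long flags = FLAG_GATE | FLAG_PARENT;
	size_t i;

	for (i = 0; i < sizeof(flag_names) / sizeof(flag_names[0]); i++)
		if (flags & flag_names[i].flag)
			printf("%s\n", flag_names[i].name);
	return 0;
}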
diff --git a/drivers/clk/meson/clk-audio-divider.c b/drivers/clk/meson/clk-audio-divider.c
index 58f546e04807..e4cf96ba704e 100644
--- a/drivers/clk/meson/clk-audio-divider.c
+++ b/drivers/clk/meson/clk-audio-divider.c
@@ -51,7 +51,7 @@ static unsigned long audio_divider_recalc_rate(struct clk_hw *hw,
 	struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
 	unsigned long divider;
 
-	divider = meson_parm_read(clk->map, &adiv->div);
+	divider = meson_parm_read(clk->map, &adiv->div) + 1;
 
 	return DIV_ROUND_UP_ULL((u64)parent_rate, divider);
 }
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 240658404367..177fffb9ebef 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -498,6 +498,7 @@ static struct clk_regmap gxbb_fclk_div2 = {
 		.ops = &clk_regmap_gate_ops,
 		.parent_names = (const char *[]){ "fclk_div2_div" },
 		.num_parents = 1,
+		.flags = CLK_IS_CRITICAL,
 	},
 };
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 6860bd5a37c5..44e4e27eddad 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -35,6 +35,7 @@
 #define CLK_SEL		0x10
 #define CLK_DIS		0x14
 
+#define  ARMADA_37XX_DVFS_LOAD_1 1
 #define LOAD_LEVEL_NR	4
 
 #define ARMADA_37XX_NB_L0L1	0x18
@@ -507,6 +508,40 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
 	return -EINVAL;
 }
 
+/*
+ * Switching the CPU from the L2 or L3 frequencies (300 and 200 Mhz
+ * respectively) to L0 frequency (1.2 Ghz) requires a significant
+ * amount of time to let VDD stabilize to the appropriate
+ * voltage. This amount of time is large enough that it cannot be
+ * covered by the hardware countdown register. Due to this, the CPU
+ * might start operating at L0 before the voltage is stabilized,
+ * leading to CPU stalls.
+ *
+ * To work around this problem, we prevent switching directly from the
+ * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
+ * frequency in-between. The sequence therefore becomes:
+ * 1. First switch from L2/L3(200/300MHz) to L1(600MHZ)
+ * 2. Sleep 20ms for stabling VDD voltage
+ * 3. Then switch from L1(600MHZ) to L0(1200Mhz).
+ */
+static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
+{
+	unsigned int cur_level;
+
+	if (rate != 1200 * 1000 * 1000)
+		return;
+
+	regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
+	cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
+	if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
+		return;
+
+	regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
+			   ARMADA_37XX_NB_CPU_LOAD_MASK,
+			   ARMADA_37XX_DVFS_LOAD_1);
+	msleep(20);
+}
+
 static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
 			       unsigned long parent_rate)
 {
@@ -537,6 +572,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
	 */
 	reg = ARMADA_37XX_NB_CPU_LOAD;
 	mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
+
+	clk_pm_cpu_set_rate_wa(rate, base);
+
 	regmap_update_bits(base, reg, mask, load_level);
 
 	return rate;
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 9f35b3fe1d97..ff8d66fd94e6 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -2781,6 +2781,7 @@ static struct clk_branch gcc_ufs_rx_cfg_clk = {
 
 static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
 	.halt_reg = 0x75018,
+	.halt_check = BRANCH_HALT_SKIP,
 	.clkr = {
 		.enable_reg = 0x75018,
 		.enable_mask = BIT(0),
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 1a25ee4f3658..4b20d1b67a1b 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -2910,6 +2910,7 @@ static struct gdsc mmagic_bimc_gdsc = {
 		.name = "mmagic_bimc",
 	},
 	.pwrsts = PWRSTS_OFF_ON,
+	.flags = ALWAYS_ON,
 };
 
 static struct gdsc mmagic_video_gdsc = {
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
index 29389accf3e9..efc9a7ae4857 100644
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
@@ -183,6 +183,7 @@ static struct platform_driver qcom_cpufreq_kryo_driver = {
 static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = {
 	{ .compatible = "qcom,apq8096", },
 	{ .compatible = "qcom,msm8996", },
+	{}
 };
 
 /*
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index d3cf9502e7e7..58faeb1cef63 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -181,7 +181,11 @@ static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
 	fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node);
 	fwspec.param_count = 2;
 	fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET;
-	fwspec.param[1] = IRQ_TYPE_NONE;
+	/*
+	 * IRQ_TYPE_NONE is rejected by the parent irq domain. Set LEVEL_HIGH
+	 * temporarily. Anyway, ->irq_set_type() will override it later.
+	 */
+	fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
 
 	return irq_create_fwspec_mapping(&fwspec);
 }
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 28d968088131..53a14ee8ad6d 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -64,7 +64,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
 	 * Note that active low is the default.
	 */
 	if (IS_ENABLED(CONFIG_REGULATOR) &&
-	    (of_device_is_compatible(np, "reg-fixed-voltage") ||
+	    (of_device_is_compatible(np, "regulator-fixed") ||
+	     of_device_is_compatible(np, "reg-fixed-voltage") ||
 	     of_device_is_compatible(np, "regulator-gpio"))) {
 		/*
		 * The regulator GPIO handles are specified such that the
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 52f3b91d14fd..71e1aa54f774 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -652,6 +652,7 @@ enum intel_sbi_destination {
 #define QUIRK_BACKLIGHT_PRESENT (1<<3)
 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
 #define QUIRK_INCREASE_T12_DELAY (1<<6)
+#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
 
 struct intel_fbdev;
 struct intel_fbc_work;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f4a8598a2d39..fed26d6e4e27 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1782,15 +1782,24 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
 	I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
 }
 
-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
-				       enum transcoder cpu_transcoder)
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 	i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
 	uint32_t val = I915_READ(reg);
 
 	val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
 	val |= TRANS_DDI_PORT_NONE;
 	I915_WRITE(reg, val);
+
+	if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
+	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+		DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
+		/* Quirk time at 100ms for reliable operation */
+		msleep(100);
+	}
 }
 
 int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2cc6faa1daa8..dec0d60921bf 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5809,7 +5809,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
 	intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
 
 	if (!transcoder_is_dsi(cpu_transcoder))
-		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+		intel_ddi_disable_transcoder_func(old_crtc_state);
 
 	if (INTEL_GEN(dev_priv) >= 9)
 		skylake_scaler_disable(intel_crtc);
@@ -14646,6 +14646,18 @@ static void quirk_increase_t12_delay(struct drm_device *dev)
 	DRM_INFO("Applying T12 delay quirk\n");
 }
 
+/*
+ * GeminiLake NUC HDMI outputs require additional off time
+ * this allows the onboard retimer to correctly sync to signal
+ */
+static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+	DRM_INFO("Applying Increase DDI Disabled quirk\n");
+}
+
 struct intel_quirk {
 	int device;
 	int subsystem_vendor;
@@ -14732,6 +14744,13 @@ static struct intel_quirk intel_quirks[] = {
 
 	/* Toshiba Satellite P50-C-18C */
 	{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
+
+	/* GeminiLake NUC */
+	{ 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+	{ 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+	/* ASRock ITX*/
+	{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+	{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0361130500a6..b8eefbffc77d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1388,8 +1388,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
 void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
 bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
 void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
-				       enum transcoder cpu_transcoder);
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
 void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
 void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
 struct intel_encoder *
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 56dd7a9a8e25..dd5312b02a8d 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -612,6 +612,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
 		return PTR_ERR(imx_ldb->regmap);
 	}
 
+	/* disable LDB by resetting the control register to POR default */
+	regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
+
 	imx_ldb->dev = dev;
 
 	if (of_id)
@@ -652,14 +655,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
 		if (ret || i < 0 || i > 1)
 			return -EINVAL;
 
+		if (!of_device_is_available(child))
+			continue;
+
 		if (dual && i > 0) {
 			dev_warn(dev, "dual-channel mode, ignoring second output\n");
 			continue;
 		}
 
-		if (!of_device_is_available(child))
-			continue;
-
 		channel = &imx_ldb->channel[i];
 		channel->ldb = imx_ldb;
 		channel->chno = i;
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index caa05b0702e1..5450a2db1219 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -339,7 +339,8 @@ static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
 		break;
 	case V4L2_MBUS_BT656:
 		csicfg->ext_vsync = 0;
-		if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field))
+		if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field) ||
+		    mbus_fmt->field == V4L2_FIELD_ALTERNATE)
 			csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
 		else
 			csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 75d6ab177055..7379043711df 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -237,12 +237,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
 	/*
	 * It's not always possible to have 1 to 2 ratio when d=7, so fall back
	 * to minimal possible clkh in this case.
+	 *
+	 * Note:
+	 * CLKH is not allowed to be 0, in this case I2C clock is not generated
+	 * at all
	 */
-	if (clk >= clkl + d) {
+	if (clk > clkl + d) {
 		clkh = clk - clkl - d;
 		clkl -= d;
 	} else {
-		clkh = 0;
+		clkh = 1;
 		clkl = clk - (d << 1);
 	}
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 0207e194f84b..498c5e891649 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -368,6 +368,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
 		goto err_desc;
 	}
 
+	reinit_completion(&dma->cmd_complete);
 	txdesc->callback = i2c_imx_dma_callback;
 	txdesc->callback_param = i2c_imx;
 	if (dma_submit_error(dmaengine_submit(txdesc))) {
@@ -622,7 +623,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
	 * The first byte must be transmitted by the CPU.
	 */
 	imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
-	reinit_completion(&i2c_imx->dma->cmd_complete);
 	time_left = wait_for_completion_timeout(
 				&i2c_imx->dma->cmd_complete,
 				msecs_to_jiffies(DMA_TIMEOUT));
@@ -681,7 +681,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
 	if (result)
 		return result;
 
-	reinit_completion(&i2c_imx->dma->cmd_complete);
 	time_left = wait_for_completion_timeout(
 				&i2c_imx->dma->cmd_complete,
 				msecs_to_jiffies(DMA_TIMEOUT));
@@ -1010,7 +1009,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
 	i2c_imx->pinctrl_pins_gpio = pinctrl_lookup_state(i2c_imx->pinctrl,
 			"gpio");
 	rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
-	rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH);
+	rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
 
 	if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER ||
 	    PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER) {
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 5e310efd9446..3c1c817f6968 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -32,6 +32,7 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/reset.h>
 #include <linux/slab.h>
 
 /* register offsets */
@@ -111,8 +112,9 @@
 #define ID_ARBLOST	(1 << 3)
 #define ID_NACK		(1 << 4)
 /* persistent flags */
+#define ID_P_NO_RXDMA	(1 << 30) /* HW forbids RXDMA sometimes */
 #define ID_P_PM_BLOCKED	(1 << 31)
-#define ID_P_MASK	ID_P_PM_BLOCKED
+#define ID_P_MASK	(ID_P_PM_BLOCKED | ID_P_NO_RXDMA)
 
 enum rcar_i2c_type {
 	I2C_RCAR_GEN1,
@@ -141,6 +143,8 @@ struct rcar_i2c_priv {
 	struct dma_chan *dma_rx;
 	struct scatterlist sg;
 	enum dma_data_direction dma_direction;
+
+	struct reset_control *rstc;
 };
 
 #define rcar_i2c_priv_to_dev(p)		((p)->adap.dev.parent)
@@ -370,6 +374,11 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
 	dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
 			 sg_dma_len(&priv->sg), priv->dma_direction);
 
+	/* Gen3 can only do one RXDMA per transfer and we just completed it */
+	if (priv->devtype == I2C_RCAR_GEN3 &&
+	    priv->dma_direction == DMA_FROM_DEVICE)
+		priv->flags |= ID_P_NO_RXDMA;
+
 	priv->dma_direction = DMA_NONE;
 }
@@ -407,8 +416,9 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
 	unsigned char *buf;
 	int len;
 
-	/* Do not use DMA if it's not available or for messages < 8 bytes */
-	if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE))
+	/* Do various checks to see if DMA is feasible at all */
+	if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE) ||
+	    (read && priv->flags & ID_P_NO_RXDMA))
 		return;
 
 	if (read) {
@@ -739,6 +749,25 @@ static void rcar_i2c_release_dma(struct rcar_i2c_priv *priv)
 	}
 }
 
+/* I2C is a special case, we need to poll the status of a reset */
+static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
+{
+	int i, ret;
+
+	ret = reset_control_reset(priv->rstc);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < LOOP_TIMEOUT; i++) {
+		ret = reset_control_status(priv->rstc);
+		if (ret == 0)
+			return 0;
+		udelay(1);
+	}
+
+	return -ETIMEDOUT;
+}
+
 static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 				struct i2c_msg *msgs,
 				int num)
@@ -750,6 +779,16 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
 	pm_runtime_get_sync(dev);
 
+	/* Gen3 needs a reset before allowing RXDMA once */
+	if (priv->devtype == I2C_RCAR_GEN3) {
+		priv->flags |= ID_P_NO_RXDMA;
+		if (!IS_ERR(priv->rstc)) {
+			ret = rcar_i2c_do_reset(priv);
+			if (ret == 0)
+				priv->flags &= ~ID_P_NO_RXDMA;
+		}
+	}
+
 	rcar_i2c_init(priv);
 
 	ret = rcar_i2c_bus_barrier(priv);
@@ -920,6 +959,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
 	if (ret < 0)
 		goto out_pm_put;
 
+	if (priv->devtype == I2C_RCAR_GEN3) {
+		priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+		if (!IS_ERR(priv->rstc)) {
+			ret = reset_control_status(priv->rstc);
+			if (ret < 0)
+				priv->rstc = ERR_PTR(-ENOTSUPP);
+		}
+	}
+
 	/* Stay always active when multi-master to keep arbitration working */
 	if (of_property_read_bool(dev->of_node, "multi-master"))
 		priv->flags |= ID_P_PM_BLOCKED;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 301285c54603..15c95aaa484c 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -624,7 +624,7 @@ static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr)
 static void i2c_adapter_lock_bus(struct i2c_adapter *adapter,
 				 unsigned int flags)
 {
-	rt_mutex_lock(&adapter->bus_lock);
+	rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
 }
 
 /**
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 300ab4b672e4..29646aa6132e 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -144,7 +144,7 @@ static void i2c_mux_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
 	struct i2c_mux_priv *priv = adapter->algo_data;
 	struct i2c_adapter *parent = priv->muxc->parent;
 
-	rt_mutex_lock(&parent->mux_lock);
+	rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
 	if (!(flags & I2C_LOCK_ROOT_ADAPTER))
 		return;
 	i2c_lock_bus(parent, flags);
@@ -181,7 +181,7 @@ static void i2c_parent_lock_bus(struct i2c_adapter *adapter,
 	struct i2c_mux_priv *priv = adapter->algo_data;
 	struct i2c_adapter *parent = priv->muxc->parent;
 
-	rt_mutex_lock(&parent->mux_lock);
+	rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
 	i2c_lock_bus(parent, flags);
 }
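Taking the same lock class at several levels of an i2c mux tree would normally trip lockdep's recursive-lock detection; the change above keys each acquisition by the adapter's depth in the tree, which lockdep treats as a distinct subclass. A rough sketch of the idea with a plain mutex (hypothetical adapter type, not the i2c core's actual locking rules; depth 0 is the root):

#include <linux/mutex.h>

struct demo_adapter {
	struct mutex bus_lock;
	struct demo_adapter *parent;
};

/* Levels below the root: the root adapter is 0, its child 1, and so on. */
static unsigned int demo_adapter_depth(struct demo_adapter *adap)
{
	unsigned int depth = 0;

	while ((adap = adap->parent))
		depth++;
	return depth;
}

/* Walking up the tree takes child locks before parent locks; passing the
 * depth as the lockdep subclass tells lockdep this nesting is intended. */
static void demo_lock_chain(struct demo_adapter *adap)
{
	for (; adap; adap = adap->parent)
		mutex_lock_nested(&adap->bus_lock, demo_adapter_depth(adap));
}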
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 1f9cd7d8b7ad..f5ae24865355 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1346,6 +1346,8 @@ static const struct acpi_device_id elan_acpi_id[] = {
 	{ "ELAN0611", 0 },
 	{ "ELAN0612", 0 },
 	{ "ELAN0618", 0 },
+	{ "ELAN061D", 0 },
+	{ "ELAN0622", 0 },
 	{ "ELAN1000", 0 },
 	{ }
 };
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index b353d494ad40..136f6e7bf797 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -527,6 +527,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
 		},
 	},
+	{
+		/* Lenovo LaVie Z */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
+		},
+	},
 	{ }
 };
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 98663c50ded0..4d5d01cb8141 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -743,15 +743,20 @@ const struct bond_option *bond_opt_get(unsigned int option)
 static int bond_option_mode_set(struct bonding *bond,
 				const struct bond_opt_value *newval)
 {
-	if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
-		netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
-			   newval->string);
-		/* disable arp monitoring */
-		bond->params.arp_interval = 0;
-		/* set miimon to default value */
-		bond->params.miimon = BOND_DEFAULT_MIIMON;
-		netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
-			   bond->params.miimon);
+	if (!bond_mode_uses_arp(newval->value)) {
+		if (bond->params.arp_interval) {
+			netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
+				   newval->string);
+			/* disable arp monitoring */
+			bond->params.arp_interval = 0;
+		}
+
+		if (!bond->params.miimon) {
+			/* set miimon to default value */
+			bond->params.miimon = BOND_DEFAULT_MIIMON;
+			netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
+				   bond->params.miimon);
+		}
 	}
 
 	if (newval->value == BOND_MODE_ALB)
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index b397a33f3d32..9b449400376b 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -634,10 +634,12 @@ static int m_can_clk_start(struct m_can_priv *priv)
 	int err;
 
 	err = pm_runtime_get_sync(priv->device);
-	if (err)
+	if (err < 0) {
 		pm_runtime_put_noidle(priv->device);
+		return err;
+	}
 
-	return err;
+	return 0;
 }
 
 static void m_can_clk_stop(struct m_can_priv *priv)
@@ -1109,7 +1111,8 @@ static void m_can_chip_config(struct net_device *dev)
 
 	} else {
 	/* Version 3.1.x or 3.2.x */
-		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE);
+		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
+			  CCCR_NISO);
 
 		/* Only 3.2.x has NISO Bit implemented */
 		if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@@ -1642,8 +1645,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
 	priv->can.clock.freq = clk_get_rate(cclk);
 	priv->mram_base = mram_addr;
 
-	m_can_of_parse_mram(priv, mram_config_vals);
-
 	platform_set_drvdata(pdev, dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
@@ -1666,6 +1667,8 @@ static int m_can_plat_probe(struct platform_device *pdev)
 		goto clk_disable;
 	}
 
+	m_can_of_parse_mram(priv, mram_config_vals);
+
 	devm_can_led_init(dev);
 
 	of_can_transceiver(dev);
@@ -1687,8 +1690,6 @@ failed_ret:
 	return ret;
 }
 
-/* TODO: runtime PM with power down or sleep mode */
-
 static __maybe_unused int m_can_suspend(struct device *dev)
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
@@ -1715,8 +1716,6 @@ static __maybe_unused int m_can_resume(struct device *dev)
 
 	pinctrl_pm_select_default_state(dev);
 
-	m_can_init_ram(priv);
-
 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
 	if (netif_running(ndev)) {
@@ -1726,6 +1725,7 @@ static __maybe_unused int m_can_resume(struct device *dev)
 		if (ret)
 			return ret;
 
+		m_can_init_ram(priv);
 		m_can_start(ndev);
 		netif_device_attach(ndev);
 		netif_start_queue(ndev);
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index c7427bdd3a4b..2949a381a94d 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
 		return 0;
 	}
 	cdm = of_iomap(np_cdm, 0);
+	if (!cdm) {
+		of_node_put(np_cdm);
+		dev_err(&ofdev->dev, "can't map clock node!\n");
+		return 0;
+	}
 
 	if (in_8(&cdm->ipb_clk_sel) & 0x1)
 		freq *= 2;
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index b9e28578bc7b..455a3797a200 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -58,6 +58,10 @@ MODULE_LICENSE("GPL v2");
 #define PCIEFD_REG_SYS_VER1		0x0040	/* version reg #1 */
 #define PCIEFD_REG_SYS_VER2		0x0044	/* version reg #2 */
 
+#define PCIEFD_FW_VERSION(x, y, z)	(((u32)(x) << 24) | \
+					 ((u32)(y) << 16) | \
+					 ((u32)(z) << 8))
+
 /* System Control Registers Bits */
 #define PCIEFD_SYS_CTL_TS_RST		0x00000001	/* timestamp clock */
 #define PCIEFD_SYS_CTL_CLK_EN		0x00000002	/* system clock */
@@ -782,6 +786,21 @@ static int peak_pciefd_probe(struct pci_dev *pdev,
 		 "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count,
 		 hw_ver_major, hw_ver_minor, hw_ver_sub);
 
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	/* FW < v3.3.0 DMA logic doesn't handle correctly the mix of 32-bit and
+	 * 64-bit logical addresses: this workaround forces usage of 32-bit
+	 * DMA addresses only when such a fw is detected.
+	 */
+	if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) <
+	    PCIEFD_FW_VERSION(3, 3, 0)) {
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (err)
+			dev_warn(&pdev->dev,
+				 "warning: can't set DMA mask %llxh (err %d)\n",
+				 DMA_BIT_MASK(32), err);
+	}
+#endif
+
 	/* stop system clock */
 	pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
 			    PCIEFD_REG_SYS_CTL_CLR);
+ * @tx_lock: Lock for synchronizing TX interrupt handling * @tx_head: Tx CAN packets ready to send on the queue * @tx_tail: Tx CAN packets successfully sended on the queue * @tx_max: Maximum number packets the driver can send @@ -132,6 +136,7 @@ enum xcan_reg { */ struct xcan_priv { struct can_priv can; + spinlock_t tx_lock; unsigned int tx_head; unsigned int tx_tail; unsigned int tx_max; @@ -159,6 +164,11 @@ static const struct can_bittiming_const xcan_bittiming_const = { .brp_inc = 1, }; +#define XCAN_CAP_WATERMARK 0x0001 +struct xcan_devtype_data { + unsigned int caps; +}; + /** * xcan_write_reg_le - Write a value to the device register little endian * @priv: Driver private data structure @@ -238,6 +248,10 @@ static int set_reset_mode(struct net_device *ndev) usleep_range(500, 10000); } + /* reset clears FIFOs */ + priv->tx_head = 0; + priv->tx_tail = 0; + return 0; } @@ -392,6 +406,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct net_device_stats *stats = &ndev->stats; struct can_frame *cf = (struct can_frame *)skb->data; u32 id, dlc, data[2] = {0, 0}; + unsigned long flags; if (can_dropped_invalid_skb(ndev, skb)) return NETDEV_TX_OK; @@ -439,6 +454,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max); + + spin_lock_irqsave(&priv->tx_lock, flags); + priv->tx_head++; /* Write the Frame to Xilinx CAN TX FIFO */ @@ -454,10 +472,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) stats->tx_bytes += cf->can_dlc; } + /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */ + if (priv->tx_max > 1) + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK); + /* Check if the TX buffer is full */ if ((priv->tx_head - priv->tx_tail) == priv->tx_max) netif_stop_queue(ndev); + spin_unlock_irqrestore(&priv->tx_lock, flags); + return NETDEV_TX_OK; } @@ -530,6 +554,123 @@ static int xcan_rx(struct net_device *ndev) } /** + * xcan_current_error_state - Get current error state from HW + * @ndev: Pointer to net_device structure + * + * Checks the current CAN error state from the HW. Note that this + * only checks for ERROR_PASSIVE and ERROR_WARNING. + * + * Return: + * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE + * otherwise. + */ +static enum can_state xcan_current_error_state(struct net_device *ndev) +{ + struct xcan_priv *priv = netdev_priv(ndev); + u32 status = priv->read_reg(priv, XCAN_SR_OFFSET); + + if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) + return CAN_STATE_ERROR_PASSIVE; + else if (status & XCAN_SR_ERRWRN_MASK) + return CAN_STATE_ERROR_WARNING; + else + return CAN_STATE_ERROR_ACTIVE; +} + +/** + * xcan_set_error_state - Set new CAN error state + * @ndev: Pointer to net_device structure + * @new_state: The new CAN state to be set + * @cf: Error frame to be populated or NULL + * + * Set new CAN error state for the device, updating statistics and + * populating the error frame if given. 
+ */ +static void xcan_set_error_state(struct net_device *ndev, + enum can_state new_state, + struct can_frame *cf) +{ + struct xcan_priv *priv = netdev_priv(ndev); + u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET); + u32 txerr = ecr & XCAN_ECR_TEC_MASK; + u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT; + + priv->can.state = new_state; + + if (cf) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[6] = txerr; + cf->data[7] = rxerr; + } + + switch (new_state) { + case CAN_STATE_ERROR_PASSIVE: + priv->can.can_stats.error_passive++; + if (cf) + cf->data[1] = (rxerr > 127) ? + CAN_ERR_CRTL_RX_PASSIVE : + CAN_ERR_CRTL_TX_PASSIVE; + break; + case CAN_STATE_ERROR_WARNING: + priv->can.can_stats.error_warning++; + if (cf) + cf->data[1] |= (txerr > rxerr) ? + CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + break; + case CAN_STATE_ERROR_ACTIVE: + if (cf) + cf->data[1] |= CAN_ERR_CRTL_ACTIVE; + break; + default: + /* non-ERROR states are handled elsewhere */ + WARN_ON(1); + break; + } +} + +/** + * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX + * @ndev: Pointer to net_device structure + * + * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if + * the performed RX/TX has caused it to drop to a lesser state and set + * the interface state accordingly. + */ +static void xcan_update_error_state_after_rxtx(struct net_device *ndev) +{ + struct xcan_priv *priv = netdev_priv(ndev); + enum can_state old_state = priv->can.state; + enum can_state new_state; + + /* changing error state due to successful frame RX/TX can only + * occur from these states + */ + if (old_state != CAN_STATE_ERROR_WARNING && + old_state != CAN_STATE_ERROR_PASSIVE) + return; + + new_state = xcan_current_error_state(ndev); + + if (new_state != old_state) { + struct sk_buff *skb; + struct can_frame *cf; + + skb = alloc_can_err_skb(ndev, &cf); + + xcan_set_error_state(ndev, new_state, skb ? cf : NULL); + + if (skb) { + struct net_device_stats *stats = &ndev->stats; + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + } + } +} + +/** * xcan_err_interrupt - error frame Isr * @ndev: net_device pointer * @isr: interrupt status register value @@ -544,16 +685,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) struct net_device_stats *stats = &ndev->stats; struct can_frame *cf; struct sk_buff *skb; - u32 err_status, status, txerr = 0, rxerr = 0; + u32 err_status; skb = alloc_can_err_skb(ndev, &cf); err_status = priv->read_reg(priv, XCAN_ESR_OFFSET); priv->write_reg(priv, XCAN_ESR_OFFSET, err_status); - txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK; - rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) & - XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT); - status = priv->read_reg(priv, XCAN_SR_OFFSET); if (isr & XCAN_IXR_BSOFF_MASK) { priv->can.state = CAN_STATE_BUS_OFF; @@ -563,28 +700,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) can_bus_off(ndev); if (skb) cf->can_id |= CAN_ERR_BUSOFF; - } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) { - priv->can.state = CAN_STATE_ERROR_PASSIVE; - priv->can.can_stats.error_passive++; - if (skb) { - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = (rxerr > 127) ? 
- CAN_ERR_CRTL_RX_PASSIVE : - CAN_ERR_CRTL_TX_PASSIVE; - cf->data[6] = txerr; - cf->data[7] = rxerr; - } - } else if (status & XCAN_SR_ERRWRN_MASK) { - priv->can.state = CAN_STATE_ERROR_WARNING; - priv->can.can_stats.error_warning++; - if (skb) { - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] |= (txerr > rxerr) ? - CAN_ERR_CRTL_TX_WARNING : - CAN_ERR_CRTL_RX_WARNING; - cf->data[6] = txerr; - cf->data[7] = rxerr; - } + } else { + enum can_state new_state = xcan_current_error_state(ndev); + + xcan_set_error_state(ndev, new_state, skb ? cf : NULL); } /* Check for Arbitration lost interrupt */ @@ -600,7 +719,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) if (isr & XCAN_IXR_RXOFLW_MASK) { stats->rx_over_errors++; stats->rx_errors++; - priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); if (skb) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; @@ -709,26 +827,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota) isr = priv->read_reg(priv, XCAN_ISR_OFFSET); while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) { - if (isr & XCAN_IXR_RXOK_MASK) { - priv->write_reg(priv, XCAN_ICR_OFFSET, - XCAN_IXR_RXOK_MASK); - work_done += xcan_rx(ndev); - } else { - priv->write_reg(priv, XCAN_ICR_OFFSET, - XCAN_IXR_RXNEMP_MASK); - break; - } + work_done += xcan_rx(ndev); priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK); isr = priv->read_reg(priv, XCAN_ISR_OFFSET); } - if (work_done) + if (work_done) { can_led_event(ndev, CAN_LED_EVENT_RX); + xcan_update_error_state_after_rxtx(ndev); + } if (work_done < quota) { napi_complete_done(napi, work_done); ier = priv->read_reg(priv, XCAN_IER_OFFSET); - ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK); + ier |= XCAN_IXR_RXNEMP_MASK; priv->write_reg(priv, XCAN_IER_OFFSET, ier); } return work_done; @@ -743,18 +855,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr) { struct xcan_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; + unsigned int frames_in_fifo; + int frames_sent = 1; /* TXOK => at least 1 frame was sent */ + unsigned long flags; + int retries = 0; + + /* Synchronize with xmit as we need to know the exact number + * of frames in the FIFO to stay in sync due to the TXFEMP + * handling. + * This also prevents a race between netif_wake_queue() and + * netif_stop_queue(). + */ + spin_lock_irqsave(&priv->tx_lock, flags); + + frames_in_fifo = priv->tx_head - priv->tx_tail; + + if (WARN_ON_ONCE(frames_in_fifo == 0)) { + /* clear TXOK anyway to avoid getting back here */ + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); + spin_unlock_irqrestore(&priv->tx_lock, flags); + return; + } + + /* Check if 2 frames were sent (TXOK only means that at least 1 + * frame was sent). + */ + if (frames_in_fifo > 1) { + WARN_ON(frames_in_fifo > priv->tx_max); + + /* Synchronize TXOK and isr so that after the loop: + * (1) isr variable is up-to-date at least up to TXOK clear + * time. This avoids us clearing a TXOK of a second frame + * but not noticing that the FIFO is now empty and thus + * marking only a single frame as sent. + * (2) No TXOK is left. Having one could mean leaving a + * stray TXOK as we might process the associated frame + * via TXFEMP handling as we read TXFEMP *after* TXOK + * clear to satisfy (1). 
+ */ + while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) { + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); + isr = priv->read_reg(priv, XCAN_ISR_OFFSET); + } - while ((priv->tx_head - priv->tx_tail > 0) && - (isr & XCAN_IXR_TXOK_MASK)) { + if (isr & XCAN_IXR_TXFEMP_MASK) { + /* nothing in FIFO anymore */ + frames_sent = frames_in_fifo; + } + } else { + /* single frame in fifo, just clear TXOK */ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); + } + + while (frames_sent--) { can_get_echo_skb(ndev, priv->tx_tail % priv->tx_max); priv->tx_tail++; stats->tx_packets++; - isr = priv->read_reg(priv, XCAN_ISR_OFFSET); } - can_led_event(ndev, CAN_LED_EVENT_TX); + netif_wake_queue(ndev); + + spin_unlock_irqrestore(&priv->tx_lock, flags); + + can_led_event(ndev, CAN_LED_EVENT_TX); + xcan_update_error_state_after_rxtx(ndev); } /** @@ -773,6 +938,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) struct net_device *ndev = (struct net_device *)dev_id; struct xcan_priv *priv = netdev_priv(ndev); u32 isr, ier; + u32 isr_errors; /* Get the interrupt status from Xilinx CAN */ isr = priv->read_reg(priv, XCAN_ISR_OFFSET); @@ -791,18 +957,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) xcan_tx_interrupt(ndev, isr); /* Check for the type of error interrupt and Processing it */ - if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | - XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) { - priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK | - XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK | - XCAN_IXR_ARBLST_MASK)); + isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | + XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK); + if (isr_errors) { + priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors); xcan_err_interrupt(ndev, isr); } /* Check for the type of receive interrupt and Processing it */ - if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) { + if (isr & XCAN_IXR_RXNEMP_MASK) { ier = priv->read_reg(priv, XCAN_IER_OFFSET); - ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK); + ier &= ~XCAN_IXR_RXNEMP_MASK; priv->write_reg(priv, XCAN_IER_OFFSET, ier); napi_schedule(&priv->napi); } @@ -819,13 +984,9 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) static void xcan_chip_stop(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); - u32 ier; /* Disable interrupts and leave the can in configuration mode */ - ier = priv->read_reg(priv, XCAN_IER_OFFSET); - ier &= ~XCAN_INTR_ALL; - priv->write_reg(priv, XCAN_IER_OFFSET, ier); - priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); + set_reset_mode(ndev); priv->can.state = CAN_STATE_STOPPED; } @@ -958,10 +1119,15 @@ static const struct net_device_ops xcan_netdev_ops = { */ static int __maybe_unused xcan_suspend(struct device *dev) { - if (!device_may_wakeup(dev)) - return pm_runtime_force_suspend(dev); + struct net_device *ndev = dev_get_drvdata(dev); - return 0; + if (netif_running(ndev)) { + netif_stop_queue(ndev); + netif_device_detach(ndev); + xcan_chip_stop(ndev); + } + + return pm_runtime_force_suspend(dev); } /** @@ -973,11 +1139,27 @@ static int __maybe_unused xcan_suspend(struct device *dev) */ static int __maybe_unused xcan_resume(struct device *dev) { - if (!device_may_wakeup(dev)) - return pm_runtime_force_resume(dev); + struct net_device *ndev = dev_get_drvdata(dev); + int ret; - return 0; + ret = pm_runtime_force_resume(dev); + if (ret) { + dev_err(dev, "pm_runtime_force_resume failed on resume\n"); + return ret; + } + + if (netif_running(ndev)) 
{ + ret = xcan_chip_start(ndev); + if (ret) { + dev_err(dev, "xcan_chip_start failed on resume\n"); + return ret; + } + + netif_device_attach(ndev); + netif_start_queue(ndev); + } + return 0; } /** @@ -992,14 +1174,6 @@ static int __maybe_unused xcan_runtime_suspend(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct xcan_priv *priv = netdev_priv(ndev); - if (netif_running(ndev)) { - netif_stop_queue(ndev); - netif_device_detach(ndev); - } - - priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK); - priv->can.state = CAN_STATE_SLEEPING; - clk_disable_unprepare(priv->bus_clk); clk_disable_unprepare(priv->can_clk); @@ -1018,7 +1192,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct xcan_priv *priv = netdev_priv(ndev); int ret; - u32 isr, status; ret = clk_prepare_enable(priv->bus_clk); if (ret) { @@ -1032,27 +1205,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev) return ret; } - priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); - isr = priv->read_reg(priv, XCAN_ISR_OFFSET); - status = priv->read_reg(priv, XCAN_SR_OFFSET); - - if (netif_running(ndev)) { - if (isr & XCAN_IXR_BSOFF_MASK) { - priv->can.state = CAN_STATE_BUS_OFF; - priv->write_reg(priv, XCAN_SRR_OFFSET, - XCAN_SRR_RESET_MASK); - } else if ((status & XCAN_SR_ESTAT_MASK) == - XCAN_SR_ESTAT_MASK) { - priv->can.state = CAN_STATE_ERROR_PASSIVE; - } else if (status & XCAN_SR_ERRWRN_MASK) { - priv->can.state = CAN_STATE_ERROR_WARNING; - } else { - priv->can.state = CAN_STATE_ERROR_ACTIVE; - } - netif_device_attach(ndev); - netif_start_queue(ndev); - } - return 0; } @@ -1061,6 +1213,18 @@ static const struct dev_pm_ops xcan_dev_pm_ops = { SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL) }; +static const struct xcan_devtype_data xcan_zynq_data = { + .caps = XCAN_CAP_WATERMARK, +}; + +/* Match table for OF platform binding */ +static const struct of_device_id xcan_of_match[] = { + { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data }, + { .compatible = "xlnx,axi-can-1.00.a", }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(of, xcan_of_match); + /** * xcan_probe - Platform registration call * @pdev: Handle to the platform device structure @@ -1075,8 +1239,10 @@ static int xcan_probe(struct platform_device *pdev) struct resource *res; /* IO mem resources */ struct net_device *ndev; struct xcan_priv *priv; + const struct of_device_id *of_id; + int caps = 0; void __iomem *addr; - int ret, rx_max, tx_max; + int ret, rx_max, tx_max, tx_fifo_depth; /* Get the virtual base address for the device */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1086,7 +1252,8 @@ static int xcan_probe(struct platform_device *pdev) goto err; } - ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max); + ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", + &tx_fifo_depth); if (ret < 0) goto err; @@ -1094,6 +1261,30 @@ static int xcan_probe(struct platform_device *pdev) if (ret < 0) goto err; + of_id = of_match_device(xcan_of_match, &pdev->dev); + if (of_id) { + const struct xcan_devtype_data *devtype_data = of_id->data; + + if (devtype_data) + caps = devtype_data->caps; + } + + /* There is no way to directly figure out how many frames have been + * sent when the TXOK interrupt is processed. If watermark programming + * is supported, we can have 2 frames in the FIFO and use TXFEMP + * to determine if 1 or 2 frames have been sent. 
+ * Theoretically we should be able to use TXFWMEMP to determine up + * to 3 frames, but it seems that after putting a second frame in the + * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less + * than 2 frames in FIFO) is set anyway with no TXOK (a frame was + * sent), which is not a sensible state - possibly TXFWMEMP is not + * completely synchronized with the rest of the bits? + */ + if (caps & XCAN_CAP_WATERMARK) + tx_max = min(tx_fifo_depth, 2); + else + tx_max = 1; + /* Create a CAN device instance */ ndev = alloc_candev(sizeof(struct xcan_priv), tx_max); if (!ndev) @@ -1108,6 +1299,7 @@ static int xcan_probe(struct platform_device *pdev) CAN_CTRLMODE_BERR_REPORTING; priv->reg_base = addr; priv->tx_max = tx_max; + spin_lock_init(&priv->tx_lock); /* Get IRQ for the device */ ndev->irq = platform_get_irq(pdev, 0); @@ -1172,9 +1364,9 @@ static int xcan_probe(struct platform_device *pdev) pm_runtime_put(&pdev->dev); - netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n", + netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n", priv->reg_base, ndev->irq, priv->can.clock.freq, - priv->tx_max); + tx_fifo_depth, priv->tx_max); return 0; @@ -1208,14 +1400,6 @@ static int xcan_remove(struct platform_device *pdev) return 0; } -/* Match table for OF platform binding */ -static const struct of_device_id xcan_of_match[] = { - { .compatible = "xlnx,zynq-can-1.0", }, - { .compatible = "xlnx,axi-can-1.00.a", }, - { /* end of list */ }, -}; -MODULE_DEVICE_TABLE(of, xcan_of_match); - static struct platform_driver xcan_driver = { .probe = xcan_probe, .remove = xcan_remove, diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 437cd6eb4faa..9ef07a06aceb 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -343,6 +343,7 @@ static const struct irq_domain_ops mv88e6xxx_g1_irq_domain_ops = { .xlate = irq_domain_xlate_twocell, }; +/* To be called with reg_lock held */ static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip) { int irq, virq; @@ -362,9 +363,15 @@ static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip) static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip) { - mv88e6xxx_g1_irq_free_common(chip); - + /* + * free_irq must be called without reg_lock taken because the irq + * handler takes this lock, too. 
+ */ free_irq(chip->irq, chip); + + mutex_lock(&chip->reg_lock); + mv88e6xxx_g1_irq_free_common(chip); + mutex_unlock(&chip->reg_lock); } static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip) @@ -469,10 +476,12 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip) static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip) { - mv88e6xxx_g1_irq_free_common(chip); - kthread_cancel_delayed_work_sync(&chip->irq_poll_work); kthread_destroy_worker(chip->kworker); + + mutex_lock(&chip->reg_lock); + mv88e6xxx_g1_irq_free_common(chip); + mutex_unlock(&chip->reg_lock); } int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask) @@ -4506,12 +4515,10 @@ out_g2_irq: if (chip->info->g2_irqs > 0) mv88e6xxx_g2_irq_free(chip); out_g1_irq: - mutex_lock(&chip->reg_lock); if (chip->irq > 0) mv88e6xxx_g1_irq_free(chip); else mv88e6xxx_irq_poll_free(chip); - mutex_unlock(&chip->reg_lock); out: if (pdata) dev_put(pdata->netdev); @@ -4539,12 +4546,10 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) if (chip->info->g2_irqs > 0) mv88e6xxx_g2_irq_free(chip); - mutex_lock(&chip->reg_lock); if (chip->irq > 0) mv88e6xxx_g1_irq_free(chip); else mv88e6xxx_irq_poll_free(chip); - mutex_unlock(&chip->reg_lock); } static const struct of_device_id mv88e6xxx_of_match[] = { diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig index 5b7658bcf020..5c3ef9fc8207 100644 --- a/drivers/net/ethernet/3com/Kconfig +++ b/drivers/net/ethernet/3com/Kconfig @@ -32,7 +32,7 @@ config EL3 config 3C515 tristate "3c515 ISA \"Fast EtherLink\"" - depends on ISA && ISA_DMA_API + depends on ISA && ISA_DMA_API && !PPC32 ---help--- If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet network card, say Y here. diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index f273af136fc7..9e5cf5583c87 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -44,7 +44,7 @@ config AMD8111_ETH config LANCE tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" - depends on ISA && ISA_DMA_API && !ARM + depends on ISA && ISA_DMA_API && !ARM && !PPC32 ---help--- If you have a network (Ethernet) card of this type, say Y here. Some LinkSys cards are of this type. @@ -138,7 +138,7 @@ config PCMCIA_NMCLAN config NI65 tristate "NI6510 support" - depends on ISA && ISA_DMA_API && !ARM + depends on ISA && ISA_DMA_API && !ARM && !PPC32 ---help--- If you have a network (Ethernet) card of this type, say Y here. 
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 94270f654b3b..7087b88550db 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -1686,6 +1686,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter) skb = build_skb(page_address(page) + adapter->rx_page_offset, adapter->rx_frag_size); if (likely(skb)) { + skb_reserve(skb, NET_SKB_PAD); adapter->rx_page_offset += adapter->rx_frag_size; if (adapter->rx_page_offset >= PAGE_SIZE) adapter->rx_page = NULL; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index da18aa239acb..a4a90b6cdb46 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -3388,14 +3388,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) DP(BNX2X_MSG_ETHTOOL, "rss re-configured, UDP 4-tupple %s\n", udp_rss_requested ? "enabled" : "disabled"); - return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); + if (bp->state == BNX2X_STATE_OPEN) + return bnx2x_rss(bp, &bp->rss_conf_obj, false, + true); } else if ((info->flow_type == UDP_V6_FLOW) && (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) { bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested; DP(BNX2X_MSG_ETHTOOL, "rss re-configured, UDP 4-tupple %s\n", udp_rss_requested ? "enabled" : "disabled"); - return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); + if (bp->state == BNX2X_STATE_OPEN) + return bnx2x_rss(bp, &bp->rss_conf_obj, false, + true); } return 0; @@ -3509,7 +3513,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir, bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id; } - return bnx2x_config_rss_eth(bp, false); + if (bp->state == BNX2X_STATE_OPEN) + return bnx2x_config_rss_eth(bp, false); + + return 0; } /** diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig index 5ab912937aff..ec0b545197e2 100644 --- a/drivers/net/ethernet/cirrus/Kconfig +++ b/drivers/net/ethernet/cirrus/Kconfig @@ -19,6 +19,7 @@ if NET_VENDOR_CIRRUS config CS89x0 tristate "CS89x0 support" depends on ISA || EISA || ARM + depends on !PPC32 ---help--- Support for CS89x0 chipset based Ethernet cards. 
If you have a network (Ethernet) card of this type, say Y and read the file diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index 9128858479c4..2353ec829c04 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -229,6 +229,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) txq->txq_stats.tx_busy++; u64_stats_update_end(&txq->txq_stats.syncp); err = NETDEV_TX_BUSY; + wqe_size = 0; goto flush_skbs; } diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 7b1b5ac986d0..31bd56727022 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -2958,7 +2958,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, u32 srqn = qp_get_srqn(qpc) & 0xffffff; int use_srq = (qp_get_srqn(qpc) >> 24) & 1; struct res_srq *srq; - int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff; + int local_qpn = vhcr->in_modifier & 0xffffff; err = adjust_qp_sched_queue(dev, slave, qpc, inbox); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index 323ffe8bf7e4..456f30007ad6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -123,7 +123,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, int i; buf->size = size; - buf->npages = 1 << get_order(size); + buf->npages = DIV_ROUND_UP(size, PAGE_SIZE); buf->page_shift = PAGE_SHIFT; buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list), GFP_KERNEL); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 75e4308ba786..d258bb679271 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -381,14 +381,14 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv) HLIST_HEAD(del_list); spin_lock_bh(&priv->fs.arfs.arfs_lock); mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) { - if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA) - break; if (!work_pending(&arfs_rule->arfs_work) && rps_may_expire_flow(priv->netdev, arfs_rule->rxq, arfs_rule->flow_id, arfs_rule->filter_id)) { hlist_del_init(&arfs_rule->hlist); hlist_add_head(&arfs_rule->hlist, &del_list); + if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA) + break; } } spin_unlock_bh(&priv->fs.arfs.arfs_lock); @@ -711,6 +711,9 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, skb->protocol != htons(ETH_P_IPV6)) return -EPROTONOSUPPORT; + if (skb->encapsulation) + return -EPROTONOSUPPORT; + arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol); if (!arfs_t) return -EPROTONOSUPPORT; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 0a52f31fef37..86bc9ac99586 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -275,7 +275,8 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets) } static int mlx5e_dbcnl_validate_ets(struct net_device *netdev, - struct ieee_ets *ets) + struct ieee_ets *ets, + bool zero_sum_allowed) { bool have_ets_tc = false; int bw_sum = 0; @@ -300,8 +301,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device 
*netdev, } if (have_ets_tc && bw_sum != 100) { - netdev_err(netdev, - "Failed to validate ETS: BW sum is illegal\n"); + if (bw_sum || (!bw_sum && !zero_sum_allowed)) + netdev_err(netdev, + "Failed to validate ETS: BW sum is illegal\n"); return -EINVAL; } return 0; @@ -316,7 +318,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev, if (!MLX5_CAP_GEN(priv->mdev, ets)) return -EOPNOTSUPP; - err = mlx5e_dbcnl_validate_ets(netdev, ets); + err = mlx5e_dbcnl_validate_ets(netdev, ets, false); if (err) return err; @@ -642,12 +644,9 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev) ets.prio_tc[i]); } - err = mlx5e_dbcnl_validate_ets(netdev, &ets); - if (err) { - netdev_err(netdev, - "%s, Failed to validate ETS: %d\n", __func__, err); + err = mlx5e_dbcnl_validate_ets(netdev, &ets, true); + if (err) goto out; - } err = mlx5e_dcbnl_ieee_setets_core(priv, &ets); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 0edf4751a8ba..3a2c4e548226 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1957,6 +1957,10 @@ static bool actions_match_supported(struct mlx5e_priv *priv, else actions = flow->nic_attr->action; + if (flow->flags & MLX5E_TC_FLOW_EGRESS && + !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)) + return false; + if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) return modify_header_match_supported(&parse_attr->spec, exts); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index b79d74860a30..dd01ad4c0b54 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -2216,6 +2216,6 @@ free_out: u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw) { - return esw->mode; + return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE; } EXPORT_SYMBOL_GPL(mlx5_eswitch_mode); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index f1a86cea86a0..6ddb2565884d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1887,7 +1887,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft, if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { if (!fwd_next_prio_supported(ft)) return ERR_PTR(-EOPNOTSUPP); - if (dest) + if (dest_num) return ERR_PTR(-EINVAL); mutex_lock(&root->chain_lock); next_ft = find_next_chained_ft(prio); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 1e062e6b2587..3f767cde4c1d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -488,6 +488,7 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev, void mlx5_init_clock(struct mlx5_core_dev *mdev) { struct mlx5_clock *clock = &mdev->clock; + u64 overflow_cycles; u64 ns; u64 frac = 0; u32 dev_freq; @@ -511,10 +512,17 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev) /* Calculate period in seconds to call the overflow watchdog - to make * sure counter is checked at least once every wrap around. + * The period is calculated as the minimum between max HW cycles count + * (The clock source mask) and max amount of cycles that can be + * multiplied by clock multiplier where the result doesn't exceed + * 64bits. 
*/ - ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask, + overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult); + overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1); + + ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles, frac, &frac); - do_div(ns, NSEC_PER_SEC / 2 / HZ); + do_div(ns, NSEC_PER_SEC / HZ); clock->overflow_period = ns; mdev->clock_info_page = alloc_page(GFP_KERNEL); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index b97bb72b4db4..86478a6b99c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -113,35 +113,45 @@ err_db_free: return err; } -static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf, - struct mlx5_wq_qp *qp) +static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf, + struct mlx5_wq_qp *qp) { + struct mlx5_frag_buf_ctrl *sq_fbc; struct mlx5_frag_buf *rqb, *sqb; - rqb = &qp->rq.fbc.frag_buf; + rqb = &qp->rq.fbc.frag_buf; *rqb = *buf; rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq); - rqb->npages = 1 << get_order(rqb->size); + rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE); - sqb = &qp->sq.fbc.frag_buf; - *sqb = *buf; - sqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq); - sqb->npages = 1 << get_order(sqb->size); + sq_fbc = &qp->sq.fbc; + sqb = &sq_fbc->frag_buf; + *sqb = *buf; + sqb->size = mlx5_wq_cyc_get_byte_size(&qp->sq); + sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE); sqb->frags += rqb->npages; /* first part is for the rq */ + if (sq_fbc->strides_offset) + sqb->frags--; } int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *qpc, struct mlx5_wq_qp *wq, struct mlx5_wq_ctrl *wq_ctrl) { + u32 sq_strides_offset; int err; mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4, MLX5_GET(qpc, qpc, log_rq_size), &wq->rq.fbc); - mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB), - MLX5_GET(qpc, qpc, log_sq_size), - &wq->sq.fbc); + + sq_strides_offset = + ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB; + + mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB), + MLX5_GET(qpc, qpc, log_sq_size), + sq_strides_offset, + &wq->sq.fbc); err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { @@ -156,7 +166,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, goto err_db_free; } - mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq); + mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq); wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR]; wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR]; diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 78afe75129ab..382bb93cb090 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -317,7 +317,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, payload.dst_ipv4 = flow->daddr; /* If entry has expired send dst IP with all other fields 0. */ - if (!(neigh->nud_state & NUD_VALID)) { + if (!(neigh->nud_state & NUD_VALID) || neigh->dead) { nfp_tun_del_route_from_cache(app, payload.dst_ipv4); /* Trigger ARP to verify invalid neighbour state. 
*/ neigh_event_send(neigh, NULL); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 99973e10b179..5ede6408649d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -665,7 +665,7 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn, p_ramrod->common.update_approx_mcast_flg = 1; for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { - u32 *p_bins = (u32 *)p_params->bins; + u32 *p_bins = p_params->bins; p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]); } @@ -1476,8 +1476,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data) { - unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; struct vport_update_ramrod_data *p_ramrod = NULL; + u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; u8 abs_vport_id = 0; @@ -1513,26 +1513,25 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, /* explicitly clear out the entire vector */ memset(&p_ramrod->approx_mcast.bins, 0, sizeof(p_ramrod->approx_mcast.bins)); - memset(bins, 0, sizeof(unsigned long) * - ETH_MULTICAST_MAC_BINS_IN_REGS); + memset(bins, 0, sizeof(bins)); /* filter ADD op is explicit set op and it removes * any existing filters for the vport */ if (p_filter_cmd->opcode == QED_FILTER_ADD) { for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { - u32 bit; + u32 bit, nbits; bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); - __set_bit(bit, bins); + nbits = sizeof(u32) * BITS_PER_BYTE; + bins[bit / nbits] |= 1 << (bit % nbits); } /* Convert to correct endianity */ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { struct vport_update_ramrod_mcast *p_ramrod_bins; - u32 *p_bins = (u32 *)bins; p_ramrod_bins = &p_ramrod->approx_mcast; - p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]); + p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 806a8da257e9..8d80f1095d17 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -215,7 +215,7 @@ struct qed_sp_vport_update_params { u8 anti_spoofing_en; u8 update_accept_any_vlan_flg; u8 accept_any_vlan; - unsigned long bins[8]; + u32 bins[8]; struct qed_rss_params *rss_params; struct qed_filter_accept_flags accept_flags; struct qed_sge_tpa_params *sge_tpa_params; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 9d9e533bccdc..cdd645024a32 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1211,6 +1211,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, break; default: p_link->speed = 0; + p_link->link_up = 0; } if (p_link->link_up && p_link->speed) @@ -1308,9 +1309,15 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0; phy_cfg.adv_speed = params->speed.advertised_speeds; phy_cfg.loopback_mode = params->loopback_mode; - if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { - if (params->eee.enable) - phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED; + + /* There are MFWs that share this capability regardless of whether + * this is feasible or not. And given that at the very least adv_caps + * would be set internally by qed, we want to make sure LFA would + * still work. 
+ */ + if ((p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) { + phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED; if (params->eee.tx_lpi_enable) phy_cfg.eee_cfg |= EEE_CFG_TX_LPI; if (params->eee.adv_caps & QED_EEE_1G_ADV) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index fd59cf45f4be..26e918d7f2f9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -2831,7 +2831,7 @@ qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn, p_data->update_approx_mcast_flg = 1; memcpy(p_data->bins, p_mcast_tlv->bins, - sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); + sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 2d7fcd6a0777..be6ddde1a104 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -1126,7 +1126,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, resp_size += sizeof(struct pfvf_def_resp_tlv); memcpy(p_mcast_tlv->bins, p_params->bins, - sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); + sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); } update_rx = p_params->accept_flags.update_rx_mode_config; @@ -1272,7 +1272,7 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, u32 bit; bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); - __set_bit(bit, sp_params.bins); + sp_params.bins[bit / 32] |= 1 << (bit % 32); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 4f05d5eb3cf5..033409db86ae 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -392,7 +392,12 @@ struct vfpf_vport_update_mcast_bin_tlv { struct channel_tlv tl; u8 padding[4]; - u64 bins[8]; + /* There are only 256 approx bins, and in HSI they're divided into + * 32-bit values. As old VFs used to set-bit to the values on its side, + * the upper half of the array is never expected to contain any data. + */ + u64 bins[4]; + u64 obsolete_bins[4]; }; struct vfpf_vport_update_accept_param_tlv { diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index a3f69901ac87..eaedc11ed686 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -7734,8 +7734,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return rc; } - /* override BIOS settings, use userspace tools to enable WOL */ - __rtl8169_set_wol(tp, 0); + tp->saved_wolopts = __rtl8169_get_wol(tp); if (rtl_tbi_enabled(tp)) { tp->set_speed = rtl8169_set_speed_tbi; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c index 16c3bfbe1992..757a3b37ae8a 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c @@ -218,6 +218,7 @@ issue: ret = of_mdiobus_register(bus, np1); if (ret) { mdiobus_free(bus); + lp->mii_bus = NULL; return ret; } return 0; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 537297d2b4b4..6c9b24fe3148 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -514,7 +514,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync) * negotiation may already be done and aneg interrupt may not be * generated. 
*/ - if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) { + if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) { err = phy_aneg_done(phydev); if (err > 0) { trigger = true; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 38502809420b..cb0cc30c3d6a 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1246,7 +1246,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ - {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */ + {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index f6bb1d54d4bd..e857cb3335f6 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -636,9 +636,62 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); } -/* Add new entry to forwarding table -- assumes lock held */ +static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, + const u8 *mac, __u16 state, + __be32 src_vni, __u8 ndm_flags) +{ + struct vxlan_fdb *f; + + f = kmalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return NULL; + f->state = state; + f->flags = ndm_flags; + f->updated = f->used = jiffies; + f->vni = src_vni; + INIT_LIST_HEAD(&f->remotes); + memcpy(f->eth_addr, mac, ETH_ALEN); + + return f; +} + static int vxlan_fdb_create(struct vxlan_dev *vxlan, const u8 *mac, union vxlan_addr *ip, + __u16 state, __be16 port, __be32 src_vni, + __be32 vni, __u32 ifindex, __u8 ndm_flags, + struct vxlan_fdb **fdb) +{ + struct vxlan_rdst *rd = NULL; + struct vxlan_fdb *f; + int rc; + + if (vxlan->cfg.addrmax && + vxlan->addrcnt >= vxlan->cfg.addrmax) + return -ENOSPC; + + netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); + f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags); + if (!f) + return -ENOMEM; + + rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); + if (rc < 0) { + kfree(f); + return rc; + } + + ++vxlan->addrcnt; + hlist_add_head_rcu(&f->hlist, + vxlan_fdb_head(vxlan, mac, src_vni)); + + *fdb = f; + + return 0; +} + +/* Add new entry to forwarding table -- assumes lock held */ +static int vxlan_fdb_update(struct vxlan_dev *vxlan, + const u8 *mac, union vxlan_addr *ip, __u16 state, __u16 flags, __be16 port, __be32 src_vni, __be32 vni, __u32 ifindex, __u8 ndm_flags) @@ -687,37 +740,17 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, if (!(flags & NLM_F_CREATE)) return -ENOENT; - if (vxlan->cfg.addrmax && - vxlan->addrcnt >= vxlan->cfg.addrmax) - return -ENOSPC; - /* Disallow replace to add a multicast entry */ if ((flags & NLM_F_REPLACE) && (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac))) return -EOPNOTSUPP; netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); - f = kmalloc(sizeof(*f), GFP_ATOMIC); - if (!f) - return -ENOMEM; - - notify = 1; - f->state = state; - f->flags = ndm_flags; - f->updated = f->used = jiffies; - f->vni = src_vni; - INIT_LIST_HEAD(&f->remotes); - memcpy(f->eth_addr, mac, ETH_ALEN); - - rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); - if (rc < 0) { - kfree(f); + 
rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni, + vni, ifindex, ndm_flags, &f); + if (rc < 0) return rc; - } - - ++vxlan->addrcnt; - hlist_add_head_rcu(&f->hlist, - vxlan_fdb_head(vxlan, mac, src_vni)); + notify = 1; } if (notify) { @@ -741,13 +774,15 @@ static void vxlan_fdb_free(struct rcu_head *head) kfree(f); } -static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) +static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, + bool do_notify) { netdev_dbg(vxlan->dev, "delete %pM\n", f->eth_addr); --vxlan->addrcnt; - vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); + if (do_notify) + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); hlist_del_rcu(&f->hlist); call_rcu(&f->rcu, vxlan_fdb_free); @@ -863,7 +898,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return -EAFNOSUPPORT; spin_lock_bh(&vxlan->hash_lock); - err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags, + err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags, port, src_vni, vni, ifindex, ndm->ndm_flags); spin_unlock_bh(&vxlan->hash_lock); @@ -897,7 +932,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan, goto out; } - vxlan_fdb_destroy(vxlan, f); + vxlan_fdb_destroy(vxlan, f, true); out: return 0; @@ -1006,7 +1041,7 @@ static bool vxlan_snoop(struct net_device *dev, /* close off race between vxlan_flush and incoming packets */ if (netif_running(dev)) - vxlan_fdb_create(vxlan, src_mac, src_ip, + vxlan_fdb_update(vxlan, src_mac, src_ip, NUD_REACHABLE, NLM_F_EXCL|NLM_F_CREATE, vxlan->cfg.dst_port, @@ -2364,7 +2399,7 @@ static void vxlan_cleanup(struct timer_list *t) "garbage collect %pM\n", f->eth_addr); f->state = NUD_STALE; - vxlan_fdb_destroy(vxlan, f); + vxlan_fdb_destroy(vxlan, f, true); } else if (time_before(timeout, next_timer)) next_timer = timeout; } @@ -2415,7 +2450,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni) spin_lock_bh(&vxlan->hash_lock); f = __vxlan_find_mac(vxlan, all_zeros_mac, vni); if (f) - vxlan_fdb_destroy(vxlan, f); + vxlan_fdb_destroy(vxlan, f, true); spin_unlock_bh(&vxlan->hash_lock); } @@ -2469,7 +2504,7 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) continue; /* the all_zeros_mac entry is deleted at vxlan_uninit */ if (!is_zero_ether_addr(f->eth_addr)) - vxlan_fdb_destroy(vxlan, f); + vxlan_fdb_destroy(vxlan, f, true); } } spin_unlock_bh(&vxlan->hash_lock); @@ -3160,6 +3195,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_fdb *f = NULL; int err; err = vxlan_dev_configure(net, dev, conf, false, extack); @@ -3173,24 +3209,35 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, err = vxlan_fdb_create(vxlan, all_zeros_mac, &vxlan->default_dst.remote_ip, NUD_REACHABLE | NUD_PERMANENT, - NLM_F_EXCL | NLM_F_CREATE, vxlan->cfg.dst_port, vxlan->default_dst.remote_vni, vxlan->default_dst.remote_vni, vxlan->default_dst.remote_ifindex, - NTF_SELF); + NTF_SELF, &f); if (err) return err; } err = register_netdevice(dev); + if (err) + goto errout; + + err = rtnl_configure_link(dev, NULL); if (err) { - vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni); - return err; + unregister_netdevice(dev); + goto errout; } + /* notify default fdb entry */ + if (f) + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH); + list_add(&vxlan->next, &vn->vxlan_list); return 0; 
+errout:
+	if (f)
+		vxlan_fdb_destroy(vxlan, f, false);
+	return err;
 }
 
 static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
@@ -3425,6 +3472,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
 	struct vxlan_rdst *dst = &vxlan->default_dst;
 	struct vxlan_rdst old_dst;
 	struct vxlan_config conf;
+	struct vxlan_fdb *f = NULL;
 	int err;
 
 	err = vxlan_nl2conf(tb, data,
@@ -3453,16 +3501,16 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
 		err = vxlan_fdb_create(vxlan, all_zeros_mac,
 				       &dst->remote_ip,
 				       NUD_REACHABLE | NUD_PERMANENT,
-				       NLM_F_CREATE | NLM_F_APPEND,
 				       vxlan->cfg.dst_port,
 				       dst->remote_vni,
 				       dst->remote_vni,
 				       dst->remote_ifindex,
-				       NTF_SELF);
+				       NTF_SELF, &f);
 		if (err) {
 			spin_unlock_bh(&vxlan->hash_lock);
 			return err;
 		}
+		vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
 	}
 	spin_unlock_bh(&vxlan->hash_lock);
 }
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 903eb4545e26..f7efe5a58cc7 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -539,14 +539,18 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
 /*
  * For something we're not in a state to send to the device the default action
  * is to busy it and retry it after the controller state is recovered. However,
- * anything marked for failfast or nvme multipath is immediately failed.
+ * if the controller is deleting or if anything is marked for failfast or
+ * nvme multipath it is immediately failed.
  *
  * Note: commands used to initialize the controller will be marked for failfast.
  * Note: nvme cli/ioctl commands are marked for failfast.
  */
-blk_status_t nvmf_fail_nonready_command(struct request *rq)
+blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+		struct request *rq)
 {
-	if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+	if (ctrl->state != NVME_CTRL_DELETING &&
+	    ctrl->state != NVME_CTRL_DEAD &&
+	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
 		return BLK_STS_RESOURCE;
 	nvme_req(rq)->status = NVME_SC_ABORT_REQ;
 	return BLK_STS_IOERR;
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index e1818a27aa2d..aa2fdb2a2e8f 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -162,7 +162,8 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
 void nvmf_free_options(struct nvmf_ctrl_options *opts);
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
-blk_status_t nvmf_fail_nonready_command(struct request *rq);
+blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+		struct request *rq);
 bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
 		bool queue_live);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 41d45a1b5c62..9bac912173ba 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2272,7 +2272,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
 	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
-		return nvmf_fail_nonready_command(rq);
+		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
 	ret = nvme_setup_cmd(ns, rq, sqe);
 	if (ret)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 518c5b09038c..66ec5985c9f3 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1639,7 +1639,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	WARN_ON_ONCE(rq->tag < 0);
 
 	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
-		return nvmf_fail_nonready_command(rq);
+		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
 	dev = queue->device->dev;
 	ib_dma_sync_single_for_cpu(dev, sqe->dma,
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index d3f3b3ec4d1a..ebea1373d1b7 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -282,6 +282,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
 {
 	struct nvmet_ns *ns = to_nvmet_ns(item);
 	struct nvmet_subsys *subsys = ns->subsys;
+	size_t len;
 	int ret;
 
 	mutex_lock(&subsys->lock);
@@ -289,10 +290,14 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
 	if (ns->enabled)
 		goto out_unlock;
 
-	kfree(ns->device_path);
+	ret = -EINVAL;
+	len = strcspn(page, "\n");
+	if (!len)
+		goto out_unlock;
 
+	kfree(ns->device_path);
 	ret = -ENOMEM;
-	ns->device_path = kstrndup(page, strcspn(page, "\n"), GFP_KERNEL);
+	ns->device_path = kstrndup(page, len, GFP_KERNEL);
 	if (!ns->device_path)
 		goto out_unlock;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 74d4b785d2da..9838103f2d62 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -339,7 +339,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 		goto out_unlock;
 
 	ret = nvmet_bdev_ns_enable(ns);
-	if (ret)
+	if (ret == -ENOTBLK)
 		ret = nvmet_file_ns_enable(ns);
 	if (ret)
 		goto out_unlock;
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 408279cb6f2c..29b4b236afd8 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod {
 	struct work_struct		work;
 } __aligned(sizeof(unsigned long long));
 
+/* desired maximum for a single sequence - if sg list allows it */
 #define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
-#define NVMET_FC_MAX_XFR_SGENTS		(NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
 
 enum nvmet_fcp_datadir {
 	NVMET_FCP_NODATA,
@@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod {
 	struct nvme_fc_cmd_iu		cmdiubuf;
 	struct nvme_fc_ersp_iu		rspiubuf;
 	dma_addr_t			rspdma;
+	struct scatterlist		*next_sg;
 	struct scatterlist		*data_sg;
 	int				data_sg_cnt;
 	u32				offset;
@@ -1025,8 +1026,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
 	INIT_LIST_HEAD(&newrec->assoc_list);
 	kref_init(&newrec->ref);
 	ida_init(&newrec->assoc_cnt);
-	newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
-					template->max_sgl_segments);
+	newrec->max_sg_cnt = template->max_sgl_segments;
 
 	ret = nvmet_fc_alloc_ls_iodlist(newrec);
 	if (ret) {
@@ -1722,6 +1722,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
 				((fod->io_dir == NVMET_FCP_WRITE) ?
 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
 				/* note: write from initiator perspective */
+	fod->next_sg = fod->data_sg;
 
 	return 0;
 
@@ -1866,24 +1867,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
 				struct nvmet_fc_fcp_iod *fod, u8 op)
 {
 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+	struct scatterlist *sg = fod->next_sg;
 	unsigned long flags;
-	u32 tlen;
+	u32 remaininglen = fod->req.transfer_len - fod->offset;
+	u32 tlen = 0;
 	int ret;
 
 	fcpreq->op = op;
 	fcpreq->offset = fod->offset;
 	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
 
-	tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
-			(fod->req.transfer_len - fod->offset));
+	/*
+	 * for next sequence:
+	 *   break at a sg element boundary
+	 *   attempt to keep sequence length capped at
+	 *     NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
+	 *     be longer if a single sg element is larger
+	 *     than that amount. This is done to avoid creating
+	 *     a new sg list to use for the tgtport api.
+	 */
+	fcpreq->sg = sg;
+	fcpreq->sg_cnt = 0;
+	while (tlen < remaininglen &&
+	       fcpreq->sg_cnt < tgtport->max_sg_cnt &&
+	       tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
+		fcpreq->sg_cnt++;
+		tlen += sg_dma_len(sg);
+		sg = sg_next(sg);
+	}
+	if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
+		fcpreq->sg_cnt++;
+		tlen += min_t(u32, sg_dma_len(sg), remaininglen);
+		sg = sg_next(sg);
+	}
+	if (tlen < remaininglen)
+		fod->next_sg = sg;
+	else
+		fod->next_sg = NULL;
+
 	fcpreq->transfer_length = tlen;
 	fcpreq->transferred_length = 0;
 	fcpreq->fcp_error = 0;
 	fcpreq->rsplen = 0;
 
-	fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
-	fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
-
 	/*
 	 * If the last READDATA request: check if LLDD supports
 	 * combined xfr with response.
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index d8d91f04bd7e..ae7586b8be07 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -162,7 +162,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_status_t ret;
 
 	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
-		return nvmf_fail_nonready_command(req);
+		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
 
 	ret = nvme_setup_cmd(ns, req, &iod->cmd);
 	if (ret)
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index f7ce0cb0b0b7..f02e334beb45 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -295,6 +295,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
 		parent = udev->subordinate;
 
 	pci_lock_rescan_remove();
+	pci_dev_get(dev);
 	list_for_each_entry_safe_reverse(pdev, temp, &parent->devices,
 					 bus_list) {
 		pci_dev_get(pdev);
@@ -328,6 +329,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
 		pci_info(dev, "Device recovery from fatal error failed\n");
 	}
 
+	pci_dev_put(dev);
 	pci_unlock_rescan_remove();
 }
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
index 1b7febc43da9..29d2c3b1913a 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
@@ -962,6 +962,10 @@ void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
 {
 	void __iomem *ctrl = params->ctrl_regs;
 
+	USB_CTRL_UNSET(ctrl, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
+	/* 1 millisecond - for USB clocks to settle down */
+	usleep_range(1000, 2000);
+
 	if (BRCM_ID(params->family_id) == 0x7366) {
 		/*
 		 * The PHY3_SOFT_RESETB bits default to the wrong state.
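The nvmet-fc hunks above replace fixed PAGE_SIZE-based sequence sizing with a walk of the DMA-mapped scatter-gather list: whole elements are accumulated until the next one would push the sequence past NVMET_FC_MAX_SEQ_LENGTH (or past the LLDD's max_sg_cnt), and a single element larger than the cap is still sent on its own so the transfer always makes progress. A minimal user-space sketch of that accumulation pattern follows; the struct, names, and limits are illustrative assumptions, not the kernel's API.

#include <stddef.h>
#include <stdio.h>

/* stand-in for one DMA-mapped scatterlist entry (assumed shape) */
struct sg_elem {
	size_t len;
};

/*
 * Pick the next "sequence": take whole elements while the running total
 * stays under max_bytes, but always take at least one element so an
 * oversized element cannot stall the transfer.
 */
static size_t pick_sequence(const struct sg_elem *sg, size_t nelems,
			    size_t max_bytes, size_t *bytes_out)
{
	size_t n = 0, total = 0;

	while (n < nelems && total + sg[n].len < max_bytes)
		total += sg[n++].len;

	if (n == 0 && nelems > 0)	/* single element larger than the cap */
		total = sg[n++].len;

	*bytes_out = total;
	return n;
}

int main(void)
{
	const struct sg_elem sg[] = { { 4096 }, { 4096 }, { 262144 }, { 4096 } };
	size_t bytes, done = 0, i = 0;

	while (i < 4) {
		size_t n = pick_sequence(sg + i, 4 - i, 262144, &bytes);

		printf("sequence: %zu element(s), %zu bytes\n", n, bytes);
		done += bytes;
		i += n;
	}
	printf("total %zu bytes\n", done);
	return 0;
}

Run against the sample list this emits one sequence of two 4 KiB elements, then the oversized 256 KiB element alone, then the final 4 KiB element — mirroring how the driver now breaks at element boundaries instead of building a fresh sg list per sequence.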
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index 23705e1a0023..0075fb0bef8c 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -182,13 +182,13 @@ static void phy_mdm6600_status(struct work_struct *work)
 	ddata = container_of(work, struct phy_mdm6600, status_work.work);
 	dev = ddata->dev;
 
-	error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_CMD_LINES,
+	error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_STATUS_LINES,
 					       ddata->status_gpios->desc,
 					       values);
 	if (error)
 		return;
 
-	for (i = 0; i < PHY_MDM6600_NR_CMD_LINES; i++) {
+	for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) {
 		val |= values[i] << i;
 		dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n",
 			__func__, i, values[i], val);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 8932ae81a15a..2715cdaa669c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -296,6 +296,20 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
 		rtn = host->hostt->eh_timed_out(scmd);
 
 	if (rtn == BLK_EH_DONE) {
+		/*
+		 * For blk-mq, we must set the request state to complete now
+		 * before sending the request to the scsi error handler. This
+		 * will prevent a use-after-free in the event the LLD manages
+		 * to complete the request before the error handler finishes
+		 * processing this timed out request.
+		 *
+		 * If the request was already completed, then the LLD beat the
+		 * time out handler from transferring the request to the scsi
+		 * error handler. In that case we can return immediately as no
+		 * further action is required.
+		 */
+		if (req->q->mq_ops && !blk_mq_mark_complete(req))
+			return rtn;
 		if (scsi_abort_command(scmd) != SUCCESS) {
 			set_host_byte(scmd, DID_TIME_OUT);
 			scsi_eh_scmd_add(scmd);
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index 0ecffab52ec2..abdaf7cf8162 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -1842,15 +1842,15 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
 	memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN);
 
 	if (dev->flags & IFF_PROMISC) {
-		hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
-					    MCAST_FILTER_PROMISC);
+		hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
+					   MCAST_FILTER_PROMISC);
 		goto spin_unlock;
 	}
 
 	if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST) ||
 	    (dev->flags & IFF_ALLMULTI)) {
-		hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
-					    MCAST_FILTER_MCASTALL);
+		hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
+					   MCAST_FILTER_MCASTALL);
 		goto spin_unlock;
 	}
 
@@ -1866,8 +1866,8 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
 			       ETH_ALEN * mc_count);
 	} else {
 		priv->sme_i.sme_flag |= SME_MULTICAST;
-		hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
-					    MCAST_FILTER_MCAST);
+		hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
+					   MCAST_FILTER_MCAST);
 	}
 
 spin_unlock:
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index a3a83424a926..16478fe9e3f8 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -11,7 +11,6 @@
  * (at your option) any later version.
  */
 
-#include <asm/cacheflush.h>
 #include <linux/clk.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
@@ -24,6 +23,8 @@
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-mc.h>
 
+#include <asm/cacheflush.h>
+
 #include "iss_video.h"
 #include "iss.h"
diff --git a/drivers/staging/rtl8188eu/Kconfig b/drivers/staging/rtl8188eu/Kconfig
index 673fdce25530..ff7832798a77 100644
--- a/drivers/staging/rtl8188eu/Kconfig
+++ b/drivers/staging/rtl8188eu/Kconfig
@@ -7,7 +7,6 @@ config R8188EU
 	select LIB80211
 	select LIB80211_CRYPT_WEP
 	select LIB80211_CRYPT_CCMP
-	select LIB80211_CRYPT_TKIP
 	---help---
 	This option adds the Realtek RTL8188EU USB device such as TP-Link TL-WN725N.
 	If built as a module, it will be called r8188eu.
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 05936a45eb93..c6857a5be12a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -23,7 +23,6 @@
 #include <mon.h>
 #include <wifi.h>
 #include <linux/vmalloc.h>
-#include <net/lib80211.h>
 
 #define ETHERNET_HEADER_SIZE	14	/* Ethernet Header Length */
 #define LLC_HEADER_SIZE		6	/* LLC Header Length */
@@ -221,20 +220,31 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
 static int recvframe_chkmic(struct adapter *adapter,
 			    struct recv_frame *precvframe)
 {
-	int res = _SUCCESS;
-	struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
-	struct sta_info *stainfo = rtw_get_stainfo(&adapter->stapriv, prxattrib->ta);
+	int	i, res = _SUCCESS;
+	u32	datalen;
+	u8	miccode[8];
+	u8	bmic_err = false, brpt_micerror = true;
+	u8	*pframe, *payload, *pframemic;
+	u8	*mickey;
+	struct	sta_info	*stainfo;
+	struct	rx_pkt_attrib	*prxattrib = &precvframe->attrib;
+	struct	security_priv	*psecuritypriv = &adapter->securitypriv;
+
+	struct mlme_ext_priv	*pmlmeext = &adapter->mlmeextpriv;
+	struct mlme_ext_info	*pmlmeinfo = &(pmlmeext->mlmext_info);
+
+	stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
 
 	if (prxattrib->encrypt == _TKIP_) {
+		RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+			 ("\n %s: prxattrib->encrypt==_TKIP_\n", __func__));
+		RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+			 ("\n %s: da=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+			  __func__, prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
+			  prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5]));
+
+		/* calculate mic code */
 		if (stainfo) {
-			int key_idx;
-			const int iv_len = 8, icv_len = 4, key_length = 32;
-			struct sk_buff *skb = precvframe->pkt;
-			u8 key[32], iv[8], icv[4], *pframe = skb->data;
-			void *crypto_private = NULL;
-			struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip");
-			struct security_priv *psecuritypriv = &adapter->securitypriv;
-
 			if (IS_MCAST(prxattrib->ra)) {
 				if (!psecuritypriv) {
 					res = _FAIL;
@@ -243,58 +253,115 @@ static int recvframe_chkmic(struct adapter *adapter,
 					DBG_88E("\n %s: didn't install group key!!!!!!!!!!\n", __func__);
 					goto exit;
 				}
-				key_idx = prxattrib->key_index;
-				memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16);
-				memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16);
+				mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
+
+				RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+					 ("\n %s: bcmc key\n", __func__));
 			} else {
-				key_idx = 0;
-				memcpy(key, stainfo->dot118021x_UncstKey.skey, 16);
-				memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16);
+				mickey = &stainfo->dot11tkiprxmickey.skey[0];
+				RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+					 ("\n %s: unicast key\n", __func__));
 			}
 
-			if (!crypto_ops) {
-				res = _FAIL;
-				goto exit_lib80211_tkip;
-			}
+			/* icv_len included the mic code */
+			datalen = precvframe->pkt->len-prxattrib->hdrlen -
+				  prxattrib->iv_len-prxattrib->icv_len-8;
+			pframe = precvframe->pkt->data;
+			payload = pframe+prxattrib->hdrlen+prxattrib->iv_len;
 
-			memcpy(iv, pframe + prxattrib->hdrlen, iv_len);
-			memcpy(icv, pframe + skb->len - icv_len, icv_len);
-			memmove(pframe + iv_len, pframe, prxattrib->hdrlen);
+			RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len));
+			rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0],
+					   (unsigned char)prxattrib->priority); /* care the length of the data */
 
-			skb_pull(skb, iv_len);
-			skb_trim(skb, skb->len - icv_len);
+			pframemic = payload+datalen;
 
-			crypto_private = crypto_ops->init(key_idx);
-			if (!crypto_private) {
-				res = _FAIL;
-				goto exit_lib80211_tkip;
-			}
-			if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) {
-				res = _FAIL;
-				goto exit_lib80211_tkip;
-			}
-			if (crypto_ops->decrypt_msdu(skb, key_idx, prxattrib->hdrlen, crypto_private)) {
-				res = _FAIL;
-				goto exit_lib80211_tkip;
+			bmic_err = false;
+
+			for (i = 0; i < 8; i++) {
+				if (miccode[i] != *(pframemic+i)) {
+					RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+						 ("%s: miccode[%d](%02x)!=*(pframemic+%d)(%02x) ",
+						  __func__, i, miccode[i], i, *(pframemic + i)));
+					bmic_err = true;
+				}
 			}
 
-			memmove(pframe, pframe + iv_len, prxattrib->hdrlen);
-			skb_push(skb, iv_len);
-			skb_put(skb, icv_len);
+			if (bmic_err) {
+				RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+					 ("\n *(pframemic-8)-*(pframemic-1)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+					  *(pframemic-8), *(pframemic-7), *(pframemic-6),
+					  *(pframemic-5), *(pframemic-4), *(pframemic-3),
+					  *(pframemic-2), *(pframemic-1)));
+				RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+					 ("\n *(pframemic-16)-*(pframemic-9)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+					  *(pframemic-16), *(pframemic-15), *(pframemic-14),
+					  *(pframemic-13), *(pframemic-12), *(pframemic-11),
+					  *(pframemic-10), *(pframemic-9)));
+				{
+					uint i;
 
-			memcpy(pframe + prxattrib->hdrlen, iv, iv_len);
-			memcpy(pframe + skb->len - icv_len, icv, icv_len);
+					RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+						 ("\n ======demp packet (len=%d)======\n",
+						  precvframe->pkt->len));
+					for (i = 0; i < precvframe->pkt->len; i += 8) {
+						RT_TRACE(_module_rtl871x_recv_c_,
+							 _drv_err_,
+							 ("0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x",
+							  *(precvframe->pkt->data+i),
+							  *(precvframe->pkt->data+i+1),
+							  *(precvframe->pkt->data+i+2),
+							  *(precvframe->pkt->data+i+3),
+							  *(precvframe->pkt->data+i+4),
+							  *(precvframe->pkt->data+i+5),
+							  *(precvframe->pkt->data+i+6),
+							  *(precvframe->pkt->data+i+7)));
+					}
+					RT_TRACE(_module_rtl871x_recv_c_,
+						 _drv_err_,
+						 ("\n ====== demp packet end [len=%d]======\n",
+						  precvframe->pkt->len));
+					RT_TRACE(_module_rtl871x_recv_c_,
+						 _drv_err_,
+						 ("\n hrdlen=%d,\n",
+						  prxattrib->hdrlen));
+				}
 
-exit_lib80211_tkip:
-			if (crypto_ops && crypto_private)
-				crypto_ops->deinit(crypto_private);
+				RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+					 ("ra=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x psecuritypriv->binstallGrpkey=%d ",
+					  prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
+					  prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5], psecuritypriv->binstallGrpkey));
+
+				/* double check key_index for some timing issue , */
+				/* cannot compare with psecuritypriv->dot118021XGrpKeyid also cause timing issue */
+				if ((IS_MCAST(prxattrib->ra) == true) && (prxattrib->key_index != pmlmeinfo->key_index))
+					brpt_micerror = false;
+
+				if ((prxattrib->bdecrypted) && (brpt_micerror)) {
+					rtw_handle_tkip_mic_err(adapter, (u8)IS_MCAST(prxattrib->ra));
+					RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
+					DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
+				} else {
+					RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
+					DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
+				}
+				res = _FAIL;
+			} else {
+				/* mic checked ok */
+				if ((!psecuritypriv->bcheck_grpkey) && (IS_MCAST(prxattrib->ra))) {
+					psecuritypriv->bcheck_grpkey = true;
+					RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("psecuritypriv->bcheck_grpkey = true"));
+				}
+			}
 		} else {
 			RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
 				 ("%s: rtw_get_stainfo==NULL!!!\n", __func__));
 		}
+
+		skb_trim(precvframe->pkt, precvframe->pkt->len - 8);
 	}
 
 exit:
+
 	return res;
 }
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index bfe0b217e679..67a2490f055e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -650,71 +650,71 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
 	return res;
 }
 
+/* The hlen isn't include the IV */
 u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
-{
-	struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib;
-	u32 res = _SUCCESS;
+{	/* exclude ICV */
+	u16 pnl;
+	u32 pnh;
+	u8 rc4key[16];
+	u8 ttkey[16];
+	u8 crc[4];
+	struct arc4context mycontext;
+	int length;
+
+	u8 *pframe, *payload, *iv, *prwskey;
+	union pn48 dot11txpn;
+	struct	sta_info	*stainfo;
+	struct	rx_pkt_attrib	*prxattrib = &((struct recv_frame *)precvframe)->attrib;
+	struct	security_priv	*psecuritypriv = &padapter->securitypriv;
+	u32	res = _SUCCESS;
+
+
+	pframe = (unsigned char *)((struct recv_frame *)precvframe)->pkt->data;
 
 	/* 4 start to decrypt recvframe */
 	if (prxattrib->encrypt == _TKIP_) {
-		struct sta_info *stainfo = rtw_get_stainfo(&padapter->stapriv, prxattrib->ta);
-
+		stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
 		if (stainfo) {
-			int key_idx;
-			const int iv_len = 8, icv_len = 4, key_length = 32;
-			void *crypto_private = NULL;
-			struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
-			u8 key[32], iv[8], icv[4], *pframe = skb->data;
-			struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip");
-			struct security_priv *psecuritypriv = &padapter->securitypriv;
-
 			if (IS_MCAST(prxattrib->ra)) {
 				if (!psecuritypriv->binstallGrpkey) {
 					res = _FAIL;
 					DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
 					goto exit;
 				}
-				key_idx = prxattrib->key_index;
-				memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16);
-				memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16);
+				prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
 			} else {
-				key_idx = 0;
-				memcpy(key, stainfo->dot118021x_UncstKey.skey, 16);
-				memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16);
+				RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__));
+				prwskey = &stainfo->dot118021x_UncstKey.skey[0];
 			}
 
-			if (!crypto_ops) {
-				res = _FAIL;
-				goto exit_lib80211_tkip;
-			}
+			iv = pframe+prxattrib->hdrlen;
+			payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
+			length = ((struct recv_frame *)precvframe)->pkt->len-prxattrib->hdrlen-prxattrib->iv_len;
 
-			memcpy(iv, pframe + prxattrib->hdrlen, iv_len);
-			memcpy(icv, pframe + skb->len - icv_len, icv_len);
+			GET_TKIP_PN(iv, dot11txpn);
 
-			crypto_private = crypto_ops->init(key_idx);
-			if (!crypto_private) {
-				res = _FAIL;
-				goto exit_lib80211_tkip;
-			}
-			if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) {
-				res = _FAIL;
-				goto exit_lib80211_tkip;
-			}
-			if (crypto_ops->decrypt_mpdu(skb, prxattrib->hdrlen, crypto_private)) {
-				res = _FAIL;
-				goto exit_lib80211_tkip;
-			}
+			pnl = (u16)(dot11txpn.val);
+			pnh = (u32)(dot11txpn.val>>16);
 
-			memmove(pframe, pframe + iv_len, prxattrib->hdrlen);
-			skb_push(skb, iv_len);
-			skb_put(skb, icv_len);
+			phase1((u16 *)&ttkey[0], prwskey, &prxattrib->ta[0], pnh);
+			phase2(&rc4key[0], prwskey, (unsigned short *)&ttkey[0], pnl);
 
-			memcpy(pframe + prxattrib->hdrlen, iv, iv_len);
-			memcpy(pframe + skb->len - icv_len, icv, icv_len);
+			/* 4 decrypt payload include icv */
 
-exit_lib80211_tkip:
-			if (crypto_ops && crypto_private)
-				crypto_ops->deinit(crypto_private);
+			arcfour_init(&mycontext, rc4key, 16);
+			arcfour_encrypt(&mycontext, payload, payload, length);
+
+			*((__le32 *)crc) = getcrc32(payload, length-4);
+
+			if (crc[3] != payload[length-1] ||
+			    crc[2] != payload[length-2] ||
+			    crc[1] != payload[length-3] ||
+			    crc[0] != payload[length-4]) {
+				RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
					 ("rtw_wep_decrypt:icv error crc (%4ph)!=payload (%4ph)\n",
+					  &crc, &payload[length-4]));
+				res = _FAIL;
+			}
 		} else {
 			RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo==NULL!!!\n"));
 			res = _FAIL;
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index a61bc41b82d7..947c79532e10 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -198,11 +198,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
 	int chars_sent = 0;
 	char __user *cp;
 	char *init;
+	size_t bytes_per_ch = unicode ? 3 : 1;
 	u16 ch;
 	int empty;
 	unsigned long flags;
 	DEFINE_WAIT(wait);
 
+	if (count < bytes_per_ch)
+		return -EINVAL;
+
 	spin_lock_irqsave(&speakup_info.spinlock, flags);
 	while (1) {
 		prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
@@ -228,7 +232,7 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
 		init = get_initstring();
 
 		/* Keep 3 bytes available for a 16bit UTF-8-encoded character */
-		while (chars_sent <= count - 3) {
+		while (chars_sent <= count - bytes_per_ch) {
 			if (speakup_info.flushing) {
 				speakup_info.flushing = 0;
 				ch = '\x18';
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 785f0ed037f7..ee34e9046f7e 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -3,6 +3,7 @@ config USB_CHIPIDEA
 	depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
 	select EXTCON
 	select RESET_CONTROLLER
+	select USB_ULPI_BUS
 	help
 	  Say Y here if your system has a dual role high speed USB
 	  controller based on ChipIdea silicon IP. It supports:
@@ -38,12 +39,4 @@ config USB_CHIPIDEA_HOST
 	help
 	  Say Y here to enable host controller functionality of the
 	  ChipIdea driver.
-
-config USB_CHIPIDEA_ULPI
-	bool "ChipIdea ULPI PHY support"
-	depends on USB_ULPI_BUS=y || USB_ULPI_BUS=USB_CHIPIDEA
-	help
-	  Say Y here if you have a ULPI PHY attached to your ChipIdea
-	  controller.
-
 endif
diff --git a/drivers/usb/chipidea/Makefile b/drivers/usb/chipidea/Makefile
index e3d5e728fa53..12df94f78f72 100644
--- a/drivers/usb/chipidea/Makefile
+++ b/drivers/usb/chipidea/Makefile
@@ -1,11 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc.o
 
-ci_hdrc-y := core.o otg.o debug.o
+ci_hdrc-y := core.o otg.o debug.o ulpi.o
 ci_hdrc-$(CONFIG_USB_CHIPIDEA_UDC) += udc.o
 ci_hdrc-$(CONFIG_USB_CHIPIDEA_HOST) += host.o
 ci_hdrc-$(CONFIG_USB_OTG_FSM) += otg_fsm.o
-ci_hdrc-$(CONFIG_USB_CHIPIDEA_ULPI) += ulpi.o
 
 # Glue/Bridge layers go here
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 0bf244d50544..6a2cc5cd0281 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -240,10 +240,8 @@ struct ci_hdrc {
 	struct ci_hdrc_platform_data	*platdata;
 	int				vbus_active;
-#ifdef CONFIG_USB_CHIPIDEA_ULPI
 	struct ulpi			*ulpi;
 	struct ulpi_ops			ulpi_ops;
-#endif
 	struct phy			*phy;
 	/* old usb_phy interface */
 	struct usb_phy			*usb_phy;
@@ -426,15 +424,9 @@ static inline bool ci_otg_is_fsm_mode(struct ci_hdrc *ci)
 #endif
 }
 
-#if IS_ENABLED(CONFIG_USB_CHIPIDEA_ULPI)
 int ci_ulpi_init(struct ci_hdrc *ci);
 void ci_ulpi_exit(struct ci_hdrc *ci);
 int ci_ulpi_resume(struct ci_hdrc *ci);
-#else
-static inline int ci_ulpi_init(struct ci_hdrc *ci) { return 0; }
-static inline void ci_ulpi_exit(struct ci_hdrc *ci) { }
-static inline int ci_ulpi_resume(struct ci_hdrc *ci) { return 0; }
-#endif
 
 u32 hw_read_intr_enable(struct ci_hdrc *ci);
diff --git a/drivers/usb/chipidea/ulpi.c b/drivers/usb/chipidea/ulpi.c
index 6da42dcd2888..dfec07e8ae1d 100644
--- a/drivers/usb/chipidea/ulpi.c
+++ b/drivers/usb/chipidea/ulpi.c
@@ -95,6 +95,9 @@ int ci_ulpi_resume(struct ci_hdrc *ci)
 {
 	int cnt = 100000;
 
+	if (ci->platdata->phy_mode != USBPHY_INTERFACE_MODE_ULPI)
+		return 0;
+
 	while (cnt-- > 0) {
 		if (hw_read(ci, OP_ULPI_VIEWPORT, ULPI_SYNC_STATE))
 			return 0;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 998b32d0167e..75c4623ad779 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1831,6 +1831,9 @@ static const struct usb_device_id acm_ids[] = {
 	{ USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
 	.driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
 	},
+	{ USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
+	.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+	},
 
 	{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
 	.driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index fcae521df29b..1fb266809966 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1142,10 +1142,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 
 		if (!udev || udev->state == USB_STATE_NOTATTACHED) {
 			/* Tell hub_wq to disconnect the device or
-			 * check for a new connection
+			 * check for a new connection or over current condition.
+			 * Based on USB2.0 Spec Section 11.12.5,
+			 * C_PORT_OVER_CURRENT could be set while
+			 * PORT_OVER_CURRENT is not. So check for any of them.
 			 */
 			if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
-			    (portstatus & USB_PORT_STAT_OVERCURRENT))
+			    (portstatus & USB_PORT_STAT_OVERCURRENT) ||
+			    (portchange & USB_PORT_STAT_C_OVERCURRENT))
 				set_bit(port1, hub->change_bits);
 
 		} else if (portstatus & USB_PORT_STAT_ENABLE) {
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index a0f82cca2d9a..cefc99ae69b2 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3430,7 +3430,7 @@ static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
 	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
 		hs_ep = hsotg->eps_in[idx];
 		/* Proceed only unmasked ISOC EPs */
-		if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+		if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
 			continue;
 
 		epctrl = dwc2_readl(hsotg->regs + DIEPCTL(idx));
@@ -3476,7 +3476,7 @@ static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
 	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
 		hs_ep = hsotg->eps_out[idx];
 		/* Proceed only unmasked ISOC EPs */
-		if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+		if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
 			continue;
 
 		epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
@@ -3650,7 +3650,7 @@ irq_retry:
 		for (idx = 1; idx < hsotg->num_of_eps; idx++) {
 			hs_ep = hsotg->eps_out[idx];
 			/* Proceed only unmasked ISOC EPs */
-			if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+			if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
 				continue;
 
 			epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index b1104be3429c..6e2cdd7b93d4 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2665,34 +2665,35 @@ static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
 
 #define DWC2_USB_DMA_ALIGN 4
 
-struct dma_aligned_buffer {
-	void *kmalloc_ptr;
-	void *old_xfer_buffer;
-	u8 data[0];
-};
-
 static void dwc2_free_dma_aligned_buffer(struct urb *urb)
 {
-	struct dma_aligned_buffer *temp;
+	void *stored_xfer_buffer;
+	size_t length;
 
 	if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
 		return;
 
-	temp = container_of(urb->transfer_buffer,
-			    struct dma_aligned_buffer, data);
+	/* Restore urb->transfer_buffer from the end of the allocated area */
+	memcpy(&stored_xfer_buffer, urb->transfer_buffer +
+	       urb->transfer_buffer_length, sizeof(urb->transfer_buffer));
 
-	if (usb_urb_dir_in(urb))
-		memcpy(temp->old_xfer_buffer, temp->data,
-		       urb->transfer_buffer_length);
-	urb->transfer_buffer = temp->old_xfer_buffer;
-	kfree(temp->kmalloc_ptr);
+	if (usb_urb_dir_in(urb)) {
+		if (usb_pipeisoc(urb->pipe))
+			length = urb->transfer_buffer_length;
+		else
+			length = urb->actual_length;
+
+		memcpy(stored_xfer_buffer, urb->transfer_buffer, length);
+	}
+	kfree(urb->transfer_buffer);
+	urb->transfer_buffer = stored_xfer_buffer;
 
 	urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
 }
 
 static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
 {
-	struct dma_aligned_buffer *temp, *kmalloc_ptr;
+	void *kmalloc_ptr;
 	size_t kmalloc_size;
 
 	if (urb->num_sgs || urb->sg ||
@@ -2700,22 +2701,29 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
 	    !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1)))
 		return 0;
 
-	/* Allocate a buffer with enough padding for alignment */
+	/*
+	 * Allocate a buffer with enough padding for original transfer_buffer
+	 * pointer. This allocation is guaranteed to be aligned properly for
+	 * DMA
+	 */
 	kmalloc_size = urb->transfer_buffer_length +
-		sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1;
+		sizeof(urb->transfer_buffer);
 
 	kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
 	if (!kmalloc_ptr)
 		return -ENOMEM;
 
-	/* Position our struct dma_aligned_buffer such that data is aligned */
-	temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1;
-	temp->kmalloc_ptr = kmalloc_ptr;
-	temp->old_xfer_buffer = urb->transfer_buffer;
+	/*
+	 * Position value of original urb->transfer_buffer pointer to the end
+	 * of allocation for later referencing
+	 */
+	memcpy(kmalloc_ptr + urb->transfer_buffer_length,
+	       &urb->transfer_buffer, sizeof(urb->transfer_buffer));
+
 	if (usb_urb_dir_out(urb))
-		memcpy(temp->data, urb->transfer_buffer,
+		memcpy(kmalloc_ptr, urb->transfer_buffer,
 		       urb->transfer_buffer_length);
-	urb->transfer_buffer = temp->data;
+	urb->transfer_buffer = kmalloc_ptr;
 
 	urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index ed7f05cf4906..8ce10caf3e19 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -1231,7 +1231,10 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
 	 * avoid interrupt storms we'll wait before retrying if we've got
 	 * several NAKs. If we didn't do this we'd retry directly from the
 	 * interrupt handler and could end up quickly getting another
-	 * interrupt (another NAK), which we'd retry.
+	 * interrupt (another NAK), which we'd retry. Note that we do not
+	 * delay retries for IN parts of control requests, as those are expected
+	 * to complete fairly quickly, and if we delay them we risk confusing
+	 * the device and cause it issue STALL.
 	 *
 	 * Note that in DMA mode software only gets involved to re-send NAKed
 	 * transfers for split transactions, so we only need to apply this
@@ -1244,7 +1247,9 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
 		qtd->error_count = 0;
 		qtd->complete_split = 0;
 		qtd->num_naks++;
-		qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY;
+		qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY &&
+				!(chan->ep_type == USB_ENDPOINT_XFER_CONTROL &&
+				  chan->ep_is_in);
 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
 		goto handle_nak_done;
 	}
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index c77ff50a88a2..8efde178eef4 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -973,15 +973,12 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
 		ret = dwc3_ep0_start_trans(dep);
 	} else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
 		   req->request.length && req->request.zero) {
-		u32	maxpacket;
 
 		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
 				&req->request, dep->number);
 		if (ret)
 			return;
 
-		maxpacket = dep->endpoint.maxpacket;
-
 		/* prepare normal TRB */
 		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
 					 req->request.length,
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index d2fa071c21b1..b8a15840b4ff 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1819,7 +1819,6 @@ unknown:
 		if (cdev->use_os_string && cdev->os_desc_config &&
 		    (ctrl->bRequestType & USB_TYPE_VENDOR) &&
 		    ctrl->bRequest == cdev->b_vendor_code) {
-			struct usb_request	*req;
 			struct usb_configuration	*os_desc_cfg;
 			u8			*buf;
 			int			interface;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 33e2030503fa..3ada83d81bda 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3263,7 +3263,7 @@ static int ffs_func_setup(struct usb_function *f,
 	__ffs_event_add(ffs, FUNCTIONFS_SETUP);
 	spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
 
-	return USB_GADGET_DELAYED_STATUS;
+	return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
 }
 
 static bool ffs_func_req_match(struct usb_function *f,
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index d2dc1f00180b..d582921f7257 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -438,14 +438,14 @@ static struct usb_descriptor_header *hs_audio_desc[] = {
 };
 
 struct cntrl_cur_lay3 {
-	__u32	dCUR;
+	__le32	dCUR;
 };
 
 struct cntrl_range_lay3 {
-	__u16	wNumSubRanges;
-	__u32	dMIN;
-	__u32	dMAX;
-	__u32	dRES;
+	__le16	wNumSubRanges;
+	__le32	dMIN;
+	__le32	dMAX;
+	__le32	dRES;
 } __packed;
 
 static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
@@ -559,13 +559,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
 	agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
 	if (!agdev->out_ep) {
 		dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
-		return ret;
+		return -ENODEV;
 	}
 
 	agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
 	if (!agdev->in_ep) {
 		dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
-		return ret;
+		return -ENODEV;
 	}
 
 	agdev->in_ep_maxpsize = max_t(u16,
@@ -703,9 +703,9 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
 		memset(&c, 0, sizeof(struct cntrl_cur_lay3));
 
 		if (entity_id == USB_IN_CLK_ID)
-			c.dCUR = p_srate;
+			c.dCUR = cpu_to_le32(p_srate);
 		else if (entity_id == USB_OUT_CLK_ID)
-			c.dCUR = c_srate;
+			c.dCUR = cpu_to_le32(c_srate);
 
 		value = min_t(unsigned, w_length, sizeof c);
 		memcpy(req->buf, &c, value);
@@ -742,15 +742,15 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
 
 		if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
 			if (entity_id == USB_IN_CLK_ID)
-				r.dMIN = p_srate;
+				r.dMIN = cpu_to_le32(p_srate);
 			else if (entity_id == USB_OUT_CLK_ID)
-				r.dMIN = c_srate;
+				r.dMIN = cpu_to_le32(c_srate);
 			else
 				return -EOPNOTSUPP;
 
 			r.dMAX = r.dMIN;
 			r.dRES = 0;
-			r.wNumSubRanges = 1;
+			r.wNumSubRanges = cpu_to_le16(1);
 
 			value = min_t(unsigned, w_length, sizeof r);
 			memcpy(req->buf, &r, value);
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index a72295c953bb..fb5ed97572e5 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -32,9 +32,6 @@ struct uac_req {
 struct uac_rtd_params {
 	struct snd_uac_chip *uac; /* parent chip */
 	bool ep_enabled; /* if the ep is enabled */
-	/* Size of the ring buffer */
-	size_t dma_bytes;
-	unsigned char *dma_area;
 
 	struct snd_pcm_substream *ss;
 
@@ -43,8 +40,6 @@ struct uac_rtd_params {
 
 	void *rbuf;
 
-	size_t period_size;
-
 	unsigned max_psize;	/* MaxPacketSize of endpoint */
 	struct uac_req *ureq;
 
@@ -84,12 +79,12 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
 static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
 {
 	unsigned pending;
-	unsigned long flags;
+	unsigned long flags, flags2;
 	unsigned int hw_ptr;
-	bool update_alsa = false;
 	int status = req->status;
 	struct uac_req *ur = req->context;
 	struct snd_pcm_substream *substream;
+	struct snd_pcm_runtime *runtime;
 	struct uac_rtd_params *prm = ur->pp;
 	struct snd_uac_chip *uac = prm->uac;
 
@@ -111,6 +106,14 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
 	if (!substream)
 		goto exit;
 
+	snd_pcm_stream_lock_irqsave(substream, flags2);
+
+	runtime = substream->runtime;
+	if (!runtime || !snd_pcm_running(substream)) {
+		snd_pcm_stream_unlock_irqrestore(substream, flags2);
+		goto exit;
+	}
+
 	spin_lock_irqsave(&prm->lock, flags);
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -137,43 +140,46 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
 		req->actual = req->length;
 	}
 
-	pending = prm->hw_ptr % prm->period_size;
-	pending += req->actual;
-	if (pending >= prm->period_size)
-		update_alsa = true;
-
 	hw_ptr = prm->hw_ptr;
-	prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes;
 
 	spin_unlock_irqrestore(&prm->lock, flags);
 
 	/* Pack USB load in ALSA ring buffer */
-	pending = prm->dma_bytes - hw_ptr;
+	pending = runtime->dma_bytes - hw_ptr;
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 		if (unlikely(pending < req->actual)) {
-			memcpy(req->buf, prm->dma_area + hw_ptr, pending);
-			memcpy(req->buf + pending, prm->dma_area,
+			memcpy(req->buf, runtime->dma_area + hw_ptr, pending);
+			memcpy(req->buf + pending, runtime->dma_area,
 			       req->actual - pending);
 		} else {
-			memcpy(req->buf, prm->dma_area + hw_ptr, req->actual);
+			memcpy(req->buf, runtime->dma_area + hw_ptr,
+			       req->actual);
 		}
 	} else {
 		if (unlikely(pending < req->actual)) {
-			memcpy(prm->dma_area + hw_ptr, req->buf, pending);
-			memcpy(prm->dma_area, req->buf + pending,
+			memcpy(runtime->dma_area + hw_ptr, req->buf, pending);
+			memcpy(runtime->dma_area, req->buf + pending,
 			       req->actual - pending);
 		} else {
-			memcpy(prm->dma_area + hw_ptr, req->buf, req->actual);
+			memcpy(runtime->dma_area + hw_ptr, req->buf,
+			       req->actual);
 		}
 	}
 
+	spin_lock_irqsave(&prm->lock, flags);
+	/* update hw_ptr after data is copied to memory */
+	prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes;
+	hw_ptr = prm->hw_ptr;
+	spin_unlock_irqrestore(&prm->lock, flags);
+	snd_pcm_stream_unlock_irqrestore(substream, flags2);
+
+	if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual)
+		snd_pcm_period_elapsed(substream);
+
 exit:
 	if (usb_ep_queue(ep, req, GFP_ATOMIC))
 		dev_err(uac->card->dev, "%d Error!\n", __LINE__);
-
-	if (update_alsa)
-		snd_pcm_period_elapsed(substream);
 }
 
 static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
@@ -236,40 +242,12 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
 static int uac_pcm_hw_params(struct snd_pcm_substream *substream,
 			       struct snd_pcm_hw_params *hw_params)
 {
-	struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
-	struct uac_rtd_params *prm;
-	int err;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		prm = &uac->p_prm;
-	else
-		prm = &uac->c_prm;
-
-	err = snd_pcm_lib_malloc_pages(substream,
+	return snd_pcm_lib_malloc_pages(substream,
 					params_buffer_bytes(hw_params));
-	if (err >= 0) {
-		prm->dma_bytes = substream->runtime->dma_bytes;
-		prm->dma_area = substream->runtime->dma_area;
-		prm->period_size = params_period_bytes(hw_params);
-	}
-
-	return err;
 }
 
 static int uac_pcm_hw_free(struct snd_pcm_substream *substream)
 {
-	struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
-	struct uac_rtd_params *prm;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		prm = &uac->p_prm;
-	else
-		prm = &uac->c_prm;
-
-	prm->dma_area = NULL;
-	prm->dma_bytes = 0;
-	prm->period_size = 0;
-
 	return snd_pcm_lib_free_pages(substream);
 }
 
@@ -595,15 +573,15 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
 	if (err < 0)
 		goto snd_fail;
 
-	strcpy(pcm->name, pcm_name);
+	strlcpy(pcm->name, pcm_name, sizeof(pcm->name));
 	pcm->private_data = uac;
 	uac->pcm = pcm;
 
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops);
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops);
 
-	strcpy(card->driver, card_name);
-	strcpy(card->shortname, card_name);
+	strlcpy(card->driver, card_name, sizeof(card->driver));
+	strlcpy(card->shortname, card_name, sizeof(card->shortname));
 	sprintf(card->longname, "%s %i", card_name, card->dev->id);
 
 	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
index 20ffb03ff6ac..e2927fb083cf 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
@@ -108,6 +108,13 @@ void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
 	/* Check our state, cancel pending requests if needed */
 	if (ep->ep0.state != ep0_state_token) {
 		EPDBG(ep, "wrong state\n");
+		ast_vhub_nuke(ep, -EIO);
+
+		/*
+		 * Accept the packet regardless, this seems to happen
+		 * when stalling a SETUP packet that has an OUT data
+		 * phase.
+		 */
 		ast_vhub_nuke(ep, 0);
 		goto stall;
 	}
@@ -212,6 +219,8 @@ static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
 	if (chunk && req->req.buf)
 		memcpy(ep->buf, req->req.buf + req->req.actual, chunk);
 
+	vhub_dma_workaround(ep->buf);
+
 	/* Remember chunk size and trigger send */
 	reg = VHUB_EP0_SET_TX_LEN(chunk);
 	writel(reg, ep->ep0.ctlstat);
@@ -224,7 +233,7 @@ static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
 	EPVDBG(ep, "rx prime\n");
 
 	/* Prime endpoint for receiving data */
-	writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat + AST_VHUB_EP0_CTRL);
+	writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
 }
 
 static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
index 80c9feac5147..5939eb1e97f2 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
@@ -66,11 +66,16 @@ static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
 	if (!req->req.dma) {
 
 		/* For IN transfers, copy data over first */
-		if (ep->epn.is_in)
+		if (ep->epn.is_in) {
 			memcpy(ep->buf, req->req.buf + act, chunk);
+			vhub_dma_workaround(ep->buf);
+		}
 		writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
-	} else
+	} else {
+		if (ep->epn.is_in)
+			vhub_dma_workaround(req->req.buf);
 		writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
+	}
 
 	/* Start DMA */
 	req->active = true;
@@ -161,6 +166,7 @@ static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
 static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
 				   struct ast_vhub_req *req)
 {
+	struct ast_vhub_desc *desc = NULL;
 	unsigned int act = req->act_count;
 	unsigned int len = req->req.length;
 	unsigned int chunk;
@@ -177,7 +183,6 @@ static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
 
 	/* While we can create descriptors */
 	while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
-		struct ast_vhub_desc *desc;
 		unsigned int d_num;
 
 		/* Grab next free descriptor */
@@ -227,6 +232,9 @@ static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
 		req->act_count = act = act + chunk;
 	}
 
+	if (likely(desc))
+		vhub_dma_workaround(desc);
+
 	/* Tell HW about new descriptors */
 	writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
 	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
index 2b040257bc1f..4ed03d33a5a9 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
+++ b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
@@ -462,6 +462,39 @@ enum std_req_rc {
 #define DDBG(d, fmt, ...)	do { } while(0)
 #endif
 
+static inline void vhub_dma_workaround(void *addr)
+{
+	/*
+	 * This works around a confirmed HW issue with the Aspeed chip.
+	 *
+	 * The core uses a different bus to memory than the AHB going to
+	 * the USB device controller. Due to the latter having a higher
+	 * priority than the core for arbitration on that bus, it's
+	 * possible for an MMIO to the device, followed by a DMA by the
+	 * device from memory to all be performed and services before
+	 * a previous store to memory gets completed.
+	 *
+	 * This the following scenario can happen:
+	 *
+	 * - Driver writes to a DMA descriptor (Mbus)
+	 * - Driver writes to the MMIO register to start the DMA (AHB)
+	 * - The gadget sees the second write and sends a read of the
+	 *   descriptor to the memory controller (Mbus)
+	 * - The gadget hits memory before the descriptor write
+	 *   causing it to read an obsolete value.
+	 *
+	 * Thankfully the problem is limited to the USB gadget device, other
+	 * masters in the SoC all have a lower priority than the core, thus
+	 * ensuring that the store by the core arrives first.
+	 *
+	 * The workaround consists of using a dummy read of the memory before
+	 * doing the MMIO writes. This will ensure that the previous writes
+	 * have been "pushed out".
+	 */
+	mb();
+	(void)__raw_readl((void __iomem *)addr);
+}
+
 /* core.c */
 void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
 		   int status);
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index a3ecce62662b..11e25a3f4f1f 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -832,11 +832,11 @@ static void init_controller(struct r8a66597 *r8a66597)
 
 		r8a66597_bset(r8a66597, XCKE, SYSCFG0);
 
-		msleep(3);
+		mdelay(3);
 
 		r8a66597_bset(r8a66597, PLLC, SYSCFG0);
 
-		msleep(1);
+		mdelay(1);
 
 		r8a66597_bset(r8a66597, SCKE, SYSCFG0);
 
@@ -1190,7 +1190,7 @@ __acquires(r8a66597->lock)
 	r8a66597->ep0_req->length = 2;
 	/* AV: what happens if we get called again before that gets through? */
 	spin_unlock(&r8a66597->lock);
-	r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
+	r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC);
 	spin_lock(&r8a66597->lock);
 }
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2f4850f25e82..68e6132aa8b2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3051,6 +3051,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 	if (!list_empty(&ep->ring->td_list)) {
 		dev_err(&udev->dev, "EP not empty, refuse reset\n");
 		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_free_command(xhci, cfg_cmd);
 		goto cleanup;
 	}
 	xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index 900875f326d7..f7c96d209eda 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -861,6 +861,7 @@ int usb_otg_start(struct platform_device *pdev)
 	if (pdata->init && pdata->init(pdev) != 0)
 		return -EINVAL;
 
+#ifdef CONFIG_PPC32
 	if (pdata->big_endian_mmio) {
 		_fsl_readl = _fsl_readl_be;
 		_fsl_writel = _fsl_writel_be;
@@ -868,6 +869,7 @@ int usb_otg_start(struct platform_device *pdev)
 		_fsl_readl = _fsl_readl_le;
 		_fsl_writel = _fsl_writel_le;
 	}
+#endif
 
 	/* request irq */
 	p_otg->irq = platform_get_irq(pdev, 0);
@@ -958,7 +960,7 @@ int usb_otg_start(struct platform_device *pdev)
 /*
  * state file in sysfs
  */
-static int show_fsl_usb2_otg_state(struct device *dev,
+static ssize_t show_fsl_usb2_otg_state(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
 	struct otg_fsm *fsm = &fsl_otg_dev->fsm;
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index 150f43668bec..d1d20252bad8 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -2140,7 +2140,7 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
 			 * PPS APDO. Again skip the first sink PDO as this will
 			 * always be 5V 3A.
			 */
-			for (j = i; j < port->nr_snk_pdo; j++) {
+			for (j = 1; j < port->nr_snk_pdo; j++) {
 				pdo = port->snk_pdo[j];
 
 				switch (pdo_type(pdo)) {
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 0dd87aaeb39a..aba25414231a 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -221,7 +221,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
 
 	ret = bio_iov_iter_get_pages(&bio, iter);
 	if (unlikely(ret))
-		return ret;
+		goto out;
 	ret = bio.bi_iter.bi_size;
 
 	if (iov_iter_rw(iter) == READ) {
@@ -250,12 +250,13 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
 		put_page(bvec->bv_page);
 	}
 
-	if (vecs != inline_vecs)
-		kfree(vecs);
-
 	if (unlikely(bio.bi_status))
 		ret = blk_status_to_errno(bio.bi_status);
 
+out:
+	if (vecs != inline_vecs)
+		kfree(vecs);
+
 	bio_uninit(&bio);
 
 	return ret;
diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
index d9f001078e08..4a717d400807 100644
--- a/fs/cachefiles/bind.c
+++ b/fs/cachefiles/bind.c
@@ -218,7 +218,8 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
 	       "%s",
 	       fsdef->dentry->d_sb->s_id);
 
-	fscache_object_init(&fsdef->fscache, NULL, &cache->cache);
+	fscache_object_init(&fsdef->fscache, &fscache_fsdef_index,
+			    &cache->cache);
 
 	ret = fscache_add_cache(&cache->cache, &fsdef->fscache, cache->tag);
 	if (ret < 0)
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index ab0bbe93b398..af2b17b21b94 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -186,12 +186,12 @@ try_again:
 	 * need to wait for it to be destroyed */
wait_for_old_object:
 	trace_cachefiles_wait_active(object, dentry, xobject);
+	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
 
 	if (fscache_object_is_live(&xobject->fscache)) {
 		pr_err("\n");
 		pr_err("Error: Unexpected object collision\n");
 		cachefiles_printk_object(object, xobject);
-		BUG();
 	}
 	atomic_inc(&xobject->usage);
 	write_unlock(&cache->active_lock);
@@ -248,7 +248,6 @@ wait_for_old_object:
 	goto try_again;
 
requeue:
-	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
 	cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_timeo);
 	_leave(" = -ETIMEDOUT");
 	return -ETIMEDOUT;
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 5082c8a49686..40f7595aad10 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -27,6 +27,7 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
 	struct cachefiles_one_read *monitor =
 		container_of(wait, struct cachefiles_one_read, monitor);
 	struct cachefiles_object *object;
+	struct fscache_retrieval *op = monitor->op;
 	struct wait_bit_key *key = _key;
 	struct page *page = wait->private;
 
@@ -51,16 +52,22 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
 	list_del(&wait->entry);
 
 	/* move onto the action list and queue for FS-Cache thread pool */
-	ASSERT(monitor->op);
+	ASSERT(op);
 
-	object = container_of(monitor->op->op.object,
-			      struct cachefiles_object, fscache);
+	/* We need to temporarily bump the usage count as we don't own a ref
+	 * here otherwise cachefiles_read_copier() may free the op between the
+	 * monitor being enqueued on the op->to_do list and the op getting
+	 * enqueued on the work queue.
+	 */
+	fscache_get_retrieval(op);
 
+	object = container_of(op->op.object, struct cachefiles_object, fscache);
 	spin_lock(&object->work_lock);
-	list_add_tail(&monitor->op_link, &monitor->op->to_do);
+	list_add_tail(&monitor->op_link, &op->to_do);
 	spin_unlock(&object->work_lock);
 
-	fscache_enqueue_retrieval(monitor->op);
+	fscache_enqueue_retrieval(op);
+	fscache_put_retrieval(op);
 	return 0;
 }
diff --git a/fs/exec.c b/fs/exec.c
index 72e961a62adb..bdd0eacefdf5 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -293,6 +293,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
 	bprm->vma = vma = vm_area_alloc(mm);
 	if (!vma)
 		return -ENOMEM;
+	vma_set_anonymous(vma);
 
 	if (down_write_killable(&mm->mmap_sem)) {
 		err = -EINTR;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index e68cefe08261..aa52d87985aa 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -368,6 +368,8 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
 		return -EFSCORRUPTED;
 
 	ext4_lock_group(sb, block_group);
+	if (buffer_verified(bh))
+		goto verified;
 	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
 			desc, bh))) {
 		ext4_unlock_group(sb, block_group);
@@ -386,6 +388,7 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
 		return -EFSCORRUPTED;
 	}
 	set_buffer_verified(bh);
+verified:
 	ext4_unlock_group(sb, block_group);
 	return 0;
 }
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index fb83750c1a14..f336cbc6e932 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -90,6 +90,8 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
 		return -EFSCORRUPTED;
 
 	ext4_lock_group(sb, block_group);
+	if (buffer_verified(bh))
+		goto verified;
 	blk = ext4_inode_bitmap(sb, desc);
 	if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
 					   EXT4_INODES_PER_GROUP(sb) / 8)) {
@@ -101,6 +103,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
 		return -EFSBADCRC;
 	}
 	set_buffer_verified(bh);
+verified:
 	ext4_unlock_group(sb, block_group);
 	return 0;
 }
@@ -1385,7 +1388,10 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
 			    ext4_itable_unused_count(sb, gdp)),
 			    sbi->s_inodes_per_block);
 
-	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
+	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
+	    ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
+			       ext4_itable_unused_count(sb, gdp)) <
+			      EXT4_FIRST_INO(sb)))) {
 		ext4_error(sb, "Something is wrong with group %u: "
 			   "used itable blocks: %d; "
 			   "itable unused count: %u",
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index e55a8bc870bd..3543fe80a3c4 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -682,6 +682,10 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
 		goto convert;
 	}
 
+	ret = ext4_journal_get_write_access(handle, iloc.bh);
+	if (ret)
+		goto out;
+
 	flags |= AOP_FLAG_NOFS;
 
 	page = grab_cache_page_write_begin(mapping, 0, flags);
@@ -710,7 +714,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
out_up_read:
 	up_read(&EXT4_I(inode)->xattr_sem);
out:
-	if (handle)
+	if (handle && (ret != 1))
 		ext4_journal_stop(handle);
 	brelse(iloc.bh);
 	return ret;
@@ -752,6 +756,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
 
 	ext4_write_unlock_xattr(inode, &no_expand);
 	brelse(iloc.bh);
+	mark_inode_dirty(inode);
out:
 	return copied;
 }
@@ -898,7 +903,6 @@ retry_journal:
 		goto out;
 	}
 
-
 	page = grab_cache_page_write_begin(mapping, 0, flags);
 	if (!page) {
 		ret = -ENOMEM;
@@ -916,6 +920,9 @@ retry_journal:
 		if (ret < 0)
 			goto out_release_page;
 	}
+	ret = ext4_journal_get_write_access(handle, iloc.bh);
+	if (ret)
+		goto out_release_page;
 
 	up_read(&EXT4_I(inode)->xattr_sem);
 	*pagep = page;
@@ -936,7 +943,6 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
 				  unsigned len, unsigned copied,
 				  struct page *page)
 {
-	int i_size_changed = 0;
 	int ret;
 
 	ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
@@ -954,10 +960,8 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
 	 * But it's important to update i_size while still holding page lock:
 	 * page writeout could otherwise come in and zero beyond i_size.
 	 */
-	if (pos+copied > inode->i_size) {
+	if (pos+copied > inode->i_size)
 		i_size_write(inode, pos+copied);
-		i_size_changed = 1;
-	}
 	unlock_page(page);
 	put_page(page);
 
@@ -967,8 +971,7 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
 	 * ordering of page lock and transaction start for journaling
 	 * filesystems.
 	 */
-	if (i_size_changed)
-		mark_inode_dirty(inode);
+	mark_inode_dirty(inode);
 
 	return copied;
 }
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 7d6c10017bdf..4efe77286ecd 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1389,9 +1389,10 @@ static int ext4_write_end(struct file *file,
 	loff_t old_size = inode->i_size;
 	int ret = 0, ret2;
 	int i_size_changed = 0;
+	int inline_data = ext4_has_inline_data(inode);
 
 	trace_ext4_write_end(inode, pos, len, copied);
-	if (ext4_has_inline_data(inode)) {
+	if (inline_data) {
 		ret = ext4_write_inline_data_end(inode, pos, len,
 						 copied, page);
 		if (ret < 0) {
@@ -1419,7 +1420,7 @@ static int ext4_write_end(struct file *file,
 	 * ordering of page lock and transaction start for journaling
 	 * filesystems.
 	 */
-	if (i_size_changed)
+	if (i_size_changed || inline_data)
 		ext4_mark_inode_dirty(handle, inode);
 
 	if (pos + len > inode->i_size && ext4_can_truncate(inode))
@@ -1493,6 +1494,7 @@ static int ext4_journalled_write_end(struct file *file,
 	int partial = 0;
 	unsigned from, to;
 	int size_changed = 0;
+	int inline_data = ext4_has_inline_data(inode);
 
 	trace_ext4_journalled_write_end(inode, pos, len, copied);
 	from = pos & (PAGE_SIZE - 1);
@@ -1500,7 +1502,7 @@ static int ext4_journalled_write_end(struct file *file,
 
 	BUG_ON(!ext4_handle_valid(handle));
 
-	if (ext4_has_inline_data(inode)) {
+	if (inline_data) {
 		ret = ext4_write_inline_data_end(inode, pos, len,
 						 copied, page);
 		if (ret < 0) {
@@ -1531,7 +1533,7 @@ static int ext4_journalled_write_end(struct file *file,
 	if (old_size < pos)
 		pagecache_isize_extended(inode, old_size, pos);
 
-	if (size_changed) {
+	if (size_changed || inline_data) {
 		ret2 = ext4_mark_inode_dirty(handle, inode);
 		if (!ret)
 			ret = ret2;
@@ -2028,11 +2030,7 @@ static int __ext4_journalled_writepage(struct page *page,
 	}
 
 	if (inline_data) {
-		BUFFER_TRACE(inode_bh, "get write access");
-		ret = ext4_journal_get_write_access(handle, inode_bh);
-
-		err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
-
+		ret = ext4_mark_inode_dirty(handle, inode);
 	} else {
 		ret = ext4_walk_page_buffers(handle, page_bufs, 0, len,
 					     NULL, do_journal_get_write_access);
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 27b9a76a0dfa..638ad4743477 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -186,11 +186,8 @@ static int kmmpd(void *data)
 			goto exit_thread;
 		}
 
-		if (sb_rdonly(sb)) {
-			ext4_warning(sb, "kmmpd being stopped since filesystem "
-				     "has been remounted as readonly.");
-			goto exit_thread;
-		}
+		if (sb_rdonly(sb))
+			break;
 
 		diff = jiffies - last_update_time;
 		if (diff < mmp_update_interval * HZ)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index ba2396a7bd04..b7f7922061be 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2342,7 +2342,7 @@ static int ext4_check_descriptors(struct super_block *sb,
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
 	ext4_fsblk_t last_block;
-	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1;
+	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
 	ext4_fsblk_t block_bitmap;
 	ext4_fsblk_t inode_bitmap;
 	ext4_fsblk_t inode_table;
@@ -3141,14 +3141,8 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
 		if (!gdp)
 			continue;
 
-		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
-			continue;
-		if (group != 0)
+		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
 			break;
-		ext4_error(sb, "Inode table for bg 0 marked as "
-			   "needing zeroing");
-		if (sb_rdonly(sb))
-			return ngroups;
 	}
 
 	return group;
@@ -4085,14 +4079,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			goto failed_mount2;
 		}
 	}
+	sbi->s_gdb_count = db_count;
 	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
 		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
 		ret = -EFSCORRUPTED;
 		goto failed_mount2;
 	}
 
-	sbi->s_gdb_count = db_count;
-
 	timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
 
 	/* Register extent status tree shrinker */
@@ -5213,6 +5206,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
 
 			if (sbi->s_journal)
 				ext4_mark_recovery_complete(sb, es);
+			if (sbi->s_mmp_tsk)
+				kthread_stop(sbi->s_mmp_tsk);
 		} else {
 			/* Make sure we can mount this feature set readwrite */
 			if (ext4_has_feature_readonly(sb) ||
diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c
index c184c5a356ff..cdcb376ef8df 100644
--- a/fs/fscache/cache.c
+++ b/fs/fscache/cache.c
@@ -220,6 +220,7 @@ int fscache_add_cache(struct fscache_cache *cache,
 {
 	struct fscache_cache_tag *tag;
 
+	ASSERTCMP(ifsdef->cookie, ==, &fscache_fsdef_index);
 	BUG_ON(!cache->ops);
 	BUG_ON(!ifsdef);
 
@@ -248,7 +249,6 @@ int fscache_add_cache(struct fscache_cache *cache,
 	if (!cache->kobj)
 		goto error;
 
-	ifsdef->cookie = &fscache_fsdef_index;
 	ifsdef->cache = cache;
 	cache->fsdef = ifsdef;
 
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 97137d7ec5ee..83bfe04456b6 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -516,6 +516,7 @@ static int fscache_alloc_object(struct fscache_cache *cache,
 		goto error;
 	}
 
+	ASSERTCMP(object->cookie, ==, cookie);
 	fscache_stat(&fscache_n_object_alloc);
 
 	object->debug_id = atomic_inc_return(&fscache_object_debug_id);
@@ -571,6 +572,8 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
 
 	_enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);
 
+	ASSERTCMP(object->cookie, ==, cookie);
+
 	spin_lock(&cookie->lock);
 
 	/* there may be multiple initial creations of this object, but we only
@@ -610,9 +613,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
 		spin_unlock(&cache->object_list_lock);
 	}
 
-	/* attach to the cookie */
-	object->cookie = cookie;
-	fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
+	/* Attach to the cookie. The object already has a ref on it. */
 	hlist_add_head(&object->cookie_link, &cookie->backing_objects);
 	fscache_objlist_add(object);
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 20e0d0a4dc8c..9edc920f651f 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -327,6 +327,7 @@ void fscache_object_init(struct fscache_object *object,
 	object->store_limit_l = 0;
 	object->cache = cache;
 	object->cookie = cookie;
+	fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
 	object->parent = NULL;
 #ifdef CONFIG_FSCACHE_OBJECT_LIST
 	RB_CLEAR_NODE(&object->objlist_link);
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index e30c5975ea58..8d265790374c 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -70,7 +70,8 @@ void fscache_enqueue_operation(struct fscache_operation *op)
 	ASSERT(op->processor != NULL);
 	ASSERT(fscache_object_is_available(op->object));
 	ASSERTCMP(atomic_read(&op->usage), >, 0);
-	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
+	ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
+		    op->state, ==, FSCACHE_OP_ST_CANCELLED);
 
 	fscache_stat(&fscache_n_op_enqueue);
 	switch (op->flags & FSCACHE_OP_TYPE) {
@@ -499,7 +500,8 @@ void fscache_put_operation(struct fscache_operation *op)
 	struct fscache_cache *cache;
 
 	_enter("{OBJ%x OP%x,%d}",
-	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));
+	       op->object ? op->object->debug_id : 0,
+	       op->debug_id, atomic_read(&op->usage));
 
 	ASSERTCMP(atomic_read(&op->usage), >, 0);
 
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index d508c7844681..40d4c66c7751 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -411,6 +411,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 	bool truncate_op = (lend == LLONG_MAX);
 
 	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
+	vma_init(&pseudo_vma, current->mm);
 	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
 	pagevec_init(&pvec);
 	next = start;
@@ -595,6 +596,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 	 * as input to create an allocation policy.
*/ memset(&pseudo_vma, 0, sizeof(struct vm_area_struct)); + vma_init(&pseudo_vma, mm); pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED); pseudo_vma.vm_file = file; diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c index 23813c078cc9..0839efa720b3 100644 --- a/fs/squashfs/cache.c +++ b/fs/squashfs/cache.c @@ -350,6 +350,9 @@ int squashfs_read_metadata(struct super_block *sb, void *buffer, TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset); + if (unlikely(length < 0)) + return -EIO; + while (length) { entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0); if (entry->error) { diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index 13d80947bf9e..fcff2e0487fe 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c @@ -194,7 +194,11 @@ static long long read_indexes(struct super_block *sb, int n, } for (i = 0; i < blocks; i++) { - int size = le32_to_cpu(blist[i]); + int size = squashfs_block_size(blist[i]); + if (size < 0) { + err = size; + goto failure; + } block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size); } n -= blocks; @@ -367,7 +371,7 @@ static int read_blocklist(struct inode *inode, int index, u64 *block) sizeof(size)); if (res < 0) return res; - return le32_to_cpu(size); + return squashfs_block_size(size); } /* Copy data into page cache */ diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c index 0ed6edbc5c71..86ad9a4b8c36 100644 --- a/fs/squashfs/fragment.c +++ b/fs/squashfs/fragment.c @@ -61,9 +61,7 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment, return size; *fragment_block = le64_to_cpu(fragment_entry.start_block); - size = le32_to_cpu(fragment_entry.size); - - return size; + return squashfs_block_size(fragment_entry.size); } diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h index 24d12fd14177..4e6853f084d0 100644 --- a/fs/squashfs/squashfs_fs.h +++ b/fs/squashfs/squashfs_fs.h @@ -129,6 +129,12 @@ #define SQUASHFS_COMPRESSED_BLOCK(B) (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK)) +static inline int squashfs_block_size(__le32 raw) +{ + u32 size = le32_to_cpu(raw); + return (size >> 25) ? -EIO : size; +} + /* * Inode number ops. 
Inodes consist of a compressed block number, and an * uncompressed offset within that block diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index eef466260d43..75dbdc14c45f 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -223,12 +223,13 @@ xfs_alloc_get_rec( error = xfs_btree_get_rec(cur, &rec, stat); if (error || !(*stat)) return error; - if (rec->alloc.ar_blockcount == 0) - goto out_bad_rec; *bno = be32_to_cpu(rec->alloc.ar_startblock); *len = be32_to_cpu(rec->alloc.ar_blockcount); + if (*len == 0) + goto out_bad_rec; + /* check for valid extent range, including overflow */ if (!xfs_verify_agbno(mp, agno, *bno)) goto out_bad_rec; diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c index 33dc34655ac3..30d1d60f1d46 100644 --- a/fs/xfs/libxfs/xfs_inode_buf.c +++ b/fs/xfs/libxfs/xfs_inode_buf.c @@ -731,7 +731,8 @@ xfs_inode_validate_extsize( if ((hint_flag || inherit_flag) && extsize == 0) return __this_address; - if (!(hint_flag || inherit_flag) && extsize != 0) + /* free inodes get flags set to zero but extsize remains */ + if (mode && !(hint_flag || inherit_flag) && extsize != 0) return __this_address; if (extsize_bytes % blocksize_bytes) @@ -777,7 +778,8 @@ xfs_inode_validate_cowextsize( if (hint_flag && cowextsize == 0) return __this_address; - if (!hint_flag && cowextsize != 0) + /* free inodes get flags set to zero but cowextsize remains */ + if (mode && !hint_flag && cowextsize != 0) return __this_address; if (hint_flag && rt_flag) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index e3147eb74222..ca3f2c2edd85 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -287,6 +287,20 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues); void blk_mq_quiesce_queue_nowait(struct request_queue *q); +/** + * blk_mq_mark_complete() - Set request state to complete + * @rq: request to set to complete state + * + * Returns true if request state was successfully set to complete. If + * successful, the caller is responsible for seeing this request is ended, as + * blk_mq_complete_request() will not work again. + */ +static inline bool blk_mq_mark_complete(struct request *rq) +{ + return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) == + MQ_RQ_IN_FLIGHT; +} + /* * Driver command data is immediately after the request. So subtract request * size to get back to the original request, add request size to get the PDU.
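A minimal usage sketch for the blk_mq_mark_complete() helper added above, as a driver timeout handler might consume it. The mydrv_timeout() name is hypothetical and this is an illustration rather than code from this merge; only blk_mq_mark_complete(), blk_mq_end_request() and the blk_eh_timer_return/BLK_STS_TIMEOUT definitions are existing block-layer API:

#include <linux/blk-mq.h>

/* Hypothetical driver timeout handler (not part of this merge). */
static enum blk_eh_timer_return mydrv_timeout(struct request *rq, bool reserved)
{
	/*
	 * Atomically claim the IN_FLIGHT -> COMPLETE transition. If the
	 * cmpxchg loses, a normal completion already won the race and will
	 * end the request itself, so the timeout path must leave it alone.
	 */
	if (blk_mq_mark_complete(rq))
		blk_mq_end_request(rq, BLK_STS_TIMEOUT);

	return BLK_EH_DONE;
}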
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h index 687b1760bb9f..f02cee0225d4 100644 --- a/include/linux/bpfilter.h +++ b/include/linux/bpfilter.h @@ -5,10 +5,10 @@ #include <uapi/linux/bpfilter.h> struct sock; -int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char *optval, +int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen); -int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char *optval, - int *optlen); +int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, + int __user *optlen); extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname, char __user *optval, unsigned int optlen, bool is_set); diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index e6c0448ebcc7..31c865d1842e 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -124,7 +124,7 @@ static inline void delayacct_blkio_start(void) static inline void delayacct_blkio_end(struct task_struct *p) { - if (current->delays) + if (p->delays) __delayacct_blkio_end(p); delayacct_clear_flag(DELAYACCT_PF_BLKIO); } diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index 7094718b653b..ffcc7724ca21 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -11,6 +11,7 @@ #include <linux/fcntl.h> #include <linux/wait.h> +#include <linux/err.h> /* * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 80cbb7fdce4a..83957920653a 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -358,6 +358,7 @@ struct mlx5_frag_buf_ctrl { struct mlx5_frag_buf frag_buf; u32 sz_m1; u32 frag_sz_m1; + u32 strides_offset; u8 log_sz; u8 log_stride; u8 log_frag_strides; @@ -983,14 +984,22 @@ static inline u32 mlx5_base_mkey(const u32 key) return key & 0xffffff00u; } -static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz, - struct mlx5_frag_buf_ctrl *fbc) +static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz, + u32 strides_offset, + struct mlx5_frag_buf_ctrl *fbc) { fbc->log_stride = log_stride; fbc->log_sz = log_sz; fbc->sz_m1 = (1 << fbc->log_sz) - 1; fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride; fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1; + fbc->strides_offset = strides_offset; +} + +static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz, + struct mlx5_frag_buf_ctrl *fbc) +{ + mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc); } static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc, @@ -1004,7 +1013,10 @@ static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc, static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc, u32 ix) { - unsigned int frag = (ix >> fbc->log_frag_strides); + unsigned int frag; + + ix += fbc->strides_offset; + frag = ix >> fbc->log_frag_strides; return fbc->frag_buf.frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride); diff --git a/include/linux/mm.h b/include/linux/mm.h index d3a3842316b8..7ba6d356d18f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -452,6 +452,20 @@ struct vm_operations_struct { unsigned long addr); }; +static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) +{ + static const struct vm_operations_struct dummy_vm_ops = {}; + + vma->vm_mm = mm; + vma->vm_ops = &dummy_vm_ops; + INIT_LIST_HEAD(&vma->anon_vma_chain); +} + +static inline void vma_set_anonymous(struct vm_area_struct *vma) +{ + vma->vm_ops = 
NULL; +} + struct mmu_gather; struct inode; diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index b72ebdff0b77..003d09ab308d 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -165,6 +165,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer); void ring_buffer_record_off(struct ring_buffer *buffer); void ring_buffer_record_on(struct ring_buffer *buffer); int ring_buffer_record_is_on(struct ring_buffer *buffer); +int ring_buffer_record_is_set_on(struct ring_buffer *buffer); void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu); void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index 1b92a28dd672..6fd615a0eea9 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -106,7 +106,14 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock) extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key); extern void rt_mutex_destroy(struct rt_mutex *lock); +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass); +#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0) +#else extern void rt_mutex_lock(struct rt_mutex *lock); +#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock) +#endif + extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); extern int rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 5fbfe61f41c6..1beb3ead0385 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -5835,10 +5835,11 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, /** * cfg80211_rx_control_port - notification about a received control port frame * @dev: The device the frame matched to - * @buf: control port frame - * @len: length of the frame data - * @addr: The peer from which the frame was received - * @proto: frame protocol, typically PAE or Pre-authentication + * @skb: The skb with the control port frame. It is assumed that the skb + * is 802.3 formatted (with 802.3 header). The skb can be non-linear. + * This function does not take ownership of the skb, so the caller is + * responsible for any cleanup. The caller must also ensure that + * skb->protocol is set appropriately.
* @unencrypted: Whether the frame was received unencrypted * * This function is used to inform userspace about a received control port @@ -5851,8 +5852,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, * Return: %true if the frame was passed to userspace */ bool cfg80211_rx_control_port(struct net_device *dev, - const u8 *buf, size_t len, - const u8 *addr, u16 proto, bool unencrypted); + struct sk_buff *skb, bool unencrypted); /** * cfg80211_cqm_rssi_notify - connection quality monitoring rssi event diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 71b9043aa0e7..3d4930528db0 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -281,6 +281,11 @@ static inline void fib6_info_hold(struct fib6_info *f6i) atomic_inc(&f6i->fib6_ref); } +static inline bool fib6_info_hold_safe(struct fib6_info *f6i) +{ + return atomic_inc_not_zero(&f6i->fib6_ref); +} + static inline void fib6_info_release(struct fib6_info *f6i) { if (f6i && atomic_dec_and_test(&f6i->fib6_ref)) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 08c005ce56e9..dc417ef0a0c5 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -150,6 +150,7 @@ static inline void nft_data_debug(const struct nft_data *data) * @portid: netlink portID of the original message * @seq: netlink sequence number * @family: protocol family + * @level: depth of the chains * @report: notify via unicast netlink message */ struct nft_ctx { @@ -160,6 +161,7 @@ struct nft_ctx { u32 portid; u32 seq; u8 family; + u8 level; bool report; }; @@ -865,7 +867,6 @@ enum nft_chain_flags { * @table: table that this chain belongs to * @handle: chain handle * @use: number of jump references to this chain - * @level: length of longest path to this chain * @flags: bitmask of enum nft_chain_flags * @name: name of the chain */ @@ -878,7 +879,6 @@ struct nft_chain { struct nft_table *table; u64 handle; u32 use; - u16 level; u8 flags:6, genmask:2; char *name; @@ -1124,7 +1124,6 @@ struct nft_flowtable { u32 genmask:2, use:30; u64 handle; - char *dev_name[NFT_FLOWTABLE_DEVICE_MAX]; /* runtime data below here */ struct nf_hook_ops *ops ____cacheline_aligned; struct nf_flowtable data; diff --git a/include/net/tcp.h b/include/net/tcp.h index 3482d13d655b..cd3ecda9386a 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -342,6 +342,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); +void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks); static inline void tcp_dec_quickack_mode(struct sock *sk, const unsigned int pkts) { @@ -539,6 +540,7 @@ void tcp_send_fin(struct sock *sk); void tcp_send_active_reset(struct sock *sk, gfp_t priority); int tcp_send_synack(struct sock *); void tcp_push_one(struct sock *, unsigned int mss_now); +void __tcp_send_ack(struct sock *sk, u32 rcv_nxt); void tcp_send_ack(struct sock *sk); void tcp_send_delayed_ack(struct sock *sk); void tcp_send_loss_probe(struct sock *sk); @@ -839,6 +841,11 @@ static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb) */ static inline int tcp_v6_iif(const struct sk_buff *skb) { + return TCP_SKB_CB(skb)->header.h6.iif; +} + +static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb) +{ bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags); return l3_slave ? 
skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif; diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h index 0b5ddbe135a4..972265f32871 100644 --- a/include/uapi/linux/btf.h +++ b/include/uapi/linux/btf.h @@ -76,7 +76,7 @@ struct btf_type { */ #define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24) #define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16) -#define BTF_INT_BITS(VAL) ((VAL) & 0x0000ffff) +#define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff) /* Attributes stored in the BTF_INT_ENCODING */ #define BTF_INT_SIGNED (1 << 0) diff --git a/ipc/sem.c b/ipc/sem.c index 5af1943ad782..76e95e4f3aa2 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -2118,7 +2118,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops, } do { - queue.status = -EINTR; + WRITE_ONCE(queue.status, -EINTR); queue.sleeper = current; __set_current_state(TASK_INTERRUPTIBLE); diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index e016ac3afa24..9704934252b3 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -450,7 +450,7 @@ static const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id) */ static bool btf_type_int_is_regular(const struct btf_type *t) { - u16 nr_bits, nr_bytes; + u8 nr_bits, nr_bytes; u32 int_data; int_data = btf_type_int(t); @@ -993,12 +993,16 @@ static void btf_int_bits_seq_show(const struct btf *btf, { u16 left_shift_bits, right_shift_bits; u32 int_data = btf_type_int(t); - u16 nr_bits = BTF_INT_BITS(int_data); - u16 total_bits_offset; - u16 nr_copy_bytes; - u16 nr_copy_bits; + u8 nr_bits = BTF_INT_BITS(int_data); + u8 total_bits_offset; + u8 nr_copy_bytes; + u8 nr_copy_bits; u64 print_num; + /* + * bits_offset is at most 7. + * BTF_INT_OFFSET() cannot exceed 64 bits. + */ total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); data += BITS_ROUNDDOWN_BYTES(total_bits_offset); bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset); @@ -1028,7 +1032,7 @@ static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t, u32 int_data = btf_type_int(t); u8 encoding = BTF_INT_ENCODING(int_data); bool sign = encoding & BTF_INT_SIGNED; - u32 nr_bits = BTF_INT_BITS(int_data); + u8 nr_bits = BTF_INT_BITS(int_data); if (bits_offset || BTF_INT_OFFSET(int_data) || BITS_PER_BYTE_MASKED(nr_bits)) { diff --git a/kernel/fork.c b/kernel/fork.c index a191c05e757d..1b27babc4c78 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -312,10 +312,8 @@ struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) { struct vm_area_struct *vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); - if (vma) { - vma->vm_mm = mm; - INIT_LIST_HEAD(&vma->anon_vma_chain); - } + if (vma) + vma_init(vma, mm); return vma; } diff --git a/kernel/kthread.c b/kernel/kthread.c index 750cb8082694..486dedbd9af5 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -325,8 +325,14 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data), task = create->result; if (!IS_ERR(task)) { static const struct sched_param param = { .sched_priority = 0 }; + char name[TASK_COMM_LEN]; - vsnprintf(task->comm, sizeof(task->comm), namefmt, args); + /* + * task is already visible to other tasks, so updating + * COMM must be protected. + */ + vsnprintf(name, sizeof(name), namefmt, args); + set_task_comm(task, name); /* * root may have changed our (kthreadd's) priority or CPU mask. * The kernel thread should not inherit these properties. 
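Before the rtmutex.c hunk below wires up the implementation, here is a hedged sketch of how the rt_mutex_lock_nested() annotation declared in the rtmutex.h hunk earlier is meant to be used. struct mydrv_node and mydrv_lock_pair() are made-up illustration names; SINGLE_DEPTH_NESTING is the stock lockdep subclass from <linux/lockdep.h>:

#include <linux/rtmutex.h>
#include <linux/lockdep.h>

struct mydrv_node {
	struct rt_mutex lock;
};

/* Take two locks of the same lock class in a fixed order. */
static void mydrv_lock_pair(struct mydrv_node *a, struct mydrv_node *b)
{
	rt_mutex_lock(&a->lock);
	/* Annotate the second acquisition so lockdep does not report a self-deadlock. */
	rt_mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
}

With CONFIG_DEBUG_LOCK_ALLOC disabled, the header maps rt_mutex_lock_nested() back to rt_mutex_lock(), so the annotation costs nothing in production builds.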
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 4f014be7a4b8..2823d4163a37 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1465,6 +1465,29 @@ rt_mutex_fastunlock(struct rt_mutex *lock, rt_mutex_postunlock(&wake_q); } +static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass) +{ + might_sleep(); + + mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); + rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); +} + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +/** + * rt_mutex_lock_nested - lock a rt_mutex + * + * @lock: the rt_mutex to be locked + * @subclass: the lockdep subclass + */ +void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass) +{ + __rt_mutex_lock(lock, subclass); +} +EXPORT_SYMBOL_GPL(rt_mutex_lock_nested); +#endif + +#ifndef CONFIG_DEBUG_LOCK_ALLOC /** * rt_mutex_lock - lock a rt_mutex * @@ -1472,12 +1495,10 @@ rt_mutex_fastunlock(struct rt_mutex *lock, */ void __sched rt_mutex_lock(struct rt_mutex *lock) { - might_sleep(); - - mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); - rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); + __rt_mutex_lock(lock, 0); } EXPORT_SYMBOL_GPL(rt_mutex_lock); +#endif /** * rt_mutex_lock_interruptible - lock a rt_mutex interruptible diff --git a/kernel/memremap.c b/kernel/memremap.c index 5857267a4af5..38283363da06 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -176,10 +176,27 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) unsigned long pfn, pgoff, order; pgprot_t pgprot = PAGE_KERNEL; int error, nid, is_ram; + struct dev_pagemap *conflict_pgmap; align_start = res->start & ~(SECTION_SIZE - 1); align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) - align_start; + align_end = align_start + align_size - 1; + + conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL); + if (conflict_pgmap) { + dev_WARN(dev, "Conflicting mapping in same section\n"); + put_dev_pagemap(conflict_pgmap); + return ERR_PTR(-ENOMEM); + } + + conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL); + if (conflict_pgmap) { + dev_WARN(dev, "Conflicting mapping in same section\n"); + put_dev_pagemap(conflict_pgmap); + return ERR_PTR(-ENOMEM); + } + is_ram = region_intersects(align_start, align_size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE); @@ -199,7 +216,6 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) mutex_lock(&pgmap_lock); error = 0; - align_end = align_start + align_size - 1; foreach_order_pgoff(res, order, pgoff) { error = __radix_tree_insert(&pgmap_radix, @@ -305,7 +321,7 @@ EXPORT_SYMBOL_GPL(get_dev_pagemap); #ifdef CONFIG_DEV_PAGEMAP_OPS DEFINE_STATIC_KEY_FALSE(devmap_managed_key); -EXPORT_SYMBOL_GPL(devmap_managed_key); +EXPORT_SYMBOL(devmap_managed_key); static atomic_t devmap_enable; /* @@ -346,5 +362,5 @@ void __put_devmap_managed_page(struct page *page) } else if (!count) __put_page(page); } -EXPORT_SYMBOL_GPL(__put_devmap_managed_page); +EXPORT_SYMBOL(__put_devmap_managed_page); #endif /* CONFIG_DEV_PAGEMAP_OPS */ diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 10c7b51c0d1f..b5fbdde6afa9 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2090,8 +2090,14 @@ retry: sub_rq_bw(&next_task->dl, &rq->dl); set_task_cpu(next_task, later_rq->cpu); add_rq_bw(&next_task->dl, &later_rq->dl); + + /* + * Update the later_rq clock here, because the clock is used + * by the cpufreq_update_util() inside __add_running_bw(). 
+ */ + update_rq_clock(later_rq); add_running_bw(&next_task->dl, &later_rq->dl); - activate_task(later_rq, next_task, 0); + activate_task(later_rq, next_task, ENQUEUE_NOCLOCK); ret = 1; resched_curr(later_rq); diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 572567078b60..eaaec8364f96 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -836,6 +836,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) * can be time-consuming. Try to avoid it when possible. */ raw_spin_lock(&rt_rq->rt_runtime_lock); + if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF) + rt_rq->rt_runtime = rt_b->rt_runtime; skip = !rt_rq->rt_time && !rt_rq->rt_nr_running; raw_spin_unlock(&rt_rq->rt_runtime_lock); if (skip) diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 05a831427bc7..56a0fed30c0a 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -47,7 +47,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu); } - if (!cpumask_test_cpu(cpu, sched_group_span(group))) { + if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) { printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu); } diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 1ff523dae6e2..e190d1ef3a23 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -260,6 +260,15 @@ retry: err = 0; __cpu_stop_queue_work(stopper1, work1, &wakeq); __cpu_stop_queue_work(stopper2, work2, &wakeq); + /* + * The waking up of stopper threads has to happen + * in the same scheduling context as the queueing. + * Otherwise, there is a possibility of one of the + * above stoppers being woken up by another CPU, + * and preempting us. This will cause us to not + * wake up the other stopper forever. + */ + preempt_disable(); unlock: raw_spin_unlock(&stopper2->lock); raw_spin_unlock_irq(&stopper1->lock); @@ -271,7 +280,6 @@ unlock: } if (!err) { - preempt_disable(); wake_up_q(&wakeq); preempt_enable(); } diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 6a46af21765c..0b0b688ea166 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -3227,6 +3227,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer) } /** + * ring_buffer_record_is_set_on - return true if the ring buffer is set writable + * @buffer: The ring buffer to see if write is set enabled + * + * Returns true if the ring buffer is set writable by ring_buffer_record_on(). + * Note that this does NOT mean it is in a writable state. + * + * It may return true when the ring buffer has been disabled by + * ring_buffer_record_disable(), as that is a temporary disabling of + * the ring buffer. + */ +int ring_buffer_record_is_set_on(struct ring_buffer *buffer) +{ + return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); +} + +/** * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer * @buffer: The ring buffer to stop writes to.
* @cpu: The CPU buffer to stop diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 87cf25171fb8..823687997b01 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1373,6 +1373,12 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) arch_spin_lock(&tr->max_lock); + /* Inherit the recordable setting from trace_buffer */ + if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer)) + ring_buffer_record_on(tr->max_buffer.buffer); + else + ring_buffer_record_off(tr->max_buffer.buffer); + swap(tr->trace_buffer.buffer, tr->max_buffer.buffer); __update_max_tr(tr, tsk, cpu); diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index d18249683682..5dea177cef53 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -679,6 +679,8 @@ event_trigger_callback(struct event_command *cmd_ops, goto out_free; out_reg: + /* Up the trigger_data count to make sure reg doesn't free it on failure */ + event_trigger_init(trigger_ops, trigger_data); ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file); /* * The above returns on success the # of functions enabled, @@ -686,11 +688,13 @@ event_trigger_callback(struct event_command *cmd_ops, * Consider no functions a failure too. */ if (!ret) { + cmd_ops->unreg(glob, trigger_ops, trigger_data, file); ret = -ENOENT; - goto out_free; - } else if (ret < 0) - goto out_free; - ret = 0; + } else if (ret > 0) + ret = 0; + + /* Down the counter of trigger_data or free it if not used anymore */ + event_trigger_free(trigger_ops, trigger_data); out: return ret; @@ -1416,6 +1420,9 @@ int event_enable_trigger_func(struct event_command *cmd_ops, goto out; } + /* Up the trigger_data count to make sure nothing frees it on failure */ + event_trigger_init(trigger_ops, trigger_data); + if (trigger) { number = strsep(&trigger, ":"); @@ -1466,6 +1473,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops, goto out_disable; /* Just return zero, not the number of enabled functions */ ret = 0; + event_trigger_free(trigger_ops, trigger_data); out: return ret; @@ -1476,7 +1484,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops, out_free: if (cmd_ops->set_filter) cmd_ops->set_filter(NULL, trigger_data, NULL); - kfree(trigger_data); + event_trigger_free(trigger_ops, trigger_data); kfree(enable_data); goto out; } diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index b37b92e7dbd4..e9d99463e5df 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -400,11 +400,10 @@ static struct trace_kprobe *find_trace_kprobe(const char *event, static int enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) { + struct event_file_link *link = NULL; int ret = 0; if (file) { - struct event_file_link *link; - link = kmalloc(sizeof(*link), GFP_KERNEL); if (!link) { ret = -ENOMEM; @@ -424,6 +423,18 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) else ret = enable_kprobe(&tk->rp.kp); } + + if (ret) { + if (file) { + /* The branch is taken only when WARN_ON_ONCE() does not fire */ + if (!WARN_ON_ONCE(!link)) + list_del_rcu(&link->list); + kfree(link); + tk->tp.flags &= ~TP_FLAG_TRACE; + } else { + tk->tp.flags &= ~TP_FLAG_PROFILE; + } + } out: return ret; } diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index c253c1b46c6b..befb127507c0 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -5,7 +5,7 @@ if HAVE_ARCH_KASAN config KASAN bool "KASan: runtime memory debugger" - depends on SLUB || (SLAB
&& !DEBUG_SLAB) + depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB) select SLUB_DEBUG if SLUB select CONSTRUCTORS select STACKDEPOT diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 9ac49ef17b4e..01f1a14facc4 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2505,6 +2505,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) /* Create pseudo-vma that contains just the policy */ memset(&pvma, 0, sizeof(struct vm_area_struct)); + vma_init(&pvma, NULL); pvma.vm_end = TASK_SIZE; /* policy covers entire file */ mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ diff --git a/mm/mmap.c b/mm/mmap.c index ff1944d8d458..17bbf4d3e24f 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1778,6 +1778,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr, error = shmem_zero_setup(vma); if (error) goto free_vma; + } else { + vma_set_anonymous(vma); } vma_link(mm, vma, prev, rb_link, rb_parent); @@ -2983,6 +2985,7 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla return -ENOMEM; } + vma_set_anonymous(vma); vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_pgoff = pgoff; diff --git a/mm/nommu.c b/mm/nommu.c index 1d22fdbf7d7c..9fc9e43335b6 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1145,6 +1145,8 @@ static int do_mmap_private(struct vm_area_struct *vma, if (ret < len) memset(base + ret, 0, len - ret); + } else { + vma_set_anonymous(vma); } return 0; diff --git a/mm/shmem.c b/mm/shmem.c index 2cab84403055..41b9bbf24e16 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1421,6 +1421,7 @@ static void shmem_pseudo_vma_init(struct vm_area_struct *vma, { /* Create a pseudo vma that just contains the policy */ memset(vma, 0, sizeof(*vma)); + vma_init(vma, NULL); /* Bias interleave by inode number to distribute better across nodes */ vma->vm_pgoff = index + info->vfs_inode.i_ino; vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); diff --git a/mm/zswap.c b/mm/zswap.c index 7d34e69507e3..cd91fd9d96b8 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -1026,6 +1026,15 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, ret = -ENOMEM; goto reject; } + + /* A second zswap_is_full() check after + * zswap_shrink() to make sure it's now + * under the max_pool_percent + */ + if (zswap_is_full()) { + ret = -ENOMEM; + goto reject; + } } /* allocate entry */ diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index e0adcd123f48..711d7156efd8 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -131,8 +131,10 @@ static void caif_flow_cb(struct sk_buff *skb) caifd = caif_get(skb->dev); WARN_ON(caifd == NULL); - if (caifd == NULL) + if (!caifd) { + rcu_read_unlock(); return; + } caifd_hold(caifd); rcu_read_unlock(); diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 68bf07206744..43a932cb609b 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -269,7 +269,7 @@ static void __page_pool_empty_ring(struct page_pool *pool) struct page *page; /* Empty recycle ring */ - while ((page = ptr_ring_consume(&pool->ring))) { + while ((page = ptr_ring_consume_bh(&pool->ring))) { /* Verify the refcnt invariant of cached pages */ if (!(page_ref_count(page) == 1)) pr_crit("%s() page_pool refcnt %d violation\n", diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 5ef61222fdef..e3f743c141b3 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2759,9 +2759,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) return err; } - dev->rtnl_link_state = 
RTNL_LINK_INITIALIZED; - - __dev_notify_flags(dev, old_flags, ~0U); + if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { + __dev_notify_flags(dev, old_flags, 0U); + } else { + dev->rtnl_link_state = RTNL_LINK_INITIALIZED; + __dev_notify_flags(dev, old_flags, ~0U); + } return 0; } EXPORT_SYMBOL(rtnl_configure_link); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 8e51f8555e11..fb35b62af272 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3720,6 +3720,7 @@ normal: net_warn_ratelimited( "skb_segment: too many frags: %u %u\n", pos, mss); + err = -EINVAL; goto err; } @@ -3753,11 +3754,10 @@ skip_fraglist: perform_csum_check: if (!csum) { - if (skb_has_shared_frag(nskb)) { - err = __skb_linearize(nskb); - if (err) - goto err; - } + if (skb_has_shared_frag(nskb) && + __skb_linearize(nskb)) + goto err; + if (!nskb->remcsum_offload) nskb->ip_summed = CHECKSUM_NONE; SKB_GSO_CB(nskb)->csum = diff --git a/net/core/sock.c b/net/core/sock.c index 9e8f65585b81..bc2d7a37297f 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2277,9 +2277,9 @@ int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg, pfrag->offset += use; sge = sg + sg_curr - 1; - if (sg_curr > first_coalesce && sg_page(sg) == pfrag->page && - sg->offset + sg->length == orig_offset) { - sg->length += use; + if (sg_curr > first_coalesce && sg_page(sge) == pfrag->page && + sge->offset + sge->length == orig_offset) { + sge->length += use; } else { sge = sg + sg_curr; sg_unmark_end(sge); diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index b3c899a630a0..28fef7d15959 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1200,8 +1200,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im) spin_lock_bh(&im->lock); if (pmc) { im->interface = pmc->interface; - im->sfmode = pmc->sfmode; - if (pmc->sfmode == MCAST_INCLUDE) { + if (im->sfmode == MCAST_INCLUDE) { im->tomb = pmc->tomb; im->sources = pmc->sources; for (psf = im->sources; psf; psf = psf->sf_next) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index b3308e9d9762..0e3edd25f881 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -523,6 +523,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) to->dev = from->dev; to->mark = from->mark; + skb_copy_hash(to, from); + /* Copy the flags to each fragment. */ IPCB(to)->flags = IPCB(from)->flags; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 64c76dcf7386..c0fe5ad996f2 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -150,15 +150,18 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) { struct sockaddr_in sin; const struct iphdr *iph = ip_hdr(skb); - __be16 *ports = (__be16 *)skb_transport_header(skb); + __be16 *ports; + int end; - if (skb_transport_offset(skb) + 4 > (int)skb->len) + end = skb_transport_offset(skb) + 4; + if (end > 0 && !pskb_may_pull(skb, end)) return; /* All current transport protocols have the port numbers in the * first four bytes of the transport header and this function is * written with this assumption in mind. 
*/ + ports = (__be16 *)skb_transport_header(skb); sin.sin_family = AF_INET; sin.sin_addr.s_addr = iph->daddr; diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 5869f89ca656..8b637f9f23a2 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c @@ -129,24 +129,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk) struct dctcp *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); - /* State has changed from CE=0 to CE=1 and delayed - * ACK has not sent yet. - */ - if (!ca->ce_state && - inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { - u32 tmp_rcv_nxt; - - /* Save current rcv_nxt. */ - tmp_rcv_nxt = tp->rcv_nxt; - - /* Generate previous ack with CE=0. */ - tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; - tp->rcv_nxt = ca->prior_rcv_nxt; - - tcp_send_ack(sk); - - /* Recover current rcv_nxt. */ - tp->rcv_nxt = tmp_rcv_nxt; + if (!ca->ce_state) { + /* State has changed from CE=0 to CE=1, force an immediate + * ACK to reflect the new CE state. If an ACK was delayed, + * send that first to reflect the prior CE state. + */ + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) + __tcp_send_ack(sk, ca->prior_rcv_nxt); + tcp_enter_quickack_mode(sk, 1); } ca->prior_rcv_nxt = tp->rcv_nxt; @@ -160,24 +150,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk) struct dctcp *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); - /* State has changed from CE=1 to CE=0 and delayed - * ACK has not sent yet. - */ - if (ca->ce_state && - inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { - u32 tmp_rcv_nxt; - - /* Save current rcv_nxt. */ - tmp_rcv_nxt = tp->rcv_nxt; - - /* Generate previous ack with CE=1. */ - tp->ecn_flags |= TCP_ECN_DEMAND_CWR; - tp->rcv_nxt = ca->prior_rcv_nxt; - - tcp_send_ack(sk); - - /* Recover current rcv_nxt. */ - tp->rcv_nxt = tmp_rcv_nxt; + if (ca->ce_state) { + /* State has changed from CE=1 to CE=0, force an immediate + * ACK to reflect the new CE state. If an ACK was delayed, + * send that first to reflect the prior CE state. + */ + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) + __tcp_send_ack(sk, ca->prior_rcv_nxt); + tcp_enter_quickack_mode(sk, 1); } ca->prior_rcv_nxt = tp->rcv_nxt; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 8e5522c6833a..3bcd30a2ba06 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -215,7 +215,7 @@ static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks) icsk->icsk_ack.quick = quickacks; } -static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks) +void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks) { struct inet_connection_sock *icsk = inet_csk(sk); @@ -223,6 +223,7 @@ static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks) icsk->icsk_ack.pingpong = 0; icsk->icsk_ack.ato = TCP_ATO_MIN; } +EXPORT_SYMBOL(tcp_enter_quickack_mode); /* Send ACKs quickly, if "quick" count is not exhausted * and the session is not interactive. 
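The two DCTCP hunks above reduce to a single pattern, distilled in the sketch below. ce_state_changed() is an illustrative name and prior_rcv_nxt stands for DCTCP's saved ACK sequence; __tcp_send_ack() and tcp_enter_quickack_mode() are the helpers this series exports:

#include <net/tcp.h>

static void ce_state_changed(struct sock *sk, u32 prior_rcv_nxt)
{
	/* Flush any pending delayed ACK so it still reflects the old CE state. */
	if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
		__tcp_send_ack(sk, prior_rcv_nxt);

	/* Then make the next ACK go out immediately, carrying the new CE state. */
	tcp_enter_quickack_mode(sk, 1);
}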
@@ -4357,6 +4358,23 @@ static bool tcp_try_coalesce(struct sock *sk, return true; } +static bool tcp_ooo_try_coalesce(struct sock *sk, + struct sk_buff *to, + struct sk_buff *from, + bool *fragstolen) +{ + bool res = tcp_try_coalesce(sk, to, from, fragstolen); + + /* In case tcp_drop() is called later, update to->gso_segs */ + if (res) { + u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) + + max_t(u16, 1, skb_shinfo(from)->gso_segs); + + skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF); + } + return res; +} + static void tcp_drop(struct sock *sk, struct sk_buff *skb) { sk_drops_add(sk, skb); @@ -4480,8 +4498,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) /* In the typical case, we are adding an skb to the end of the list. * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup. */ - if (tcp_try_coalesce(sk, tp->ooo_last_skb, - skb, &fragstolen)) { + if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb, + skb, &fragstolen)) { coalesce_done: tcp_grow_window(sk, skb); kfree_skb_partial(skb, fragstolen); @@ -4509,7 +4527,7 @@ coalesce_done: /* All the bits are present. Drop. */ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); - __kfree_skb(skb); + tcp_drop(sk, skb); skb = NULL; tcp_dsack_set(sk, seq, end_seq); goto add_sack; @@ -4528,11 +4546,11 @@ coalesce_done: TCP_SKB_CB(skb1)->end_seq); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); - __kfree_skb(skb1); + tcp_drop(sk, skb1); goto merge_right; } - } else if (tcp_try_coalesce(sk, skb1, - skb, &fragstolen)) { + } else if (tcp_ooo_try_coalesce(sk, skb1, + skb, &fragstolen)) { goto coalesce_done; } p = &parent->rb_right; @@ -4901,6 +4919,7 @@ end: static void tcp_collapse_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); + u32 range_truesize, sum_tiny = 0; struct sk_buff *skb, *head; u32 start, end; @@ -4912,6 +4931,7 @@ new_range: } start = TCP_SKB_CB(skb)->seq; end = TCP_SKB_CB(skb)->end_seq; + range_truesize = skb->truesize; for (head = skb;;) { skb = skb_rb_next(skb); @@ -4922,11 +4942,20 @@ new_range: if (!skb || after(TCP_SKB_CB(skb)->seq, end) || before(TCP_SKB_CB(skb)->end_seq, start)) { - tcp_collapse(sk, NULL, &tp->out_of_order_queue, - head, skb, start, end); + /* Do not attempt collapsing tiny skbs */ + if (range_truesize != head->truesize || + end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) { + tcp_collapse(sk, NULL, &tp->out_of_order_queue, + head, skb, start, end); + } else { + sum_tiny += range_truesize; + if (sum_tiny > sk->sk_rcvbuf >> 3) + return; + } goto new_range; } + range_truesize += skb->truesize; if (unlikely(before(TCP_SKB_CB(skb)->seq, start))) start = TCP_SKB_CB(skb)->seq; if (after(TCP_SKB_CB(skb)->end_seq, end)) @@ -4941,6 +4970,7 @@ new_range: * 2) not add too big latencies if thousands of packets sit there. * (But if application shrinks SO_RCVBUF, we could still end up * freeing whole queue here) + * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks. * * Return true if queue has shrunk. 
*/ @@ -4948,20 +4978,26 @@ static bool tcp_prune_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct rb_node *node, *prev; + int goal; if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) return false; NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED); + goal = sk->sk_rcvbuf >> 3; node = &tp->ooo_last_skb->rbnode; do { prev = rb_prev(node); rb_erase(node, &tp->out_of_order_queue); + goal -= rb_to_skb(node)->truesize; tcp_drop(sk, rb_to_skb(node)); - sk_mem_reclaim(sk); - if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && - !tcp_under_memory_pressure(sk)) - break; + if (!prev || goal <= 0) { + sk_mem_reclaim(sk); + if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && + !tcp_under_memory_pressure(sk)) + break; + goal = sk->sk_rcvbuf >> 3; + } node = prev; } while (node); tp->ooo_last_skb = rb_to_skb(prev); @@ -4996,6 +5032,9 @@ static int tcp_prune_queue(struct sock *sk) else if (tcp_under_memory_pressure(sk)) tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); + if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) + return 0; + tcp_collapse_ofo_queue(sk); if (!skb_queue_empty(&sk->sk_receive_queue)) tcp_collapse(sk, &sk->sk_receive_queue, NULL, diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 00e5a300ddb9..c4172c1fb198 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -160,7 +160,8 @@ static void tcp_event_data_sent(struct tcp_sock *tp, } /* Account for an ACK we sent. */ -static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) +static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts, + u32 rcv_nxt) { struct tcp_sock *tp = tcp_sk(sk); @@ -171,6 +172,9 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) __sock_put(sk); } + + if (unlikely(rcv_nxt != tp->rcv_nxt)) + return; /* Special ACK sent by DCTCP to reflect ECN */ tcp_dec_quickack_mode(sk, pkts); inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); } @@ -1023,8 +1027,8 @@ static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb) * We are working here with either a clone of the original * SKB, or a fresh unique copy made by the retransmit engine. 
*/ -static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, - gfp_t gfp_mask) +static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, + int clone_it, gfp_t gfp_mask, u32 rcv_nxt) { const struct inet_connection_sock *icsk = inet_csk(sk); struct inet_sock *inet; @@ -1100,7 +1104,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, th->source = inet->inet_sport; th->dest = inet->inet_dport; th->seq = htonl(tcb->seq); - th->ack_seq = htonl(tp->rcv_nxt); + th->ack_seq = htonl(rcv_nxt); *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | tcb->tcp_flags); @@ -1141,7 +1145,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, icsk->icsk_af_ops->send_check(sk, skb); if (likely(tcb->tcp_flags & TCPHDR_ACK)) - tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); + tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt); if (skb->len != tcp_header_size) { tcp_event_data_sent(tp, sk); @@ -1178,6 +1182,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, return err; } +static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, + gfp_t gfp_mask) +{ + return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask, + tcp_sk(sk)->rcv_nxt); +} + /* This routine just queues the buffer for sending. * * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, @@ -3571,7 +3582,7 @@ void tcp_send_delayed_ack(struct sock *sk) } /* This routine sends an ack and also updates the window. */ -void tcp_send_ack(struct sock *sk) +void __tcp_send_ack(struct sock *sk, u32 rcv_nxt) { struct sk_buff *buff; @@ -3604,9 +3615,14 @@ void tcp_send_ack(struct sock *sk) skb_set_tcp_pure_ack(buff); /* Send it off, this clears delayed acks for us. */ - tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0); + __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt); +} +EXPORT_SYMBOL_GPL(__tcp_send_ack); + +void tcp_send_ack(struct sock *sk) +{ + __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt); } -EXPORT_SYMBOL_GPL(tcp_send_ack); /* This routine sends a packet with an out of date sequence * number. It assumes the other end will try to ack it. diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 91580c62bb86..f66a1cae3366 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2374,7 +2374,8 @@ static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, continue; if ((rt->fib6_flags & noflags) != 0) continue; - fib6_info_hold(rt); + if (!fib6_info_hold_safe(rt)) + continue; break; } out: diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 2ee08b6a86a4..1a1f876f8e28 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -700,13 +700,16 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg, } if (np->rxopt.bits.rxorigdstaddr) { struct sockaddr_in6 sin6; - __be16 *ports = (__be16 *) skb_transport_header(skb); + __be16 *ports; + int end; - if (skb_transport_offset(skb) + 4 <= (int)skb->len) { + end = skb_transport_offset(skb) + 4; + if (end <= 0 || pskb_may_pull(skb, end)) { /* All current transport protocols have the port numbers in the * first four bytes of the transport header and this function is * written with this assumption in mind. 
*/ + ports = (__be16 *)skb_transport_header(skb); sin6.sin6_family = AF_INET6; sin6.sin6_addr = ipv6_hdr(skb)->daddr; diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index be491bf6ab6e..ef2505aefc15 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -402,9 +402,10 @@ static int icmp6_iif(const struct sk_buff *skb) /* for local traffic to local address, skb dev is the loopback * device. Check if there is a dst attached to the skb and if so - * get the real device index. + * get the real device index. Same is needed for replies to a link + * local address on a device enslaved to an L3 master device */ - if (unlikely(iif == LOOPBACK_IFINDEX)) { + if (unlikely(iif == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) { const struct rt6_info *rt6 = skb_rt6_info(skb); if (rt6) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index a14fb4fcdf18..3168847c30d1 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -570,6 +570,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) to->dev = from->dev; to->mark = from->mark; + skb_copy_hash(to, from); + #ifdef CONFIG_NET_SCHED to->tc_index = from->tc_index; #endif diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 2699be7202be..f60f310785fd 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -790,8 +790,7 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im) spin_lock_bh(&im->mca_lock); if (pmc) { im->idev = pmc->idev; - im->mca_sfmode = pmc->mca_sfmode; - if (pmc->mca_sfmode == MCAST_INCLUDE) { + if (im->mca_sfmode == MCAST_INCLUDE) { im->mca_tomb = pmc->mca_tomb; im->mca_sources = pmc->mca_sources; for (psf = im->mca_sources; psf; psf = psf->sf_next) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 2ce0bd17de4f..ec18b3ce8b6d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -972,10 +972,10 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort) rt->dst.lastuse = jiffies; } +/* Caller must already hold reference to @from */ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from) { rt->rt6i_flags &= ~RTF_EXPIRES; - fib6_info_hold(from); rcu_assign_pointer(rt->from, from); dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true); if (from->fib6_metrics != &dst_default_metrics) { @@ -984,6 +984,7 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from) } } +/* Caller must already hold reference to @ort */ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort) { struct net_device *dev = fib6_info_nh_dev(ort); @@ -1044,9 +1045,14 @@ static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt) struct net_device *dev = rt->fib6_nh.nh_dev; struct rt6_info *nrt; + if (!fib6_info_hold_safe(rt)) + return NULL; + nrt = ip6_dst_alloc(dev_net(dev), dev, flags); if (nrt) ip6_rt_copy_init(nrt, rt); + else + fib6_info_release(rt); return nrt; } @@ -1178,10 +1184,15 @@ static struct rt6_info *ip6_rt_cache_alloc(struct fib6_info *ort, * Clone the route. 
*/ + if (!fib6_info_hold_safe(ort)) + return NULL; + dev = ip6_rt_get_dev_rcu(ort); rt = ip6_dst_alloc(dev_net(dev), dev, 0); - if (!rt) + if (!rt) { + fib6_info_release(ort); return NULL; + } ip6_rt_copy_init(rt, ort); rt->rt6i_flags |= RTF_CACHE; @@ -1210,12 +1221,17 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct fib6_info *rt) struct net_device *dev; struct rt6_info *pcpu_rt; + if (!fib6_info_hold_safe(rt)) + return NULL; + rcu_read_lock(); dev = ip6_rt_get_dev_rcu(rt); pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags); rcu_read_unlock(); - if (!pcpu_rt) + if (!pcpu_rt) { + fib6_info_release(rt); return NULL; + } ip6_rt_copy_init(pcpu_rt, rt); pcpu_rt->rt6i_flags |= RTF_PCPU; return pcpu_rt; @@ -2486,7 +2502,7 @@ restart: out: if (ret) - dst_hold(&ret->dst); + ip6_hold_safe(net, &ret, true); else ret = ip6_create_rt_rcu(rt); @@ -3303,7 +3319,8 @@ static int ip6_route_del(struct fib6_config *cfg, continue; if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol) continue; - fib6_info_hold(rt); + if (!fib6_info_hold_safe(rt)) + continue; rcu_read_unlock(); /* if gateway was specified only delete the one hop */ @@ -3409,6 +3426,9 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu rcu_read_lock(); from = rcu_dereference(rt->from); + /* This fib6_info_hold() is safe here because we hold reference to rt + * and rt already holds reference to fib6_info. + */ fib6_info_hold(from); rcu_read_unlock(); @@ -3470,7 +3490,8 @@ static struct fib6_info *rt6_get_route_info(struct net *net, continue; if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr)) continue; - fib6_info_hold(rt); + if (!fib6_info_hold_safe(rt)) + continue; break; } out: @@ -3530,8 +3551,8 @@ struct fib6_info *rt6_get_dflt_router(struct net *net, ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr)) break; } - if (rt) - fib6_info_hold(rt); + if (rt && !fib6_info_hold_safe(rt)) + rt = NULL; rcu_read_unlock(); return rt; } @@ -3579,8 +3600,8 @@ restart: struct inet6_dev *idev = dev ? 
__in6_dev_get(dev) : NULL; if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) && - (!idev || idev->cnf.accept_ra != 2)) { - fib6_info_hold(rt); + (!idev || idev->cnf.accept_ra != 2) && + fib6_info_hold_safe(rt)) { rcu_read_unlock(); ip6_del_rt(net, rt); goto restart; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 7efa9fd7e109..03e6b7a2bc53 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -938,7 +938,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb) &tcp_hashinfo, NULL, 0, &ipv6h->saddr, th->source, &ipv6h->daddr, - ntohs(th->source), tcp_v6_iif(skb), + ntohs(th->source), + tcp_v6_iif_l3_slave(skb), tcp_v6_sdif(skb)); if (!sk1) goto out; @@ -1609,7 +1610,8 @@ do_time_wait: skb, __tcp_hdrlen(th), &ipv6_hdr(skb)->saddr, th->source, &ipv6_hdr(skb)->daddr, - ntohs(th->dest), tcp_v6_iif(skb), + ntohs(th->dest), + tcp_v6_iif_l3_slave(skb), sdif); if (sk2) { struct inet_timewait_sock *tw = inet_twsk(sk); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 0a38cc1cbebc..932985ca4e66 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2254,11 +2254,8 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb, sdata->control_port_over_nl80211)) { struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); bool noencrypt = status->flag & RX_FLAG_DECRYPTED; - struct ethhdr *ehdr = eth_hdr(skb); - cfg80211_rx_control_port(dev, skb->data, skb->len, - ehdr->h_source, - be16_to_cpu(skb->protocol), noencrypt); + cfg80211_rx_control_port(dev, skb, noencrypt); dev_kfree_skb(skb); } else { /* deliver to local stack */ diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 5e2e511c4a6f..d02fbfec3783 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2111,7 +2111,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) if (!sta->uploaded) continue; - if (sta->sdata->vif.type != NL80211_IFTYPE_AP) + if (sta->sdata->vif.type != NL80211_IFTYPE_AP && + sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN) continue; for (state = IEEE80211_STA_NOTEXIST; diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index abe647d5b8c6..9ce6336d1e55 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -243,14 +243,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] = * We currently ignore Sync packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ - sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, + sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, [DCCP_PKT_SYNCACK] = { /* * We currently ignore SyncAck packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ - sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, + sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, }, [CT_DCCP_ROLE_SERVER] = { @@ -371,14 +371,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] = * We currently ignore Sync packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ - sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, + sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, [DCCP_PKT_SYNCACK] = { /* * We currently ignore SyncAck packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ - sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, + sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, }, }; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 896d4a36081d..f5745e4c6513 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -75,6 +75,7 @@ static void nft_ctx_init(struct nft_ctx *ctx, { ctx->net = net; ctx->family = family; + ctx->level 
= 0; ctx->table = table; ctx->chain = chain; ctx->nla = nla; @@ -1597,7 +1598,6 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, struct nft_base_chain *basechain; struct nft_stats *stats = NULL; struct nft_chain_hook hook; - const struct nlattr *name; struct nf_hook_ops *ops; struct nft_trans *trans; int err; @@ -1645,12 +1645,11 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, return PTR_ERR(stats); } + err = -ENOMEM; trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN, sizeof(struct nft_trans_chain)); - if (trans == NULL) { - free_percpu(stats); - return -ENOMEM; - } + if (trans == NULL) + goto err; nft_trans_chain_stats(trans) = stats; nft_trans_chain_update(trans) = true; @@ -1660,19 +1659,37 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, else nft_trans_chain_policy(trans) = -1; - name = nla[NFTA_CHAIN_NAME]; - if (nla[NFTA_CHAIN_HANDLE] && name) { - nft_trans_chain_name(trans) = - nla_strdup(name, GFP_KERNEL); - if (!nft_trans_chain_name(trans)) { - kfree(trans); - free_percpu(stats); - return -ENOMEM; + if (nla[NFTA_CHAIN_HANDLE] && + nla[NFTA_CHAIN_NAME]) { + struct nft_trans *tmp; + char *name; + + err = -ENOMEM; + name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL); + if (!name) + goto err; + + err = -EEXIST; + list_for_each_entry(tmp, &ctx->net->nft.commit_list, list) { + if (tmp->msg_type == NFT_MSG_NEWCHAIN && + tmp->ctx.table == table && + nft_trans_chain_update(tmp) && + nft_trans_chain_name(tmp) && + strcmp(name, nft_trans_chain_name(tmp)) == 0) { + kfree(name); + goto err; + } } + + nft_trans_chain_name(trans) = name; } list_add_tail(&trans->list, &ctx->net->nft.commit_list); return 0; +err: + free_percpu(stats); + kfree(trans); + return err; } static int nf_tables_newchain(struct net *net, struct sock *nlsk, @@ -2254,6 +2271,39 @@ done: return skb->len; } +static int nf_tables_dump_rules_start(struct netlink_callback *cb) +{ + const struct nlattr * const *nla = cb->data; + struct nft_rule_dump_ctx *ctx = NULL; + + if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) { + ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC); + if (!ctx) + return -ENOMEM; + + if (nla[NFTA_RULE_TABLE]) { + ctx->table = nla_strdup(nla[NFTA_RULE_TABLE], + GFP_ATOMIC); + if (!ctx->table) { + kfree(ctx); + return -ENOMEM; + } + } + if (nla[NFTA_RULE_CHAIN]) { + ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN], + GFP_ATOMIC); + if (!ctx->chain) { + kfree(ctx->table); + kfree(ctx); + return -ENOMEM; + } + } + } + + cb->data = ctx; + return 0; +} + static int nf_tables_dump_rules_done(struct netlink_callback *cb) { struct nft_rule_dump_ctx *ctx = cb->data; @@ -2283,38 +2333,13 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk, if (nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { + .start= nf_tables_dump_rules_start, .dump = nf_tables_dump_rules, .done = nf_tables_dump_rules_done, .module = THIS_MODULE, + .data = (void *)nla, }; - if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) { - struct nft_rule_dump_ctx *ctx; - - ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC); - if (!ctx) - return -ENOMEM; - - if (nla[NFTA_RULE_TABLE]) { - ctx->table = nla_strdup(nla[NFTA_RULE_TABLE], - GFP_ATOMIC); - if (!ctx->table) { - kfree(ctx); - return -ENOMEM; - } - } - if (nla[NFTA_RULE_CHAIN]) { - ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN], - GFP_ATOMIC); - if (!ctx->chain) { - kfree(ctx->table); - kfree(ctx); - return -ENOMEM; - } - } - c.data = ctx; - } - return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); } @@ -2384,6 +2409,9 
@@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain) struct nft_rule *rule; int err; + if (ctx->level == NFT_JUMP_STACK_SIZE) + return -EMLINK; + list_for_each_entry(rule, &chain->rules, list) { if (!nft_is_active_next(ctx->net, rule)) continue; @@ -3161,6 +3189,18 @@ done: return skb->len; } +static int nf_tables_dump_sets_start(struct netlink_callback *cb) +{ + struct nft_ctx *ctx_dump = NULL; + + ctx_dump = kmemdup(cb->data, sizeof(*ctx_dump), GFP_ATOMIC); + if (ctx_dump == NULL) + return -ENOMEM; + + cb->data = ctx_dump; + return 0; +} + static int nf_tables_dump_sets_done(struct netlink_callback *cb) { kfree(cb->data); @@ -3188,18 +3228,12 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk, if (nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { + .start = nf_tables_dump_sets_start, .dump = nf_tables_dump_sets, .done = nf_tables_dump_sets_done, + .data = &ctx, .module = THIS_MODULE, }; - struct nft_ctx *ctx_dump; - - ctx_dump = kmalloc(sizeof(*ctx_dump), GFP_ATOMIC); - if (ctx_dump == NULL) - return -ENOMEM; - - *ctx_dump = ctx; - c.data = ctx_dump; return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); } @@ -3849,6 +3883,15 @@ nla_put_failure: return -ENOSPC; } +static int nf_tables_dump_set_start(struct netlink_callback *cb) +{ + struct nft_set_dump_ctx *dump_ctx = cb->data; + + cb->data = kmemdup(dump_ctx, sizeof(*dump_ctx), GFP_ATOMIC); + + return cb->data ? 0 : -ENOMEM; +} + static int nf_tables_dump_set_done(struct netlink_callback *cb) { kfree(cb->data); @@ -4002,20 +4045,17 @@ static int nf_tables_getsetelem(struct net *net, struct sock *nlsk, if (nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { + .start = nf_tables_dump_set_start, .dump = nf_tables_dump_set, .done = nf_tables_dump_set_done, .module = THIS_MODULE, }; - struct nft_set_dump_ctx *dump_ctx; - - dump_ctx = kmalloc(sizeof(*dump_ctx), GFP_ATOMIC); - if (!dump_ctx) - return -ENOMEM; - - dump_ctx->set = set; - dump_ctx->ctx = ctx; + struct nft_set_dump_ctx dump_ctx = { + .set = set, + .ctx = ctx, + }; - c.data = dump_ctx; + c.data = &dump_ctx; return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); } @@ -4975,38 +5015,42 @@ done: return skb->len; } -static int nf_tables_dump_obj_done(struct netlink_callback *cb) +static int nf_tables_dump_obj_start(struct netlink_callback *cb) { - struct nft_obj_filter *filter = cb->data; + const struct nlattr * const *nla = cb->data; + struct nft_obj_filter *filter = NULL; - if (filter) { - kfree(filter->table); - kfree(filter); + if (nla[NFTA_OBJ_TABLE] || nla[NFTA_OBJ_TYPE]) { + filter = kzalloc(sizeof(*filter), GFP_ATOMIC); + if (!filter) + return -ENOMEM; + + if (nla[NFTA_OBJ_TABLE]) { + filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC); + if (!filter->table) { + kfree(filter); + return -ENOMEM; + } + } + + if (nla[NFTA_OBJ_TYPE]) + filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE])); } + cb->data = filter; return 0; } -static struct nft_obj_filter * -nft_obj_filter_alloc(const struct nlattr * const nla[]) +static int nf_tables_dump_obj_done(struct netlink_callback *cb) { - struct nft_obj_filter *filter; - - filter = kzalloc(sizeof(*filter), GFP_ATOMIC); - if (!filter) - return ERR_PTR(-ENOMEM); + struct nft_obj_filter *filter = cb->data; - if (nla[NFTA_OBJ_TABLE]) { - filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC); - if (!filter->table) { - kfree(filter); - return ERR_PTR(-ENOMEM); - } + if (filter) { + kfree(filter->table); + kfree(filter); } - if (nla[NFTA_OBJ_TYPE]) - 
filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE])); - return filter; + return 0; } /* called with rcu_read_lock held */ @@ -5027,21 +5071,13 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk, if (nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { + .start = nf_tables_dump_obj_start, .dump = nf_tables_dump_obj, .done = nf_tables_dump_obj_done, .module = THIS_MODULE, + .data = (void *)nla, }; - if (nla[NFTA_OBJ_TABLE] || - nla[NFTA_OBJ_TYPE]) { - struct nft_obj_filter *filter; - - filter = nft_obj_filter_alloc(nla); - if (IS_ERR(filter)) - return -ENOMEM; - - c.data = filter; - } return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); } @@ -5320,8 +5356,6 @@ static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx, flowtable->ops[i].priv = &flowtable->data; flowtable->ops[i].hook = flowtable->data.type->hook; flowtable->ops[i].dev = dev_array[i]; - flowtable->dev_name[i] = kstrdup(dev_array[i]->name, - GFP_KERNEL); } return err; @@ -5479,10 +5513,8 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk, err6: i = flowtable->ops_len; err5: - for (k = i - 1; k >= 0; k--) { - kfree(flowtable->dev_name[k]); + for (k = i - 1; k >= 0; k--) nf_unregister_net_hook(net, &flowtable->ops[k]); - } kfree(flowtable->ops); err4: @@ -5581,9 +5613,10 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net, goto nla_put_failure; for (i = 0; i < flowtable->ops_len; i++) { - if (flowtable->dev_name[i][0] && - nla_put_string(skb, NFTA_DEVICE_NAME, - flowtable->dev_name[i])) + const struct net_device *dev = READ_ONCE(flowtable->ops[i].dev); + + if (dev && + nla_put_string(skb, NFTA_DEVICE_NAME, dev->name)) goto nla_put_failure; } nla_nest_end(skb, nest_devs); @@ -5650,37 +5683,39 @@ done: return skb->len; } -static int nf_tables_dump_flowtable_done(struct netlink_callback *cb) +static int nf_tables_dump_flowtable_start(struct netlink_callback *cb) { - struct nft_flowtable_filter *filter = cb->data; + const struct nlattr * const *nla = cb->data; + struct nft_flowtable_filter *filter = NULL; - if (!filter) - return 0; + if (nla[NFTA_FLOWTABLE_TABLE]) { + filter = kzalloc(sizeof(*filter), GFP_ATOMIC); + if (!filter) + return -ENOMEM; - kfree(filter->table); - kfree(filter); + filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE], + GFP_ATOMIC); + if (!filter->table) { + kfree(filter); + return -ENOMEM; + } + } + cb->data = filter; return 0; } -static struct nft_flowtable_filter * -nft_flowtable_filter_alloc(const struct nlattr * const nla[]) +static int nf_tables_dump_flowtable_done(struct netlink_callback *cb) { - struct nft_flowtable_filter *filter; + struct nft_flowtable_filter *filter = cb->data; - filter = kzalloc(sizeof(*filter), GFP_ATOMIC); if (!filter) - return ERR_PTR(-ENOMEM); + return 0; - if (nla[NFTA_FLOWTABLE_TABLE]) { - filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE], - GFP_ATOMIC); - if (!filter->table) { - kfree(filter); - return ERR_PTR(-ENOMEM); - } - } - return filter; + kfree(filter->table); + kfree(filter); + + return 0; } /* called with rcu_read_lock held */ @@ -5700,20 +5735,13 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk, if (nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { + .start = nf_tables_dump_flowtable_start, .dump = nf_tables_dump_flowtable, .done = nf_tables_dump_flowtable_done, .module = THIS_MODULE, + .data = (void *)nla, }; - if (nla[NFTA_FLOWTABLE_TABLE]) { - struct nft_flowtable_filter *filter; - - filter = nft_flowtable_filter_alloc(nla); - 
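[Note on the netlink dump refactor above] The nf_tables hunks above all apply the same fix: allocation of the dump context moves out of the get-handler and into the netlink_dump_control .start callback, so nothing is allocated until netlink core has committed to running the dump, and .done stays the single owner of the cleanup; the handler merely points c.data at the request attributes. A minimal sketch of the shape, with hypothetical nft_foo_dump_ctx / NFTA_FOO_TABLE names (kzalloc, nla_strdup and struct netlink_callback are the real kernel APIs):

struct nft_foo_dump_ctx {
	char *table;			/* optional table filter, owned by the dump */
};

static int nf_tables_dump_foo_start(struct netlink_callback *cb)
{
	const struct nlattr * const *nla = cb->data;	/* request attrs from c.data */
	struct nft_foo_dump_ctx *ctx = NULL;

	if (nla[NFTA_FOO_TABLE]) {
		/* GFP_ATOMIC: this runs via the rcu-protected dump start path */
		ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
		if (!ctx)
			return -ENOMEM;

		ctx->table = nla_strdup(nla[NFTA_FOO_TABLE], GFP_ATOMIC);
		if (!ctx->table) {
			kfree(ctx);
			return -ENOMEM;
		}
	}

	cb->data = ctx;		/* from here on, .dump and .done own the context */
	return 0;
}

If .start fails, its error is returned to the requester and the dump never runs, closing the window the old allocate-before-netlink_dump_start() code left open. Dumps whose context is a fixed-size struct use the same idea with kmemdup(), as in nf_tables_dump_sets_start() and nf_tables_dump_set_start() above.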
if (IS_ERR(filter)) - return -ENOMEM; - - c.data = filter; - } return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); } @@ -5783,6 +5811,7 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable) kfree(flowtable->name); flowtable->data.type->free(&flowtable->data); module_put(flowtable->data.type->owner); + kfree(flowtable); } static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net, @@ -5825,7 +5854,6 @@ static void nft_flowtable_event(unsigned long event, struct net_device *dev, continue; nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]); - flowtable->dev_name[i][0] = '\0'; flowtable->ops[i].dev = NULL; break; } @@ -6086,6 +6114,9 @@ static void nft_commit_release(struct nft_trans *trans) case NFT_MSG_DELTABLE: nf_tables_table_destroy(&trans->ctx); break; + case NFT_MSG_NEWCHAIN: + kfree(nft_trans_chain_name(trans)); + break; case NFT_MSG_DELCHAIN: nf_tables_chain_destroy(&trans->ctx); break; @@ -6315,13 +6346,15 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE); break; case NFT_MSG_NEWCHAIN: - if (nft_trans_chain_update(trans)) + if (nft_trans_chain_update(trans)) { nft_chain_commit_update(trans); - else + nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); + /* trans destroyed after rcu grace period */ + } else { nft_clear(net, trans->ctx.chain); - - nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); - nft_trans_destroy(trans); + nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); + nft_trans_destroy(trans); + } break; case NFT_MSG_DELCHAIN: nft_chain_del(trans->ctx.chain); @@ -6471,7 +6504,7 @@ static int __nf_tables_abort(struct net *net) case NFT_MSG_NEWCHAIN: if (nft_trans_chain_update(trans)) { free_percpu(nft_trans_chain_stats(trans)); - + kfree(nft_trans_chain_name(trans)); nft_trans_destroy(trans); } else { trans->ctx.table->use--; @@ -6837,13 +6870,6 @@ int nft_validate_register_store(const struct nft_ctx *ctx, err = nf_tables_check_loops(ctx, data->verdict.chain); if (err < 0) return err; - - if (ctx->chain->level + 1 > - data->verdict.chain->level) { - if (ctx->chain->level + 1 == NFT_JUMP_STACK_SIZE) - return -EMLINK; - data->verdict.chain->level = ctx->chain->level + 1; - } } return 0; diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index 15adf8ca82c3..0777a93211e2 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c @@ -98,6 +98,7 @@ static int nft_immediate_validate(const struct nft_ctx *ctx, const struct nft_data **d) { const struct nft_immediate_expr *priv = nft_expr_priv(expr); + struct nft_ctx *pctx = (struct nft_ctx *)ctx; const struct nft_data *data; int err; @@ -109,9 +110,11 @@ static int nft_immediate_validate(const struct nft_ctx *ctx, switch (data->verdict.code) { case NFT_JUMP: case NFT_GOTO: + pctx->level++; err = nft_chain_validate(ctx, data->verdict.chain); if (err < 0) return err; + pctx->level--; break; default: break; diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c index 42e6fadf1417..c2a1d84cdfc4 100644 --- a/net/netfilter/nft_lookup.c +++ b/net/netfilter/nft_lookup.c @@ -155,7 +155,9 @@ static int nft_lookup_validate_setelem(const struct nft_ctx *ctx, struct nft_set_elem *elem) { const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); + struct nft_ctx *pctx = (struct nft_ctx *)ctx; const struct nft_data *data; + int err; if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) && *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END) @@ -165,10 +167,17 @@ 
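[Note on the jump-depth validation, continued in the nft_immediate and nft_lookup hunks below] Instead of maintaining a per-chain ->level that had to be propagated on every rule update (the code removed from nft_validate_register_store above), the ruleset is now validated by walking it recursively and counting the jump depth as it goes. Distilled into one hypothetical helper (the real code lives in each expression's ->validate hook and casts the const off ctx):

static int validate_verdict(struct nft_ctx *ctx, const struct nft_data *data)
{
	int err;

	switch (data->verdict.code) {
	case NFT_JUMP:
	case NFT_GOTO:
		ctx->level++;		/* one more frame on the jump stack */
		err = nft_chain_validate(ctx, data->verdict.chain);
		if (err < 0)
			return err;	/* includes -EMLINK from the depth check */
		ctx->level--;
		break;
	default:
		break;
	}

	return 0;
}

nft_chain_validate() aborts with -EMLINK as soon as ctx->level reaches NFT_JUMP_STACK_SIZE, which bounds both the runtime jump stack and the validation recursion itself.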
static int nft_lookup_validate_setelem(const struct nft_ctx *ctx, switch (data->verdict.code) { case NFT_JUMP: case NFT_GOTO: - return nft_chain_validate(ctx, data->verdict.chain); + pctx->level++; + err = nft_chain_validate(ctx, data->verdict.chain); + if (err < 0) + return err; + pctx->level--; + break; default: - return 0; + break; } + + return 0; } static int nft_lookup_validate(const struct nft_ctx *ctx, diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 72ef35b51cac..90c3e7e6cacb 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -387,6 +387,7 @@ static void nft_rhash_destroy(const struct nft_set *set) struct nft_rhash *priv = nft_set_priv(set); cancel_delayed_work_sync(&priv->gc_work); + rcu_barrier(); rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy, (void *)set); } diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 1f8f257cb518..9873d734b494 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -381,7 +381,7 @@ static void nft_rbtree_gc(struct work_struct *work) gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC); if (!gcb) - goto out; + break; atomic_dec(&set->nelems); nft_set_gc_batch_add(gcb, rbe); @@ -390,10 +390,12 @@ static void nft_rbtree_gc(struct work_struct *work) rbe = rb_entry(prev, struct nft_rbtree_elem, node); atomic_dec(&set->nelems); nft_set_gc_batch_add(gcb, rbe); + prev = NULL; } node = rb_next(node); + if (!node) + break; } -out: if (gcb) { for (i = 0; i < gcb->head.cnt; i++) { rbe = gcb->elems[i]; @@ -440,6 +442,7 @@ static void nft_rbtree_destroy(const struct nft_set *set) struct rb_node *node; cancel_delayed_work_sync(&priv->gc_work); + rcu_barrier(); while ((node = priv->root.rb_node) != NULL) { rb_erase(node, &priv->root); rbe = rb_entry(node, struct nft_rbtree_elem, node); diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 4618f1c31137..1f3d9789af30 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -646,6 +646,9 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags, return NULL; } + if (sk->sk_shutdown & RCV_SHUTDOWN) + return NULL; + if (sock_flag(sk, SOCK_DONE)) return NULL; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 4eece06be1e7..80bc986c79e5 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -4409,6 +4409,7 @@ static int parse_station_flags(struct genl_info *info, params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) | BIT(NL80211_STA_FLAG_MFP) | BIT(NL80211_STA_FLAG_AUTHORIZED); + break; default: return -EINVAL; } @@ -14923,20 +14924,24 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, EXPORT_SYMBOL(cfg80211_mgmt_tx_status); static int __nl80211_rx_control_port(struct net_device *dev, - const u8 *buf, size_t len, - const u8 *addr, u16 proto, + struct sk_buff *skb, bool unencrypted, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); + struct ethhdr *ehdr = eth_hdr(skb); + const u8 *addr = ehdr->h_source; + u16 proto = be16_to_cpu(skb->protocol); struct sk_buff *msg; void *hdr; + struct nlattr *frame; + u32 nlportid = READ_ONCE(wdev->conn_owner_nlportid); if (!nlportid) return -ENOENT; - msg = nlmsg_new(100 + len, gfp); + msg = nlmsg_new(100 + skb->len, gfp); if (!msg) return -ENOMEM; @@ -14950,13 +14955,17 @@ static int __nl80211_rx_control_port(struct net_device *dev, nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put_u64_64bit(msg, 
NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD) || - nla_put(msg, NL80211_ATTR_FRAME, len, buf) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) || nla_put_u16(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE, proto) || (unencrypted && nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT))) goto nla_put_failure; + frame = nla_reserve(msg, NL80211_ATTR_FRAME, skb->len); + if (!frame) + goto nla_put_failure; + + skb_copy_bits(skb, 0, nla_data(frame), skb->len); genlmsg_end(msg, hdr); return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); @@ -14967,14 +14976,12 @@ static int __nl80211_rx_control_port(struct net_device *dev, } bool cfg80211_rx_control_port(struct net_device *dev, - const u8 *buf, size_t len, - const u8 *addr, u16 proto, bool unencrypted) + struct sk_buff *skb, bool unencrypted) { int ret; - trace_cfg80211_rx_control_port(dev, buf, len, addr, proto, unencrypted); - ret = __nl80211_rx_control_port(dev, buf, len, addr, proto, - unencrypted, GFP_ATOMIC); + trace_cfg80211_rx_control_port(dev, skb, unencrypted); + ret = __nl80211_rx_control_port(dev, skb, unencrypted, GFP_ATOMIC); trace_cfg80211_return_bool(ret == 0); return ret == 0; } diff --git a/net/wireless/reg.c b/net/wireless/reg.c index bbe6298e4bb9..4fc66a117b7d 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -2240,7 +2240,9 @@ static void wiphy_update_regulatory(struct wiphy *wiphy, * as some drivers used this to restore its orig_* reg domain. */ if (initiator == NL80211_REGDOM_SET_BY_CORE && - wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) + wiphy->regulatory_flags & REGULATORY_CUSTOM_REG && + !(wiphy->regulatory_flags & + REGULATORY_WIPHY_SELF_MANAGED)) reg_call_notifier(wiphy, lr); return; } @@ -2787,26 +2789,6 @@ static void notify_self_managed_wiphys(struct regulatory_request *request) } } -static bool reg_only_self_managed_wiphys(void) -{ - struct cfg80211_registered_device *rdev; - struct wiphy *wiphy; - bool self_managed_found = false; - - ASSERT_RTNL(); - - list_for_each_entry(rdev, &cfg80211_rdev_list, list) { - wiphy = &rdev->wiphy; - if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) - self_managed_found = true; - else - return false; - } - - /* make sure at least one self-managed wiphy exists */ - return self_managed_found; -} - /* * Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_* * Regulatory hints come on a first come first serve basis and we @@ -2839,10 +2821,6 @@ static void reg_process_pending_hints(void) spin_unlock(®_requests_lock); notify_self_managed_wiphys(reg_request); - if (reg_only_self_managed_wiphys()) { - reg_free_request(reg_request); - return; - } reg_process_hint(reg_request); diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 2b417a2fe63f..7c73510b161f 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -2627,23 +2627,25 @@ TRACE_EVENT(cfg80211_mgmt_tx_status, ); TRACE_EVENT(cfg80211_rx_control_port, - TP_PROTO(struct net_device *netdev, const u8 *buf, size_t len, - const u8 *addr, u16 proto, bool unencrypted), - TP_ARGS(netdev, buf, len, addr, proto, unencrypted), + TP_PROTO(struct net_device *netdev, struct sk_buff *skb, + bool unencrypted), + TP_ARGS(netdev, skb, unencrypted), TP_STRUCT__entry( NETDEV_ENTRY - MAC_ENTRY(addr) + __field(int, len) + MAC_ENTRY(from) __field(u16, proto) __field(bool, unencrypted) ), TP_fast_assign( NETDEV_ASSIGN; - MAC_ASSIGN(addr, addr); - __entry->proto = proto; + __entry->len = skb->len; + MAC_ASSIGN(from, eth_hdr(skb)->h_source); + __entry->proto = be16_to_cpu(skb->protocol); 
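[Note on the control-port rework above] Passing the skb instead of buf/len/addr/proto lets __nl80211_rx_control_port() recover the source address and ethertype from eth_hdr(skb) and skb->protocol, and serialize the payload with nla_reserve() plus skb_copy_bits(), which, unlike nla_put() of a raw pointer, also handles non-linear skbs. A hypothetical driver-side call under the new signature; the ownership assumption (cfg80211 copies the payload, the caller still frees the skb) follows from the copy in the hunk above but is not spelled out there:

	/* deliver a port-control (e.g. EAPOL) frame to the owning socket;
	 * eth_hdr(skb) and skb->protocol must already be valid, since
	 * cfg80211_rx_control_port() now derives addr/proto from the skb */
	bool queued = cfg80211_rx_control_port(dev, skb, unencrypted);

	dev_kfree_skb(skb);	/* payload was copied into the netlink msg */
	if (!queued)
		dev->stats.rx_dropped++;	/* no conn_owner listening */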
__entry->unencrypted = unencrypted; ), - TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT " proto: 0x%x, unencrypted: %s", - NETDEV_PR_ARG, MAC_PR_ARG(addr), + TP_printk(NETDEV_PR_FMT ", len=%d, " MAC_PR_FMT ", proto: 0x%x, unencrypted: %s", + NETDEV_PR_ARG, __entry->len, MAC_PR_ARG(from), __entry->proto, BOOL_TO_STR(__entry->unencrypted)) ); diff --git a/tools/arch/powerpc/include/uapi/asm/unistd.h b/tools/arch/powerpc/include/uapi/asm/unistd.h index ac5ba55066dd..985534d0b448 100644 --- a/tools/arch/powerpc/include/uapi/asm/unistd.h +++ b/tools/arch/powerpc/include/uapi/asm/unistd.h @@ -399,5 +399,6 @@ #define __NR_pkey_free 385 #define __NR_pkey_mprotect 386 #define __NR_rseq 387 +#define __NR_io_pgetevents 388 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ diff --git a/tools/arch/x86/include/asm/mcsafe_test.h b/tools/arch/x86/include/asm/mcsafe_test.h new file mode 100644 index 000000000000..2ccd588fbad4 --- /dev/null +++ b/tools/arch/x86/include/asm/mcsafe_test.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _MCSAFE_TEST_H_ +#define _MCSAFE_TEST_H_ + +.macro MCSAFE_TEST_CTL +.endm + +.macro MCSAFE_TEST_SRC reg count target +.endm + +.macro MCSAFE_TEST_DST reg count target +.endm +#endif /* _MCSAFE_TEST_H_ */ diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S index 9a53a06e5a3e..298ef1479240 100644 --- a/tools/arch/x86/lib/memcpy_64.S +++ b/tools/arch/x86/lib/memcpy_64.S @@ -3,6 +3,7 @@ #include <linux/linkage.h> #include <asm/errno.h> #include <asm/cpufeatures.h> +#include <asm/mcsafe_test.h> #include <asm/alternative-asm.h> #include <asm/export.h> @@ -183,12 +184,15 @@ ENTRY(memcpy_orig) ENDPROC(memcpy_orig) #ifndef CONFIG_UML + +MCSAFE_TEST_CTL + /* - * memcpy_mcsafe_unrolled - memory copy with machine check exception handling + * __memcpy_mcsafe - memory copy with machine check exception handling * Note that we only catch machine checks when reading the source addresses. * Writes to target are posted and don't generate machine checks. */ -ENTRY(memcpy_mcsafe_unrolled) +ENTRY(__memcpy_mcsafe) cmpl $8, %edx /* Less than 8 bytes? Go to byte copy loop */ jb .L_no_whole_words @@ -204,58 +208,33 @@ ENTRY(memcpy_mcsafe_unrolled) subl $8, %ecx negl %ecx subl %ecx, %edx -.L_copy_leading_bytes: +.L_read_leading_bytes: movb (%rsi), %al + MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes + MCSAFE_TEST_DST %rdi 1 .E_leading_bytes +.L_write_leading_bytes: movb %al, (%rdi) incq %rsi incq %rdi decl %ecx - jnz .L_copy_leading_bytes + jnz .L_read_leading_bytes .L_8byte_aligned: - /* Figure out how many whole cache lines (64-bytes) to copy */ - movl %edx, %ecx - andl $63, %edx - shrl $6, %ecx - jz .L_no_whole_cache_lines - - /* Loop copying whole cache lines */ -.L_cache_w0: movq (%rsi), %r8 -.L_cache_w1: movq 1*8(%rsi), %r9 -.L_cache_w2: movq 2*8(%rsi), %r10 -.L_cache_w3: movq 3*8(%rsi), %r11 - movq %r8, (%rdi) - movq %r9, 1*8(%rdi) - movq %r10, 2*8(%rdi) - movq %r11, 3*8(%rdi) -.L_cache_w4: movq 4*8(%rsi), %r8 -.L_cache_w5: movq 5*8(%rsi), %r9 -.L_cache_w6: movq 6*8(%rsi), %r10 -.L_cache_w7: movq 7*8(%rsi), %r11 - movq %r8, 4*8(%rdi) - movq %r9, 5*8(%rdi) - movq %r10, 6*8(%rdi) - movq %r11, 7*8(%rdi) - leaq 64(%rsi), %rsi - leaq 64(%rdi), %rdi - decl %ecx - jnz .L_cache_w0 - - /* Are there any trailing 8-byte words? 
*/ -.L_no_whole_cache_lines: movl %edx, %ecx andl $7, %edx shrl $3, %ecx jz .L_no_whole_words - /* Copy trailing words */ -.L_copy_trailing_words: +.L_read_words: movq (%rsi), %r8 - mov %r8, (%rdi) - leaq 8(%rsi), %rsi - leaq 8(%rdi), %rdi + MCSAFE_TEST_SRC %rsi 8 .E_read_words + MCSAFE_TEST_DST %rdi 8 .E_write_words +.L_write_words: + movq %r8, (%rdi) + addq $8, %rsi + addq $8, %rdi decl %ecx - jnz .L_copy_trailing_words + jnz .L_read_words /* Any trailing bytes? */ .L_no_whole_words: @@ -264,38 +243,55 @@ ENTRY(memcpy_mcsafe_unrolled) /* Copy trailing bytes */ movl %edx, %ecx -.L_copy_trailing_bytes: +.L_read_trailing_bytes: movb (%rsi), %al + MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes + MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes +.L_write_trailing_bytes: movb %al, (%rdi) incq %rsi incq %rdi decl %ecx - jnz .L_copy_trailing_bytes + jnz .L_read_trailing_bytes /* Copy successful. Return zero */ .L_done_memcpy_trap: xorq %rax, %rax ret -ENDPROC(memcpy_mcsafe_unrolled) -EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled) +ENDPROC(__memcpy_mcsafe) +EXPORT_SYMBOL_GPL(__memcpy_mcsafe) .section .fixup, "ax" - /* Return -EFAULT for any failure */ -.L_memcpy_mcsafe_fail: - mov $-EFAULT, %rax + /* + * Return number of bytes not copied for any failure. Note that + * there is no "tail" handling since the source buffer is 8-byte + * aligned and poison is cacheline aligned. + */ +.E_read_words: + shll $3, %ecx +.E_leading_bytes: + addl %edx, %ecx +.E_trailing_bytes: + mov %ecx, %eax ret + /* + * For write fault handling, given the destination is unaligned, + * we handle faults on multi-byte writes with a byte-by-byte + * copy up to the write-protected page. + */ +.E_write_words: + shll $3, %ecx + addl %edx, %ecx + movl %ecx, %edx + jmp mcsafe_handle_tail + .previous - _ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail) - _ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail) - _ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail) - _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail) - _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail) - _ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail) - _ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail) - _ASM_EXTABLE_FAULT(.L_cache_w6, .L_memcpy_mcsafe_fail) - _ASM_EXTABLE_FAULT(.L_cache_w7, .L_memcpy_mcsafe_fail) - _ASM_EXTABLE_FAULT(.L_copy_trailing_words, .L_memcpy_mcsafe_fail) - _ASM_EXTABLE_FAULT(.L_copy_trailing_bytes, .L_memcpy_mcsafe_fail) + _ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes) + _ASM_EXTABLE_FAULT(.L_read_words, .E_read_words) + _ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes) + _ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes) + _ASM_EXTABLE(.L_write_words, .E_write_words) + _ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes) #endif diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 32f9e397a6c0..3f140eff039f 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -217,6 +217,14 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32)) int err; int fd; + if (argc < 3) { + p_err("too few arguments, id ID and FILE path is required"); + return -1; + } else if (argc > 3) { + p_err("too many arguments"); + return -1; + } + if (!is_prefix(*argv, "id")) { p_err("expected 'id' got %s", *argv); return -1; @@ -230,9 +238,6 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32)) } NEXT_ARG(); - if (argc != 1) - usage(); - fd = get_fd_by_id(id); if (fd < 0) { p_err("can't get prog by id (%u): %s", id, strerror(errno)); diff --git 
a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 59b19b6a40d7..b7db3261c62d 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1857,7 +1857,8 @@ union bpf_attr { * is resolved), the nexthop address is returned in ipv4_dst * or ipv6_dst based on family, smac is set to mac address of * egress device, dmac is set to nexthop mac address, rt_metric - * is set to metric from route (IPv4/IPv6 only). + * is set to metric from route (IPv4/IPv6 only), and ifindex + * is set to the device index of the nexthop from the FIB lookup. * * *plen* argument is the size of the passed in struct. * *flags* argument can be a combination of one or more of the @@ -1873,9 +1874,10 @@ union bpf_attr { * *ctx* is either **struct xdp_md** for XDP programs or * **struct sk_buff** tc cls_act programs. * Return - * Egress device index on success, 0 if packet needs to continue - * up the stack for further processing or a negative error in case - * of failure. + * * < 0 if any input argument is invalid + * * 0 on success (packet is forwarded, nexthop neighbor exists) + * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the + * * packet is not forwarded or needs assist from full stack * * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags) * Description @@ -2612,6 +2614,18 @@ struct bpf_raw_tracepoint_args { #define BPF_FIB_LOOKUP_DIRECT BIT(0) #define BPF_FIB_LOOKUP_OUTPUT BIT(1) +enum { + BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ + BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ + BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ + BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ + BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ + BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ + BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ + BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ + BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ +}; + struct bpf_fib_lookup { /* input: network family for lookup (AF_INET, AF_INET6) * output: network family of egress nexthop @@ -2625,7 +2639,11 @@ struct bpf_fib_lookup { /* total length of packet from network header - used for MTU check */ __u16 tot_len; - __u32 ifindex; /* L3 device index for lookup */ + + /* input: L3 device index for lookup + * output: device index from FIB lookup + */ + __u32 ifindex; union { /* inputs to lookup */ diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index b8e288a1f740..eeb787b1c53c 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -143,6 +143,8 @@ enum perf_event_sample_format { PERF_SAMPLE_PHYS_ADDR = 1U << 19, PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */ + + __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, }; /* diff --git a/tools/perf/arch/x86/util/pmu.c b/tools/perf/arch/x86/util/pmu.c index 63a74c32ddc5..e33ef5bc31c5 100644 --- a/tools/perf/arch/x86/util/pmu.c +++ b/tools/perf/arch/x86/util/pmu.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <string.h> +#include <linux/stddef.h> #include <linux/perf_event.h> #include "../../util/intel-pt.h" diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c index 06bae7023a51..950539f9a4f7 100644 --- a/tools/perf/arch/x86/util/tsc.c +++ b/tools/perf/arch/x86/util/tsc.c @@ -2,6 +2,7 @@ #include <stdbool.h> #include <errno.h> +#include <linux/stddef.h> 
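[Note on the bpf.h contract change above] bpf_fib_lookup() no longer overloads its return value as an egress ifindex: the device index now comes back in params.ifindex, and the return value is one of the new BPF_FIB_LKUP_RET_* codes (negative only for invalid arguments). A sketch of an XDP caller under the new contract; the program, SEC() macro and helper declarations are assumed to come from the usual selftests bpf_helpers.h shim and are not part of this diff:

#include <linux/bpf.h>
#include "bpf_helpers.h"	/* SEC(), bpf_fib_lookup(), bpf_redirect() */

SEC("xdp_fwd")
int xdp_fwd_prog(struct xdp_md *ctx)
{
	struct bpf_fib_lookup params = {};
	int rc;

	/* ... parse the L2/L3 headers and fill params: family, src/dst
	 *     addresses, tot_len, and ifindex = ingress device ... */

	rc = bpf_fib_lookup(ctx, &params, sizeof(params), 0);
	if (rc == BPF_FIB_LKUP_RET_SUCCESS)
		return bpf_redirect(params.ifindex, 0);	/* ifindex is an output here */
	if (rc == BPF_FIB_LKUP_RET_BLACKHOLE ||
	    rc == BPF_FIB_LKUP_RET_UNREACHABLE ||
	    rc == BPF_FIB_LKUP_RET_PROHIBIT)
		return XDP_DROP;	/* never forwardable: safe to drop */

	return XDP_PASS;	/* rc < 0, NOT_FWDED, NO_NEIGH, ...: let the stack decide */
}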
#include <linux/perf_event.h> #include "../../perf.h" diff --git a/tools/perf/bench/Build b/tools/perf/bench/Build index 60bf11943047..eafce1a130a1 100644 --- a/tools/perf/bench/Build +++ b/tools/perf/bench/Build @@ -7,6 +7,7 @@ perf-y += futex-wake-parallel.o perf-y += futex-requeue.o perf-y += futex-lock-pi.o +perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-lib.o perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o perf-$(CONFIG_X86_64) += mem-memset-x86-64-asm.o diff --git a/tools/perf/bench/mem-memcpy-x86-64-asm.S b/tools/perf/bench/mem-memcpy-x86-64-asm.S index b43f8d2a34ec..9ad015a1e202 100644 --- a/tools/perf/bench/mem-memcpy-x86-64-asm.S +++ b/tools/perf/bench/mem-memcpy-x86-64-asm.S @@ -6,6 +6,7 @@ #define altinstr_replacement text #define globl p2align 4; .globl #define _ASM_EXTABLE_FAULT(x, y) +#define _ASM_EXTABLE(x, y) #include "../../arch/x86/lib/memcpy_64.S" /* diff --git a/tools/perf/bench/mem-memcpy-x86-64-lib.c b/tools/perf/bench/mem-memcpy-x86-64-lib.c new file mode 100644 index 000000000000..4130734dde84 --- /dev/null +++ b/tools/perf/bench/mem-memcpy-x86-64-lib.c @@ -0,0 +1,24 @@ +/* + * From code in arch/x86/lib/usercopy_64.c, copied to keep tools/ copy + * of the kernel's arch/x86/lib/memcpy_64.s used in 'perf bench mem memcpy' + * happy. + */ +#include <linux/types.h> + +unsigned long __memcpy_mcsafe(void *dst, const void *src, size_t cnt); +unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len); + +unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len) +{ + for (; len; --len, to++, from++) { + /* + * Call the assembly routine back directly since + * memcpy_mcsafe() may silently fallback to memcpy. + */ + unsigned long rem = __memcpy_mcsafe(to, from, 1); + + if (rem) + break; + } + return len; +} diff --git a/tools/perf/perf.h b/tools/perf/perf.h index a1a97956136f..d215714f48df 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -5,6 +5,7 @@ #include <time.h> #include <stdbool.h> #include <linux/types.h> +#include <linux/stddef.h> #include <linux/perf_event.h> extern bool test_attr__enabled; diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 90d4577a92dc..6d7fe44aadc0 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -2,6 +2,7 @@ #ifndef __PERF_HEADER_H #define __PERF_HEADER_H +#include <linux/stddef.h> #include <linux/perf_event.h> #include <sys/types.h> #include <stdbool.h> diff --git a/tools/perf/util/namespaces.h b/tools/perf/util/namespaces.h index 760558dcfd18..cae1a9a39722 100644 --- a/tools/perf/util/namespaces.h +++ b/tools/perf/util/namespaces.h @@ -10,6 +10,7 @@ #define __PERF_NAMESPACES_H #include <sys/types.h> +#include <linux/stddef.h> #include <linux/perf_event.h> #include <linux/refcount.h> #include <linux/types.h> diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 7a6214e9ae58..a362e3d7abc6 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -105,7 +105,7 @@ $(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris) BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF) -BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --version 2>&1 | grep LLVM) +BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm') ifneq ($(BTF_LLC_PROBE),) ifneq ($(BTF_PAHOLE_PROBE),) diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index f5f7bcc96046..41106d9d5cc7 
100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -12005,6 +12005,46 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_XDP, }, { + "xadd/w check whether src/dst got mangled, 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3), + BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .retval = 3, + }, + { + "xadd/w check whether src/dst got mangled, 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8), + BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8), + BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8), + BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3), + BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .retval = 3, + }, + { "bpf_get_stack return R0 within range", .insns = { BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), diff --git a/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc new file mode 100644 index 000000000000..3b1f45e13a2e --- /dev/null +++ b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc @@ -0,0 +1,28 @@ +#!/bin/sh +# description: Snapshot and tracing setting +# flags: instance + +[ ! -f snapshot ] && exit_unsupported + +echo "Set tracing off" +echo 0 > tracing_on + +echo "Allocate and take a snapshot" +echo 1 > snapshot + +# Since trace buffer is empty, snapshot is also empty, but allocated +grep -q "Snapshot is allocated" snapshot + +echo "Ensure keep tracing off" +test `cat tracing_on` -eq 0 + +echo "Set tracing on" +echo 1 > tracing_on + +echo "Take a snapshot again" +echo 1 > snapshot + +echo "Ensure keep tracing on" +test `cat tracing_on` -eq 1 + +exit 0 diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c index 95dd14648ba5..0f395dfb7774 100644 --- a/tools/usb/ffs-test.c +++ b/tools/usb/ffs-test.c @@ -44,12 +44,25 @@ /******************** Little Endian Handling ********************************/ -#define cpu_to_le16(x) htole16(x) -#define cpu_to_le32(x) htole32(x) +/* + * cpu_to_le16/32 are used when initializing structures, a context where a + * function call is not allowed. To solve this, we code cpu_to_le16/32 in a way + * that allows them to be used when initializing structures. + */ + +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define cpu_to_le16(x) (x) +#define cpu_to_le32(x) (x) +#else +#define cpu_to_le16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)) +#define cpu_to_le32(x) \ + ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) | \ + (((x) & 0x0000ff00u) << 8) | (((x) & 0x000000ffu) << 24)) +#endif + #define le32_to_cpu(x) le32toh(x) #define le16_to_cpu(x) le16toh(x) - /******************** Messages and Errors ***********************************/ static const char argv0[] = "ffs-test"; |
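[Note on the ffs-test endian macros above] The rewritten cpu_to_le16/32 are pure integer expressions, so they are valid exactly where the old htole16()/htole32() calls were not: constant initializers. For instance (a hypothetical descriptor, not code from this tree):

#include <stdint.h>

static const struct {
	uint16_t length;	/* stored little-endian */
	uint32_t magic;		/* stored little-endian */
} desc = {
	.length = cpu_to_le16(64),		/* folds at compile time */
	.magic  = cpu_to_le32(0x12345678),
};

Note the deliberate asymmetry: only the cpu_to_le* direction is needed at initializer scope, so le16_to_cpu()/le32_to_cpu() keep delegating to the libc le*toh() macros.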
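[Note on the memcpy_64.S rework further above] The machine-check-safe copy changes from an all-or-nothing -EFAULT into a short-copy contract: __memcpy_mcsafe() now returns the number of bytes left uncopied (zero on success), with source faults accounted from the remaining word and byte counts, and destination write faults retried byte-by-byte through mcsafe_handle_tail() up to the faulting page. A sketch of a caller under the new contract; copy_to_pmem_buf() is a hypothetical wrapper, not a kernel function:

static size_t copy_to_pmem_buf(void *dst, const void *src, size_t len)
{
	unsigned long rem = __memcpy_mcsafe(dst, src, len);

	/*
	 * rem == 0 means a full copy. Otherwise the leading len - rem
	 * bytes are valid in dst and the copy stopped at the poisoned
	 * (or write-faulting) region; report the short count instead
	 * of a bare -EFAULT so callers can consume the good prefix.
	 */
	return len - rem;
}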